| id | content |
|---|---|
1640324
|
from kolibri.core.errors import KolibriError
class InvalidStorageFilenameError(KolibriError):
pass
class InsufficientStorageSpaceError(KolibriError):
pass
|
1640351
|
from datetime import datetime, timedelta
from typing import Optional, Union
from passlib.context import CryptContext
from fastapi import Depends, HTTPException
from fastapi.security import OAuth2PasswordBearer
import jwt
from jwt.exceptions import InvalidSignatureError
from sqlalchemy.orm import Session
from starlette.requests import Request
from starlette.responses import RedirectResponse
from starlette.status import HTTP_401_UNAUTHORIZED
from . import schema
from app.config import JWT_ALGORITHM, JWT_KEY, JWT_MIN_EXP
from app.database.models import User
pwd_context = CryptContext(schemes=["bcrypt"])
oauth_schema = OAuth2PasswordBearer(tokenUrl="/login")
def get_hashed_password(password: str) -> str:
    """Hash the user's password."""
return pwd_context.hash(password)
def verify_password(plain_password: str, hashed_password: str) -> bool:
"""Verifying password and hashed password are equal"""
return pwd_context.verify(plain_password, hashed_password)
async def authenticate_user(
db: Session,
new_user: schema.LoginUser,
) -> Union[schema.LoginUser, bool]:
"""Verifying user is in database and password is correct"""
db_user = await User.get_by_username(db=db, username=new_user.username)
if db_user and verify_password(new_user.password, db_user.password):
return schema.LoginUser(
user_id=db_user.id,
is_manager=db_user.is_manager,
username=new_user.username,
            password=new_user.password,
)
return False
def create_jwt_token(
user: schema.LoginUser,
jwt_min_exp: int = JWT_MIN_EXP,
jwt_key: str = JWT_KEY,
) -> str:
"""Creating jwt-token out of user unique data"""
expiration = datetime.utcnow() + timedelta(minutes=jwt_min_exp)
jwt_payload = {
"sub": user.username,
"user_id": user.user_id,
"is_manager": user.is_manager,
"exp": expiration,
}
jwt_token = jwt.encode(jwt_payload, jwt_key, algorithm=JWT_ALGORITHM)
return jwt_token
async def get_jwt_token(
        db: Session,
        token: str = Depends(oauth_schema),
        path: Optional[str] = None) -> dict:
"""
Check whether JWT token is correct.
Returns jwt payloads if correct.
Raises HTTPException if fails to decode.
"""
try:
        jwt_payload = jwt.decode(
            token, JWT_KEY, algorithms=[JWT_ALGORITHM])
except InvalidSignatureError:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
headers=path,
detail="Your token is incorrect. Please log in again",
)
except jwt.ExpiredSignatureError:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
headers=path,
detail="Your token has expired. Please log in again",
)
except jwt.DecodeError:
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
headers=path,
detail="Your token is incorrect. Please log in again")
return jwt_payload
async def get_authorization_cookie(request: Request) -> str:
"""
Extracts jwt from HTTPONLY cookie, if exists.
Raises HTTPException if not.
"""
if "Authorization" in request.cookies:
return request.cookies["Authorization"]
raise HTTPException(
status_code=HTTP_401_UNAUTHORIZED,
headers=request.url.path,
detail="Please log in to enter this page",
)
async def auth_exception_handler(
request: Request,
    exc: HTTPException,
) -> RedirectResponse:
"""
Whenever HTTP_401_UNAUTHORIZED is raised,
redirecting to login route, with original requested url,
and details for why original request failed.
"""
paramas = f"?next={exc.headers}&message={exc.detail}"
url = f"/login{paramas}"
response = RedirectResponse(url=url)
response.delete_cookie("Authorization")
return response
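
# A hedged sketch of a login route wiring the helpers above together; the
# route path, the get_db dependency, and the 302 status code are assumptions,
# not part of this module.
#
# @app.post("/login")
# async def login(new_user: schema.LoginUser, db: Session = Depends(get_db)):
#     user = await authenticate_user(db, new_user)
#     if not user:
#         raise HTTPException(
#             status_code=HTTP_401_UNAUTHORIZED, detail="Bad credentials")
#     response = RedirectResponse(url="/", status_code=302)
#     # HTTPOnly cookie so get_authorization_cookie can read the token back
#     response.set_cookie("Authorization", create_jwt_token(user), httponly=True)
#     return response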
|
1640362
|
import torchsample.transforms as ts
from medseg.dataset_loader._utils.affine_transform import MyRandomFlip, MySpecialCrop, MyPad, MyRandomChoiceRotate
from medseg.dataset_loader._utils.intensity_transform import RandomGamma, MyNormalizeMedicPercentile, MyRandomPurtarbation, MyRandomPurtarbationV2, RandomBrightnessFluctuation
from medseg.dataset_loader._utils.elastic_transform import MyElasticTransform, MyElasticTransformCoarseGrid
class Transformations:
def __init__(self, data_aug_policy_name, pad_size=(80, 80, 1), crop_size=(80, 80, 1)):
self.name = data_aug_policy_name
self.pad_size = pad_size
self.crop_size = crop_size
def get_transformation(self):
        # replicate experiment settings (traditional methods: affine, elastic, gamma) from 'task-driven data
        # augmentation' https://arxiv.org/pdf/1902.05396.pdf
aug_config = {
'no_aug': self.no_aug,
'gamma': self.gamma_aug,
'gamma_scale': self.gamma_scale,
'affine': self.affine_aug,
'scale': self.scale_aug,
'elastic': self.elastic_aug,
'elastic_scale': self.elastic_scale,
'gamma_elastic': self.gamma_elastic_aug,
'affine_elastic': self.affine_elastic_aug,
            'affine_gamma': self.affine_gamma_aug,
'affine_gamma_elastic': self.affine_gamma_elastic_aug,
'ACDC_affine': self.ACDC_affine_aug,
'ACDC_affine_perturb': self.ACDC_affine_perturb_aug,
'ACDC_affine_perturb_v2': self.ACDC_affine_perturb_v2,
'ACDC_affine_elastic': self.ACDC_affine_elastic_aug,
'ACDC_affine_intensity': self.ACDC_affine_intensity_aug,
'ACDC_affine_elastic_intensity': self.ACDC_affine_elastic_intensity_aug,
'ACDC_affine_elastic_intensity_v2': self.ACDC_affine_elastic_intensity_aug_v2,
'ACDC_affine_elastic_bias': self.ACDC_affine_bias_elastic,
'ACDC_affine_all': self.ACDC_affine_bias_elastic_intensity,
'Atrial_basic': self.Atrial_basic,
'Atrial_perturb': self.Atrial_Perturb,
'Prostate_affine_elastic_intensity': self.Prostate_affine_elastic_intensity_aug,
'elastic_v2': self.elasticv2_aug
}[self.name]()
return self.get_transform(aug_config)
def get_transform(self, config):
train_transform = ts.Compose([ts.PadNumpy(size=self.pad_size),
ts.ToTensor(),
ts.ChannelsFirst(),
ts.TypeCast(['float', 'float']),
# geometric transformation
MyRandomFlip(h=config['flip_flag'][0], v=config['flip_flag'][1],
p=config['flip_flag'][2]),
# intensity transformation
MyRandomPurtarbation(p=config['perturb_prob'], max_sigma=config['max_sigma'],
flag=[True, False],
multi_control_points=config['multi_control_points'],
add_noise=config['add_noise'],
epsilon=config['noise_epsilon']),
MyRandomPurtarbationV2(p=config['perturb_v2_prob'], magnitude=config['perturb_v2_bias_magnitude'],
flag=[True, False],
ms_control_point_spacing=config['ms_control_point_spacing'],
add_noise=config['perturb_v2_add_noise'],
epsilon=config['perturb_v2_noise_epsilon'], debug=False),
RandomBrightnessFluctuation(p=config['intensity_prob'], flag=[True, False]),
# geometric transformation
ts.RandomAffine(rotation_range=config['rotate_val'],
translation_range=config['shift_val'],
shear_range=config['shear_val'],
zoom_range=config['scale_val'], interp=('bilinear', 'nearest')),
MyRandomChoiceRotate(values=config['rotate_groups'],
interp=('bilinear', 'nearest')),
MyElasticTransform(is_labelmap=[False, True], p_thresh=config['elastic_prob']),
MyElasticTransformCoarseGrid(
is_labelmap=[False, True], p_thresh=config['elastic_probv2']),
# normalization
MySpecialCrop(size=self.crop_size, crop_type=0),
MyNormalizeMedicPercentile(norm_flag=(True, False), min_val=0.0, max_val=1.0,
perc_threshold=(0.0, 100.0)),
ts.TypeCast(['float', 'long'])
])
valid_transform = ts.Compose([ts.PadNumpy(size=self.pad_size),
ts.ToTensor(),
ts.ChannelsFirst(),
ts.TypeCast(['float', 'float']),
MySpecialCrop(size=self.crop_size, crop_type=0),
MyNormalizeMedicPercentile(norm_flag=(True, False), min_val=0.0, max_val=1.0,
perc_threshold=(0.0, 100.0)),
ts.TypeCast(['float', 'long'])
])
aug_valid_transform = train_transform
# test_transform only support image as input
test_transform = ts.Compose([ts.PadNumpy(size=self.pad_size),
ts.ToTensor(),
ts.ChannelsFirst(),
ts.TypeCast(['float']),
MySpecialCrop(size=self.crop_size, crop_type=0),
                                     MyNormalizeMedicPercentile(norm_flag=(True,), min_val=0.0, max_val=1.0,
perc_threshold=(0.0, 100.0)),
ts.TypeCast(['float'])
])
return {'train': train_transform, 'validate': valid_transform, 'test': test_transform,
'aug_validate': aug_valid_transform}
def no_aug(self):
config = {
# affine augmentation
'flip_flag': [False, False, 0.0],
'shift_val': (0., 0.),
'rotate_val': 0,
'scale_val': (1., 1.),
'rotate_groups': [],
            # contrast aug
'intensity_prob': 0, # contrast and brightness
'gamma_prob': 0.,
'gamma_range': [0.8, 1.2],
# deformation aug
'elastic_prob': 0.,
'shear_val': 0,
'elastic_probv2': 0,
# perturbation v1
'perturb_prob': 0.,
'max_sigma': 16,
'multi_control_points': [4],
'add_noise': False,
'noise_epsilon': 0.01,
# perturbation v2:
'perturb_v2_prob': 0.,
'perturb_v2_bias_magnitude': 0.2,
'ms_control_point_spacing': [32],
'perturb_v2_add_noise': False,
'perturb_v2_noise_epsilon': 0.01
}
return config
def scale_aug(self):
config = self.no_aug()
config['scale_val'] = (0.8, 1.2)
return config
def affine_aug(self):
config = self.no_aug()
# config['flip_flag'] = [False, True, 0.5]
config['shift_val'] = (0.1, 0.1)
config['rotate_val'] = 15
config['scale_val'] = (0.9, 1.1)
return config
def Atrial_basic(self):
config = self.no_aug()
config['flip_flag'] = [True, True, 0.5]
config['shift_val'] = (0.1, 0.1)
config['rotate_val'] = 10
config['scale_val'] = (0.7, 1.3)
config['gamma_range'] = (0.8, 2.0)
config['gamma_prob'] = 0.5
return config
def Prostate_affine_elastic_intensity_aug(self):
config = self.no_aug()
config['flip_flag'] = [True, True, 0.5]
config['shift_val'] = (0.1, 0.1)
config['rotate_val'] = 15
config['scale_val'] = (0.8, 1.2)
config['intensity_prob'] = 0.5
config['elastic_prob'] = 0.5
return config
def Atrial_Perturb(self):
config = self.no_aug()
config['flip_flag'] = [True, True, 0.5]
config['shift_val'] = (0.1, 0.1)
config['rotate_val'] = 10
config['scale_val'] = (0.7, 1.3)
config['gamma_range'] = (0.8, 2.0)
config['gamma_prob'] = 0.5
config['perturb_prob'] = 0.5
config['max_sigma'] = 16
config['multi_control_points'] = [2, 4, 8]
return config
def ACDC_affine_aug(self):
config = self.no_aug()
config['flip_flag'] = [True, True, 0.2]
config['shift_val'] = (0.1, 0.1)
config['rotate_val'] = 15
config['scale_val'] = (0.8, 1.1)
config['rotate_groups'] = [45 * i for i in range(8)]
return config
def ACDC_affine_intensity_aug(self):
config = self.ACDC_affine_aug()
config['intensity_prob'] = 0.5
return config
def ACDC_affine_elastic_intensity_aug(self):
config = self.ACDC_affine_aug()
config['intensity_prob'] = 0.5
config['elastic_prob'] = 0.5
return config
def ACDC_affine_elastic_intensity_aug_v2(self):
config = self.ACDC_affine_aug()
config['intensity_prob'] = 0.5
config['elastic_probv2'] = 0.5
return config
def ACDC_affine_perturb_aug(self):
config = self.ACDC_affine_aug()
config['perturb_prob'] = 0.5
config['max_sigma'] = 16
config['multi_control_points'] = [2, 4, 8]
config['add_noise'] = True
        config['noise_epsilon'] = 0.01
return config
def ACDC_affine_perturb_v2(self):
config = self.ACDC_affine_aug()
config['perturb_v2_prob'] = 0.5
config['perturb_v2_bias_magnitude'] = 0.3
config['ms_control_point_spacing'] = [64, 1]
config['perturb_v2_add_noise'] = True
config['perturb_v2_noise_epsilon'] = 0.01
return config
def ACDC_affine_bias_elastic(self):
config = self.ACDC_affine_aug()
config['perturb_v2_prob'] = 0.5
config['perturb_v2_bias_magnitude'] = 0.3
config['ms_control_point_spacing'] = [64, 1]
config['perturb_v2_add_noise'] = True
config['perturb_v2_noise_epsilon'] = 0.01
config['elastic_prob'] = 0.5
return config
def ACDC_affine_bias_elastic_intensity(self):
config = self.ACDC_affine_bias_elastic()
config['intensity_prob'] = 0.5
return config
def ACDC_affine_elastic_aug(self):
config = self.ACDC_affine_aug()
config['elastic_prob'] = 0.5
return config
def affine_elastic_aug(self):
config = self.affine_aug()
config['elastic_prob'] = 0.5
return config
def affine_gamma_aug(self):
config = self.affine_aug()
config['gamma_prob'] = 0.5
return config
def affine_gamma_elastic_aug(self):
config = self.affine_aug()
config['gamma_prob'] = 0.5
config['elastic_prob'] = 0.5
config['gamma_range'] = [0.8, 1.2]
return config
def gamma_aug(self):
config = self.no_aug()
config['gamma_prob'] = 0.5
config['gamma_range'] = [0.8, 1.2]
return config
def elastic_aug(self):
config = self.no_aug()
config['elastic_prob'] = 1
return config
def elasticv2_aug(self):
config = self.no_aug()
config['elastic_probv2'] = 1
return config
def gamma_scale(self):
config = self.no_aug()
config['gamma_prob'] = 0.5
config['gamma_range'] = [0.8, 1.2]
config['scale_val'] = [0.9, 1.1]
return config
def elastic_scale(self):
config = self.no_aug()
config['elastic_prob'] = 0.5
config['scale_val'] = [0.9, 1.1]
return config
def gamma_elastic_aug(self):
config = self.no_aug()
config['gamma_prob'] = 0.5
config['gamma_range'] = [0.8, 1.2]
config['elastic_prob'] = 0.5
return config
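
# A minimal usage sketch: pick a policy name from the aug_config keys in
# get_transformation (e.g. 'ACDC_affine'); the sizes shown are the defaults.
transformer = Transformations(data_aug_policy_name='ACDC_affine',
                              pad_size=(80, 80, 1), crop_size=(80, 80, 1))
transforms = transformer.get_transformation()
train_transform = transforms['train']  # composed callable for (image, label) pairs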
|
1640399
|
with open('log.txt', 'r') as ifs:
lines = filter(lambda line: 'main' in line, ifs.readlines())
record_list = set(map(lambda line: line.split('main:')[1], lines))
record_pair_list = map(lambda record: record.strip().split(','), record_list)
queue_list = filter(lambda ele: 'QUEUE' in ele[0], record_pair_list)
topic_list = filter(lambda ele: 'TOPIC' in ele[0], record_pair_list)
for queue_pair in sorted(queue_list, key=lambda pair: int(pair[0].split('_')[1])):
print queue_pair
print
for topic_pair in sorted(topic_list, key=lambda pair: int(pair[0].split('_')[1])):
print topic_pair
key_value_list = map(lambda lst: (lst[0], int(lst[1])), record_pair_list)
file_size_list = map(lambda my_pair: my_pair[1], key_value_list)
total_size = sum(file_size_list)
print total_size
print float(total_size) / 1024 / 1024 / 1024
print 4.66584196314
|
1640404
|
import sys
import speech_recognition as sr
r = sr.Recognizer()
r.dynamic_energy_threshold = False
r.energy_threshold = 400
value=1
while(value):
try:
with sr.Microphone() as source:
audio = r.listen(source, timeout=2)
        try:
            print("You said " + r.recognize_google(audio))
            sys.stdout.flush()
        except OSError as err:
            print("OS error: {0}".format(err))
        except sr.UnknownValueError:
            print("Could not understand audio")
            sys.stdout.flush()
        except Exception:
            print("Unexpected error:", sys.exc_info()[0])
            sys.stdout.flush()
    except sr.WaitTimeoutError:
        print("Time out")
        sys.stdout.flush()
else:
print("done")
|
1640454
|
import inspect
from enum import Enum
from typing import (
Any, AnyStr, ClassVar, Dict, List, Literal, Optional, Tuple, Type, Union,
get_args, get_origin
)
from typing_extensions import Annotated
from ...errors import CommandSetupError
from ...models import InteractionChannel, InteractionMember, InteractionUser
from ...utils import MISSING
from ..base import (
ApplicationCommandOption, CommandInteraction, CommandInteractionOption
)
__all__ = ('CommandType', 'OptionClass')
class CommandType(Enum):
chat_input = 1
user = 2
message = 3
class OptionType:
"""Base class for all option types.
Instances of this class hold no special behaviour and the value is just
passed through.
Attributes:
enum: The ApplicationCommandOption value of this type.
"""
__slots__ = ('enum',)
def __init__(self, enum: ApplicationCommandOption) -> None:
self.enum = enum
def __eq__(self, other: object) -> bool:
if isinstance(other, ApplicationCommandOption):
return self.enum == other
elif isinstance(other, self.__class__):
return self.enum == other.enum
return False
class MemberUserUnion(OptionType):
"""Option type marker for Union[InteractionUser, InteractionMember].
This is used to mark special behaviour that will first attempt to grab the
member object, if not found it will grab the user object.
"""
enum = ApplicationCommandOption.user
__slots__ = ()
def __init__(self) -> None:
return
class FloatType(OptionType):
"""Strict float type.
This is because Discord's equivalent `number` type includes both integers
and floats, which can cause unexpected user behaviour when just `float` is
used.
"""
enum = ApplicationCommandOption.number
__slots__ = ()
def __init__(self) -> None:
return
class OptionClass:
"""A user-constructed option to an application command.
For most cases the `Option` helper function should be used.
    The OptionClass uses the `MISSING` sentinel for missing values. This can be
    tricky to work with, and in general users should not interact with this
    class directly unless extending it.
Attributes:
type: Application command option type to send to Discord.
name: The name of the option in the Discord client.
description: A description of the option.
required: Whether the option can be omitted.
choices: Strict choices that the user can pick from.
        min: Smallest number that can be entered for number types.
        max: Biggest number that can be entered for number types.
default: Default the library will use when the option is omitted.
converter: Simple callable mainly used for enums.
param: The name of the parameter in the callback.
kind: The kind of the parameter in the callback.
type_mapping:
            Mapping of primitive types (i.e. `int`, `str`) to the application
command option type it will get. This is looked up by
`determine_type()` when the parameter annotation is read.
"""
type: OptionType
name: str
description: str
required: bool
choices: Dict[str, Union[str, int, float]] # Name of the choice to the value
min: int
max: int
default: Any
converter: Any # Simple callable, used for enums to convert the argument
param: str
kind: inspect._ParameterKind
__slots__ = (
'default', 'name', 'description', 'required',
'choices', 'type', 'converter', 'param', 'kind',
'min', 'max'
)
# Mapping of primitive types to their equivalent ApplicationCommandOption
# enum values, placed in the class so that it can be overwritten.
type_mapping: ClassVar[Dict[Type, ApplicationCommandOption]] = {
str: ApplicationCommandOption.string,
int: ApplicationCommandOption.integer,
bool: ApplicationCommandOption.boolean,
float: ApplicationCommandOption.number,
InteractionUser: ApplicationCommandOption.user,
InteractionChannel: ApplicationCommandOption.channel,
InteractionMember: ApplicationCommandOption.user,
}
def __init__(
self,
default: Any = MISSING,
*,
name: str = MISSING,
description: str = MISSING,
required: bool = MISSING,
# This isn't very readable, but it means a list or dictionary of
# strings, integers or floats.
choices: Union[List[Union[str, int, float]], Dict[str, Union[str, int, float]]] = MISSING,
min: int = MISSING,
max: int = MISSING,
type: Type[Any] = MISSING
) -> None:
self.name = name
self.description = description
if default is not MISSING and required is MISSING:
# If required hasn't been set and there's a default we
# should assume the user wants the option to be optional
required = False
self.required = required
if isinstance(choices, list):
choices = {str(value): value for value in choices}
self.choices = choices
self.min = min
self.max = max
self.default = default
self.converter = MISSING
self.param = MISSING
self.kind = MISSING
self.type = MISSING
if type is not MISSING:
self.determine_type(type)
def determine_union(self, args: Tuple[Any, ...]) -> bool:
"""Determine the option type for a union.
This is called by `determine_type` when it receives a union type
because of the extra logic involved.
Args:
args: The arguments that the Union was given.
Returns:
Whether the method could determine a type from the union.
"""
# Optional[X] becomes Union[X, NoneType]. Flake8 thinks we should use
# isinstance() but that won't work (hence the noqa)
if len(args) == 2 and args[-1] == type(None): # noqa: E721
self.required = False if self.required is MISSING else self.required
# Find the typing of X in Optional[X]
return self.determine_type(args[0])
elif len(args) == 2 and int in args and float in args:
# A union with int and float can just be interpreted as a float
# because a float doesn't need decimals.
self.type = OptionType(ApplicationCommandOption.number)
return True
elif len(args) == 2 and InteractionUser in args and InteractionMember in args:
self.type = MemberUserUnion()
return True
return False
def determine_type(self, annotation: Any) -> bool:
"""Determine the application command option type from an annotation.
This method is exposed for the purpose of extending it with advanced
custom behaviour. For simpler cases the `type_mapping` class variable
        can be overridden with more values.
Args:
annotation: The annotation of the parameter.
Returns:
Whether an application command type was able to be determined.
"""
if isinstance(annotation, ApplicationCommandOption):
self.type = OptionType(annotation)
return True
try:
self.type = OptionType(self.type_mapping[annotation])
return True
except KeyError:
# It wasn't a primitive type we have in the mapping, continue down
# and try resolving the type
pass
if isinstance(annotation, type) and issubclass(annotation, Enum):
# If the enum is a subclass of another type, such as an IntEnum we
# can infer the type from that.
if self.type is MISSING:
for primitive in self.type_mapping:
if not issubclass(annotation, primitive):
continue
# The enum is a subclass of some other type
self.determine_type(primitive)
# We can use an enum's members as choices.
if self.choices is MISSING:
self.converter = annotation
self.choices = {name: val.value for name, val in annotation.__members__.items()}
return True
elif annotation is AnyStr: # Simple alias
return self.determine_type(str)
origin = get_origin(annotation)
args = get_args(annotation)
if origin is Union:
# The union type has a lot of different and special behaviour
            # that has been separated into another method for readability.
return self.determine_union(args)
elif origin is Annotated:
# Attempt to convert each argument until it is successful,
# excluding the first argument (which is meant for editors).
return any(self.determine_type(attempt) for attempt in args[1:])
elif origin is Literal and args: # Make sure it isn't empty
if self.type is MISSING:
type_ = type(args[0])
for value in args:
if not isinstance(value, type_):
raise TypeError(
f"Literal contains mixed types; expected '{type_}' not '{type(value)}'"
)
self.determine_type(type_)
if self.choices is MISSING:
# Discord wants a name and a value, for Literal we simply have
# to use the arguments for both
self.choices = {str(value): value for value in args}
return True
return False
def update(self, param: inspect.Parameter) -> None:
"""Update the option with new information about the parameter.
The class has no idea about the parameter it is being defined in so it
has to be made aware by the library after the fact. This is called when
the command is being created.
Args:
param: The parameter that this instance was defined in.
"""
self.param = param.name
# Generally the Option instance has priority, unless it is MISSING
self.name = param.name if self.name is MISSING else self.name
if param.default is not param.empty and not isinstance(param.default, self.__class__):
# If the parameter has a default other than an Option class we can
# use it for the option default
self.default = param.default if self.default is MISSING else self.default
self.required = False if self.required is MISSING else self.required
self.kind = param.kind
if param.annotation is not param.empty and self.type is MISSING:
self.determine_type(param.annotation)
def _update_values(
self,
name: str = MISSING,
description: str = MISSING,
required: bool = MISSING,
choices: Union[
List[Union[str, int, float]],
Dict[str, Union[str, int, float]]
] = MISSING,
min: int = MISSING,
max: int = MISSING,
type: Any = MISSING
) -> None:
"""Update internal values of the option.
        This is different from `update()`, which is only meant to make the
        OptionClass instance aware of the parameter it has been defined in.
"""
if type is not MISSING:
self.determine_type(type)
if name is not MISSING:
self.name = name
if description is not MISSING:
self.description = description
if required is not MISSING:
self.required = required
if choices is not MISSING:
if isinstance(choices, list):
choices = {str(value): value for value in choices}
self.choices = choices
if min is not MISSING:
self.min = min
if max is not MISSING:
self.max = max
def resolve(
self,
interaction: CommandInteraction,
data: Optional[CommandInteractionOption]
) -> Any:
"""Resolve the value to pass to the callback.
The value that this returns is passed directly to the callback in the
option's place.
Args:
interaction: The interaction received from Discord.
data: The option received from Discord or None if it wasn't passed.
Returns:
The resolved value from the interaction and option.
Exceptions:
CommandSetupError: There is no data and a default is missing.
CommandSetupError: The data is of an unexpected type.
CommandSetupError: The data failed to be converted to an enum.
"""
if data is None:
if self.default is MISSING:
raise CommandSetupError(
f"Missing data for option '{self.param}' of command '{interaction.name}'"
)
return self.default
if data.type is not self.type.enum:
raise CommandSetupError(
f"'{self.param}' of '{interaction.name}' received option with wrong type"
)
value = data.value
if value is None:
raise CommandSetupError(
f"Expected command option value for '{self.param}' of '{interaction.name}'"
)
if self.converter is not MISSING:
try:
value = self.converter(value)
except Exception as exc:
raise CommandSetupError('Could not convert argument:', value) from exc
if isinstance(self.type, FloatType):
# We need to make sure it really is a float
value = float(value)
# Some options only pass IDs because Discord asynchronously resolves
# the data for them, these are then passed in a special `resolved`
# field that we need to look them up by.
elif isinstance(self.type, MemberUserUnion):
# This has some special behaviour, we want to attempt to first get
# a member and fall back to a user.
resolved = interaction.resolved.members.get(int(value))
if not resolved:
resolved = interaction.resolved.users.get(int(value))
value = resolved
        elif self.type.enum is ApplicationCommandOption.user:
            value = interaction.resolved.users.get(int(value))
        elif self.type.enum is ApplicationCommandOption.channel:
            value = interaction.resolved.channels.get(int(value))
# At this point `value` may be None from our lookups of the resolved
# data
if value is None:
raise CommandSetupError(
"Didn't receive resolved data for command '{interaction.name}'"
)
return value
def to_dict(self) -> Dict[str, Any]:
"""Turn the option into a dictionary to send to Discord."""
data = {
'name': self.name,
'type': self.type.enum.value,
'description': self.description,
'required': True if self.required is MISSING else self.required,
}
if self.choices is not MISSING:
# We store choices with the name as the key and value being the
# value but Discord expects a payload with explicit name and value
# keys so we need to convert it.
choices = [{'name': k, 'value': v} for k, v in self.choices.items()]
data['choices'] = choices
if self.min is not MISSING:
data['min_value'] = self.min
if self.max is not MISSING:
data['max_value'] = self.max
return data
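
# A small sketch of how determine_type resolves annotations, using only names
# defined above; the option names are illustrative.
option = OptionClass(name='amount', description='How many to remove')
option.determine_type(Optional[int])  # Union[int, None]: integer, not required
assert option.type == ApplicationCommandOption.integer
assert option.required is False

colour = OptionClass(name='colour', description='Pick one')
colour.determine_type(Literal['red', 'green', 'blue'])  # choices from the literal
assert colour.choices == {'red': 'red', 'green': 'green', 'blue': 'blue'}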
|
1640459
|
import argparse
import jsons
import logging
import os
from overrides import overrides
from typing import List, Tuple
from sacrerouge.commands import RootSubcommand
from sacrerouge.common import Params
from sacrerouge.common.logging import prepare_global_logging
from sacrerouge.common.util import import_module_and_submodules
from sacrerouge.data import EvalInstance, Metrics, MetricsDict
from sacrerouge.data.dataset_readers import DatasetReader
from sacrerouge.io import JsonlWriter
from sacrerouge.metrics import Metric
logger = logging.getLogger(__name__)
def load_metrics(params: Params) -> List[Metric]:
metrics = []
for metric_params in params.pop('metrics'):
metric = Metric.from_params(metric_params)
metrics.append(metric)
return metrics
def get_initial_micro_list(instances: List[EvalInstance]) -> List[Metrics]:
micro_list = []
for instance in instances:
micro_list.append(Metrics(instance.instance_id, instance.summarizer_id, instance.summarizer_type))
return micro_list
def evaluate_instances(instances: List[EvalInstance], metrics: List[Metric]) -> Tuple[MetricsDict, List[Metrics]]:
macro = MetricsDict()
micro_list = get_initial_micro_list(instances)
for metric in metrics:
# Prepare the input arguments
summary_args = []
for field in metric.required_summary_fields:
summary_args.append([instance.fields[field].to_input() for instance in instances])
context_args = []
for field in metric.required_context_fields:
context_args.append([instance.fields[field].to_input() for instance in instances])
# Score all the summaries
this_macro, this_micro_list = metric.evaluate(*summary_args, *context_args)
# Update the global metrics dictionaries
macro.update(this_macro)
for micro, this_micro in zip(micro_list, this_micro_list):
micro.metrics.update(this_micro)
return macro, micro_list
def save_evaluation_results(macro_results: MetricsDict,
micro_results_list: List[Metrics],
macro_output_json: str,
micro_output_jsonl: str,
silent: bool) -> None:
dirname = os.path.dirname(macro_output_json)
if dirname:
os.makedirs(dirname, exist_ok=True)
serialized_macro = jsons.dumps({'metrics': macro_results}, jdkwargs={'indent': 2})
with open(macro_output_json, 'w') as out:
out.write(serialized_macro)
if not silent:
logger.info(serialized_macro)
with JsonlWriter(micro_output_jsonl) as out:
for metrics_dict in micro_results_list:
out.write(metrics_dict)
def add_evaluate_arguments(parser: argparse.ArgumentParser, include_config_arguments: bool) -> None:
if include_config_arguments:
parser.add_argument(
'--config',
type=str,
help='The config file that specifies the dataset reader and metrics',
required=True
)
parser.add_argument(
'--overrides',
type=str,
help='A serialized json that will override the parameters passed in "config"'
)
parser.add_argument(
'--macro-output-json',
type=str,
help='The path to where the system-level metrics should be written',
required=True
)
parser.add_argument(
'--micro-output-jsonl',
type=str,
help='The path to where the input-level metrics should be written',
required=True
)
parser.add_argument(
'--log-file',
type=str,
help='The file where the log should be written'
)
parser.add_argument(
'--silent',
action='store_true',
help='Controls whether the log should be written to stdout'
)
parser.add_argument(
'--include-packages',
nargs='+',
help='A list of additional packages to include'
)
@RootSubcommand.register('evaluate')
class EvaluateSubcommand(RootSubcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction):
description = 'Evaluate a summarization model'
self.parser = parser.add_parser('evaluate', description=description, help=description)
add_evaluate_arguments(self.parser, True)
self.parser.set_defaults(func=self.run)
@overrides
def run(self, args):
prepare_global_logging(file_path=args.log_file, silent=args.silent)
import_module_and_submodules('sacrerouge')
include_packages = args.include_packages or []
for package in include_packages:
import_module_and_submodules(package)
params = Params.from_file(args.config, args.overrides)
dataset_reader = DatasetReader.from_params(params.pop('dataset_reader'))
metrics = load_metrics(params)
input_files = params.pop('input_files')
if isinstance(input_files, str):
input_files = [input_files]
instances = dataset_reader.read(*input_files)
macro, micro_list = evaluate_instances(instances, metrics)
save_evaluation_results(macro, micro_list, args.macro_output_json, args.micro_output_jsonl, args.silent)
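
# A hedged example of invoking this subcommand from the shell; the flag names
# come from add_evaluate_arguments above, while the paths and the entry-point
# name are illustrative assumptions:
#
#   sacrerouge evaluate \
#       --config config.json \
#       --macro-output-json output/macro.json \
#       --micro-output-jsonl output/micro.jsonl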
|
1640475
|
import os
import re
import subprocess
from collections import namedtuple
import logging
import bisect
from common import SushiError, get_extension
import chapters
MediaStreamInfo = namedtuple('MediaStreamInfo', ['id', 'info', 'default', 'title'])
SubtitlesStreamInfo = namedtuple('SubtitlesStreamInfo', ['id', 'info', 'type', 'default', 'title'])
MediaInfo = namedtuple('MediaInfo', ['video', 'audio', 'subtitles', 'chapters'])
class FFmpeg(object):
@staticmethod
def get_info(path):
try:
process = subprocess.Popen(['ffmpeg', '-hide_banner', '-i', path], stderr=subprocess.PIPE)
out, err = process.communicate()
process.wait()
return err
except OSError as e:
if e.errno == 2:
raise SushiError("Couldn't invoke ffmpeg, check that it's installed")
raise
@staticmethod
def demux_file(input_path, **kwargs):
args = ['ffmpeg', '-hide_banner', '-i', input_path, '-y']
audio_stream = kwargs.get('audio_stream', None)
audio_path = kwargs.get('audio_path', None)
audio_rate = kwargs.get('audio_rate', None)
if audio_stream is not None:
args.extend(('-map', '0:{0}'.format(audio_stream)))
if audio_rate:
args.extend(('-ar', str(audio_rate)))
args.extend(('-ac', '1', '-acodec', 'pcm_s16le', audio_path))
script_stream = kwargs.get('script_stream', None)
script_path = kwargs.get('script_path', None)
if script_stream is not None:
args.extend(('-map', '0:{0}'.format(script_stream), script_path))
video_stream = kwargs.get('video_stream', None)
timecodes_path = kwargs.get('timecodes_path', None)
if timecodes_path is not None:
args.extend(('-map', '0:{0}'.format(video_stream), '-f', 'mkvtimestamp_v2', timecodes_path))
logging.info('ffmpeg args: {0}'.format(' '.join(('"{0}"' if ' ' in a else '{0}').format(a) for a in args)))
try:
subprocess.call(args)
except OSError as e:
if e.errno == 2:
raise SushiError("Couldn't invoke ffmpeg, check that it's installed")
raise
@staticmethod
def _get_audio_streams(info):
streams = re.findall(r'Stream\s\#0:(\d+).*?Audio:\s*(.*?(?:\((default)\))?)\s*?(?:\(forced\))?\r?\n'
r'(?:\s*Metadata:\s*\r?\n'
r'\s*title\s*:\s*(.*?)\r?\n)?',
info, flags=re.VERBOSE)
return [MediaStreamInfo(int(x[0]), x[1], x[2] != '', x[3]) for x in streams]
@staticmethod
def _get_video_streams(info):
streams = re.findall(r'Stream\s\#0:(\d+).*?Video:\s*(.*?(?:\((default)\))?)\s*?(?:\(forced\))?\r?\n'
r'(?:\s*Metadata:\s*\r?\n'
r'\s*title\s*:\s*(.*?)\r?\n)?',
info, flags=re.VERBOSE)
return [MediaStreamInfo(int(x[0]), x[1], x[2] != '', x[3]) for x in streams]
@staticmethod
def _get_chapters_times(info):
return map(float, re.findall(r'Chapter #0.\d+: start (\d+\.\d+)', info))
@staticmethod
def _get_subtitles_streams(info):
maps = {
'ssa': '.ass',
'ass': '.ass',
'subrip': '.srt'
}
streams = re.findall(r'Stream\s\#0:(\d+).*?Subtitle:\s*((\w*)\s*?(?:\((default)\))?\s*?(?:\(forced\))?)\r?\n'
r'(?:\s*Metadata:\s*\r?\n'
r'\s*title\s*:\s*(.*?)\r?\n)?',
info, flags=re.VERBOSE)
return [SubtitlesStreamInfo(int(x[0]), x[1], maps.get(x[2], x[2]), x[3] != '', x[4].strip()) for x in streams]
@classmethod
def get_media_info(cls, path):
info = cls.get_info(path)
video_streams = cls._get_video_streams(info)
audio_streams = cls._get_audio_streams(info)
subs_streams = cls._get_subtitles_streams(info)
chapter_times = cls._get_chapters_times(info)
return MediaInfo(video_streams, audio_streams, subs_streams, chapter_times)
class MkvToolnix(object):
@classmethod
def extract_timecodes(cls, mkv_path, stream_idx, output_path):
args = ['mkvextract', 'timecodes_v2', mkv_path, '{0}:{1}'.format(stream_idx, output_path)]
subprocess.call(args)
class SCXviD(object):
@classmethod
def make_keyframes(cls, video_path, log_path):
try:
ffmpeg_process = subprocess.Popen(['ffmpeg', '-i', video_path,
'-f', 'yuv4mpegpipe',
'-vf', 'scale=640:360',
'-pix_fmt', 'yuv420p',
'-vsync', 'drop', '-'], stdout=subprocess.PIPE)
except OSError as e:
if e.errno == 2:
raise SushiError("Couldn't invoke ffmpeg, check that it's installed")
raise
try:
scxvid_process = subprocess.Popen(['SCXvid', log_path], stdin=ffmpeg_process.stdout)
except OSError as e:
ffmpeg_process.kill()
if e.errno == 2:
raise SushiError("Couldn't invoke scxvid, check that it's installed")
raise
scxvid_process.wait()
class Timecodes(object):
def __init__(self, times, default_fps):
super(Timecodes, self).__init__()
self.times = times
self.default_frame_duration = 1.0 / default_fps if default_fps else None
def get_frame_time(self, number):
try:
return self.times[number]
except IndexError:
if not self.default_frame_duration:
return self.get_frame_time(len(self.times)-1)
if self.times:
return self.times[-1] + (self.default_frame_duration) * (number - len(self.times) + 1)
else:
return number * self.default_frame_duration
    def get_frame_number(self, timestamp):
        if (not self.times or self.times[-1] < timestamp) and self.default_frame_duration:
            # extrapolate past the last known timecode (inverse of get_frame_time)
            if self.times:
                return len(self.times) - 1 + int((timestamp - self.times[-1]) / self.default_frame_duration)
            return int(timestamp / self.default_frame_duration)
        return bisect.bisect_left(self.times, timestamp)
def get_frame_size(self, timestamp):
try:
number = bisect.bisect_left(self.times, timestamp)
except:
return self.default_frame_duration
c = self.get_frame_time(number)
if number == len(self.times):
p = self.get_frame_time(number - 1)
return c - p
else:
n = self.get_frame_time(number + 1)
return n - c
@classmethod
def _convert_v1_to_v2(cls, default_fps, overrides):
# start, end, fps
overrides = [(int(x[0]), int(x[1]), float(x[2])) for x in overrides]
if not overrides:
return []
fps = [default_fps] * (overrides[-1][1] + 1)
for o in overrides:
fps[o[0]:o[1] + 1] = [o[2]] * (o[1] - o[0] + 1)
v2 = [0]
for d in (1.0 / f for f in fps):
v2.append(v2[-1] + d)
return v2
@classmethod
def parse(cls, text):
lines = text.splitlines()
if not lines:
return []
first = lines[0].lower().lstrip()
if first.startswith('# timecode format v2') or first.startswith('# timestamp format v2'):
tcs = [float(x) / 1000.0 for x in lines[1:]]
return Timecodes(tcs, None)
elif first.startswith('# timecode format v1'):
default = float(lines[1].lower().replace('assume ', ""))
overrides = (x.split(',') for x in lines[2:])
return Timecodes(cls._convert_v1_to_v2(default, overrides), default)
else:
raise SushiError('This timecodes format is not supported')
@classmethod
def from_file(cls, path):
with open(path) as file:
return cls.parse(file.read())
@classmethod
def cfr(cls, fps):
class CfrTimecodes(object):
def __init__(self, fps):
self.frame_duration = 1.0 / fps
def get_frame_time(self, number):
return number * self.frame_duration
def get_frame_size(self, timestamp):
return self.frame_duration
def get_frame_number(self, timestamp):
return int(timestamp / self.frame_duration)
return CfrTimecodes(fps)
class Demuxer(object):
def __init__(self, path):
super(Demuxer, self).__init__()
self._path = path
self._is_wav = get_extension(self._path) == '.wav'
self._mi = None if self._is_wav else FFmpeg.get_media_info(self._path)
self._demux_audio = self._demux_subs = self._make_timecodes = self._make_keyframes = self._write_chapters = False
@property
def is_wav(self):
return self._is_wav
@property
def path(self):
return self._path
@property
def chapters(self):
if self.is_wav:
return []
return self._mi.chapters
@property
def has_video(self):
return not self.is_wav and self._mi.video
def set_audio(self, stream_idx, output_path, sample_rate):
self._audio_stream = self._select_stream(self._mi.audio, stream_idx, 'audio')
self._audio_output_path = output_path
self._audio_sample_rate = sample_rate
self._demux_audio = True
def set_script(self, stream_idx, output_path):
self._script_stream = self._select_stream(self._mi.subtitles, stream_idx, 'subtitles')
self._script_output_path = output_path
self._demux_subs = True
def set_timecodes(self, output_path):
self._timecodes_output_path = output_path
self._make_timecodes = True
def set_chapters(self, output_path):
self._write_chapters = True
self._chapters_output_path = output_path
def set_keyframes(self, output_path):
self._keyframes_output_path = output_path
self._make_keyframes = True
def get_subs_type(self, stream_idx):
return self._select_stream(self._mi.subtitles, stream_idx, 'subtitles').type
def demux(self):
if self._write_chapters:
with open(self._chapters_output_path, "w") as output_file:
output_file.write(chapters.format_ogm_chapters(self.chapters))
if self._make_keyframes:
SCXviD.make_keyframes(self._path, self._keyframes_output_path)
ffargs = {}
if self._demux_audio:
ffargs['audio_stream'] = self._audio_stream.id
ffargs['audio_path'] = self._audio_output_path
ffargs['audio_rate'] = self._audio_sample_rate
if self._demux_subs:
ffargs['script_stream'] = self._script_stream.id
ffargs['script_path'] = self._script_output_path
if self._make_timecodes:
def set_ffmpeg_timecodes():
ffargs['video_stream'] = self._mi.video[0].id
ffargs['timecodes_path'] = self._timecodes_output_path
if get_extension(self._path).lower() == '.mkv':
try:
MkvToolnix.extract_timecodes(self._path,
stream_idx=self._mi.video[0].id,
output_path=self._timecodes_output_path)
except OSError as e:
if e.errno == 2:
set_ffmpeg_timecodes()
else:
raise
else:
set_ffmpeg_timecodes()
if ffargs:
FFmpeg.demux_file(self._path, **ffargs)
def cleanup(self):
if self._demux_audio:
os.remove(self._audio_output_path)
if self._demux_subs:
os.remove(self._script_output_path)
if self._make_timecodes:
os.remove(self._timecodes_output_path)
if self._write_chapters:
os.remove(self._chapters_output_path)
@classmethod
def _format_stream(cls, stream):
return '{0}{1}: {2}'.format(stream.id, ' (%s)' % stream.title if stream.title else '', stream.info)
@classmethod
def _format_streams_list(cls, streams):
return '\n'.join(map(cls._format_stream, streams))
def _select_stream(self, streams, chosen_idx, name):
if not streams:
raise SushiError('No {0} streams found in {1}'.format(name, self._path))
if chosen_idx is None:
if len(streams) > 1:
default_track = next((s for s in streams if s.default), None)
if default_track:
logging.warning('Using default track {0} in {1} because there are multiple candidates'
.format(self._format_stream(default_track), self._path))
return default_track
            raise SushiError('More than one {0} stream found in {1}. '
                             'You need to specify the exact one to demux. Here are all candidates:\n'
                             '{2}'.format(name, self._path, self._format_streams_list(streams)))
return streams[0]
try:
return next(x for x in streams if x.id == chosen_idx)
except StopIteration:
raise SushiError("Stream with index {0} doesn't exist in {1}.\n"
"Here are all that do:\n"
"{2}".format(chosen_idx, self._path, self._format_streams_list(streams)))
|
1640479
|
import jinja2
from jinja2 import Environment, select_autoescape
templateLoader = jinja2.FileSystemLoader( searchpath="/" )
something = ''
Environment(loader=templateLoader, load=templateLoader, autoescape=True)
templateEnv = jinja2.Environment(autoescape=True,
loader=templateLoader )
Environment(loader=templateLoader, load=templateLoader, autoescape=something)
templateEnv = jinja2.Environment(autoescape=False, loader=templateLoader )
Environment(loader=templateLoader,
load=templateLoader,
autoescape=False)
Environment(loader=templateLoader,
load=templateLoader)
Environment(loader=templateLoader, autoescape=select_autoescape())
Environment(loader=templateLoader,
autoescape=select_autoescape(['html', 'htm', 'xml']))
def fake_func():
return 'foobar'
Environment(loader=templateLoader, autoescape=fake_func())
|
1640493
|
import smbus
import gevent
from gevent.lock import BoundedSemaphore
from Node import Node
READ_ADDRESS = 0x00
READ_RSSI = 0x01
READ_FREQUENCY = 0x03
READ_TRIGGER_RSSI = 0x04
READ_LAP = 0x05
READ_TIMING_SERVER_MODE = 0x06
WRITE_TRIGGER_RSSI = 0x53
WRITE_FREQUENCY = 0x56
WRITE_TIMING_SERVER_MODE = 0x57
UPDATE_SLEEP = 0.1
I2C_CHILL_TIME = 0.05
I2C_RETRY_SLEEP = 0.05
I2C_RETRY_COUNT = 5
def unpack_16(data):
result = data[0]
result = (result << 8) | data[1]
return result
def pack_16(data):
part_a = (data >> 8)
part_b = (data & 0xFF)
return [part_a, part_b]
def unpack_32(data):
result = data[0]
result = (result << 8) | data[1]
result = (result << 8) | data[2]
result = (result << 8) | data[3]
return result
def validate_checksum(data):
    if data is None:
return False
checksum = sum(data[:-1]) & 0xFF
return checksum == data[-1]
class Delta5Interface:
def __init__(self):
self.update_thread = None
self.pass_record_callback = None
self.hardware_log_callback = None
self.semaphore = BoundedSemaphore(1)
# Start i2c bus
self.i2c = smbus.SMBus(1)
self.nodes = []
i2c_addrs = [8, 10, 12, 14, 16, 18, 20, 22]
for index, addr in enumerate(i2c_addrs):
try:
self.i2c.read_i2c_block_data(addr, READ_ADDRESS, 1)
print ("Node FOUND at address {0}".format(addr))
gevent.sleep(I2C_CHILL_TIME)
node = Node()
node.i2c_addr = addr
node.index = index
self.nodes.append(node)
self.get_frequency_node(node)
self.get_trigger_rssi_node(node)
self.enable_timing_server_mode(node)
except IOError as err:
print ("No node at address {0}".format(addr))
gevent.sleep(I2C_CHILL_TIME)
def read_block(self, addr, offset, size):
success = False
retry_count = 0
data = None
while success == False and retry_count < I2C_RETRY_COUNT:
try:
with self.semaphore:
data = self.i2c.read_i2c_block_data(addr, offset, size + 1)
if validate_checksum(data):
success = True
gevent.sleep(I2C_CHILL_TIME)
data = data[:-1]
else:
self.log('Invalid Checksum ({0}): {1}'.format(retry_count, data))
retry_count = retry_count + 1
gevent.sleep(I2C_RETRY_SLEEP)
except IOError as err:
self.log(err)
retry_count = retry_count + 1
gevent.sleep(I2C_RETRY_SLEEP)
return data
def write_block(self, addr, offset, data):
success = False
retry_count = 0
        # append the checksum without mutating the caller's list
        data_with_checksum = data + [sum(data) & 0xFF]
while success == False and retry_count < I2C_RETRY_COUNT:
try:
with self.semaphore:
self.i2c.write_i2c_block_data(addr, offset, data_with_checksum)
success = True
gevent.sleep(I2C_CHILL_TIME)
except IOError as err:
self.log(err)
retry_count = retry_count + 1
gevent.sleep(I2C_RETRY_SLEEP)
def update_loop(self):
while True:
self.update()
gevent.sleep(UPDATE_SLEEP)
def update(self):
for node in self.nodes:
            data = self.read_block(node.i2c_addr, READ_LAP, 7)
            if data is None:
                continue  # skip this node after repeated i2c failures
            lap_id = data[0]
            ms_since_lap = unpack_32(data[1:])
            node.current_rssi = unpack_16(data[5:])
if lap_id != node.last_lap_id:
if (callable(self.pass_record_callback)):
self.pass_record_callback(node, ms_since_lap)
node.last_lap_id = lap_id
def start(self):
if self.update_thread is None:
self.log('starting background thread')
self.update_thread = gevent.spawn(self.update_loop)
def enable_timing_server_mode(self, node):
success = False
retry_count = 0
while success == False and retry_count < I2C_RETRY_COUNT:
self.write_block(node.i2c_addr, WRITE_TIMING_SERVER_MODE, [1])
data = self.read_block(node.i2c_addr, READ_TIMING_SERVER_MODE, 1)
if data[0] == 1:
print('Timing Server Mode Set')
success = True
else:
retry_count = retry_count + 1
print('Timing Server Mode Not Set ({0})'.format(retry_count))
gevent.sleep(I2C_RETRY_SLEEP)
return node.trigger_rssi
def get_frequencies(self):
for node in self.nodes:
self.get_frequency_node(node)
def get_frequency_node(self, node):
data = self.read_block(node.i2c_addr, READ_FREQUENCY, 2)
node.frequency = unpack_16(data)
return node.frequency
def set_frequency_index(self, node_index, frequency):
success = False
retry_count = 0
node = self.nodes[node_index]
while success == False and retry_count < I2C_RETRY_COUNT:
self.write_block(node.i2c_addr, WRITE_FREQUENCY, pack_16(frequency))
if self.get_frequency_node(node) == frequency:
success = True
else:
retry_count = retry_count + 1
self.log('Frequency Not Set ({0})'.format(retry_count))
return node.frequency
def get_trigger_rssis(self):
for node in self.nodes:
            self.get_trigger_rssi_node(node)
def get_trigger_rssi_node(self, node):
data = self.read_block(node.i2c_addr, READ_TRIGGER_RSSI, 2)
node.trigger_rssi = unpack_16(data)
return node.trigger_rssi
def set_trigger_rssi_index(self, node_index, trigger_rssi):
success = False
retry_count = 0
node = self.nodes[node_index]
while success == False and retry_count < I2C_RETRY_COUNT:
self.write_block(node.i2c_addr, WRITE_TRIGGER_RSSI, pack_16(trigger_rssi))
if self.get_trigger_rssi_node(node) == trigger_rssi:
success = True
else:
retry_count = retry_count + 1
self.log('RSSI Not Set ({0})'.format(retry_count))
return node.trigger_rssi
def capture_trigger_rssi_index(self, node_index):
node = self.nodes[node_index]
return self.set_trigger_rssi_index(node_index, node.current_rssi)
def log(self, message):
if (callable(self.hardware_log_callback)):
string = 'Delta5: {0}'.format(message)
self.hardware_log_callback(string)
def get_settings_json(self):
settings = [node.get_settings_json() for node in self.nodes]
return settings
def get_heartbeat_json(self):
return { 'current_rssi': [node.current_rssi for node in self.nodes]}
def get_hardware_interface():
return Delta5Interface()
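
# A hedged usage sketch; the callback signatures follow update() and log() above.
def on_pass(node, ms_since_lap):
    print('Node {0} crossed the gate {1} ms ago'.format(node.index, ms_since_lap))

def on_log(message):
    print(message)

interface = get_hardware_interface()
interface.pass_record_callback = on_pass
interface.hardware_log_callback = on_log
interface.start()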
|
1640508
|
import logging
#import traceback
from pypes.component import Component
from pypesvds.lib.extras.pdfparser import PDFConverter
log = logging.getLogger(__name__)
class PDFReader(Component):
__metatype__ = 'ADAPTER'
def __init__(self):
# initialize parent class
Component.__init__(self)
# create an instance of the pdf converter
self._converter = PDFConverter()
log.info('Component Initialized: %s' % self.__class__.__name__)
def run(self):
        # Define our component's entry point
while True:
# for each document waiting on our input port
for doc in self.receive_all('in'):
try:
data = doc.get('data')
mime = doc.get_meta('mimetype')
# if there is no data, move on to the next doc
if data is None:
continue
# if this is not a file, move on to the next doc
if mime != 'application/pdf':
continue
# do the conversion
# if it fails the converter will return an empty string
body = self._converter.convert(data)
if body:
# write out the body as unicode string
doc.set('body', body.decode('utf-8'))
else:
log.debug('PDF conversion failed')
# delete the binary data
doc.delete('data')
except Exception as e:
log.error('Component Failed: %s' % self.__class__.__name__)
log.error('Reason: %s' % str(e))
#log.error(traceback.print_exc())
# send the document to the next component
self.send('out', doc)
# yield the CPU, allowing another component to run
self.yield_ctrl()
|
1640510
|
import os, sys, glob
name = 'pydbVar'
version = '0.1'
from distutils.core import setup
from distutils.extension import Extension
cmdclass = {}  # no custom build commands yet; referenced by the metadata below
metadata = {
    'name': name,
    'version': version,
    'cmdclass': cmdclass,
}
if __name__ == '__main__':
    setup(**metadata)
|
1640533
|
from setuptools import setup, find_packages
with open("README.md", "r") as fp:
long_description = fp.read()
setup(
name='pymlir',
version='0.3',
url='https://github.com/spcl/pymlir',
author='SPCL @ ETH Zurich',
author_email='<EMAIL>',
description='',
long_description=long_description,
long_description_content_type='text/markdown',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
packages=find_packages(
exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
package_data={
'': ['lark/mlir.lark']
},
include_package_data=True,
install_requires=[
'lark-parser', 'parse'
],
tests_require=['pytest', 'pytest-cov'],
test_suite='pytest',
scripts=[])
|
1640589
|
import os
import json
import random
import numpy as np
import tensorflow as tf
import src.core.constants as constants
import src.retina_net.anchor_generator.box_utils as box_utils
import src.retina_net.datasets.dataset_utils as dataset_utils
from src.core.abstract_classes.dataset_handler import DatasetHandler
from src.retina_net.anchor_generator.fpn_anchor_generator import FpnAnchorGenerator
class BddDatasetHandler(DatasetHandler):
def __init__(self, config, train_val_test):
"""
Initializes directories, and loads the sample list
:param config: configuration dictionary
:param train_val_test (string): 'train', 'val', or 'test'
"""
super().__init__(config)
# Define Dicts
self._MEANS_DICT = constants.MEANS_DICT
# Load configs
self.anchor_gen_config = config['anchor_generator']
self.training_data_config = config['bdd']['training_data_config']
# Define paths to dataset
paths_config = config['bdd']['paths_config']
self.dataset_dir = os.path.expanduser(paths_config['dataset_dir'])
data_set_size = paths_config['100k_or_10k']
if train_val_test == 'train':
self.data_split_dir = train_val_test
self.label_file_name = 'train.json'
self.frac_training_data = self.training_data_config['frac_training_data']
else:
self.data_split_dir = 'val'
self.label_file_name = 'val.json'
self.frac_training_data = 1.0
self.im_dir = os.path.join(
self.dataset_dir, 'images', data_set_size, self.data_split_dir)
self.gt_label_dir = os.path.join(
self.dataset_dir, 'labels')
# Make sure dataset directories exist
dataset_utils.check_data_dirs([self.im_dir, self.gt_label_dir])
# Get sample ids
self._load_sample_ids()
self.epoch_size = len(self.sample_ids)
self.labels = json.load(open(os.path.join(
self.gt_label_dir, self.label_file_name), 'r'))
# Create placeholder for dataset
self.dataset = None
        # Create flag distinguishing train/val from test-only inference
self.is_testing = (train_val_test == 'test')
def _load_sample_ids(self):
"""
loads sample ids to read dataset
"""
sample_ids = os.listdir(self.im_dir)
# Random shuffle here is much more computationally efficient than randomly shuffling a dataset iterator.
if self.frac_training_data != 1.0 and self.data_split_dir == 'train':
percent_samples = int(len(sample_ids) * self.frac_training_data)
inds = np.random.choice(
len(sample_ids), percent_samples, replace=False)
self.sample_ids = [sample_ids[ind] for ind in inds]
elif self.data_split_dir == 'train':
random.shuffle(sample_ids)
self.sample_ids = sample_ids
else:
self.sample_ids = sample_ids
        # Create a list of image paths from sample ids
        self.im_paths = [
            os.path.join(self.im_dir, sample) for sample in self.sample_ids]
def set_sample_id(self, sample_index):
self.im_paths = [self.im_paths[sample_index]]
self.sample_ids = [self.sample_ids[sample_index]]
def create_dataset(self):
"""
Create dataset using tf.dataset API
:return: dataset : dataset object
"""
# Set data path lists
im_paths = self.im_paths
sample_ids = self.sample_ids
# Create dataset using API
dataset = tf.data.Dataset.from_tensor_slices((im_paths, sample_ids))
# Create sample dictionary
self.dataset = dataset.map(
self.create_sample_dict,
num_parallel_calls=10)
return self.dataset
def create_sample_dict(
self,
im_path,
sample_id):
"""
Creates sample dictionary for a single sample
:param im_path: left image path
:param sample_id: ground truth sample id
:return: sample_dict: Sample dictionary filled with input tensors
"""
with tf.name_scope('input_data'):
# Read image
image_as_string = tf.io.read_file(im_path)
image = tf.image.decode_jpeg(image_as_string, channels=3)
image = tf.cast(image, tf.float32)
image_norm = dataset_utils.mean_image_subtraction(
image, self._MEANS_DICT[self.im_normalization])
# Flip channels to BGR since pretrained weights use this
# configuration.
channels = tf.unstack(image_norm, axis=-1)
image_norm = tf.stack(
[channels[2], channels[1], channels[0]], axis=-1)
boxes_class_gt, boxes_2d_gt, no_gt = tf.py_function(
self._read_labels, [sample_id], [
tf.float32, tf.float32, tf.bool])
# Create_sample_dict
sample_dict = dict()
sample_dict.update({constants.IMAGE_NORMALIZED_KEY: image_norm})
sample_dict.update(
{constants.ORIGINAL_IM_SIZE_KEY: tf.shape(image)})
# Create prior anchors and anchor targets
generator = FpnAnchorGenerator(self.anchor_gen_config)
boxes_2d_gt_vuhw = box_utils.vuvu_to_vuhw(boxes_2d_gt)
anchors_list = []
anchors_class_target_list = []
anchors_box_target_list = []
anchors_positive_mask_list = []
anchors_negative_mask_list = []
for layer_number in self.anchor_gen_config['layers']:
anchors = generator.generate_anchors(
tf.shape(image_norm), layer_number)
anchors_list.append(anchors)
if not self.is_testing:
anchor_corners = box_utils.vuhw_to_vuvu(anchors)
ious = box_utils.bbox_iou_vuvu(anchor_corners, boxes_2d_gt)
positive_anchor_mask, negative_anchor_mask, max_ious = generator.positive_negative_batching(
ious, self.anchor_gen_config['min_positive_iou'],
self.anchor_gen_config['max_negative_iou'])
anchors_positive_mask_list.append(positive_anchor_mask)
anchors_negative_mask_list.append(negative_anchor_mask)
anchor_box_targets, anchor_class_targets = generator.generate_anchor_targets(
anchors, boxes_2d_gt_vuhw, boxes_class_gt, max_ious,
positive_anchor_mask)
anchors_box_target_list.append(anchor_box_targets)
anchors_class_target_list.append(anchor_class_targets)
# Sample dict is stacked from p3 --> p7, this is essential to
# memorize for stacking the predictions later on
sample_dict.update(
{constants.ANCHORS_KEY: tf.concat(anchors_list, axis=0)})
if not self.is_testing:
sample_dict.update({constants.ANCHORS_BOX_TARGETS_KEY: tf.concat(
anchors_box_target_list, axis=0),
constants.ANCHORS_CLASS_TARGETS_KEY: tf.concat(
anchors_class_target_list, axis=0),
constants.POSITIVE_ANCHORS_MASK_KEY: tf.concat(
anchors_positive_mask_list, axis=0),
constants.NEGATIVE_ANCHOR_MASK_KEY: tf.concat(
anchors_negative_mask_list, axis=0)})
return sample_dict
def _read_labels(self, sample_id):
"""
Reads ground truth labels and parses them into one hot class representation and groundtruth 2D bounding box.
"""
sample_id = sample_id.numpy()
# Extract the list
no_gt = False
categories = self.training_data_config['categories']
boxes_class_gt = []
sample_id = sample_id.decode("utf-8")
frame_labels = [label for label in self.labels if
label['name'] == sample_id and label[
'category'] in categories]
boxes_2d_gt = np.array([[label['bbox'][1],
label['bbox'][0],
label['bbox'][3],
label['bbox'][2]] for label in frame_labels])
categories_gt = [label['category'] for label in frame_labels]
if boxes_2d_gt.size == 0:
cat_one_hot = [0 for e in range(len(categories) + 1)]
boxes_2d_gt = np.array([0.0, 0.0, 1.0, 1.0])
boxes_class_gt.append(cat_one_hot)
no_gt = True
else:
for elem in categories_gt:
cat_one_hot = [0 for e in range(len(categories) + 1)]
cat_idx = categories.index(elem.lower())
cat_one_hot[cat_idx] = 1
boxes_class_gt.append(cat_one_hot)
# one-hot representation dependent on config file
if len(boxes_2d_gt.shape) == 1:
boxes_2d_gt = np.expand_dims(boxes_2d_gt, axis=0)
return [np.array(boxes_class_gt).astype(np.float32),
np.array(boxes_2d_gt).astype(np.float32),
no_gt]
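# A minimal sketch (not part of the original pipeline) of the one-hot layout
# produced by _read_labels above: one slot per configured category plus one
# trailing spare slot, so each vector has len(categories) + 1 entries; frames
# without ground truth emit an all-zero vector and a dummy [0, 0, 1, 1] box.
# The category names below are hypothetical.
if __name__ == '__main__':
    categories = ['car', 'pedestrian']
    cat_one_hot = [0 for _ in range(len(categories) + 1)]
    cat_one_hot[categories.index('pedestrian')] = 1
    print(cat_one_hot)  # [0, 1, 0]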
|
1640719
|
from py_algorithms.data_structures import new_max_heap
class TestHeap:
def test_properties(self):
ds = new_max_heap()
assert ds.size == 0
assert ds.is_empty is True
def test_push(self):
ds = new_max_heap()
ds.push(1, 11)
ds.push(2, 12)
ds.push(3, 13)
assert ds.is_empty is False
assert ds.next_key == 3
assert ds.contains_key(2) is True
assert ds.contains_key('the key') is False
def test_pop(self):
ds = new_max_heap()
ds.push(1, 11)
ds.push(2, 12)
ds.push(3, 13)
assert ds.pop() == 13
assert ds.pop() == 12
assert ds.push(-1, 10)
assert ds.next == 11
assert ds.push(100, 99)
assert ds.next == 99
assert ds.contains_key(100) is True
assert ds.contains_key(101) is False
|
1640734
|
from __future__ import unicode_literals
try:
from http.cookiejar import CookieJar
except ImportError:
from cookielib import CookieJar
import bottle
from webtest import TestApp
from bottle_utils import ajax
bottle.debug()
app = bottle.default_app()
app.config.update({str('csrf.secret'): 'foo'})
# Test handler
@app.get('/ajax_only')
@ajax.ajax_only
def ajax_only_handler():
return 'success'
test_app = TestApp(app, cookiejar=CookieJar())
|
1640795
|
import os
import yaml
from flask import current_app
def load_fixture(fixture_name) -> dict:
    fixture_path: str = os.sep.join(
        (current_app.config['PROJECT_PATH'], 'fixtures', fixture_name),
    )
    with open(fixture_path) as f_stream:
        return yaml.safe_load(f_stream)
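# A hedged usage sketch: assuming the Flask app config defines PROJECT_PATH
# and a file <PROJECT_PATH>/fixtures/users.yaml exists ('users.yaml' is a
# hypothetical name), the fixture can be loaded inside an app context:
#
#     with app.app_context():
#         users = load_fixture('users.yaml')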
|
1640804
|
from common import *
import array
import sys
import math
from ctypes import create_string_buffer
# Useful Stuff
u32 = struct.Struct('>I')
u16 = struct.Struct('>H')
zero32 = u32.pack(0)
def RGBA8Encode(tex):
tex = tex.toImage()
w, h = tex.width(), tex.height()
padW = (w + 3) & ~3
padH = (h + 3) & ~3
destBuffer = create_string_buffer(padW * padH * 4)
shortstruct = struct.Struct('>H')
sspack = shortstruct.pack_into
offset = 0
for ytile in xrange(0, padH, 4):
for xtile in xrange(0, padW, 4):
for ypixel in xrange(ytile, ytile + 4):
for xpixel in xrange(xtile, xtile + 4):
if xpixel >= w or ypixel >= h:
sspack(destBuffer, offset, 0)
sspack(destBuffer, offset+32, 0)
else:
pixel = tex.pixel(xpixel, ypixel)
sspack(destBuffer, offset, pixel>>16)
sspack(destBuffer, offset+32, pixel&0xFFFF)
offset += 2
offset += 32
return destBuffer.raw
def RGB5A3Encode(tex):
tex = tex.toImage()
w, h = tex.width(), tex.height()
padW = (w + 3) & ~3
padH = (h + 3) & ~3
destBuffer = create_string_buffer(padW * padH * 2)
shortstruct = struct.Struct('>H')
sspack = shortstruct.pack_into
offset = 0
for ytile in xrange(0, padH, 4):
for xtile in xrange(0, padW, 4):
for ypixel in xrange(ytile, ytile + 4):
for xpixel in xrange(xtile, xtile + 4):
if xpixel >= w or ypixel >= h:
rgbDAT = 0x7FFF
else:
pixel = tex.pixel(xpixel, ypixel)
a = pixel >> 24
r = (pixel >> 16) & 0xFF
g = (pixel >> 8) & 0xFF
b = pixel & 0xFF
if a < 245: #RGB4A3
alpha = a/32
red = r/16
green = g/16
blue = b/16
rgbDAT = (blue) | (green << 4) | (red << 8) | (alpha << 12)
else: # RGB555
red = r/8
green = g/8
blue = b/8
                            rgbDAT = (blue) | (green << 5) | (red << 10) | (0x8000) # 1rrrrrgggggbbbbb (top bit set marks RGB555)
sspack(destBuffer, offset, rgbDAT)
offset += 2
return destBuffer.raw
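# Worked example (a sketch, not called by the exporter): packing the opaque
# ARGB pixel 0xFF102030 through the RGB555 branch above:
#   r, g, b = 0x10, 0x20, 0x30  ->  red, green, blue = 2, 4, 6  (after / 8)
#   rgbDAT = 6 | (4 << 5) | (2 << 10) | 0x8000 = 0x8886
# A pixel with a < 245 takes the RGB4A3 branch instead: a 3-bit alpha in
# bits 12-14 with the top bit clear, i.e. 0aaarrrrggggbbbb.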
class KPMapExporter:
class LayerExporter:
def __init__(self, layer):
self.layer = layer
class TileLayerExporter(LayerExporter):
def buildSectors(self, sectors, indices):
# we'll use the cache held by the layer: why reinvent the wheel?
layer = self.layer
layer.updateCache()
cache = layer.cache
# first off, get all the info and figure out the sector bounds
layerX, layerY = layer.cacheBasePos
layerWidth, layerHeight = layer.cacheSize
sectorLeft = layerX / 16
sectorTop = layerY / 16
sectorRight = (layerX + layerWidth - 1) / 16
sectorBottom = (layerY + layerHeight - 1) / 16
rawSectors = []
for i in xrange(sectorBottom - sectorTop + 1):
rawSectors.append([None for j in xrange(sectorRight - sectorLeft + 1)])
tileset = KP.tileset(layer.tileset)
optMappings = tileset.optMappings
# copy every tile index over
for srcY in xrange(layerHeight):
srcRow = cache[srcY]
worldY = srcY + layerY
sectorY = worldY / 16
destY = worldY % 16
destRow = rawSectors[sectorY - sectorTop]
for srcX in xrange(layerWidth):
worldX = srcX + layerX
sectorX = worldX / 16
destX = worldX % 16
tile = srcRow[srcX]
if tile == -1: continue
tile = optMappings[tile]
if tile == -1: continue
destSector = destRow[sectorX - sectorLeft]
if destSector is None:
destSector = [[-1 for j in xrange(16)] for i in xrange(16)]
destRow[sectorX - sectorLeft] = destSector
destSector[destY][destX] = tile
# now add the created sectors to the data
count = reduce(lambda x,y: x+len(y), rawSectors, 0)
sectorMap = [0xFFFF for i in xrange(count)]
destIdx = 0
for srcRow in rawSectors:
for sector in srcRow:
if sector is not None:
# see if it's a duplicate or not
sectorKey = '|'.join(map(lambda x: ','.join(map(str, x)), sector))
try:
sectorMap[destIdx] = indices[sectorKey]
except KeyError:
indices[sectorKey] = len(sectors)
sectorMap[destIdx] = len(sectors)
sectors.append(sector)
destIdx += 1
self.sectorBounds = (sectorLeft, sectorTop, sectorRight, sectorBottom)
self.realBounds = (layerX, layerY, layerX+layerWidth-1, layerY+layerHeight-1)
self.sectorMap = sectorMap
class DoodadLayerExporter(LayerExporter):
pass
class PathLayerExporter(LayerExporter):
pass
def __init__(self, mapObj):
self.map = mapObj
self.tileAssociates = {}
self.doodadAssociates = {}
output = []
for layer in self.map.layers:
if isinstance(layer, KPTileLayer) and len(layer.objects) > 0:
output.append(KPMapExporter.TileLayerExporter(layer))
elif isinstance(layer, KPDoodadLayer) and len(layer.objects) > 0:
output.append(KPMapExporter.DoodadLayerExporter(layer))
elif isinstance(layer, KPPathLayer):
output.append(KPMapExporter.PathLayerExporter(layer))
for iLayer in self.map.associateLayers:
if len(iLayer.objects) > 0:
tl = KPMapExporter.TileLayerExporter(iLayer)
self.tileAssociates[iLayer.associate] = tl
output.append(tl)
if len(iLayer.doodads) > 0:
dl = KPMapExporter.DoodadLayerExporter(iLayer)
self.doodadAssociates[iLayer.associate] = dl
output.append(dl)
self.layers = output
def build(self):
requiredFixUps = []
stringsToAdd = set()
textures = set()
texInfo = set()
tilesets = set()
offsets = {None: 0xFFFFFFFF}
# first off, build the sectors
sectors = []
sectorIndices = {}
for layer in self.layers:
if isinstance(layer, self.TileLayerExporter):
layer.buildSectors(sectors, sectorIndices)
sectorData = self._packSectorData(sectors)
# now that we've got that, we can pack the first part of the file
version = 2
headerSize = 0x2C
tsInfoOffsetInHeader = 0x10
data = bytearray(struct.pack('>4sIIIIIIIIII', 'KP_m', version, len(self.layers), headerSize + len(sectorData), 0, 0, 0, headerSize, 0, 0, len(self.map.worlds)))
requiredFixUps.append((0x18, 'UnlockBytecode'))
requiredFixUps.append((0x20, self.map.bgName))
requiredFixUps.append((0x24, '_WorldDefList'))
stringsToAdd.add(self.map.bgName)
# list of layer pointers goes here.. or will, later
data += sectorData
for layer in self.layers:
requiredFixUps.append((len(data), layer))
data += zero32
# now build the layers
for eLayer in self.layers:
layer = eLayer.layer
offsets[eLayer] = len(data)
offsets[layer] = len(data)
if isinstance(eLayer, self.TileLayerExporter):
data += u32.pack(0)
data += u32.pack(0xFF000000)
# tileset name
tileset = '/Maps/Texture/%s.bin' % layer.tileset
tilesets.add(tileset)
stringsToAdd.add(tileset)
requiredFixUps.append((len(data), ('tileset', tileset)))
data += zero32
# sector info
data += struct.pack('>IIII', *eLayer.sectorBounds)
data += struct.pack('>IIII', *eLayer.realBounds)
data += ''.join(map(u16.pack, eLayer.sectorMap))
pad = (4 - (len(data) & 3)) % 4
data += ('\0' * pad)
elif isinstance(eLayer, self.DoodadLayerExporter):
data += u32.pack(1)
data += u32.pack(0xFF000000)
# doodad list
try:
doodadList = layer.doodads
except AttributeError:
doodadList = layer.objects
data += u32.pack(len(doodadList))
for doodad in doodadList:
requiredFixUps.append((len(data), doodad))
data += zero32
# now pack them ...
for doodad in doodadList:
offsets[doodad] = len(data)
x, y = doodad.position
w, h = doodad.size
data += struct.pack('>fffffii', x, y, w, h, doodad.angle, 0, len(doodad.animations))
is_rgba8 = doodad.source[0].startswith('Cloud') or \
doodad.source[0].startswith('Tiling_Cloud')
texInfo.add((doodad.source[0], doodad.source[1].height() * doodad.source[1].width() * (4 if is_rgba8 else 2)))
texture = doodad.source[1]
textures.add((is_rgba8, texture))
requiredFixUps.append((len(data) - 8, texture))
for anim in doodad.animations:
rLoop, rCurve, rFrames, rType, rStart, rEnd, rDelay, rDelayOffset = anim
loopid = self.ANIM_LOOPS.index(rLoop)
curveid = self.ANIM_CURVES.index(rCurve)
typeid = self.ANIM_TYPES.index(rType)
data += struct.pack('>iiiiiiiiii', loopid, curveid, rFrames, typeid, rStart, rEnd, rDelay, rDelayOffset, 0, 0)
elif isinstance(eLayer, self.PathLayerExporter):
data += u32.pack(2)
data += zero32
# before we do anything, build the list of secret levels
# we'll need that
levelsWithSecrets = set()
for path in layer.paths:
if hasattr(path, 'unlockSpec') and path.unlockSpec is not None:
self._checkSpecForSecrets(path.unlockSpec, levelsWithSecrets)
# lists
current = len(data)
nodeArray = current + 16
pathArray = nodeArray + (len(layer.nodes) * 4)
data += struct.pack('>IIII', len(layer.nodes), nodeArray, len(layer.paths), pathArray)
for node in layer.nodes:
requiredFixUps.append((len(data), node))
data += zero32
for path in layer.paths:
requiredFixUps.append((len(data), path))
data += zero32
# now do the actual structs
for node in layer.nodes:
offsets[node] = len(data)
x, y = node.position
current = len(data)
data += struct.pack('>hhiiiiii', x+12, y+12, 0, 0, 0, 0, 0, 0)
# figure out the exits by direction
exits = [None, None, None, None]
left, right, up, down = 0, 1, 2, 3
for exit in node.exits:
start, end = exit._startNodeRef(), exit._endNodeRef()
opposite = end if (start == node) else start
oX, oY = opposite.position
deltaX, deltaY = oX-x, oY-y
angle = math.degrees(math.atan2(deltaX, deltaY)) % 360
print "Here: %d,%d Opposite %d,%d Delta: %d,%d Angle: %d" % (x,y,oX,oY,deltaX,deltaY,angle)
# Left = 270, Right = 90, Up = 180, Down = 0
if angle >= 225 and angle <= 315:
direction = left
elif angle >= 45 and angle <= 135:
direction = right
elif angle > 135 and angle < 225:
direction = up
elif angle > 315 or angle < 45:
direction = down
if exits[direction]:
print "Conflicting directions!"
while exits[direction]:
direction = (direction + 1) % 4
exits[direction] = exit
requiredFixUps.append((current+4, exits[0]))
requiredFixUps.append((current+8, exits[1]))
requiredFixUps.append((current+12, exits[2]))
requiredFixUps.append((current+16, exits[3]))
if node in self.tileAssociates:
requiredFixUps.append((current+20, self.tileAssociates[node]))
if node in self.doodadAssociates:
requiredFixUps.append((current+24, self.doodadAssociates[node]))
if node.isStop():
if node.level:
level1, level2 = node.level
hasSecret = (1 if ((level1,level2) in levelsWithSecrets) else 0)
# i i i b b b b: node type, isNew, Extra pointer, world, level, hasSecret, padding
data += struct.pack('>iiibbbb', 2, 0, 0, level1, level2, hasSecret, 0)
elif node.mapChange:
data += u32.pack(3) # node type
destMap = node.mapChange
requiredFixUps.append((len(data)+8, destMap))
stringsToAdd.add(destMap)
# i i i b b b b: isNew, Extra pointer, dest map, map ID, foreign ID, transition, padding
data += struct.pack('>iiibbbb', 0, 0, 0, node.mapID, node.foreignID, node.transition, 0)
else:
data += u32.pack(1) # node type
data += zero32 # isNew
data += zero32 # Extra pointer
                elif node.worldDefID is not None:
# i i i b b b b: node type, isNew, Extra pointer, world def ID, padding
data += struct.pack('>iiibbbb', 4, 0, 0, node.worldDefID, 0, 0, 0)
else:
data += zero32 # node type
data += zero32 # isNew
data += zero32 # Extra pointer
pathIndices = {}
for i, path in enumerate(layer.paths):
pathIndices[path] = i
offsets[path] = len(data)
start = path._startNodeRef()
end = path._endNodeRef()
current = len(data)
requiredFixUps.append((current, start))
requiredFixUps.append((current+4, end))
if path in self.tileAssociates:
requiredFixUps.append((current+8, self.tileAssociates[path]))
if path in self.doodadAssociates:
requiredFixUps.append((current+12, self.doodadAssociates[path]))
data += (zero32 * 4)
available = 0
if (not hasattr(path, 'unlockSpec')) or path.unlockSpec is None:
available = 3
data += struct.pack('>bbbbfi', available, 0, 0, 0, path.movementSpeed, path.animation)
# align it to 4 bytes before we write the world defs
        padding = ((len(data) + 3) & ~3) - len(data)
data += ('\0' * padding)
offsets['_WorldDefList'] = len(data)
for world in self.map.worlds:
requiredFixUps.append((len(data), world.name))
stringsToAdd.add(world.name)
data += zero32
fst1,fst2 = world.fsTextColours
fsh1,fsh2 = world.fsHintColours
ht1,ht2 = world.hudTextColours
htf = world.hudHintTransform
try:
convertedWorldID = int(world.worldID)
except ValueError:
convertedWorldID = ord(world.worldID) - ord('A') + 10
parseCrap = world.titleScreenID.split('-')
tsW = int(parseCrap[0])
tsL = int(parseCrap[1])
data += struct.pack('>BBBB BBBB BBBB BBBB BBBB BBBB hbb BBB BB BBB',
fst1[0],fst1[1],fst1[2],fst1[3],
fst2[0],fst2[1],fst2[2],fst2[3],
fsh1[0],fsh1[1],fsh1[2],fsh1[3],
fsh2[0],fsh2[1],fsh2[2],fsh2[3],
ht1[0],ht1[1],ht1[2],ht1[3],
ht2[0],ht2[1],ht2[2],ht2[3],
htf[0],htf[1],htf[2],
world.uniqueKey, world.musicTrackID,
convertedWorldID,
tsW - 1, tsL - 1,
0, 0, 0
)
# now that we're almost done... pack the strings
for string in stringsToAdd:
offsets[string] = len(data)
data += str(string)
data += '\0'
# textures
texA = sorted(texInfo, key=lambda x: x[1])
s = 0
d = 0
for texItem in texA:
print texItem[1]/1000, "kb:", texItem[0]
s += texItem[1]
d += 1
if d == 10:
d = 0
print "So far:", s/1000, "kb"
print "Total:", s/1000, "kb"
texPadding = ((len(data) + 0x1F) & ~0x1F) - len(data)
data += ('\0' * texPadding)
texHeaderStartOffset = len(data)
texDataStartOffset = texHeaderStartOffset + ((len(textures) + len(tilesets)) * 0x20)
currentTexOffset = texDataStartOffset
imageData = []
struct.pack_into('>ii', data, tsInfoOffsetInHeader, len(tilesets), len(data))
for setname in tilesets:
offsets[('tileset', setname)] = len(data)
if 'RGBA8' in setname:
data += self._buildGXTexObjRGBA8(896, 448, offsets[setname])
else:
data += self._buildGXTexObjRGB5A3(896, 448, offsets[setname])
for is_rgba8, tex in textures:
offsets[tex] = len(data)
if is_rgba8:
data += self._buildGXTexObjRGBA8(tex.width(), tex.height(), currentTexOffset)
converted = RGBA8Encode(tex)
else:
data += self._buildGXTexObjRGB5A3(tex.width(), tex.height(), currentTexOffset)
converted = RGB5A3Encode(tex)
imageData.append(converted)
currentTexOffset += len(converted)
for piece in imageData:
data += piece
# at the end comes the unlock bytecode
offsets['UnlockBytecode'] = len(data)
# first off, build a map of unlocks
unlockLists = {}
from unlock import stringifyUnlockData
for path in self.map.pathLayer.paths:
if not hasattr(path, 'unlockSpec'):
continue
spec = path.unlockSpec
if spec is None:
continue
# we stringify it first because the specs become lists when
# imported from the kpmap (not tuples) and those can't be
# used as dict keys
spec = stringifyUnlockData(spec)
try:
lst = unlockLists[spec]
except KeyError:
lst = []
unlockLists[spec] = lst
lst.append(path)
# now produce the thing
from unlock import parseUnlockText, packUnlockSpec
for spec, lst in unlockLists.iteritems():
data += packUnlockSpec(parseUnlockText(spec))
data += chr(len(lst))
for p in lst:
data += u16.pack(pathIndices[p])
data += chr(0)
# to finish up, correct every offset
for offset, target in requiredFixUps:
u32.pack_into(data, offset, offsets[target])
return data
ANIM_LOOPS = ['Contiguous', 'Loop', 'Reversible Loop']
ANIM_CURVES = ['Linear', 'Sinusoidial', 'Cosinoidial']
ANIM_TYPES = ['X Position', 'Y Position', 'Angle', 'X Scale', 'Y Scale', 'Opacity']
def _checkSpecForSecrets(self, spec, levelSet):
kind = spec[0]
if kind == 'level':
k, one, two, secret = spec
if secret:
levelSet.add((one, two))
elif kind == 'and' or kind == 'or':
for term in spec[1]:
self._checkSpecForSecrets(term, levelSet)
def _buildGXTexObjRGB5A3(self, width, height, imgOffset):
# Format: RGB5A3 (5)
# Wrap: CLAMP (0)
return struct.pack('>IIIIIIIHH',
0x90, 0,
(0x500000 | ((height - 1) << 10) | (width - 1)),
0x10000000 + imgOffset, # (imgptr >> 5)
0, 0, 0,
(((width + 3) / 4) * ((height + 3) / 4)) & 0x7FFF,
0x0202
)
def _buildGXTexObjRGBA8(self, width, height, imgOffset):
# Format: RGBA8 (6)
# Wrap: CLAMP (0)
return struct.pack('>IIIIIIIHH',
0x90, 0,
(0x600000 | ((height - 1) << 10) | (width - 1)),
0x10000000 + imgOffset, # (imgptr >> 5)
0, 0, 0,
(((width + 3) / 4) * ((height + 3) / 4)) & 0x7FFF,
0x0202
)
def _packSectorData(self, sectors):
rowStruct = struct.Struct('>16h')
output = []
for sector in sectors:
for row in sector:
output.append(rowStruct.pack(*row))
return ''.join(output)
|
1640805
|
import click
from testplan.cli.utils.actions import ProcessResultAction
from testplan.cli.utils.command_list import CommandList
from testplan.exporters.testing import (
JSONExporter,
WebServerExporter,
PDFExporter,
)
from testplan.report import TestReport
from testplan.report.testing.styles import StyleArg
from testplan.common.utils import logger
writer_commands = CommandList()
class ToJsonAction(ProcessResultAction):
def __init__(self, output: str):
self.output = output
def __call__(self, result: TestReport) -> TestReport:
exporter = JSONExporter(json_path=self.output)
exporter.export(result)
return result
@writer_commands.command(name="tojson")
@click.argument("output", type=click.Path())
def to_json(output):
"""
write a Testplan json result
"""
return ToJsonAction(output=output)
class ToPDFAction(ProcessResultAction, logger.Loggable):
def __init__(self, filename: str, style: StyleArg):
logger.Loggable.__init__(self) # Enable logging via self.logger
self.filename = filename
self.style = style
def __call__(self, result: TestReport) -> TestReport:
exporter = PDFExporter(
pdf_path=self.filename, pdf_style=self.style.value
)
exporter.create_pdf(result)
self.logger.test_info(f"PDF written to {self.filename}")
return result
@writer_commands.command(name="topdf")
@click.argument("filename", required=True, type=click.Path())
@click.option(
"--pdf-style",
default="summary",
type=click.Choice(
["result", "summary", "extended", "detailed"], case_sensitive=False
),
help="""result - only the result of the run will be shown\n
summary - test details will be shown\n
extended - passing tests will include testcase detail, while failing tests will include assertion detail\n
detailed - passing tests will include assertion detail, while failing tests will include assertion detail\n
""",
)
def to_pdf(filename, pdf_style):
"""
write a Testplan pdf result
"""
if pdf_style == "result":
return ToPDFAction(filename=filename, style=StyleArg.RESULT_ONLY)
elif pdf_style == "summary":
return ToPDFAction(filename=filename, style=StyleArg.SUMMARY)
elif pdf_style == "extended":
return ToPDFAction(filename=filename, style=StyleArg.EXTENDED_SUMMARY)
elif pdf_style == "detailed":
return ToPDFAction(filename=filename, style=StyleArg.DETAILED)
class DisplayAction(ProcessResultAction):
def __init__(self, port: int):
self.port = port
def __call__(self, result: TestReport) -> TestReport:
exporter = WebServerExporter(ui_port=self.port)
exporter.export(result)
exporter.wait_for_kb_interrupt()
return result
@writer_commands.command(name="display")
@click.option(
"--port",
"-p",
type=int,
default=0,
help="the local port the webserver is using",
)
def display(port):
"""
serve the result through a local webui.
"""
return DisplayAction(port)
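# A hedged usage sketch (not part of the CLI wiring above): each writer action
# maps a TestReport to a TestReport, so given a `report` obtained elsewhere
# the actions can be chained programmatically:
#
#     report = ToJsonAction(output="report.json")(report)
#     report = ToPDFAction(filename="report.pdf", style=StyleArg.SUMMARY)(report)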
|
1640913
|
from pyngrok import ngrok
import pychromecast
import threading
import socket
import time
import os
from urllib.parse import quote
global http_tunnel
http_tunnel = None
hostname = socket.gethostname()
YourPrivateIpAddress = socket.gethostbyname(hostname)
print(YourPrivateIpAddress)
name = "googlecontroller"
__all__ = ['GoogleAssistant']
def httpserver(path):
    os.chdir(path)
    if os.name == 'nt':
        os.system('cmd /k "python -m http.server 80 --bind 127.0.0.1"')
    else:
        os.system('python3 -m http.server 80 --bind 127.0.0.1')
class GoogleAssistant:
    def __init__(self, host=None):
        try:
            if host is not None:
                self.cc = pychromecast.Chromecast(host)
            else:
                print('Pass the device IP as host, e.g.: host = "192.168.0.12"; home = GoogleAssistant(host=host)')
        except Exception:
            print("Some sort of error occurred, most likely a bad connection or IP address")
def play(self, url, ignore = False, contenttype = 'audio/mp3'):
if self.cc.media_controller.status.player_state != "PLAYING" or ignore == True:
self.cc.wait()
media = self.cc.media_controller
media.play_media(url, contenttype)
media.block_until_active()
    def serve_media(self, media, folder, opentunnel=1):
        global http_tunnel
        if opentunnel:
            serverhttp = threading.Thread(target=httpserver, args=(folder,))
            serverhttp.start()
            http_tunnel = ngrok.connect(bind_tls=True)
            time.sleep(3)
            print("You are all set now!")
        ngrokurl = http_tunnel.public_url + "/" + str(media)
        print(ngrokurl)
        self.play(ngrokurl)
        time.sleep(1)
    def say(self, text, speed=1, ignore=False, lang='en-US'):
        speed = str(speed)
        url = u"https://translate.google.com/translate_tts?ie=UTF-8&q=" + quote(text) + "%21&tl=" + lang + "&ttsspeed=" + speed + "&total=1&idx=0&client=tw-ob&textlen=14&tk=594228.1040269"
        self.play(url, ignore)
def volume(self, volumelevel):
volumelevel = volumelevel / 100
self.cc.set_volume(volumelevel)
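# A minimal usage sketch; 192.168.0.10 is a placeholder for the IP address of
# a Chromecast/Google Home device on your local network.
if __name__ == '__main__':
    home = GoogleAssistant(host="192.168.0.10")
    home.volume(40)
    home.say("Hello from Python", lang="en-US")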
|
1640921
|
import pytest
from swarmcg.simulations import get_settings
def test_get_settings_fail(ns_opt):
# when:
ns = ns_opt(sim_type="NO_VALID")
print(ns.sim_type)
# then:
with pytest.raises(ValueError):
_ = get_settings(ns)
def test_get_settings_optimal(ns_opt):
    # given:
    ns = ns_opt(sim_type="OPTIMAL", cg_itp={"nb_constraints": 2, "nb_bonds": 2, "nb_angles": 2, "nb_dihedrals": 2})
    # when:
    sim_types, opti_cycles, sim_cycles, particle_setter = get_settings(ns)
    # then:
assert sim_cycles == [0, 1, 2]
assert particle_setter([1, 2, 3, 4]) == 4
assert opti_cycles == [["constraint", "bond", "angle"], ["angle", "dihedral"], ["constraint", "bond", "angle", "dihedral"]]
def test_get_settings_test(ns_opt):
    # given:
    ns = ns_opt(sim_type="TEST")
    # when:
    sim_types, opti_cycles, sim_cycles, particle_setter = get_settings(ns)
    # then:
assert sim_cycles == [0, 1, 2]
assert particle_setter([1, 2, 3, 4]) == 2
assert particle_setter(list(range(50))) == 2
assert opti_cycles == [["constraint", "bond", "angle"], ["dihedral"], ["constraint", "bond", "angle", "dihedral"]]
|
1640926
|
import sys
import os
import argparse
def gen_txt(dir_path):
    dataname = "datasets"
    # Generate a .txt listing file for each sub-directory of the dataset
    txt_dir = os.path.join(dir_path, dataname)
    dirs = os.listdir(txt_dir)
    print(dirs)
    for d in dirs:
        txt_file = d + '.txt'
        with open(os.path.join(txt_dir, txt_file), 'w') as f:
            for fil in os.listdir(os.path.join(txt_dir, d)):
                f.write(d + '/' + fil + '\n')
    sys.stderr.write("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--output_dir', type=str, default="data", help='Path to the output data directory.')
args = parser.parse_args()
gen_txt(args.output_dir)
|
1640942
|
import os
import glob
from tqdm import tqdm
import subprocess
import shutil
# Replace these with your own folders
dataset_video_root_path = '/p300/tpami/iPER_ICCV_TEST/iPER_256_video_release'
save_images_root_path = '/p300/tpami/iPER_ICCV_TEST/images'
def extract_one_video(video_path, save_dir):
os.makedirs(save_dir, exist_ok=True)
# os.system('ffmpeg -i %s -start_number 0 %s/frame%%08d.png > /dev/null 2>&1' % (video_path, save_dir))
cmd = [
"ffmpeg",
"-i", video_path,
"-start_number", "0",
"{save_dir}/frame_%08d.png".format(save_dir=save_dir),
"-loglevel", "quiet"
]
print(" ".join(cmd))
subprocess.run(cmd)
    # rename frames so they are compatible with the his_evaluators protocol
images_names = os.listdir(save_dir)
images_names.sort()
num_images = len(images_names)
num_digits = len(str(num_images))
image_name_template = '{:0>%d}.jpg' % num_digits
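    # e.g. 250 extracted frames -> num_digits == 3, so frames are renamed to
    # '000.jpg', '001.jpg', ..., '249.jpg' via image_name_template.format(i)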
for i, img_name in enumerate(images_names):
src_img_path = os.path.join(save_dir, img_name)
tgt_img_path = os.path.join(save_dir, image_name_template.format(i))
shutil.move(src_img_path, tgt_img_path)
# print(src_img_path, tgt_img_path)
def main():
global dataset_video_root_path, save_images_root_path
video_path_list = sorted(glob.glob('%s/*.mp4' % dataset_video_root_path))
for video_path in tqdm(video_path_list):
video_name = os.path.split(video_path)[-1][:-4]
actor_id, cloth_id, action_type = video_name.split('_')
video_images_dir = os.path.join(save_images_root_path, actor_id, cloth_id, action_type)
extract_one_video(video_path, video_images_dir)
if __name__ == '__main__':
main()
|
1640974
|
from parsr.query.boolean import TRUE, FALSE, pred
def test_true():
assert TRUE(None)
def test_false():
assert not FALSE(None)
def test_bad_pred():
def boom(v):
raise Exception()
boom = pred(boom)
assert not boom(None)
def test_caseless_predicate():
def is_blue(v):
return v == "blue"
is_blue = pred(is_blue, ignore_case=True)
assert is_blue("BLUE")
assert not is_blue(None)
|
1640999
|
import logging
import importlib
from uuid import UUID
from sqlalchemy.ext.asyncio import AsyncSession
from starlette_context import context
from api.core.event_bus import Event
from api.core.profile import Profile
from api.db.errors import DoesNotExist
from api.db.models.tenant_workflow import (
TenantWorkflowRead,
TenantWorkflowUpdate,
)
from api.db.repositories.tenant_workflows import TenantWorkflowsRepository
from api.endpoints.models.tenant_workflow import (
TenantWorkflowTypeType,
TenantWorkflowStateType,
)
from api.services.tenant_workflow_notifier import TenantWorkflowNotifier
logger = logging.getLogger(__name__)
def instantiate_workflow_class(workflow_type: TenantWorkflowTypeType):
"""Create an instance of a workflow class."""
module_name, class_name = workflow_type.rsplit(".", 1)
WorkflowClass = getattr(importlib.import_module(module_name), class_name)
return WorkflowClass
def instantiate_workflow_class_instance(
db: AsyncSession, tenant_workflow: TenantWorkflowRead
):
"""Create an instance of a workflow class."""
workflow_type = tenant_workflow.workflow_type
WorkflowClass = instantiate_workflow_class(workflow_type)
instance = WorkflowClass(db, tenant_workflow)
return instance
class BaseWorkflow:
"""Base class for workflows."""
@classmethod
async def handle_workflow_events(cls, profile: Profile, event: Event):
raise NotImplementedError()
@classmethod
async def find_workflow_id(cls, profile: Profile, webhook_message: dict):
raise NotImplementedError()
@classmethod
async def next_workflow_step(
cls,
db: AsyncSession,
workflow_id: UUID = None,
tenant_workflow: TenantWorkflowRead = None,
webhook_message: dict = None,
with_error: bool = False,
with_error_msg: str = None,
) -> TenantWorkflowRead:
"""Poke the workflow to run the next step."""
workflow_repo = TenantWorkflowsRepository(db_session=db)
if not tenant_workflow:
if workflow_id:
tenant_workflow = await workflow_repo.get_by_id(workflow_id)
if not tenant_workflow:
raise DoesNotExist(f"Workflow not found for {workflow_id}")
        # check if our tenant is in context
        context_bearer_token = context.get("TENANT_WALLET_TOKEN")
        if context_bearer_token != tenant_workflow.wallet_bearer_token:
            context["TENANT_WALLET_TOKEN"] = tenant_workflow.wallet_bearer_token
workflow = instantiate_workflow_class_instance(db, tenant_workflow)
if with_error:
# ping workflow to execute next step
tenant_workflow = await workflow.run_cancel_step(
webhook_message=webhook_message, error_msg=with_error_msg
)
else:
# ping workflow to execute next step
tenant_workflow = await workflow.run_step(webhook_message=webhook_message)
return tenant_workflow
def __init__(self, db: AsyncSession, tenant_workflow: TenantWorkflowRead):
"""
        Initialize a new `BaseWorkflow` instance.
"""
self._db = db
self._tenant_workflow = tenant_workflow
self._workflow_repo = TenantWorkflowsRepository(db_session=db)
self._workflow_notifier = TenantWorkflowNotifier(db=db)
@property
def db(self) -> AsyncSession:
"""Accessor for db session instance."""
return self._db
@property
def tenant_workflow(self) -> TenantWorkflowRead:
"""Accessor for tenant_workflow instance."""
return self._tenant_workflow
@property
def workflow_repo(self) -> TenantWorkflowsRepository:
"""Accessor for workflow_repo instance."""
return self._workflow_repo
@property
def workflow_notifier(self) -> TenantWorkflowNotifier:
"""Accessor for workflow_notifier instance."""
return self._workflow_notifier
async def run_step(self, webhook_message: dict = None) -> TenantWorkflowRead:
raise NotImplementedError()
async def run_cancel_step(
self, webhook_message: dict = None, error_msg: str = None
) -> TenantWorkflowRead:
raise NotImplementedError()
async def start_workflow(self):
# update the workflow status as "in_progress"
logger.debug(">>> starting workflow ...")
update_workflow = TenantWorkflowUpdate(
id=self.tenant_workflow.id,
workflow_state=TenantWorkflowStateType.in_progress,
wallet_bearer_token=self.tenant_workflow.wallet_bearer_token,
)
self._tenant_workflow = await self.workflow_repo.update(update_workflow)
async def complete_workflow(self):
# finish off our workflow
logger.debug(">>> completing workflow ...")
update_workflow = TenantWorkflowUpdate(
id=self.tenant_workflow.id,
workflow_state=TenantWorkflowStateType.completed,
wallet_bearer_token=None,
)
self._tenant_workflow = await self.workflow_repo.update(update_workflow)
async def complete_workflow_error(self, error_msg: str):
# finish off our workflow
logger.debug(">>> completing workflow with error ...")
update_workflow = TenantWorkflowUpdate(
id=self.tenant_workflow.id,
workflow_state=TenantWorkflowStateType.error,
workflow_state_msg=error_msg,
wallet_bearer_token=None,
)
self._tenant_workflow = await self.workflow_repo.update(update_workflow)
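# A minimal subclass sketch (an illustration, not one of the project's real
# workflows): concrete workflows implement run_step()/run_cancel_step() and
# can reuse the state helpers above, e.g.:
#
#     class EchoWorkflow(BaseWorkflow):
#         async def run_step(self, webhook_message: dict = None) -> TenantWorkflowRead:
#             await self.start_workflow()     # mark in_progress
#             # ... perform the actual workflow step here ...
#             await self.complete_workflow()  # mark completed
#             return self.tenant_workflow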
|
1641040
|
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
# Code from <NAME>
# https://github.com/ehtsham/recsys19vlm/blob/master/RecSys2019-VLMPaper.ipynb
class VLM(object):
def __init__(self, num_users, num_items, num_tags, num_factors, var_prior, reg, video_metadata_array):
self.num_users = num_users
self.num_items = num_items
self.num_tags = num_tags
self.num_factors = num_factors
self.var_prior = var_prior
self.reg = reg
self.video_metadata_array_const = tf.constant(video_metadata_array, dtype = tf.float32)
self.construct_placeholders()
def construct_placeholders(self):
# Placeholders for training samples
self.users_ph = tf.placeholder(dtype=tf.int32, shape=[None])
self.played_videos_ph = tf.placeholder(dtype=tf.float32, shape=[None, self.num_items])
def construct_model_variables(self):
# Mean for user latent factors
self.Mu_Zu = tf.Variable(dtype=tf.float32,
initial_value=tf.random_normal(shape=[self.num_users, self.num_factors]),
name = 'mean_latent_factors_zu')
# Log(std-deviation) for user latent factors
self.lsdev_Zu = tf.Variable(dtype=tf.float32,
initial_value=tf.random_normal(shape=[self.num_users, 1]), name='lsdev_Zu')
# Mean for item latent factors
self.Mu_Zv = tf.Variable(dtype=tf.float32,
initial_value=tf.random_normal(shape=[self.num_items, self.num_factors]),
name = 'mean_latent_factors_zv')
# Mean for tag latent factors
self.Mu_Zt = tf.Variable(dtype=tf.float32,
initial_value=tf.random_normal(shape=[self.num_tags, self.num_factors]),
name = 'mean_latent_factors_zt')
def compute_kl_div(self, lsdev_Zu_batch, Mu_Zu_batch):
# KL Divergence needed for ELBO
sdev_Zu_batch = tf.exp(lsdev_Zu_batch)
comp1 = self.num_factors * (0.5 * tf.math.log(self.var_prior) - lsdev_Zu_batch)
comp2 = (self.num_factors / (2 * self.var_prior)) * (tf.pow(sdev_Zu_batch, 2))
        comp3 = (1.0 / (2 * self.var_prior)) * tf.reduce_sum(tf.pow(Mu_Zu_batch, 2), axis=1, keepdims=True)
comp4 = (self.num_factors / 2.0)
return comp1 + comp2 + comp3 - comp4
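    # For reference, comp1..comp4 above implement the closed-form KL divergence
    # between q = N(mu, sigma^2 * I) and the prior p = N(0, var_prior * I):
    #   KL(q || p) = d * log(sqrt(var_prior) / sigma)
    #                + (d * sigma^2 + ||mu||^2) / (2 * var_prior)
    #                - d / 2
    # with d = num_factors and one shared scalar sigma per user.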
def construct_graph(self):
# Boilerplate Tensorflow
self.construct_model_variables()
# Mean, log(std-deviation) and Gaussian noise for user latent factors
Mu_Zu_batch = tf.gather(self.Mu_Zu, self.users_ph)
lsdev_Zu_batch = tf.gather(self.lsdev_Zu, self.users_ph)
Eps_u_ph = tf.random_normal(shape = [tf.size(self.users_ph), self.num_factors],
mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name="eps")
Zu_batch = Mu_Zu_batch + Eps_u_ph * tf.exp(lsdev_Zu_batch)
# Tag factors mapped to items
Mu_Zv_hat = tf.matmul(self.video_metadata_array_const, self.Mu_Zt)
batch_logits = tf.matmul(Zu_batch, self.Mu_Zv + Mu_Zv_hat, transpose_b=True)
batch_logits_validation = tf.matmul(Mu_Zu_batch, self.Mu_Zv + Mu_Zv_hat, transpose_b=True)
log_softmax = tf.nn.log_softmax(batch_logits)
        num_items_per_document = tf.reduce_sum(self.played_videos_ph, axis=1, keepdims=True)
        batch_conditional_log_likelihood = tf.reduce_sum(self.played_videos_ph * log_softmax, axis=1, keepdims=True)
batch_kl_div = self.compute_kl_div(lsdev_Zu_batch, Mu_Zu_batch)
batch_elbo = (1.0 / num_items_per_document) * (batch_conditional_log_likelihood - batch_kl_div)
avg_loss = -1 * tf.reduce_mean(batch_elbo) + self.reg * (tf.nn.l2_loss(self.Mu_Zv) +
tf.nn.l2_loss(self.Mu_Zt))
return batch_logits, batch_logits_validation, log_softmax, avg_loss, batch_conditional_log_likelihood, batch_kl_div, num_items_per_document
|
1641043
|
from .Layer import *
class FC(Layer):
def __init__(self, model, *args, **kwargs):
Layer.__init__(self, model, *args, **kwargs)
self.W = None
self.b = None
self.XN = 0
self.C = 0
self.dim_out = kwargs["dim_out"]
def reshape(self):
# NCHW
self.XN = self.X.shape[0]
self.C = self.X.size // self.XN
self.Y = np.zeros((self.XN, self.dim_out))
if self.W is None:
self.W = Xavier((self.dim_out ,self.C))
self.b = np.zeros((self.dim_out, ))
def forward(self):
# Y = W * X + b
self.Y = np.dot(self.X.reshape((self.XN, self.C)), self.W.T) + self.b.reshape((1, self.dim_out))
    def backward(self):
        # dL/dX = dL/dY . W            -> shape (N, C)
        self.dX = np.dot(self.dY, self.W)
        # dL/dW = dL/dY^T . X          -> shape (dim_out, C)
        self.dW = np.dot(self.dY.T, self.X.reshape((self.XN, self.C)))
        # dL/db = sum of dL/dY over the batch -> shape (dim_out,)
        self.db = np.sum(self.dY, 0).reshape(self.b.shape)
@property
def params(self):
return [self.W, self.b]
@property
def grads(self):
return [self.dW, self.db]
|
1641053
|
import numpy as np
import matplotlib.pyplot as plt
from math import sqrt
class grid_map:
def __init__(self, map_matrix=None, reward_matrix=None, start_index=(2, 2), goal_index=(16, 16), reward_bound=-5, reward_collision=-0.1):
self.map_matrix = map_matrix
self.state_space = map_matrix.shape[0:2]
self.action_space = [ (1, 0), (-1, 0), (0, 1), (0, -1) ]
self.reward_matrix = reward_matrix
self.reward_bound = reward_bound
self.reward_collision = reward_collision
self.start_index = start_index
self.goal_index = goal_index
    def step(self, cur_state_index, action_index, state_trans_prob=1):
        others = (1 - state_trans_prob) / 3
        action_prob_list = [others, others, others, others]
        action_prob_list[action_index] = state_trans_prob
        # sample the action that is actually executed under the transition noise
        real_action_index = np.random.choice([0, 1, 2, 3], p=action_prob_list)
        action_prob = action_prob_list[real_action_index]
        action = self.action_space[real_action_index]
done = False
next_x = cur_state_index[0] + action[0]
next_y = cur_state_index[1] + action[1]
if next_x > self.state_space[0] - 1:
next_x = self.state_space[0] - 1
reward = self.reward_bound
done = True
elif next_x < 0:
next_x = 0
reward = self.reward_bound
done = True
elif next_y > self.state_space[1] - 1:
next_y = self.state_space[1] - 1
reward = self.reward_bound
done = True
elif next_y < 0:
next_y = 0
reward = self.reward_bound
done = True
else:
reward = self.reward_matrix[next_x, next_y]
            ## Extra Credit
            # ----------------------------------------------------------------
            # You can add a heuristic reward here, such as a DWA reward, an
            # A* reward, or the distance-to-goal reward from previous lectures,
            # to achieve a regular policy of your choosing (e.g. one that keeps
            # moving away from obstacles). Note that whenever
            # self.reward_matrix[next_x, next_y] == reward_collision, the cell
            # (next_x, next_y) is an obstacle grid index. A hypothetical
            # shaping term is sketched below.
            pass
            # example: reward = self.reward_matrix[next_x, next_y] + heuristic_reward
            # ----------------------------------------------------------------
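            # A hypothetical distance-to-goal shaping term (illustration only):
            #   dist = sqrt((next_x - self.goal_index[0]) ** 2
            #               + (next_y - self.goal_index[1]) ** 2)
            #   reward = self.reward_matrix[next_x, next_y] - 0.01 * dist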
done = False
if next_x == self.goal_index[0] and next_y == self.goal_index[1]:
done = True
next_state = (next_x, next_y)
return next_state, reward, action_prob, done
def set_path(self, index):
self.map_matrix[index[0], index[1], :] = 255
def show_map(self):
plt.imshow(self.map_matrix)
plt.show()
def draw_map(self, time=0.01):
plt.imshow(self.map_matrix)
plt.pause(time)
|
1641106
|
import json
import os
import shutil
import tempfile
import traceback
from typing import (
Callable,
NamedTuple,
)
from galaxy.datatypes.registry import Registry
from galaxy.files import ConfiguredFileSources
from galaxy.job_execution.compute_environment import SharedComputeEnvironment
from galaxy.job_execution.setup import JobIO
from galaxy.metadata.set_metadata import (
get_metadata_params,
get_object_store,
validate_and_load_datatypes_config,
)
from galaxy.model import store
from galaxy.model.store import SessionlessContext
from galaxy.objectstore import ObjectStore
from galaxy.structured_app import MinimalToolApp
from galaxy.tool_util.parser.factory import get_tool_source
from galaxy.tools import (
create_tool_from_source,
evaluation,
)
from galaxy.tools.data import ToolDataTableManager
from galaxy.util.bunch import Bunch
from galaxy.util.dbkeys import GenomeBuilds
class ToolAppConfig(NamedTuple):
name: str
tool_data_path: str
galaxy_data_manager_data_path: str
nginx_upload_path: str
len_file_path: str
builds_file_path: str
root: str
is_admin_user: Callable
admin_users: list = []
class ToolApp(MinimalToolApp):
"""Dummy App that allows loading tools"""
name = 'tool_app'
def __init__(
self,
sa_session: SessionlessContext,
tool_app_config: ToolAppConfig,
datatypes_registry: Registry,
object_store: ObjectStore,
tool_data_table_manager: ToolDataTableManager,
file_sources: ConfiguredFileSources,
):
self.model = Bunch(context=sa_session)
self.config = tool_app_config
self.datatypes_registry = datatypes_registry
self.object_store = object_store
self.genome_builds = GenomeBuilds(self)
self.tool_data_tables = tool_data_table_manager
self.file_sources = file_sources
def main(TMPDIR, WORKING_DIRECTORY, IMPORT_STORE_DIRECTORY):
metadata_params = get_metadata_params(WORKING_DIRECTORY)
datatypes_config = metadata_params["datatypes_config"]
if not os.path.exists(datatypes_config):
datatypes_config = os.path.join(WORKING_DIRECTORY, 'configs', datatypes_config)
datatypes_registry = validate_and_load_datatypes_config(datatypes_config)
object_store = get_object_store(WORKING_DIRECTORY)
import_store = store.imported_store_for_metadata(IMPORT_STORE_DIRECTORY)
# TODO: clean up random places from which we read files in the working directory
job_io = JobIO.from_json(os.path.join(IMPORT_STORE_DIRECTORY, 'job_io.json'), sa_session=import_store.sa_session)
tool_app_config = ToolAppConfig(
name='tool_app',
tool_data_path=job_io.tool_data_path,
galaxy_data_manager_data_path=job_io.galaxy_data_manager_data_path,
nginx_upload_path=TMPDIR,
len_file_path=job_io.len_file_path,
builds_file_path=job_io.builds_file_path,
root=TMPDIR,
is_admin_user=lambda _: job_io.user_context.is_admin)
with open(os.path.join(IMPORT_STORE_DIRECTORY, 'tool_data_tables.json')) as data_tables_json:
tdtm = ToolDataTableManager.from_dict(json.load(data_tables_json))
app = ToolApp(
sa_session=import_store.sa_session,
tool_app_config=tool_app_config,
datatypes_registry=datatypes_registry,
object_store=object_store,
tool_data_table_manager=tdtm,
file_sources=job_io.file_sources,
)
# TODO: could try to serialize just a minimal tool variant instead of the whole thing ?
tool_source = get_tool_source(tool_source_class=job_io.tool_source_class, raw_tool_source=job_io.tool_source)
tool = create_tool_from_source(app, tool_source=tool_source, tool_dir=job_io.tool_dir)
tool_evaluator = evaluation.RemoteToolEvaluator(app=app, tool=tool, job=job_io.job, local_working_directory=WORKING_DIRECTORY)
tool_evaluator.set_compute_environment(compute_environment=SharedComputeEnvironment(job_io=job_io, job=job_io.job))
with open(os.path.join(WORKING_DIRECTORY, 'tool_script.sh'), 'a') as out:
command_line, extra_filenames, environment_variables = tool_evaluator.build()
out.write(command_line)
if __name__ == "__main__":
TMPDIR = tempfile.mkdtemp()
WORKING_DIRECTORY = os.getcwd()
WORKING_PARENT = os.path.join(WORKING_DIRECTORY, os.path.pardir)
if not os.path.isdir("working") and os.path.isdir(os.path.join(WORKING_PARENT, 'working')):
# We're probably in pulsar
WORKING_DIRECTORY = WORKING_PARENT
METADATA_DIRECTORY = os.path.join(WORKING_DIRECTORY, 'metadata')
IMPORT_STORE_DIRECTORY = os.path.join(METADATA_DIRECTORY, 'outputs_new')
EXPORT_STORE_DIRECTORY = os.path.join(METADATA_DIRECTORY, 'outputs_populated')
try:
main(TMPDIR, WORKING_DIRECTORY, IMPORT_STORE_DIRECTORY)
except Exception:
os.makedirs(EXPORT_STORE_DIRECTORY, exist_ok=True)
with open(os.path.join(EXPORT_STORE_DIRECTORY, 'traceback.txt'), 'w') as out:
out.write(traceback.format_exc())
raise
finally:
shutil.rmtree(TMPDIR, ignore_errors=True)
|
1641114
|
import os
import re
import json
import stat
import glob
from pathlib import Path
import jinja2
from git import Repo
from git.exc import NoSuchPathError
from git.exc import InvalidGitRepositoryError
import hypergol
from hypergol import DatasetFactory
from hypergol import RepoData
from hypergol.utils import Mode
from hypergol.utils import create_text_file
from hypergol.utils import create_directory
from hypergol.name_string import NameString
DATASET_TEMPLATE = """sys.path.insert(0, '{projectDirectory}')
from data_models.{dataTypeFile} import {dataType}
from hypergol import Dataset
from hypergol import RepoData
ds=Dataset(
dataType={dataType},
location='{location}',
project='{project}',
branch='{branch}',
name='{name}',
chunkCount={chunkCount},
repoData=RepoData(
branchName='{branchName}',
commitHash='{commitHash}',
commitMessage='{commitMessage}',
comitterName='{comitterName}',
comitterEmail='{comitterEmail}'
)
)"""
def locate(fname):
return Path(hypergol.__path__[0], 'cli', 'templates', fname)
class RepoManager:
"""Wrapper class around git that provides all information about the repo connected to the project.
"""
def __init__(self, repoDirectory=None, raiseIfDirty=True):
"""
Parameters
----------
repoDirectory : string
directory where the the `.git` directory is located
raiseIfDirty : bool
if set and the repo contains uncommitted code, it raises an error
"""
self.repoDirectory = repoDirectory
self.raiseIfDirty = raiseIfDirty
self.repoExists = False
try:
repo = Repo(path=self.repoDirectory)
self.repoExists = True
except NoSuchPathError:
print(f'Directory {self.repoDirectory} does not exist')
return
except InvalidGitRepositoryError:
print(f'No git repository in {self.repoDirectory}')
return
if repo.is_dirty():
if self.raiseIfDirty:
raise ValueError("The current git repo is dirty; please commit your work before you run the pipeline.")
print('Warning! The current git repo is dirty; this will result in incorrect commit hash in datasets.')
try:
commit = repo.commit()
except ValueError as ex:
print('No commits in this repo; please create an initial commit.')
raise ex
self.commitHash = commit.hexsha
self.commitMessage = commit.message
self.comitterName = commit.committer.name
self.comitterEmail = commit.committer.email
try:
self.branchName = repo.active_branch.name
except TypeError:
self.branchName = 'DETACHED'
class HypergolProject:
"""Owner of all information about the project
    CLI functions define what needs to be created, and this class creates them. It also consistently handles the mode flags (normal/dryrun/force).
    It also verifies whether a requested class exists in the respective directory (data_models, tasks) and identifies its type; e.g., for ``HelloWorld`` it checks if ``data_models/hello_world.py`` or ``tasks/hello_world.py`` exists and infers its role from that. Used in :func:`.create_data_model` and :func:`.create_pipeline`
"""
def __init__(self, projectDirectory=None, dataDirectory='.', chunkCount=16, dryrun=None, force=None, repoManager=None):
"""
Parameters
----------
projectDirectory : string
location of the project: e.g.: ``~/repo_name``, models will be in ``~/repo_name/models``
        dataDirectory : string
            location of the data for the project: e.g.: ``~/data``, files will be stored in ``~/data/repo_name``
dryrun : bool (default=None)
If set to ``True`` it returns the generated code as a string
force : bool (default=None)
If set to ``True`` it overwrites the target file
"""
if force and dryrun:
raise ValueError('Both force and dryrun are set')
if projectDirectory is None:
projectDirectory = os.getcwd()
if projectDirectory.endswith('/'):
projectDirectory = projectDirectory[:-1]
if dataDirectory.endswith('/'):
dataDirectory = dataDirectory[:-1]
if repoManager is None:
repoManager = RepoManager(repoDirectory=projectDirectory, raiseIfDirty=not force)
self.repoManager = repoManager
self.projectName = NameString(os.path.basename(projectDirectory))
self.projectDirectory = projectDirectory
self.dataDirectory = dataDirectory
self.dataModelsPath = Path(projectDirectory, 'data_models')
self.tasksPath = Path(projectDirectory, 'tasks')
self.pipelinesPath = Path(projectDirectory, 'pipelines')
self.modelsPath = Path(projectDirectory, 'models')
self.blocksPath = Path(projectDirectory, 'models', 'blocks')
self.testsPath = Path(projectDirectory, 'tests')
self._init_known_class_lists()
self.templateEnvironment = jinja2.Environment(
loader=jinja2.FileSystemLoader(
searchpath=Path(hypergol.__path__[0], 'cli', 'templates')
)
)
self.mode = Mode.DRY_RUN if dryrun else Mode.FORCE if force else Mode.NORMAL
if not self.repoManager.repoExists:
self.datasetFactory = None
self.tensorboardPath = None
self.modelDataPath = None
print('Repo does not exist, data related functionality disabled.')
return
self.datasetFactory = DatasetFactory(
location=self.dataDirectory,
project=self.projectName.asSnake,
branch=self.repoManager.branchName,
chunkCount=chunkCount,
repoData=RepoData(
branchName=self.repoManager.branchName,
commitHash=self.repoManager.commitHash,
commitMessage=self.repoManager.commitMessage,
comitterName=self.repoManager.comitterName,
comitterEmail=self.repoManager.comitterEmail
)
)
self.tensorboardPath = Path(dataDirectory, self.projectName.asSnake, 'tensorboard', self.repoManager.branchName)
self.modelDataPath = Path(dataDirectory, self.projectName.asSnake, self.repoManager.branchName, 'models')
def _init_known_class_lists(self):
self._dataModelClasses = []
self._taskClasses = []
self._modelBlockClasses = []
if os.path.exists(self.dataModelsPath):
dataModelFiles = glob.glob(str(Path(self.dataModelsPath, '[!_][!_]*.py')))
self._dataModelClasses = [NameString(os.path.split(filePath)[1][:-3]) for filePath in dataModelFiles]
if os.path.exists(self.tasksPath):
taskFiles = glob.glob(str(Path(self.projectDirectory, 'tasks', '[!_][!_]*.py')))
self._taskClasses = [NameString(os.path.split(filePath)[1][:-3]) for filePath in taskFiles]
if os.path.exists(self.blocksPath):
blockFiles = glob.glob(str(Path(self.projectDirectory, 'models', 'blocks', '[!_][!_]*.py')))
self._modelBlockClasses = [NameString(os.path.split(filePath)[1][:-3]) for filePath in blockFiles]
@property
def isDryRun(self):
return self.mode == Mode.DRY_RUN
@property
def modeMessage(self):
if self.mode == Mode.NORMAL:
return ''
return f' - Mode: {self.mode}'
def cli_final_message(self, creationType, name, content):
creationPath = None
if creationType == 'Model':
creationPath = self.modelsPath
elif creationType == 'Class':
creationPath = self.dataModelsPath
elif creationType == 'ModelBlock':
creationPath = self.modelsPath
elif creationType == 'PipeLine':
creationPath = self.pipelinesPath
elif creationType == 'Project':
creationPath = self.projectDirectory
elif str(creationType) in ['Source', 'Task']:
creationPath = self.tasksPath
if creationPath is None:
raise ValueError(f'{creationType} is an unknown type')
print('')
print(f'{creationType} {name} was created in directory {creationPath}.{self.modeMessage}')
print('')
if self.isDryRun:
return content
return None
def create_model_directory(self, modelName):
create_directory(path=Path(self.modelsPath, modelName.asSnake), mode=self.mode)
def create_project_directory(self):
create_directory(path=self.projectDirectory, mode=self.mode)
def create_data_models_directory(self):
create_directory(path=self.dataModelsPath, mode=self.mode)
def create_tasks_directory(self):
create_directory(path=self.tasksPath, mode=self.mode)
def create_pipelines_directory(self):
create_directory(path=self.pipelinesPath, mode=self.mode)
def create_blocks_directory(self):
create_directory(path=self.blocksPath, mode=self.mode)
def create_models_directory(self):
create_directory(path=self.modelsPath, mode=self.mode)
def create_tests_directory(self):
create_directory(path=self.testsPath, mode=self.mode)
def is_data_model_class(self, value: NameString):
"""Checks if a name is a data_model class (based on if the snakecase .py file exists)"""
return value in self._dataModelClasses
def is_task_class(self, value: NameString):
"""Checks if a name is in tasks class (based on if the snakecase .py file exists)"""
return value in self._taskClasses
def is_model_block_class(self, value: NameString):
"""Checks if a name is in blocks class (based on if the snakecase .py file exists)"""
return value in self._modelBlockClasses
def check_dependencies(self, dependencies):
"""Raises an error if any dependency is unknown"""
for dependency in dependencies:
if dependency not in self._dataModelClasses + self._taskClasses + self._modelBlockClasses:
raise ValueError(f'Unknown dependency {dependency}')
def create_text_file(self, filePath, content):
create_text_file(filePath=filePath, content=content, mode=self.mode)
def render(self, templateName, templateData, filePath):
"""Creates a file from a template using jinja2
Parameters
----------
templateName : string
filename of the template
templateData : dict
data to fill the template with
filePath : Path
full path of the destination file (ignored if self.mode != Mode.DRY_RUN)
"""
content = self.templateEnvironment.get_template(templateName).render(templateData)
if len(content) > 0 and content[-1] != '\n':
content += '\n'
self.create_text_file(filePath=filePath, content=content)
return content
def make_file_executable(self, filePath):
print(f'Making file {filePath} executable.{self.modeMessage}')
self._test_existence(path=filePath, objectName='File')
if self.mode != Mode.DRY_RUN:
fileStat = os.stat(filePath)
if os.getuid() == fileStat.st_uid:
os.chmod(filePath, fileStat.st_mode | stat.S_IXUSR)
def _test_existence(self, path, objectName):
if not os.path.exists(path):
if self.mode == Mode.DRY_RUN:
print(f'{objectName} {path} does not exist.{self.modeMessage}')
else:
raise ValueError(f'{objectName} {path} does not exist.{self.modeMessage}')
def render_executable(self, templateName, templateData, filePath):
content = self.render(templateName=templateName, templateData=templateData, filePath=filePath)
self.make_file_executable(filePath=filePath)
return content
def render_simple(self, templateName, filePath):
return self.render(templateName=templateName, templateData={'name': self.projectName}, filePath=filePath)
def list_datasets(self, pattern=None, asCode=False):
"""Convenience function to list datasets for a project
Returns a list of data loaded from the ``.def`` files in the directory
Parameters
----------
pattern : string (None)
Regex pattern to filter on dataset names, if unspecified, defaults to ``.*``
asCode : bool (False)
If True prints a code snippet that allows the dataset to be loaded (with imports and path updates)
"""
if pattern is None:
pattern = '.*'
dataPath = Path(self.dataDirectory, self.projectName.asSnake)
result = []
for pathName, _, fileNames in os.walk(dataPath):
for fileName in fileNames:
if fileName.endswith('.def') and re.match(pattern, fileName[:-4]) is not None:
                    with open(Path(pathName, fileName), 'rt') as defFile:
                        data = json.load(defFile)
result.append(data)
if asCode:
values = {**data, **data['repo']}
values['location'] = self.dataDirectory
values['commitMessage'] = values['commitMessage'].replace('\n', '\\n')
values['dataTypeFile'] = NameString(name=values['dataType']).asSnake
values['projectDirectory'] = self.projectDirectory
print(DATASET_TEMPLATE.format(**values))
return result
def diff_data_model(self, commit, *args):
"""Convenience function to compare old data model class definitions to the current one
Prints the diffs from the specified commit to the current commit
Parameters
----------
commit : string
The git commit from where the comparison starts
*args : List[string]
List of class names to compare, if empty it compares all
"""
if len(args) == 0:
names = self._dataModelClasses
else:
names = [NameString(name) for name in args]
repo = Repo(self.projectDirectory)
if repo.is_dirty():
            print('Warning! The current git repo is dirty; this will result in an incorrect diff.')
currentCommit = repo.commit().hexsha
for name in names:
print(f'------ data_models/{name.asSnake}.py ------')
print(repo.git.diff(commit, currentCommit, f'data_models/{name.asSnake}.py'))
def create_old_data_model(self, commit, *args):
"""Convenience function to generate data model classes at an old commit to be able to load datasets created then
Full commit hash required.
``project.create_old_data_model(commit='fbd8110b7194425e2323f68ef54dac15bb01ee7b', 'OneClass', 'TwoClass')``
        Will create ``data_models/one_class_fbd8110.py`` and ``data_models/two_class_fbd8110.py`` and replaces all occurrences of ``OneClass`` and ``TwoClass`` with ``OneClassFBD8110`` and ``TwoClassFBD8110`` in each file.
Parameters
----------
commit : string
git commit to retrieve classes from
args : List[string]
List of class names to generate, if empty it generates all
"""
if len(args) == 0:
names = self._dataModelClasses
else:
names = [NameString(name) for name in args]
result = []
repo = Repo(self.projectDirectory)
if repo.is_dirty():
print('Warning! The current git repo is dirty; this will result in incorrect data_model_files created.')
for name in names:
content = repo.git.show(f'{commit}:data_models/{name.asSnake}.py')
for oldName in names:
content = content.replace(oldName.asClass, f'{oldName.asClass}{commit[:7].upper()}')
content = content.replace(f'data_models.{oldName.asSnake}', f'data_models.{oldName.asSnake}_{commit[:7]}')
if self.isDryRun:
result.append(content)
print(f'DRYRUN - Creating class {name.asClass}{commit[:7].upper()} in {name.asSnake}_{commit[:7]}.py')
print(content+'\n')
else:
print(f'Creating class {name.asClass}{commit[:7].upper()} in {name.asSnake}_{commit[:7]}.py')
with open(Path(self.dataModelsPath, f'{name.asSnake}_{commit[:7]}.py'), 'wt') as outFile:
outFile.write(content+'\n')
self._init_known_class_lists()
return result
|
1641148
|
import pya #KLayout Python interface package
import sys
sys.path.append(r"C:\Users\wzhao\AppData\Local\Continuum\Anaconda3\envs\py34\Lib") ##Added other Python package, for KLayout v0.24, Python version is 3.4.2
sys.path.append(r"C:\Users\wzhao\AppData\Local\Continuum\Anaconda3\envs\py34\Lib\site-packages")
import numpy
layout = pya.Layout()
layout.dbu = 0.01
top = layout.create_cell("TOP")
layer = layout.layer(1, 0)
top.shapes(layer).insert(pya.Box(0, 0, 1000, 2000))
layout.write(r"C:\Localdata\temp\test.gds")
print("write gds done")
|
1641263
|
from deidentify.surrogates.generators import LocationSurrogates, RandomData
from deidentify.surrogates.generators.location import (NUMBER_REGEX, ZIP_REGEX,
Location,
LocationDatabase,
_strip, parse_location)
from .util import RandomDataMock
def test_location_database():
location_database = LocationDatabase()
assert len(location_database.countries) >= 255
assert 'Marokko' in location_database.countries
assert 'Duitsland' in location_database.countries
assert len(location_database.places) >= 2400
assert 'Amsterdam' in location_database.places
assert 'Enschede' in location_database.places
assert 'Bennebroek' in location_database.places
assert 'Haarlem' in location_database.places
assert len(location_database.zip_codes) >= 452000
assert '7141DC' in location_database.zip_codes
assert len(location_database.streetnames) >= 127000
assert 'Parallelweg' in location_database.streetnames
assert '<NAME>' in location_database.streetnames
def test_zip_regex():
assert ZIP_REGEX.match('1234AB').group(0) == '1234AB'
assert ZIP_REGEX.match('1234 AB').group(0) == '1234 AB'
assert ZIP_REGEX.match('1234 ab').group(0) == '1234 ab'
def test_number_regex():
assert NUMBER_REGEX.search('Parallelweg 122-3 a Groenlo').group(0) == '122-3 a'
assert NUMBER_REGEX.search('Parallelweg 122-3 Groenlo').group(0) == '122-3'
assert NUMBER_REGEX.search('Parallelweg 122A').group(0) == '122A'
assert NUMBER_REGEX.search('Parallelweg 1').group(0) == '1'
def test_strip():
assert _strip(' This is, a test, ') == 'This is, a test'
assert _strip('This is, a test') == 'This is, a test'
assert _strip('This is, a test,') == 'This is, a test'
def test_parse_location():
# Case 1: ZIP Code => Left of ZIP is street, right is place and country
assert parse_location('Parallelweg 2, 7141 DC Groenlo') == Location(
raw='Parallelweg 2, 7141 DC Groenlo',
country='',
zip_code='7141 DC',
place='Groenlo',
street='Parallelweg',
house_number='2')
assert parse_location('Parallelweg 2, 7141 DC') == Location(
raw='Parallelweg 2, 7141 DC',
country='',
zip_code='7141 DC',
place='',
street='Parallelweg',
house_number='2')
assert parse_location('7141DC') == Location(
raw='7141DC', country='', zip_code='7141DC', place='', street='', house_number='')
assert parse_location('7141 DC Groenlo') == Location(
raw='7141 DC Groenlo', country='', zip_code='7141 DC', place='Groenlo', street='',
house_number=''
)
assert parse_location('7141 DC te Haarlem Nederland') == Location(
raw='7141 DC te Haarlem Nederland',
country='Nederland',
zip_code='7141 DC',
place='te Haarlem',
street='',
house_number=''
)
# Case 2: No ZIP but number, split into left and right. Left of number (inclusive) is street,
# right is place and country.
assert parse_location('Parallelweg 2, Groenlo') == Location(
raw='Parallelweg 2, Groenlo',
country='',
zip_code='',
place='Groenlo',
street='Parallelweg',
house_number='2')
assert parse_location('Parallelweg 2') == Location(
raw='Parallelweg 2',
country='',
zip_code='',
place='',
street='Parallelweg',
house_number='2')
assert parse_location('Parallelweg 2A') == Location(
raw='Parallelweg 2A',
country='',
zip_code='',
place='',
street='Parallelweg',
house_number='2A')
assert parse_location('Waterkant 11-3') == Location(
raw='Waterkant 11-3',
country='',
zip_code='',
place='',
street='Waterkant',
house_number='11-3')
assert parse_location('Parallelweg 2 te Haarlem Nederland') == Location(
raw='Parallelweg 2 te Haarlem Nederland',
country='Nederland',
zip_code='',
place='te Haarlem',
street='Parallelweg',
house_number='2')
    # Case 3: no number, no ZIP code, split on road suffixes (e.g., weg, straat). Left of suffix
# is street, right is place and country.
assert parse_location('Parallelweg Groenlo') == Location(
raw='Parallelweg Groenlo',
country='',
zip_code='',
place='Groenlo',
street='Parallelweg',
house_number='')
assert parse_location('Parallelweg') == Location(
raw='Parallelweg',
country='',
zip_code='',
place='',
street='Parallelweg',
house_number='')
assert parse_location('Parallelweg te Haarlem Nederland') == Location(
raw='Parallelweg te Haarlem Nederland',
country='Nederland',
zip_code='',
place='te Haarlem',
street='Parallelweg',
house_number='')
assert parse_location('De singel te Haarlem Nederland') == Location(
raw='De singel te Haarlem Nederland',
country='Nederland',
zip_code='',
place='te Haarlem',
street='De singel',
house_number='')
# Case 4: no number, no ZIP code, no street suffix => assume everything is a place,
# except for names of countries
assert parse_location('De diagonaal te Haarlem Nederland') == Location(
raw='De diagonaal te Haarlem Nederland',
country='Nederland',
zip_code='',
place='De diagonaal te Haarlem',
street='',
house_number='')
assert parse_location('Groenlo') == Location(
raw='Groenlo',
country='',
zip_code='',
place='Groenlo',
street='',
house_number='')
assert parse_location('Oostenrijk') == Location(
raw='Oostenrijk',
country='Oostenrijk',
zip_code='',
place='',
street='',
house_number='')
assert parse_location('CARL MUCKSTRAAT') == Location(
raw='CARL MUCKSTRAAT',
country='',
zip_code='',
place='',
street='CARL MUCKSTRAAT',
house_number=''
)
assert parse_location('Waterkant 28-3, 7521PL Enschede') == Location(
raw='Waterkant 28-3, 7521PL Enschede',
country='',
zip_code='7521PL',
place='Enschede',
street='Waterkant',
house_number='28-3'
)
assert parse_location('Indonesië (Molukken)') == Location(
raw='Indonesië (Molukken)',
country='Indonesië',
zip_code='',
place='Molukken',
street='',
house_number=''
)
assert parse_location('De Giezen 15-002, 7461 BB') == Location(
raw='De Giezen 15-002, 7461 BB',
country='',
zip_code='7461 BB',
place='',
street='De Giezen',
house_number='15-002'
)
assert parse_location('Arnhem-Zuid') == Location(
raw='Arnhem-Zuid',
country='',
zip_code='',
place='Arnhem-Zuid',
street='',
house_number=''
)
assert parse_location('De Giezen 15-002, Arnhem-Zuid') == Location(
raw='De Giezen 15-002, Arnhem-Zuid',
country='',
zip_code='',
place='Arnhem-Zuid',
street='De Giezen',
house_number='15-002'
)
def test_replace_all():
locations = [
('7141DC', 'Groenlo', 'Parallelweg'),
]
location_database = LocationDatabase(locations=locations)
given_expected = [
('Waterkant 28-3, 7521PL Enschede', 'Parallelweg 11-1, 7141DC Groenlo'),
('Waterkant 28-3', 'Parallelweg 11-1'),
('7521PL Enschede', '7141DC Groenlo'),
('7521 PL Enschede', '7141 DC Groenlo'),
('Enschede', 'Groenlo'),
('Arnhem-Zuid', 'Groenlo'),
        # countries are completely ignored during surrogate generation
('Oostenrijk', 'Oostenrijk'),
('Duitsland', 'Duitsland')
]
annotations = [given for given, _ in given_expected]
expected = [expected for _, expected in given_expected]
location_surrogates = LocationSurrogates(annotations, location_database=location_database,
random_data=RandomDataMock())
surrogates = location_surrogates.replace_all()
assert surrogates == expected
|
1641290
|
from dexy.doc import Doc
from dexy.node import Node
from dexy.node import PatternNode
from dexy.wrapper import Wrapper
from tests.utils import wrap
import dexy.doc
import dexy.node
import os
import time
def test_create_node():
with wrap() as wrapper:
node = dexy.node.Node.create_instance(
"doc",
"foo.txt",
wrapper,
[],
# kwargs
foo='bar',
contents="these are contents"
)
assert node.__class__ == dexy.doc.Doc
assert node.args['foo'] == 'bar'
assert node.wrapper == wrapper
assert node.inputs == []
assert len(node.hashid) == 32
def test_node_arg_caching():
with wrap() as wrapper:
wrapper.nodes = {}
node = dexy.node.Node("foo", wrapper, [], foo='bar', baz=123)
wrapper.add_node(node)
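        # hashid is the md5 hex digest of the node key "foo"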
assert node.hashid == 'acbd18db4cc2f85cedef654fccc4a4d8'
assert node.args['foo'] == 'bar'
assert node.args['baz'] == 123
assert node.sorted_arg_string() == '[["baz", 123], ["foo", "bar"]]'
assert os.path.exists(wrapper.artifacts_dir)
assert not os.path.exists(wrapper.node_argstrings_filename())
wrapper.save_node_argstrings()
assert os.path.exists(wrapper.node_argstrings_filename())
wrapper.load_node_argstrings()
assert not node.check_args_changed()
node.args['baz'] = 456
assert node.check_args_changed()
wrapper.save_node_argstrings()
wrapper.load_node_argstrings()
assert not node.check_args_changed()
SCRIPT_YAML = """
script:scriptnode:
- start.sh|shint
- middle.sh|shint
- end.sh|shint
"""
def test_script_node_caching__slow():
with wrap():
with open("start.sh", "w") as f:
f.write("pwd")
with open("middle.sh", "w") as f:
f.write("echo `time`")
with open("end.sh", "w") as f:
f.write("echo 'done'")
with open("dexy.yaml", "w") as f:
f.write(SCRIPT_YAML)
wrapper1 = Wrapper(log_level="DEBUG")
wrapper1.run_from_new()
for node in list(wrapper1.nodes.values()):
assert node.state == 'ran'
wrapper2 = Wrapper()
wrapper2.run_from_new()
for node in list(wrapper2.nodes.values()):
assert node.state == 'consolidated'
time.sleep(1.1)
with open("middle.sh", "w") as f:
f.write("echo 'new'")
wrapper3 = Wrapper()
wrapper3.run_from_new()
        for node in list(wrapper3.nodes.values()):
assert node.state == 'ran'
# TODO mock out os.stat to get different mtimes without having to sleep?
def test_node_caching__slow():
with wrap() as wrapper:
with open("hello.py", "w") as f:
f.write("print(1+2)\n")
with open("doc.txt", "w") as f:
f.write("1 + 1 = {{ d['hello.py|py'] }}")
wrapper = Wrapper(log_level='DEBUG')
hello_py = Doc("hello.py|py", wrapper)
doc_txt = Doc("doc.txt|jinja",
wrapper,
[hello_py]
)
wrapper.run_docs(doc_txt)
assert str(doc_txt.output_data()) == "1 + 1 = 3\n"
assert str(hello_py.output_data()) == "3\n"
assert hello_py.state == 'ran'
assert doc_txt.state == 'ran'
wrapper = Wrapper(log_level='DEBUG')
hello_py = Doc("hello.py|py", wrapper)
doc_txt = Doc("doc.txt|jinja",
wrapper,
[hello_py]
)
wrapper.run_docs(doc_txt)
assert hello_py.state == 'consolidated'
assert doc_txt.state == 'consolidated'
time.sleep(1.1)
with open("doc.txt", "w") as f:
f.write("1 + 1 = {{ d['hello.py|py'] }}\n")
wrapper = Wrapper(log_level='DEBUG')
hello_py = Doc("hello.py|py", wrapper)
doc_txt = Doc("doc.txt|jinja",
wrapper,
[hello_py]
)
wrapper.run_docs(doc_txt)
assert hello_py.state == 'consolidated'
assert doc_txt.state == 'ran'
time.sleep(1.1)
with open("hello.py", "w") as f:
f.write("print(1+1)\n")
wrapper = Wrapper(log_level='DEBUG')
hello_py = Doc("hello.py|py", wrapper)
doc_txt = Doc("doc.txt|jinja",
wrapper,
[hello_py]
)
wrapper.run_docs(doc_txt)
assert hello_py.state == 'ran'
assert doc_txt.state == 'ran'
def test_node_init_with_inputs():
with wrap() as wrapper:
node = Node("foo.txt",
wrapper,
[Node("bar.txt", wrapper)]
)
assert node.key == "foo.txt"
assert node.inputs[0].key == "bar.txt"
expected = {
0 : "bar.txt",
1 : "foo.txt"
}
for i, n in enumerate(node.walk_inputs()):
assert expected[i] == n.key
def test_doc_node_populate():
with wrap() as wrapper:
node = Node.create_instance(
'doc', "foo.txt", wrapper,
[], contents='foo')
assert node.key_with_class() == "doc:foo.txt"
def test_doc_node_with_filters():
with wrap() as wrapper:
node = Node.create_instance('doc',
"foo.txt|outputabc", wrapper, [], contents='foo')
assert node.key_with_class() == "doc:foo.txt|outputabc"
def test_pattern_node():
with wrap() as wrapper:
with open("foo.txt", "w") as f:
f.write("foo!")
with open("bar.txt", "w") as f:
f.write("bar!")
wrapper = Wrapper(log_level='DEBUG')
wrapper.to_valid()
wrapper.nodes = {}
wrapper.roots = []
wrapper.batch = dexy.batch.Batch(wrapper)
wrapper.filemap = wrapper.map_files()
node = PatternNode("*.txt",
wrapper,
[],
foo="bar")
assert node.args['foo'] == 'bar'
wrapper.run_docs(node)
assert len(node.children) == 2
for child in node.children:
assert child.__class__.__name__ == "Doc"
assert child.args['foo'] == 'bar'
assert child.key_with_class() in ["doc:foo.txt", "doc:bar.txt"]
assert child.filters == []
def test_pattern_node_multiple_filters():
with wrap() as wrapper:
with open("foo.txt", "w") as f:
f.write("foo!")
wrapper = Wrapper(log_level='DEBUG')
wrapper.to_valid()
wrapper.nodes = {}
wrapper.roots = []
wrapper.batch = dexy.batch.Batch(wrapper)
wrapper.filemap = wrapper.map_files()
node = PatternNode("*.txt|dexy|dexy|dexy", wrapper=wrapper)
doc = node.children[0]
assert doc.key == "foo.txt|dexy|dexy|dexy"
assert doc.filter_aliases == ['dexy', 'dexy', 'dexy']
assert doc.parent == node
def test_pattern_node_one_filter():
with wrap() as wrapper:
with open("foo.txt", "w") as f:
f.write("foo!")
wrapper = Wrapper(log_level='DEBUG')
wrapper.to_valid()
wrapper.nodes = {}
wrapper.roots = []
wrapper.batch = dexy.batch.Batch(wrapper)
wrapper.filemap = wrapper.map_files()
node = PatternNode("*.txt|dexy", wrapper=wrapper)
doc = node.children[0]
assert doc.key == "<KEY>"
assert doc.filter_aliases == ['dexy']
assert doc.parent == node
|
1641309
|
import numpy as np
import pandas as pd
import os
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
from tensorflow.python.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.python.keras.applications.vgg16 import VGG16
from tensorflow.python.keras.preprocessing import image
from tensorflow.python.keras.applications.vgg16 import preprocess_input
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.optimizers import Adam
from PIL import Image
import warnings
warnings.filterwarnings('ignore')
from tensorflow.python.keras.models import load_model
model = load_model('/root/glioAI/glioai/models/tumor_prediction.h5')
# path to any of the labeled malignant images that the model hasn't seen before
img_path = ('/root/glioAI/data/tumortest/8 no.jpg')
img = tf.keras.preprocessing.image.load_img(img_path, target_size=(224,224))
x = image.img_to_array(img)
x = np.expand_dims(x,axis=0)
img_data = preprocess_input(x)
# make prediction
rs = model.predict(img_data)
print(rs)
# rs[0][0] is the probability for the "no tumor" class, rs[0][1] for "tumor"
# (per the thresholding below)
if rs[0][0] >= 0.9:
    prediction = 'This image is NOT tumorous.'
else:
    prediction = 'Warning! This image IS tumorous.'
print(prediction)
|
1641324
|
from .artifact import Artifact
from .cluster import Cluster
from .dataset import Dataset, DatasetRef
from .dataset_tag import DatasetTag, DatasetVersionSummary
from .dataset_version import DatasetVersion, DatasetVersionPreSignedS3Call, DatasetVersionPreSignedURL, \
DatasetVersionTagSummary
from .deployment import Deployment, AutoscalingDefinition, AutoscalingMetric
from .experiment import BaseExperiment, MultiNodeExperiment, SingleNodeExperiment, MpiMultiNodeExperiment
from .experiment_dataset import ExperimentDataset, VolumeOptions
from .hyperparameter import Hyperparameter
from .job import Job, JobDataset
from .log import LogRow
from .machine import Machine, MachineEvent, MachineUtilization
from .model import Model, ModelFile
from .notebook import Notebook, NotebookStart
from .pagination import Pagination
from .project import Project
from .secret import Secret
from .storage_provider import StorageProvider
from .tag import Tag
from .tensorboard import Instance, Tensorboard
from .vm_type import VmType, VmTypeGpuModel
from .workflows import Workflow, WorkflowRun, WorkflowSpec
|
1641372
|
from typing import Any, Optional
class ValueObject:
def __repr__(self) -> str:
return "<{}: {}>".format(self.__class__.__name__, self)
def __eq__(self, other: Any) -> bool:
if isinstance(other, self.__class__):
return hash(self) == hash(other)
else:
return False
def __hash__(self) -> int:
return hash(str(self))
class Module(ValueObject):
"""
A Python module.
"""
def __init__(self, name: str) -> None:
"""
Args:
name: The fully qualified name of a Python module, e.g. 'package.foo.bar'.
"""
self.name = name
def __str__(self) -> str:
return self.name
@property
def root_package_name(self) -> str:
return self.name.split(".")[0]
@property
def parent(self) -> "Module":
components = self.name.split(".")
if len(components) == 1:
raise ValueError("Module has no parent.")
return Module(".".join(components[:-1]))
def is_child_of(self, module: "Module") -> bool:
try:
return module == self.parent
except ValueError:
# If this module has no parent, then it cannot be a child of the supplied module.
return False
def is_descendant_of(self, module: "Module") -> bool:
return self.name.startswith(f"{module.name}.")
def is_package(self) -> bool:
"""
Whether the module can contain other modules.
Practically, this corresponds to whether a module is an __init__.py file.
"""
raise NotImplementedError
class DirectImport(ValueObject):
"""
An import between one module and another.
"""
def __init__(
self,
*,
importer: Module,
imported: Module,
line_number: Optional[int] = None,
line_contents: Optional[str] = None,
) -> None:
self.importer = importer
self.imported = imported
self.line_number = line_number
self.line_contents = line_contents
def __str__(self) -> str:
if self.line_number:
return "{} -> {} (l. {})".format(self.importer, self.imported, self.line_number)
else:
return "{} -> {}".format(self.importer, self.imported)
def __hash__(self) -> int:
return hash((str(self), self.line_contents))
class ImportExpression(ValueObject):
"""
A user-submitted expression describing an import or set of imports.
Sets of imports are notated using * wildcards.
These wildcards can stand in for a module name or part of a name, but they do
not extend to subpackages.
For example, "mypackage.*" refers to every child subpackage of mypackage.
It does not, however, include more distant descendants such as mypackage.foo.bar.
"""
def __init__(self, importer: str, imported: str) -> None:
self.importer = importer
self.imported = imported
def has_wildcard_expression(self) -> bool:
return "*" in self.imported or "*" in self.importer
def __str__(self) -> str:
return "{} -> {}".format(self.importer, self.imported)
|
1641402
|
import feed.handlers
import firenado.tornadoweb
class FeedComponent(firenado.tornadoweb.TornadoComponent):
def get_handlers(self):
return [
(r'/', feed.handlers.IndexHandler),
]
|
1641409
|
from ._title import Title
from plotly.graph_objs.scatterpolar.marker.colorbar import title
from ._tickformatstop import Tickformatstop
from ._tickfont import Tickfont
|
1641417
|
import re
from tkinter import *
from .PyEditor import PyEditor
class MenuManager:
import Bindings
def __init__(self,parent):
self.parent=parent
self.menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("format", "F_ormat"),
("command", "_Command"),
("help", "_Help")
]
def createmenubar(self):
parent=self.parent
# deactivate the tearOff's
parent.root.option_add('*tearOff', FALSE)
parent.menubar=Menu(parent.root)
self.menudict = menudict = {}
for name, label in self.menu_specs:
underline, label = prepstr(label)
menudict[name] = menu = Menu(parent.menubar, name=name)
parent.menubar.add_cascade(label=label, menu=menu, underline=underline)
self.fill_menus()
parent.recent_files_menu = Menu(parent.menubar)
self.menudict['file'].insert_cascade(3, label='Recent Files',
underline=0,
menu=parent.recent_files_menu)
parent.root.config(menu=parent.menubar)
def fill_menus(self):
menudefs = self.Bindings.menudefs
keydefs=self.Bindings.default_keydefs
menudict = self.menudict
for mname, entrylist in menudefs:
menu = menudict.get(mname)
if not menu:
continue
for entry in entrylist:
if not entry:
menu.add_separator()
else:
label, eventname = entry
checkbutton = (label[:1] == '!')
if checkbutton:
label = label[1:]
underline, label = prepstr(label)
accelerator = get_accelerator(keydefs, eventname)
def command(text=self.parent.view, eventname=eventname):
text.event_generate(eventname)
                    if checkbutton:
                        menu.add_checkbutton(label=label, underline=underline, command=command, accelerator=accelerator)
                    else:
                        menu.add_command(label=label, underline=underline, command=command, accelerator=accelerator)
keynames = {
'bracketleft': '[',
'bracketright': ']',
'slash': '/',
}
def get_accelerator(keydefs, eventname):
keylist = keydefs.get(eventname)
# issue10940: temporary workaround to prevent hang with OS X Cocoa Tk 8.5
# if not keylist:
if (not keylist) or (eventname in {
"<<open-module>>",
"<<goto-line>>",
"<<change-indentwidth>>"}):
return ""
s = keylist[0]
s = re.sub(r"-[a-z]\b", lambda m: m.group().upper(), s)
s = re.sub(r"\b\w+\b", lambda m: keynames.get(m.group(), m.group()), s)
s = re.sub("Key-", "", s)
s = re.sub("Cancel","Ctrl-Break",s) # <EMAIL>
s = re.sub("Control-", "Ctrl-", s)
s = re.sub("-", "+", s)
s = re.sub("><", " ", s)
s = re.sub("<", "", s)
s = re.sub(">", "", s)
return s
def prepstr(s):
# Helper to extract the underscore from a string, e.g.
# prepstr("Co_py") returns (2, "Copy").
i = s.find('_')
if i >= 0:
s = s[:i] + s[i+1:]
return i, s
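# Worked example of get_accelerator (hedged; hypothetical key binding):
#   keydefs = {"<<copy>>": ["<Control-Key-c>"]}
#   "<Control-Key-c>" -> "<Control-Key-C>" -> "<Control-C>" -> "<Ctrl-C>"
#   -> "<Ctrl+C>" -> "Ctrl+C", i.e. get_accelerator(keydefs, "<<copy>>") == "Ctrl+C"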
|
1641465
|
import csv
import json
import os
import sys
import threading
import numpy as np
import pandas as pd
import pymongo
import QUANTAXIS as QA
import requests
from flask import Flask, render_template
from flask_socketio import SocketIO, emit
from tabulate import tabulate
import queue
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
@socketio.on('my event')
def test_message(message):
emit('my response', {'data': 'got it!'})
@app.route("/")
def hello():
return "Hello World!"
@app.route('/backtest/<cookie_id>')
def query_backtest_by_id(cookie_id):
pass
def main():
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer(('', 5050), app, handler_class=WebSocketHandler)
server.serve_forever()
|
1641515
|
import pytest
from genomics_data_index.storage.util.ListSliceIter import ListSliceIter
def test_slice_list():
# Case 3 elements
data = [1, 2, 3]
assert [[1], [2], [3]] == list(ListSliceIter(data, slice_size=1).islice())
assert [[1, 2], [3]] == list(ListSliceIter(data, slice_size=2).islice())
assert [[1, 2, 3]] == list(ListSliceIter(data, slice_size=3).islice())
assert [[1, 2, 3]] == list(ListSliceIter(data, slice_size=4).islice())
# Trying slice < 1
with pytest.raises(Exception) as execinfo:
ListSliceIter(data, slice_size=0)
assert 'must be a positive integer' in str(execinfo.value)
# Case empty data
data = []
assert [] == list(ListSliceIter(data, slice_size=1).islice())
assert [] == list(ListSliceIter(data, slice_size=2).islice())
# Case 5 elements and strings
data = ['A', 'B', 'C', 'D', 'E']
assert [['A'], ['B'], ['C'], ['D'], ['E']] == list(ListSliceIter(data, slice_size=1).islice())
assert [['A', 'B'], ['C', 'D'], ['E']] == list(ListSliceIter(data, slice_size=2).islice())
assert [['A', 'B', 'C'], ['D', 'E']] == list(ListSliceIter(data, slice_size=3).islice())
assert [['A', 'B', 'C', 'D'], ['E']] == list(ListSliceIter(data, slice_size=4).islice())
assert [['A', 'B', 'C', 'D', 'E']] == list(ListSliceIter(data, slice_size=5).islice())
assert [['A', 'B', 'C', 'D', 'E']] == list(ListSliceIter(data, slice_size=6).islice())
|
1641523
|
import torch
from torch import nn
import torch.nn.functional as F
def smooth_loss(pred_map):
def gradient(pred):
D_dy = pred[:, :, 1:] - pred[:, :, :-1]
D_dx = pred[:, :, :, 1:] - pred[:, :, :, :-1]
return D_dx, D_dy
loss = 0
weight = 1.
dx, dy = gradient(pred_map)
dx2, dxdy = gradient(dx)
dydx, dy2 = gradient(dy)
loss += (dx2.abs().mean() + dxdy.abs().mean() + dydx.abs().mean() + dy2.abs().mean())*weight
return loss
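# Minimal usage sketch (assumption: pred_map is a 4D tensor shaped
# [batch, channels, height, width], e.g. a predicted depth map); the loss
# penalizes second-order spatial gradients, favoring piecewise-planar maps.
if __name__ == "__main__":
    pred = torch.rand(2, 1, 8, 8)
    print(smooth_loss(pred))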
|
1641551
|
import unittest
import unittest.mock as mock
import io
import os
import split_audiobook
from split_audiobook import * # pylint: disable=W0614
DUMMY_CHAPTERS = """Chapter 1,0,360
"Beers,tears and queers",360,1000
"One/Two:Three",1000,1:01:01.1
"""
class Tests(unittest.TestCase):
def test_secs(self):
s1 = secs_from_time("11.5")
self.assertEqual(11.5, s1)
s2 = secs_from_time("21:11.5")
self.assertEqual(21*60+11.5, s2)
s3 = secs_from_time("2:21:11.5")
self.assertEqual(2*3600+21*60+11.5, s3)
def test_chapters_file(self):
cs = file_to_chapters_iter(io.StringIO(DUMMY_CHAPTERS), False)
cs = list(cs)
self.assertEqual(3, len(cs))
self.assertEqual("Beers,tears and queers", cs[1][0])
self.assertEqual("One-Two-Three", cs[2][0])
self.assertEqual(360, cs[0][2])
self.assertTrue(type(cs[0][1]) == float)
self.assertEqual(3661.1, cs[2][2])
def test_chapters_file2(self):
with open("test_data/external_chapters.csv") as f:
cs = file_to_chapters_iter(f, False)
cs = list(cs)
self.assertEqual(17, len(cs))
self.assertEqual(cs[1][2], 29*60+46.1)
self.assertTrue(cs[16][2] is None)
def test_cue_file(self):
with open("test_data/external_chapters.cue") as f:
cs = file_to_chapters_iter(f, True)
cs = list(cs)
self.assertEqual(17, len(cs))
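            # CUE sheets index time as MM:SS:FF with 75 frames per second,
            # hence the 8/75 fractional second below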
self.assertEqual(cs[1][2], 29*60+46+8/75)
self.assertTrue(cs[16][2] is None)
def test_chapters_meta(self):
with open("test_data/chapters.txt", "rb") as f:
data = f.read()
with mock.patch.object(split_audiobook, "_run_ffprobe_for_chapters") as mocked_run_ffprobe:
mocked_run_ffprobe.return_value = data
cs = meta_to_chapters_iter("some.mp3")
cs = list(cs)
self.assertEqual(59, len(cs))
for l in cs:
self.assertTrue(l[1] < l[2])
def test_split_by_silence(self):
with open('test_data/silences.txt', 'rb') as f:
data = f.read()
with mock.patch.object(split_audiobook.SilenceDetector, "_run_ffmpeg") as mocked_run_ffmpeg:
mocked_run_ffmpeg.return_value = data
opts = mock.MagicMock(length=1800, silence=30, silence_duration=2)
chapters = calc_split_points("test_file.mp3", opts)
chapters = list(chapters)
self.assertEqual(16, len(chapters))
prev_end = 0
for _chap, start, end in chapters:
self.assertEqual(prev_end, start)
if end is not None:
dur = end - start
diff = abs(dur - opts.length)
self.assertTrue(
                        diff < opts.length / 2, "diff %0.2f is smaller than half of length %0.2f" % (diff, opts.length / 2))
prev_end = end
def test_detect_silence(self):
with open('test_data/silences.txt', 'rb') as f:
data = f.read()
with mock.patch.object(SilenceDetector, "_run_ffmpeg") as mocked_run_ffmpeg:
mocked_run_ffmpeg.return_value = data
d = SilenceDetector("somefile.mp3")
s = list(d)
self.assertTrue(len(s) > 50)
s2 = d.find_after(1800)
self.assertEqual(1813.24, s2[0][0])
self.assertEqual(1815.3, s2[0][1])
s1 = d.find_before(1800)
self.assertEqual(1507.13, s1[0][0])
self.assertEqual(1509.17, s1[0][1])
if __name__ == "__main__":
unittest.main()
|
1641579
|
import json
import numpy as np
from nose import with_setup
from pybbn.generator.bbngenerator import generate_singly_bbn, convert_for_exact_inference
from pybbn.graph.dag import Dag, BbnUtil, Bbn
from pybbn.graph.edge import Edge, EdgeType
from pybbn.graph.node import Node
def setup():
"""
Setup.
:return: None.
"""
np.random.seed(37)
def teardown():
"""
Teardown.
:return: None.
"""
pass
@with_setup(setup, teardown)
def test_dag_creation():
"""
Tests DAG creation.
:return: None.
"""
n0 = Node(0)
n1 = Node(1)
n2 = Node(2)
e0 = Edge(n0, n1, EdgeType.DIRECTED)
e1 = Edge(n1, n2, EdgeType.DIRECTED)
e2 = Edge(n2, n0, EdgeType.DIRECTED)
g = Dag()
g.add_node(n0)
g.add_node(n1)
g.add_edge(e0)
g.add_edge(e1)
g.add_edge(e2)
print(g)
assert len(g.get_nodes()) == 3
assert len(g.get_edges()) == 2
assert len(list(g.get_neighbors(0))) == 1
assert len(list(g.get_neighbors(1))) == 2
assert len(list(g.get_neighbors(2))) == 1
assert 1 in g.get_neighbors(0)
assert 0 in g.get_neighbors(1)
assert 2 in g.get_neighbors(1)
assert 1 in g.get_neighbors(2)
assert g.edge_exists(0, 1) == 1
assert g.edge_exists(1, 2) == 1
assert g.edge_exists(0, 2) == 0
assert len(g.get_parents(0)) == 0
assert len(g.get_parents(1)) == 1
assert len(g.get_parents(2)) == 1
assert 0 in g.get_parents(1)
assert 1 in g.get_parents(2)
assert len(g.get_children(0)) == 1
assert len(g.get_children(1)) == 1
assert len(g.get_children(2)) == 0
assert 1 in g.get_children(0)
assert 2 in g.get_children(1)
@with_setup(setup, teardown)
def test_csv_serde():
"""
Tests CSV serde.
:return: None.
"""
try:
lhs = BbnUtil.get_huang_graph()
Bbn.to_csv(lhs, 'huang.csv')
rhs = Bbn.from_csv('huang.csv')
assert len(lhs.get_nodes()) == len(rhs.get_nodes())
assert len(lhs.get_edges()) == len(rhs.get_edges())
lhs_nodes = set([str(node) for node in lhs.get_nodes()])
rhs_nodes = set([str(node) for node in rhs.get_nodes()])
for n in lhs_nodes:
assert n in rhs_nodes
lhs_edges = set([str(edge) for edge in lhs.get_edges()])
rhs_edges = set([str(edge) for edge in rhs.get_edges()])
for e in lhs_edges:
assert e in rhs_edges
    except Exception:
assert False
finally:
import os
try:
os.remove('huang.csv')
        except OSError:
pass
@with_setup(setup, teardown)
def test_to_dict():
"""
Tests creating serializable dictionary representation.
:return: None.
"""
bbn = BbnUtil.get_huang_graph()
d = Bbn.to_dict(bbn)
j = json.dumps(d, sort_keys=True, indent=2)
e = """{
"edges": [
{
"ch": 1,
"pa": 0
},
{
"ch": 2,
"pa": 0
},
{
"ch": 3,
"pa": 1
},
{
"ch": 4,
"pa": 2
},
{
"ch": 5,
"pa": 3
},
{
"ch": 5,
"pa": 4
},
{
"ch": 6,
"pa": 2
},
{
"ch": 7,
"pa": 4
},
{
"ch": 7,
"pa": 6
}
],
"nodes": {
"0": {
"probs": [
0.5,
0.5
],
"variable": {
"id": 0,
"name": "a",
"values": [
"on",
"off"
]
}
},
"1": {
"probs": [
0.5,
0.5,
0.4,
0.6
],
"variable": {
"id": 1,
"name": "b",
"values": [
"on",
"off"
]
}
},
"2": {
"probs": [
0.7,
0.3,
0.2,
0.8
],
"variable": {
"id": 2,
"name": "c",
"values": [
"on",
"off"
]
}
},
"3": {
"probs": [
0.9,
0.1,
0.5,
0.5
],
"variable": {
"id": 3,
"name": "d",
"values": [
"on",
"off"
]
}
},
"4": {
"probs": [
0.3,
0.7,
0.6,
0.4
],
"variable": {
"id": 4,
"name": "e",
"values": [
"on",
"off"
]
}
},
"5": {
"probs": [
0.01,
0.99,
0.01,
0.99,
0.01,
0.99,
0.99,
0.01
],
"variable": {
"id": 5,
"name": "f",
"values": [
"on",
"off"
]
}
},
"6": {
"probs": [
0.8,
0.2,
0.1,
0.9
],
"variable": {
"id": 6,
"name": "g",
"values": [
"on",
"off"
]
}
},
"7": {
"probs": [
0.05,
0.95,
0.95,
0.05,
0.95,
0.05,
0.95,
0.05
],
"variable": {
"id": 7,
"name": "h",
"values": [
"on",
"off"
]
}
}
}
}"""
assert len(j) == len(e)
assert j == e
@with_setup(setup, teardown)
def test_generated_serde():
"""
Tests serde of generated BBN.
    :return: None.
"""
g, p = generate_singly_bbn(100, max_iter=10)
e_bbn = convert_for_exact_inference(g, p)
d = Bbn.to_dict(e_bbn)
s = json.dumps(d, sort_keys=True, indent=2)
d = json.loads(s)
o_bbn = Bbn.from_dict(d)
assert len(e_bbn.get_nodes()) == len(o_bbn.get_nodes())
assert len(e_bbn.get_edges()) == len(o_bbn.get_edges())
@with_setup(setup, teardown)
def test_from_dict():
"""
Tests creating BBN from dictionary (deserialized from JSON).
:return: None.
"""
e_bbn = BbnUtil.get_huang_graph()
o_bbn = Bbn.from_dict(Bbn.to_dict(e_bbn))
assert len(e_bbn.get_nodes()) == len(o_bbn.get_nodes())
assert len(e_bbn.get_edges()) == len(o_bbn.get_edges())
|
1641655
|
from project import db
import datetime
from project.corsi.models import Corso
# Relationship table: one Corso (course) to many Serate (sessions)
class Serata(db.Model):
__tablename__ = "serata"
__table_args__ = (db.UniqueConstraint("id", "data", name="constraint_serata"),)
id = db.Column(db.Integer(), primary_key=True)
nome = db.Column(db.String(255), nullable=False)
descrizione = db.Column(db.String(255), nullable=False)
data = db.Column(db.DateTime(), nullable=False)
link_partecipazione = db.Column(db.String(255), nullable=True)
link_registrazione = db.Column(db.String(255), nullable=True)
corso_id = db.Column(db.Integer(), db.ForeignKey("corso.id"))
def __init__(self, nome, descrizione, data, link_partecipazione='', link_registrazione=''):
self.nome = nome
self.descrizione = descrizione
self.data = data
self.link_partecipazione = link_partecipazione
self.link_registrazione = link_registrazione
def __repr__(self):
return "<Descrizione '{}'. Link registrazione>".format(self.descrizione, self.link_registrazione)
@staticmethod
def insert_test_serate():
lista_serate = [
("Flask 1", "Introduzione a Flask e ai web server con Jinja Base", datetime.datetime(2020, 10, 12, hour=20), '', 'https://www.youtube.com/watch?v=FPI5-oGKiVI&t=759s'),
("Flask 2", "Jinja avanzato e Forms", datetime.datetime(2020, 10, 19, hour=20), '', 'https://www.youtube.com/watch?v=C-iEkd-BpE4'),
("Flask 3", "Flask con Database", datetime.datetime(2020, 10, 26, hour=20), '', 'https://www.youtube.com/watch?v=rCXhuSiOcZU'),
("Flask 4", "Review con Andrea", datetime.datetime(2020, 11, 2, hour=20), '', 'https://www.youtube.com/watch?v=izIKXOrbI5U'),
("Flask 5", "Review con Mario", datetime.datetime(2020, 11, 9, hour=20), '', 'https://vimeo.com/478050019'),
("Flask 6", "Blueprints, refactoring e tests con Mario", datetime.datetime(2020, 11, 16, hour=20), 'https://zoom.us/j/99953652561?pwd=<PASSWORD>', 'https://vimeo.com/480155611'),
("Flask 7", "Autenticazione con Mario", datetime.datetime(2020, 11, 23, hour=20), 'https://zoom.us/j/95155339456?pwd=<PASSWORD>', 'https://vimeo.com/483066531'),
("Flask 8", "Profili, ruoli e blog con Mario", datetime.datetime(2020, 11, 30, hour=20), 'https://zoom.us/j/98250996690?pwd=<PASSWORD>', ''),
("Flask 9", "Deploy in produzione con Andrea", datetime.datetime(2020, 12, 7, hour=20), '', ''),
("Flask 10", "REST API con ?", datetime.datetime(2020, 12, 14, hour=20), '', ''),
]
corso_flask = Corso.query.filter_by(nome="Flask").first()
for serata in lista_serate:
serata_db = Serata.query.filter_by(nome=serata[0]).first()
if serata_db is None:
serata_db = Serata(*serata)
serata_db.corso_id = corso_flask.id
db.session.add(serata_db)
db.session.commit()
|
1641657
|
import random
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
num_games = 1000
ot_shots = 10
##########################
# Team 1 - Stronger Team #
##########################
team1 = {
'2pt rate': .50,
'3pt rate': .50,
'2pt%': .55,
'3pt%': .38,
'orbd': .275
}
#############################
# Team 2 - Out Matched Team #
#############################
team2 = {
'2pt rate': .50,
'3pt rate': .50,
'2pt%': .50,
'3pt%': .33,
'orbd': .225
}
def points(team):
roll_shot_type = random.random()
roll_make = random.random()
if roll_shot_type <= team['2pt rate']:
if roll_make <= team['2pt%']:
return 2
else:
if roll_make <= team['3pt%']:
return 3
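    # a missed shot can be offensive-rebounded; on success the possession
    # continues and the shot is re-rolled recursively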
roll_orbd = random.random()
if roll_orbd < team['orbd']:
return points(team)
return 0
def play_game(shots_to_take):
t1_points_in_game = 0
t2_points_in_game = 0
for shot in range(shots_to_take):
t1_points_in_game += points(team1)
t2_points_in_game += points(team2)
return t1_points_in_game, t2_points_in_game
results = []
for rate3 in [0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6]:
team2['3pt rate'] = rate3
team2['2pt rate'] = 1 - rate3
for shots in range(60, 121):
for game in range(num_games):
t1_points, t2_points = play_game(shots)
while t1_points == t2_points:
t1_new, t2_new = play_game(ot_shots)
t1_points += t1_new
t2_points += t2_new
result = {
'shots': shots,
'3rate': rate3,
'team1': t1_points,
'team2': t2_points,
'game': game,
'team2_win': t2_points > t1_points,
}
results.append(result)
frame = pd.DataFrame(results)
wins = frame.groupby(by=['shots', '3rate'])['team2_win'].sum().reset_index()
wins['win%'] = wins['team2_win']/num_games
fig = plt.figure()
ax = Axes3D(fig)  # attach 3D axes to the figure
# Data for the three-dimensional surface plot
zdata = wins['win%']
xdata = wins['shots']
ydata = wins['3rate']
ax.plot_trisurf(xdata, ydata, zdata, cmap=cm.coolwarm, linewidth=0, antialiased=False)
ax.set_xlabel('Shots')
ax.set_ylabel('3pt Attempt Rate')
ax.set_zlabel('Wins Percent')
plt.show()
|
1641685
|
from django.contrib.auth.models import User
from django.db import models
class Organization(models.Model):
"""Groups users and apps"""
name = models.CharField(max_length=140)
slug = models.SlugField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.slug
class AblatorUser(models.Model):
"""Extension of the regular Django User Model with Ablator-Specific addons"""
user = models.OneToOneField(User, on_delete=models.CASCADE)
organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.user.username
|
1641694
|
from kedro.pipeline import Pipeline, node
from kedro.pipeline.modular_pipeline import pipeline
from .nodes import (
aggregate_company_data,
apply_types_to_companies,
apply_types_to_reviews,
apply_types_to_shuttles,
combine_shuttle_level_information,
)
def create_pipeline(**kwargs) -> Pipeline:
"""This method imports the python functions which accept raw data,
add types and wrangle into primary layer outputs. The pipeline
inputs and outputs are appropriately namespaced and the
input/output datasets are mapped to the right catalog values.
Returns:
Pipeline: A set of nodes which take data from the raw to
the intermediate then primary layers.
"""
return pipeline(
[
node(
func=apply_types_to_companies,
inputs="companies",
outputs="int_typed_companies",
),
node(
func=apply_types_to_shuttles,
inputs="shuttles",
outputs="int_typed_shuttles",
),
node(
func=apply_types_to_reviews,
inputs=["reviews", "params:typing.reviews.columns_as_floats"],
outputs="int_typed_reviews",
),
node(
func=aggregate_company_data,
inputs="int_typed_companies",
outputs="prm_agg_companies",
name="company_agg",
),
node(
func=combine_shuttle_level_information,
inputs={
"shuttles": "int_typed_shuttles",
"reviews": "int_typed_reviews",
"companies": "prm_agg_companies",
},
outputs=["prm_shuttle_company_reviews", "prm_spine_table"],
name="combine_step",
),
node(
func=lambda x: x,
inputs="prm_spine_table",
outputs="prm_spine_table_clone",
)
],
namespace="ingestion", # provide inputs
inputs={"reviews", "shuttles", "companies"}, # map inputs outside of namespace
outputs={
"prm_spine_table",
"prm_shuttle_company_reviews",
},
)
|
1641696
|
def get_related_to(js):
info = get_info(js)
film_name = info[0]
film_id = info[1]
related_list = []
try:
relateds = js['related']['relatedTo']
        if relateds is not None:
for r in relateds:
title = r['title']
cosmo_id = r['ids']['cosmoId']
director_name = ''
director_id = ''
release_year = r['releaseYear']
rating = r['rating']
directors = r['directors']
            if directors is not None:
director_id = directors[0]['id']
director_name = directors[0]['name']
entry = (film_name,film_id,title,cosmo_id,release_year,rating,
director_name,director_id)
related_list.append(entry)
    except Exception:
        pass
return related_list
|
1641793
|
from utils import *
if __name__ == '__main__':
prots = [
'np',
'h1',
'gag',
'cov',
'cyc',
'glo',
'pgk',
'eno',
'ser'
]
for prot in prots:
fname_esm1b = f'target/ev_cache/{prot}_pseudotime.txt'
fname_tape = f'target/ev_cache/{prot}_tape_pseudotime.txt'
x_esm1b = np.loadtxt(fname_esm1b)
x_tape = np.loadtxt(fname_tape)
tprint(prot)
tprint('Spearman r = {}, P = {}'.format(*ss.spearmanr(x_esm1b, x_tape)))
tprint('')
#if prot == 'np':
# fname_onehot = f'target/ev_cache/{prot}_onehot_pseudotime.txt'
# x_onehot = np.loadtxt(fname_onehot)
# tprint(prot + ' onehot')
# tprint('Spearman r = {}, P = {}'
# .format(*ss.spearmanr(x_esm1b, x_onehot)))
# tprint('')
#
# fname_blosum = f'target/ev_cache/{prot}_blosum62_pseudotime.txt'
# x_blosum = np.loadtxt(fname_blosum)
# tprint(prot + ' blosum')
# tprint('Spearman r = {}, P = {}'
# .format(*ss.spearmanr(x_esm1b, x_blosum)))
# tprint('')
#
# fname_blosum = f'target/ev_cache/{prot}_onehot_blosum62_pseudotime.txt'
# x_blosum = np.loadtxt(fname_blosum)
# tprint(prot + ' onehot and blosum')
# tprint('Spearman r = {}, P = {}'
# .format(*ss.spearmanr(x_esm1b, x_blosum)))
# tprint('')
|
1641827
|
import time
def print_msg_box(msg, indent=1, width=None, title=None):
"""Print message-box with optional title."""
lines = msg.split('\n')
space = " " * indent
if not width:
width = max(map(len, lines))
box = f'╔{"═" * (width + indent * 2)}╗\n' # upper_border
if title:
box += f'║{space}{title:<{width}}{space}║\n' # title
box += f'║{space}{"-" * len(title):<{width}}{space}║\n' # underscore
box += ''.join([f'║{space}{line:<{width}}{space}║\n' for line in lines])
box += f'╚{"═" * (width + indent * 2)}╝' # lower_border
print(box)
class sort:
# bubble sort algorithm
def bubble_sort(self,arr,hint=False):
start = time.time()
for i in range(len(arr)-1):
for j in range(len(arr)-i-1):
if arr[j] > arr[j+1] :
arr[j],arr[j+1] = arr[j+1],arr[j]
print(arr)
end = time.time()
print("Bubble Sort Runtime = {}".format(end-start))
if(hint is True):
self.bubble_sort_hint()
return arr
def bubble_sort_hint(self):
message ="""
Bubble Sort
------------------------------------
Purpose : sorting in increasing order
Method : Bubble Making, Swapping
Time Complexity: Worst Case - O(n^2)
Hint :
Try to kick out the greater value to the rightmost position by using loops
and value swapping.
Pseudocode:
        --> for i in [0, length of array - 1]
                for j in [0, length of array - i - 1]
                    if (array[j] > array[j+1])
                        swap array[j] & array[j+1]
Visualization:
Given Array :
+-----+-----+-----+
| 5 | 4 | 3 |
+-----+-----+-----+
First Iteration :
+-----+-----+-----+
| 4 | 5 | 3 |
+-----+-----+-----+
Second Iteration :
+-----+-----+-----+
| 4 | 3 | 5 |
+-----+-----+-----+
Third Iteration :
+-----+-----+-----+
| 3 | 4 | 5 |
+-----+-----+-----+
Learn More Here - https://en.wikipedia.org/wiki/Bubble_sort
"""
print_msg_box(message)
# selection Sort Algorithm
def selection_sort(self,arr,hint=False):
start = time.time()
for i in range(len(arr)-1):
minimum = i
for j in range(i+1,len(arr)):
if arr[j] < arr[minimum]:
minimum = j
arr[minimum],arr[i] = arr[i],arr[minimum]
print(arr)
end = time.time()
print("Selection Sort Runtime = {}".format(end-start))
if(hint is True):
self.selection_sort_hint()
return arr
def selection_sort_hint(self):
message ="""
selection Sort
------------------------------------
Purpose : sorting in increasing order
Method : Pick Up minimum, swap with minimum
Time Complexity: Worst Case - O(n^2)
Hint :
In every iteration the minimum element from the unsorted subarray is picked and
moved to the sorted subarray.
Pseudocode:
--> for i in [0,length of array]
minimum = i
for j in [i+1,length of array]
if arr[j] < arr[minimum]
minimum = j
swap arr[i] & arr[minimum]
Visualization:
Given Array :
+-----+-----+-----+
| 5 | 4 | 3 |
+-----+-----+-----+
We have two buckets,
| | | |
| Unsorted | | sorted |
| | | |
| 5,4,3 | | empty |
-------------- --------------
Select the minimum from the unsorted bucket and put that in sorted bucket
| | | |
| Unsorted | | sorted |
| | | |
| 5,4 | | 3 |
-------------- --------------
Again select the minimum from the unsorted bucket and put that in
sorted bucket
| | | |
| Unsorted | | sorted |
| | | |
| 5 | | 3,4 |
-------------- --------------
Repeat the same till the unsorted bucket is empty
| | | |
| Unsorted | | sorted |
| | | |
| | | 3,4,5 |
-------------- --------------
Finally you have the sorted array.
Learn More Here - https://en.wikipedia.org/wiki/Selection_sort
"""
print_msg_box(message)
class string_algorithms:
def isUnique(self,input_string,hint=False):
mapp = []
for i in input_string:
if i not in mapp:
mapp.append(i)
if(hint is True):
self.isUnique_hint()
return len(mapp) == len(input_string)
def isUnique_hint(self):
message ="""
Unique Character Checking
------------------------------------
Purpose : checking if all the characters in a given string are unique
Method : list comprehension
        Time Complexity: Worst Case - O(n^2), n = length of the input string (list membership checks are linear)
Hint :
How about using the inbuilt list data structure ?
Pseudocode:
--> create an empty list named mapp
--> for i in input string
if i not in mapp
add i to the empty list
--> The string is unique only when the
length of the map after the total
iterations is same as that of the
length of the input string
Visualization:
Given String :
"aabcc"
Empty List:
----------------
| |
----------------
after first iteration :
----------------
| a |
----------------
after second iteration :
----------------
| a |
----------------
[because a was already in the list]
after third iteration :
----------------
| a b |
----------------
Finally :
----------------
| a b c |
----------------
size = 3 which is not equal to length of "aabcc"
Learn More about Lists Below -
https://docs.python.org/3/tutorial/datastructures.html
"""
print_msg_box(message)
def isPermutation(self,input1,input2,hint=False):
if(hint is True):
self.isPermutation_hint()
if(len(input1)!=len(input2)):
return False
mapp1 = []
mapp2 = []
for i in input1:
mapp1.append(i)
for j in input2:
mapp2.append(j)
mapp1.sort()
mapp2.sort()
return mapp1==mapp2
def isPermutation_hint(self):
message = """
Two String Permutations
------------------------------------
Purpose : checking if one string is consisting of the permutation of
the characters in the other string
Method : list comprehension
        Time Complexity: Worst Case - O(n log n), n = length of the strings (dominated by sorting)
Hint :
How about using two inbuilt list data structure ?
Pseudocode:
--> check if length(string1) != len(string2)
return False
--> create two empty lists named mapp1 & mapp2
--> for i in input string 1
add i to mapp1
--> for i in input string 2
add i to mapp2
--> sort mapp1
--> sort mapp2
--> return true if mapp1 and mapp2 are equal
Visualization:
Given Two String :
"aabcc"
"abcac"
Two Empty List:
List 1 List 2
---------------- ----------------
| | | |
---------------- ----------------
After Filling Lists :
List 1 List 2
---------------- ----------------
| a a b c c | | a b c a c |
---------------- ----------------
Applying sort function :
List 1 List 2
---------------- ----------------
| a a b c c | | a a b c c |
---------------- ----------------
Final check :
------------------ +------+
| List 1 == List 2 | -------> | True |
------------------ +------+
Learn More about Lists Below -
https://docs.python.org/3/tutorial/datastructures.html
"""
print_msg_box(message)
def URLify(self,input_str,key,hint=False):
if(hint is True):
self.URLify_hint()
input2 = ""
for i in range(len(input_str)):
if(input_str[i] != ' '):
input2+=input_str[i]
elif((input_str[i]==' ') and (input_str[i+1] == ' ')):
return input2
elif((input_str[i]==' ') and (input_str[i+1] != ' ')):
input2 += key
return input2
def URLify_hint(self):
message = """
Making a URL From a String
------------------------------------
Purpose : Making a URL by replacing the spaces with a key value entered
by the user
Method : string manipulation
Time Complexity : Worst Case - O(n), n = length of the string
Hint :
Take a blank string, and add data from the input string to the blank
string to prepare the final URL
Pseudocode :
--> Take a blank string s2
--> for i in [0,length of input string]
if(not a whitespace)
add to s2
elif(whitespace and next place is also whitespace)
return s2
elif(whitespace and next place not whitespace)
add the key value to the blank string
Visualization:
Given String To Make URL :
"Python is love"
Key : "%20"
Break The Given String : /*/ ----> whitespace
+--------+-------+----+-------+------+
| Python | /*/ | is | /*/ | love |
+--------+-------+----+-------+------+
^ ^ ^
^ ^ ^
^ ^ ^
1 2 3
        We will take 1, 2 and 3 successively and in place of whitespaces we will
concatenate the key value.
Empty String Addition :
+-+ +--------+ +-------+ +----+ +-------+ +------+
| | + | Python | + | %20 | + | is | + | %20 | + | love |
+-+ +--------+ +-------+ +----+ +-------+ +------+
Learn More about String Concatenation Below -
https://en.wikipedia.org/wiki/Concatenation
"""
print_msg_box(message)
def isPalindromicPermutation(self,input1,hint=False):
if(hint is True):
self.isPalindromicPermutation_hint()
mapp = {}
for i in range(len(input1)):
key = input1[i]
if(key in mapp.keys()):
mapp[key] += 1
else:
mapp.update({key:1})
flag = 0
for i in mapp.keys():
if(mapp[i] %2 == 1):
flag+=1
return flag<=1
def isPalindromicPermutation_hint(self):
message = """
Palindromic Permutation
------------------------------------
        Purpose : To check whether a permutation of the characters in a string can
make it palindromic
Method : string manipulation, palindromic behaviour
Time Complexity : Worst Case - O(n), n = length of the string
Hint :
        Make a dictionary of characters and their repetitions.
Pseudocode :
--> Take a blank dictionary
--> for i in [0,length of input string]
key = input[i]
if(key in dictionary)
dictionary[key]+=1
else
push {key:1} inside dictionary
        --> Check if dictionary[i] % 2 == 1
Visualization:
Given String :
"abbca"
Making a table using dictionary :
Step 1 - create a blank dictionary - {}
Step 2 - check if the key exists
yes --> add 1
no --> push {key:1} inside the dictionary
Step 3 - You have the following table
            +----------+----------------+
            |   key    |  repetitions   |
            +----------+----------------+
            |    a     |       2        | --> rem = 0, flag = 0
            -----------------------------
            |    b     |       2        | --> rem = 0, flag = 0
            -----------------------------
            |    c     |       1        | --> rem = 1, flag = 1
            -----------------------------
            Step 4 - check the remainder of each count; flag starts at 0
Step 5 - return boolean
Learn More about Python Dictionaries Below -
https://www.w3schools.com/python/python_dictionaries.asp
"""
print_msg_box(message)
def oneEditAwayInsert(self,input1,input2):
index1 = 0
index2 = 0
while((index2 < len(input2)) and (index1 < len(input1))):
if(input1[index1] != input2[index2]):
if(index1 != index2):
return False
index2+=1
else:
index1+=1
index2+=1
return True
def oneEditAwayReplace(self,input1,input2):
flag = False
for i in range(len(input1)):
if(input2[i]!=input1[i]):
if(flag):
return False
flag = True
return True
def oneEditAway(self,input1,input2,hint=False):
if(hint is True):
self.oneEditAway_hint()
if(len(input1)==len(input2)):
return self.oneEditAwayReplace(input1,input2)
elif(len(input1)+1==len(input2)):
return self.oneEditAwayInsert(input1,input2)
elif(len(input1)-1==len(input2)):
return self.oneEditAwayInsert(input2,input1)
return False
def oneEditAway_hint(self):
message = """
        One Edit Away
------------------------------------
Purpose : Check if two strings are one edit (or zero) away,where edit
means the following three methods,
- inserting a character
- removing a character
- replacing a character
Method : string manipulation
Time Complexity : Worst Case - O(n), n = length of the greater string
Hint :
Divide the problem in three cases of insert, remove and replace
and solve the problem.
Pseudocode :
For checking "replace" :
--> flag = False
--> for i in range(len(input1)):
if(input2[i]!=input1[i]):
if(flag):
return False
flag = True
For checking "insert" & "remove" :
--> index1 = 0
--> index2 = 0
--> while((index2 < len(input2)) and (index1 < len(input1))):
if(input1[index1] != input2[index2]):
if(index1 != index2):
return False
index2+=1
else:
index1+=1
index2+=1
return True
"""
print_msg_box(message)
def compressedString(self,input1,hint=False):
if(hint is True):
self.compressedString_hint()
mapp = {}
output = ""
for i in range(len(input1)):
key = input1[i]
if(key in mapp.keys()):
mapp[key]+=1
else:
mapp.update({key:1})
for key, value in mapp.items():
output = output + key + str(value)
if(len(output) <= len(input1)):
return output
else:
return input1
def compressedString_hint(self):
message = """
Compress The String
------------------------------------
        Purpose : To compress the string by making a summary of the
        repetitions of the characters
Method : string manipulation, python dictionary
Time Complexity : Worst Case - O(n), n = length of the string
Hint :
        Make a dictionary of characters and their repetitions. Finally build a
new string and return it
Pseudocode :
--> Take a blank dictionary
--> Take a blank string output
--> for i in [0,length of input string]
key = input[i]
if(key in dictionary)
dictionary[key]+=1
else
push {key:1} inside dictionary
--> prepare the output string
Visualization:
Given String :
"aabbcccdddeeef"
Making a table using dictionary :
Step 1 - create a blank dictionary - {}
Step 2 - check if the key exists
yes --> add 1
no --> push {key:1} inside the dictionary
Step 3 - You have the following table
            +----------+----------------+
            |   key    |  repetitions   |
            +----------+----------------+
            |    a     |       2        |
            -----------------------------
            |    b     |       2        |
            -----------------------------
            |    c     |       3        |
            -----------------------------
            |    d     |       3        |
            -----------------------------
            |    e     |       3        |
            -----------------------------
            |    f     |       1        |
            -----------------------------
Step 4 - prepare the output string as "a2b2c3d3e3f1"
Learn More about Python Dictionaries Below -
https://www.w3schools.com/python/python_dictionaries.asp
"""
print_msg_box(message)
def rotateImage(self,img_arr,n,hint=False):
if(hint is True):
self.rotateImage_hint()
for layer in range(int(n/2)):
first = layer
last = n-1-layer
for i in range(first,last):
offset = i - first
top = img_arr[first][i]
img_arr[first][i] = img_arr[last - offset][first]
img_arr[last - offset][first] = img_arr[last][last - offset]
img_arr[last][last - offset] = img_arr[i][last]
img_arr[i][last] = top
def rotateImage_hint(self):
message = """
Rotate The Image
------------------------------------
        Purpose : To rotate an N x N 2D array representing an image without
using any external space
Method : 2D array, time-space complexity
Time Complexity : Worst Case - O(n^2), n = number of rows in a matrix
Space Complexity : O(1)
Hint :
Try implementing rotation in layers
Pseudocode :
for layer in range(int(n/2)):
first = layer
last = n-1-layer
for i in range(first,last):
offset = i - first
top = img_arr[first][i]
img_arr[first][i] = img_arr[last - offset][first]
img_arr[last - offset][first] = img_arr[last][last - offset]
img_arr[last][last - offset] = img_arr[i][last]
img_arr[i][last] = top
Visualization:
Given image :
1 2 3 1 4 1
4 8 9 ---> 8 8 2
1 8 9 9 9 3
Find the pivot (if any) :
1 2 3
+---+
4 | 8 | 9 ---> 8 is the constant position
+---+
1 8 9
Rotate Layer Wise using temp variable :
+---+
1 | 2 | 3
+---+
+---+ +---+
| 4 | 8 | 9 | -----> rotate the highlighted layer in 90 degree
+---+ +---+
+---+
1 | 8 | 9
+---+
Rotate Next layer :
+---+ +---+
| 1 | 4 | 3 |
+---+ +---+
8 8 2 -----> rotate the highlighted layer in 90 degree
+---+ +---+
| 1 | 9 | 9 |
+---+ +---+
Finally you have the desired rotated array.
"""
print_msg_box(message)
def setZeros(self,matrix,row,column):
row_arr = [False] * row
col_arr = [False] * column
for i in range(row):
for j in range(column):
if(matrix[i][j] == 0):
row_arr[i] = True
col_arr[j] = True
for i in range(row):
if(row_arr[i]):
for j in range(column):
matrix[i][j] = 0
for i in range(column):
            if(col_arr[i]):
for j in range(row):
matrix[j][i] = 0
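# Quick sanity check for setZeros (illustrative sketch, not part of the
# original class): every row and column containing a zero is zeroed in place.
if __name__ == "__main__":
    algo = string_algorithms()
    matrix = [[1, 2, 3], [4, 0, 6], [7, 8, 9]]
    algo.setZeros(matrix, 3, 3)
    print(matrix)  # expected: [[1, 0, 3], [0, 0, 0], [7, 0, 9]]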
|
1641828
|
from waitress import serve
from flask import Flask, redirect, url_for, render_template, request, jsonify
from model import Generator
import os
model = Generator()
model_name = 'model-5-epochs-256-neurons.h5'
model_file = os.path.join(os.getcwd(),model_name)
model.load_weights(model_file)
app = Flask(__name__)
@app.route("/", methods=["POST", "GET"])
def index():
if request.method == "POST":
seed = request.form["nm"]
generatedText = model.predict(start_seed=seed, gen_size=1000, temp=1.0)
if len(seed) != 0:
return redirect(f"/generate/{generatedText}")
else:
return redirect("/generate/please enter a valid seed")
else:
return render_template("landingpage.html")
@app.route('/generate/<lyrics>')
def generate(lyrics):
lyricsSplit = lyrics.split('\n')
return render_template('result.html',lyrics=lyricsSplit)
@app.route('/api/generate/<seed>')
def api(seed):
generatedText = model.predict(start_seed=seed, gen_size=1000, temp=1.0)
return jsonify({'lyrics' : generatedText})
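# Example client call for the JSON endpoint (hedged sketch; assumes the
# server is running locally on the port below):
#   curl http://localhost:8080/api/generate/hello
#   -> {"lyrics": "..."}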
port = 8080
if __name__ == '__main__':
serve(app=app, port=port)
|
1641858
|
import time
import seq2science
def log_welcome(logger, workflow):
ascii_logo = (
f"""\
____ ____ __
/ ___)( __) / \
\___ \ ) _) ( O )
(____/(____) \__\)
____
(___ \
/ __/
(____)
____ ___ __ ____ __ _ ___ ____
/ ___) / __)( )( __)( ( \ / __)( __)
\___ \( (__ )( ) _) / /( (__ ) _)
(____/ \___)(__)(____)\_)__) \___)(____)
workflow: {workflow}
version: {seq2science.__version__}
docs: https://vanheeringen-lab.github.io/seq2science
"""
)
logger.info(ascii_logo)
# give people a second to appreciate this beautiful ascii art
time.sleep(1)
|
1641876
|
from typing import Tuple
from gym import spaces
import numpy as np
from omegaconf import DictConfig
import torch
import torch.nn as nn
from rlcycle.common.abstract.action_selector import ActionSelector
from rlcycle.common.utils.common_utils import np2tensor
class DQNActionSelector(ActionSelector):
"""DQN arg-max action selector"""
def __init__(self, use_cuda: bool):
ActionSelector.__init__(self, use_cuda)
def __call__(self, policy: nn.Module, state: np.ndarray) -> Tuple[np.ndarray, ...]:
if state.ndim == 1:
state = state.reshape(1, -1)
state = np2tensor(state, self.use_cuda).unsqueeze(0)
with torch.no_grad():
qvals = policy.forward(state)
qvals = qvals.cpu().detach().numpy()
action = np.argmax(qvals)
return action
class QRActionSelector(ActionSelector):
"""Action selector for Quantile Q-value representations"""
def __init__(self, use_cuda: bool):
ActionSelector.__init__(self, use_cuda)
def __call__(self, policy: nn.Module, state: np.ndarray) -> Tuple[np.ndarray, ...]:
if state.ndim == 1:
state = state.reshape(1, -1)
state = np2tensor(state, self.use_cuda).unsqueeze(0)
with torch.no_grad():
qvals = policy.forward(state).mean(dim=2)
qvals = qvals.cpu().numpy()
action = np.argmax(qvals)
return action
class CategoricalActionSelector(ActionSelector):
"""Action selector for categorical Q-value presentations"""
def __init__(self, use_cuda: bool):
ActionSelector.__init__(self, use_cuda)
def __call__(self, policy: nn.Module, state: np.ndarray) -> Tuple[np.ndarray, ...]:
state = np2tensor(state, self.use_cuda).unsqueeze(0)
with torch.no_grad():
dist = policy.forward(state)
weights = dist * policy.support
qvals = weights.sum(dim=2).cpu().numpy()
action = np.argmax(qvals)
return action
class EpsGreedy(ActionSelector):
"""ActionSelector wrapper for epsilon greedy policy
Attributes:
action_selector (ActionSelector): action selector to wrap
        action_space (spaces.Discrete): gym environment action space
eps (float): epsilon value for epsilon greedy
eps_final (float): minimum epsilon value to reach
eps_decay (float): decay rate for epsilon
"""
def __init__(
self,
action_selector: ActionSelector,
action_space: spaces.Discrete,
hyper_params: DictConfig,
):
ActionSelector.__init__(self, action_selector.use_cuda)
self.action_selector = action_selector
self.action_space = action_space
self.eps = hyper_params.eps
self.eps_final = hyper_params.eps_final
self.eps_decay = (
self.eps - self.eps_final
) / hyper_params.max_exploration_frame
def __call__(self, policy: nn.Module, state: np.ndarray) -> np.ndarray:
"""Return exploration action if eps > random.uniform(0,1)"""
        # NOTE: `self.exploration` is not defined in this file; it is assumed
        # to be set by the ActionSelector base class.
        if self.eps > np.random.random() and self.exploration:
return self.action_space.sample()
return self.action_selector(policy, state)
def decay_epsilon(self):
"""Decay epsilon as learning progresses"""
eps = self.eps - self.eps_decay
self.eps = max(eps, self.eps_final)
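# --- Usage sketch (not part of the original module) ---
# Assumes a trained `policy` nn.Module and a DictConfig carrying `eps`,
# `eps_final` and `max_exploration_frame`; the environment name is illustrative.
#
#   import gym
#   from omegaconf import OmegaConf
#   env = gym.make("CartPole-v1")
#   hyper_params = OmegaConf.create(
#       {"eps": 1.0, "eps_final": 0.05, "max_exploration_frame": 100000})
#   selector = EpsGreedy(DQNActionSelector(use_cuda=False),
#                        env.action_space, hyper_params)
#   action = selector(policy, env.reset())
#   selector.decay_epsilon()  # call once per environment step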
|
1641908
|
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
import tornado.ioloop
from tornado.options import define, options, parse_command_line
from karura.server.server import application
define("port", default=8888, help="run on the given port", type=int)
define("debug", default=False, help="run in debug mode")
def main():
parse_command_line()
app = application(options.debug)
port = int(os.environ.get("PORT", options.port))
print("karura server running on {}".format(port))
app.listen(port)
tornado.ioloop.IOLoop.current().start()
if __name__ == "__main__":
main()
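# Example invocation (tornado.options parses these flags from the command
# line; the script name is illustrative):
#   python server_main.py --port=8888 --debug
# A PORT environment variable, if set, takes precedence over --port.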
|
1641940
|
import os
import logging
import logging.config
from paste.deploy import loadapp
here = os.path.dirname(__file__)
ini_path = os.environ.get('INI_FILE')
if not ini_path:
    ini_path = os.path.join(here, '../../production.ini')
application = loadapp('config:%s' % ini_path, relative_to='.')
logging.config.fileConfig(ini_path)
|
1642008
|
import requests
import os
import gzip
import math
import json
from typing import List, Dict
mapping = {
"earn": {
"metric": "iearn",
"labels": ["vault", "param", "address", "version"]
},
"ib": {
"metric": "ironbank",
"labels": ["vault", "param", "address", "version"]
},
"v1": {
"metric": "yearn",
"labels": ["vault", "param", "address", "version"]
},
"v2": {
"metric": "yearn_vault",
"labels": ["vault", "param", "address", "version", "experimental"]
},
"v2_strategy": {
"metric": "yearn_strategy",
"labels": ["vault", "strategy", "param", "address", "version", "experimental"]
},
"special": {
"metric": "yearn_vault",
"labels": ["vault", "param", "address", "version", "experimental"]
}
}
simple_products = ["v1", "earn", "ib", "special"]
def export(timestamp, data):
metrics_to_export = []
for product in simple_products:
metric = mapping[product]["metric"]
for vault, params in data[product].items():
for key, value in params.items():
if key in ["address", "version", "experimental"] or value is None:
continue
has_experiments = product == "special"
label_values = _get_label_values(params, [vault, key], has_experiments)
label_names = mapping[product]["labels"]
item = _build_item(metric, label_names, label_values, value, timestamp)
metrics_to_export.append(item)
for vault, params in data["v2"].items():
metric = mapping["v2"]["metric"]
for key, value in params.items():
if key in ["address", "version", "experimental", "strategies"] or value is None:
continue
label_values = _get_label_values(params, [vault, key], True)
label_names = mapping["v2"]["labels"]
item = _build_item(metric, label_names, label_values, value, timestamp)
metrics_to_export.append(item)
# strategies can have nested structs
metric = mapping["v2_strategy"]["metric"]
for strategy, strategy_params in data["v2"][vault]["strategies"].items():
flat = flatten_dict(strategy_params)
for key, value in flat.items():
if key in ["address", "version", "experimental"] or value is None:
continue
label_values = _get_label_values(params, [vault, strategy, key], True)
label_names = mapping["v2_strategy"]["labels"]
item = _build_item(metric, label_names, label_values, value or 0, timestamp)
metrics_to_export.append(item)
# post all metrics for this timestamp at once
_post(metrics_to_export)
def export_duration(duration_seconds, pool_size, direction, timestamp_seconds):
item = _build_item(
"export_duration",
[ "pool_size", "direction" ],
[ pool_size, direction ],
duration_seconds,
timestamp_seconds
)
_post([item])
def _build_item(metric, label_names, label_values, value, timestamp):
ts_millis = math.floor(timestamp) * 1000
meta = dict(zip(map(_sanitize, label_names), map(str, label_values)))
meta["__name__"] = metric
return {"metric": meta, "values": [_sanitize(value)], "timestamps": [ts_millis]}
def _to_jsonl_gz(metrics_to_export: List[Dict]):
lines = []
for item in metrics_to_export:
lines.append(json.dumps(item))
jsonlines = "\n".join(lines)
return gzip.compress(bytes(jsonlines, "utf-8"))
def _post(metrics_to_export: List[Dict]):
data = _to_jsonl_gz(metrics_to_export)
base_url = os.environ.get('VM_URL', 'http://victoria-metrics:8428')
url = f'{base_url}/api/v1/import'
headers = {
'Connection': 'close',
'Content-Encoding': 'gzip'
}
with requests.Session() as session:
session.post(
url = url,
data = data,
headers = headers
)
def _sanitize(value):
if isinstance(value, bool):
return int(value)
elif isinstance(value, str):
return value.replace('"', '') # e.g. '"yvrenBTC" 0.3.5 0x340832'
return value
def flatten_dict(d):
def items():
for key, value in d.items():
if isinstance(value, dict):
for subkey, subvalue in flatten_dict(value).items():
yield key + "." + subkey, subvalue
else:
yield key, value
return dict(items())
def _get_label_values(params, initial_labels, experimental = False):
    address = _get_string_label(params, "address")
    version = _get_string_label(params, "version")
    label_values = initial_labels + [address, version]
if experimental:
experimental_label = _get_bool_label(params, "experimental")
label_values.append(experimental_label)
return label_values
def _get_bool_label(a_dict, key):
return "true" if key in a_dict and a_dict[key] == True else "false"
def _get_string_label(a_dict, key):
return str(a_dict[key]) if key in a_dict else "n/a"
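# --- Example import line (illustrative values) ---
# _build_item() produces entries in VictoriaMetrics' JSON line import format,
# which _post() gzips and sends to /api/v1/import, e.g.:
#   {"metric": {"vault": "yvDAI", "param": "tvl", "address": "0x...",
#               "version": "0.4.3", "experimental": "false",
#               "__name__": "yearn_vault"},
#    "values": [123.4], "timestamps": [1650000000000]}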
|
1642010
|
from typing import Dict
def normalize_dict(a: Dict) -> Dict:
    """Scale the absolute values of `a` so they sum to 1."""
    total = sum(abs(v) for v in a.values())
    return {k: abs(v) / total for k, v in a.items()}
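# Example: normalize_dict({"a": 1, "b": -3}) -> {"a": 0.25, "b": 0.75}
# (values are scaled by the sum of absolute values; a zero-sum dict would
# raise ZeroDivisionError)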
|
1642040
|
import csv
import json
import logging
import requests
import email.utils as eut
from bs4 import BeautifulSoup
from google.cloud import storage
from datetime import datetime, timedelta, timezone, date
JST = timezone(timedelta(hours=+9), 'JST')
def fetch_csv_as_string(url):
res = requests.get(url)
last_modified = res.headers['Last-Modified']
jst_datetime = eut.parsedate_to_datetime(last_modified).astimezone(JST)
# NOTE: A char code of Fukushima pref's CSV is Shift-JIS
res.encoding = 'shift_jis'
return res.text, jst_datetime
def csv_string_to_list(csvStr):
cr = csv.reader(csvStr.splitlines(), delimiter=',')
csv_list = list(cr)
return csv_list
def get_datetime(datetime_str):
if datetime_str == '':
return None
if '-' in datetime_str:
return datetime.strptime(datetime_str, '%Y-%m-%d')
else:
return datetime.strptime(datetime_str, '%Y/%m/%d')
def generate_datetime_iso(datetime):
return datetime.strftime('%Y-%m-%dT%H:%M:%S')
def generate_datetime_readable(datetime):
return datetime.strftime('%Y/%m/%d %H:%M')
def generate_date_readable(date):
return date.strftime('%Y/%m/%d')
def generate_short_datetime(datetime):
return '{}/{}'.format(datetime.month, datetime.day)
def list_dir(url, ext=''):
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
return [url + '/' + node.get('href') for node in soup.find_all('a') if node.get('href').endswith(ext)]
def upload_json(bucket_name, destination_blob_name, data):
storage_client = storage.Client()
bucket = storage_client.bucket(bucket_name)
blob = bucket.blob(destination_blob_name)
blob.cache_control = 'max-age=10'
blob.upload_from_string(data, content_type='application/json')
    logging.info(
        "File uploaded to {}.".format(destination_blob_name)
    )
class News():
announced_at = None
body = None
url = None
note = None
def __init__(self, announced_at, body, url, note):
self.announced_at = get_datetime(announced_at)
self.body = body
self.url = url
self.note = note
def generate_news_data(news_list):
news_data = []
news_rows = [News(*row) for row in news_list[1:] if row[0]]
news_rows = sorted(news_rows, key=lambda t: t.announced_at, reverse=True)
for news in news_rows:
        if news.announced_at is None:
continue
announce_datetime_str = generate_date_readable(news.announced_at)
news_data.append({
'date': announce_datetime_str,
'text': news.body,
'url': news.url
})
return news_data
def generate_news_json():
news_dir_list = list_dir('http://www.pref.fukushima.lg.jp/w4/covid19/topics/')
news_file_latest_uri = news_dir_list[-1]
logging.info(news_file_latest_uri)
news_csv_string, news_csv_datetime = fetch_csv_as_string(news_file_latest_uri)
logging.info(news_csv_datetime)
news_list = csv_string_to_list(news_csv_string)
return generate_news_data(news_list)
def main(request):
news_data = {
'news_items': generate_news_json()
}
upload_json('fukushima-covid19', 'news.json', json.dumps(news_data, ensure_ascii=False))
|
1642082
|
import subprocess
import time
# Use snek environment on RHEL7
# snake-RHEL6 on RHEL6
file_name = 'test_results/results.txt'
node_list = ['drp-tst-acc0%i' % x for x in [1,2,3,4]]
nodes = ','.join(node_list)
sub_call = '`which mpirun` -q -map-by node --oversubscribe -n %i -H '+ nodes + ' python rwc_mpi.py | tee -a ' + file_name
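# With 8 cores per node on the 4 nodes above, the template expands to
# something like (illustrative):
#   `which mpirun` -q -map-by node --oversubscribe -n 32 \
#       -H drp-tst-acc01,drp-tst-acc02,drp-tst-acc03,drp-tst-acc04 \
#       python rwc_mpi.py | tee -a test_results/results.txt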
core_list = [8]
core_list = core_list[::-1]
for i in range(1):
for core in core_list:
tot_cores = core*len(node_list)
out_call = sub_call % tot_cores
out_print = 'Calling %i cores' % tot_cores
print(out_call)
subprocess.call('echo %s | tee -a %s' % (out_print, file_name), shell=True)
subprocess.call(out_call, shell=True)
|
1642180
|
import math
import torch
import torch.nn as nn
from cuda import USE_CUDA
##################### Model of the LSTM optimizer ##########################
class LSTM_optimizer_Model(torch.nn.Module):
    """LSTM optimizer (a learned optimizer that emits parameter updates)."""
    def __init__(self, input_size, output_size, hidden_size, num_stacks, batchsize, preprocess=True, p=10, output_scale=1):
        super(LSTM_optimizer_Model, self).__init__()
        self.preprocess_flag = preprocess
        self.p = p
        self.input_flag = 2
        if not preprocess:
            self.input_flag = 1
        self.output_scale = output_scale  # output scaling factor from the paper
        self.lstm = torch.nn.LSTM(input_size*self.input_flag, hidden_size, num_stacks)
        self.Linear = torch.nn.Linear(hidden_size, output_size)  # hidden -> output_size
self.Layers = num_stacks
self.batchsize = batchsize
self.Hidden_nums = hidden_size
def LogAndSign_Preprocess_Gradient(self,gradients):
"""
Args:
gradients: `Tensor` of gradients with shape `[d_1, ..., d_n]`.
p : `p` > 0 is a parameter controlling how small gradients are disregarded
Returns:
`Tensor` with shape `[d_1, ..., d_n-1, 2 * d_n]`. The first `d_n` elements
along the nth dimension correspond to the `log output` \in [-1,1] and the remaining
`d_n` elements to the `sign output`.
"""
p = self.p
log = torch.log(torch.abs(gradients))
clamp_log = torch.clamp(log/p , min = -1.0,max = 1.0)
        # BUG FIX: the original used torch.exp(torch.Tensor(p)), which builds an
        # uninitialized tensor of length p; the paper's rule is exp(p) * gradient.
        clamp_sign = torch.clamp(math.exp(p) * gradients, min=-1.0, max=1.0)
        return torch.cat((clamp_log, clamp_sign), dim=-1)  # concatenate along the last (input) dimension of gradients
def Output_Gradient_Increment_And_Update_LSTM_Hidden_State(self, input_gradients, prev_state):
"""LSTM的核心操作 coordinate-wise LSTM """
Layers,batchsize,Hidden_nums = self.Layers, self.batchsize, self.Hidden_nums
if prev_state is None: #init_state
prev_state = (torch.zeros(Layers,batchsize,Hidden_nums),
torch.zeros(Layers,batchsize,Hidden_nums))
if USE_CUDA :
prev_state = (torch.zeros(Layers,batchsize,Hidden_nums).cuda(),
torch.zeros(Layers,batchsize,Hidden_nums).cuda())
update , next_state = self.lstm(input_gradients, prev_state)
        update = self.Linear(update) * self.output_scale  # the LSTM emits this step's hidden state; project it to the output shape
return update, next_state
def forward(self,input_gradients, prev_state):
if USE_CUDA:
input_gradients = input_gradients.cuda()
        # The LSTM input is the gradient; torch.nn.LSTM expects (seq_len, batch, input_dim),
        # so e.g. a gradient of shape [5] becomes [1, 1, 5].
        gradients = input_gradients.unsqueeze(0)
        if self.preprocess_flag:
gradients = self.LogAndSign_Preprocess_Gradient(gradients)
update , next_state = self.Output_Gradient_Increment_And_Update_LSTM_Hidden_State(gradients , prev_state)
# Squeeze to make it a single batch again.[1,1,5]->[5]
update = update.squeeze().squeeze()
return update , next_state
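# --- Worked example of the log-and-sign preprocessing above (illustrative) ---
# With p = 10 and a small gradient g = 1e-8:
#   log branch:  clamp(log|g| / p, -1, 1) = clamp(-18.42 / 10, -1, 1) = -1.0
#   sign branch: clamp(exp(p) * g, -1, 1) ≈ clamp(2.2e-4, -1, 1) ≈ 2.2e-4
# so tiny gradients saturate the log channel while keeping a scaled-sign channel.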
|
1642186
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class BMacHeader(Base):
__slots__ = ()
_SDM_NAME = 'bMacHeader'
_SDM_ATT_MAP = {
'HeaderBDstAddress': 'bMacHeader.header.bDstAddress-1',
'HeaderBSrcAddress': 'bMacHeader.header.bSrcAddress-2',
}
def __init__(self, parent, list_op=False):
super(BMacHeader, self).__init__(parent, list_op)
@property
def HeaderBDstAddress(self):
"""
Display Name: B-Destination Address (Ethernet)
Default Value: 00:00:00:00:00:00
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderBDstAddress']))
@property
def HeaderBSrcAddress(self):
"""
Display Name: B-Source Address (Ethernet)
Default Value: 00:00:00:00:00:00
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderBSrcAddress']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
|
1642208
|
data = (
(0.304397, 0.467683),
(0.273844, 0.491539),
(0.245239, 0.509037),
(0.216803, 0.518960),
(0.185120, 0.523613),
(0.148488, 0.525367),
(0.107639, 0.525459),
(0.064614, 0.524310),
(0.021601, 0.522036),
(-0.019468, 0.518775),
(-0.056854, 0.514755),
(-0.089129, 0.510203),
(-0.115583, 0.505188),
(-0.136417, 0.499492),
(-0.152563, 0.492659),
(-0.165276, 0.484231),
(-0.175734, 0.474004),
(-0.184847, 0.462050),
(-0.193270, 0.448475),
(-0.201347, 0.433304),
(-0.209048, 0.416592),
(-0.216071, 0.398588),
(-0.222075, 0.379746),
(-0.226804, 0.360542),
(-0.230054, 0.341295),
(-0.231662, 0.322125),
(-0.231492, 0.303023),
(-0.229367, 0.283941),
(-0.225033, 0.264845),
(-0.218184, 0.245687),
(-0.208458, 0.226427),
(-0.195405, 0.207065),
(-0.178568, 0.187631),
(-0.157745, 0.168146),
(-0.133257, 0.148589),
(-0.105966, 0.128894),
(-0.077029, 0.108959),
(-0.047635, 0.088738),
(-0.018936, 0.068362),
(0.007859, 0.048185),
(0.031514, 0.028675),
(0.051044, 0.010250),
(0.065943, -0.006792),
(0.076184, -0.022311),
(0.082050, -0.036357),
(0.083993, -0.049121),
(0.082490, -0.060855),
(0.077821, -0.071936),
(0.069859, -0.083023),
(0.058085, -0.094974),
(0.041902, -0.108510),
(0.021085, -0.123975),
(-0.003946, -0.141353),
(-0.032251, -0.160485),
(-0.062598, -0.181215),
(-0.093500, -0.203324),
(-0.123231, -0.226434),
(-0.150167, -0.250061),
(-0.173189, -0.273804),
(-0.191805, -0.297451),
(-0.205980, -0.320930),
(-0.215905, -0.344220),
(-0.221856, -0.367249),
(-0.224112, -0.389830),
(-0.222855, -0.411737),
(-0.218125, -0.432736),
(-0.209994, -0.452511),
(-0.198728, -0.470707),
(-0.184671, -0.487145),
(-0.167989, -0.501890),
(-0.148617, -0.515060),
(-0.126374, -0.526640),
(-0.101032, -0.536551),
(-0.072372, -0.544803),
(-0.040280, -0.551372),
(-0.004810, -0.556008),
(0.033683, -0.558262),
(0.074400, -0.557609),
(0.116082, -0.553568),
(0.157027, -0.545920),
(0.195360, -0.534936),
(0.229623, -0.521301),
(0.259239, -0.505759),
(0.284400, -0.488738),
(0.305613, -0.470221),
(0.323319, -0.449979),
(0.337739, -0.427886),
(0.348942, -0.403905),
(0.357007, -0.377815),
(0.362122, -0.349123),
(0.364555, -0.317283),
(0.364520, -0.281907),
(0.362034, -0.242857),
(0.357009, -0.200432),
(0.349602, -0.155664),
(0.340513, -0.110301),
(0.330911, -0.066253),
(0.321973, -0.024964),
(0.314450, 0.012749),
(0.308605, 0.046528),
(0.304401, 0.076363),
(0.301666, 0.102613),
(0.300219, 0.125869),
(0.299976, 0.146643),
(0.300958, 0.165165),
(0.303208, 0.181437),
(0.306760, 0.195416),
(0.311734, 0.207134),
(0.318417, 0.216661),
(0.327236, 0.223983),
(0.338781, 0.228980),
(0.353947, 0.231444),
(0.373989, 0.231054),
(0.400231, 0.227403),
(0.433382, 0.220253),
(0.472913, 0.209924),
(0.517013, 0.197413),
(0.563229, 0.184056),
(0.609234, 0.171026),
(0.653105, 0.159116),
(0.693199, 0.148872),
(0.728080, 0.140824),
(0.756607, 0.135645),
(0.778131, 0.134146),
(0.792678, 0.137067),
(0.800987, 0.144725),
(0.804169, 0.156900),
(0.803078, 0.173193),
(0.797918, 0.193360),
(0.788427, 0.217184),
(0.774366, 0.244173),
(0.755840, 0.273495),
(0.733302, 0.304180),
(0.707469, 0.335370),
(0.679257, 0.366461),
(0.649705, 0.397044),
(0.619906, 0.426725),
(0.590891, 0.455157),
(0.563472, 0.482270),
(0.538264, 0.508245),
(0.515882, 0.533201),
(0.496951, 0.556988),
(0.481862, 0.579267),
(0.470580, 0.599705),
(0.462706, 0.618155),
(0.457580, 0.634798),
(0.454382, 0.650127),
(0.452286, 0.664765),
(0.450622, 0.679333),
(0.448935, 0.694409),
(0.446991, 0.710514),
(0.444665, 0.728027),
(0.441778, 0.747018),
(0.438062, 0.767194),
(0.433254, 0.788058),
(0.427190, 0.809055),
(0.419851, 0.829494),
(0.411359, 0.848547),
(0.401867, 0.865528),
(0.391384, 0.880119),
(0.379547, 0.892322),
(0.365381, 0.902278),
(0.347116, 0.910119),
(0.322206, 0.915915),
(0.287886, 0.919692),
(0.242335, 0.921462),
(0.185813, 0.921212),
(0.120769, 0.918884),
(0.050710, 0.914482),
(-0.020918, 0.908204),
(-0.090941, 0.900374),
(-0.156247, 0.891297),
(-0.214027, 0.881267),
(-0.262558, 0.870601),
(-0.301713, 0.859504),
(-0.332629, 0.847848),
(-0.356883, 0.835084),
(-0.375839, 0.820381),
(-0.390444, 0.802953),
(-0.401357, 0.782403),
(-0.409221, 0.758792),
(-0.414779, 0.732378),
(-0.418720, 0.703407),
(-0.421461, 0.672156),
(-0.423051, 0.639010),
(-0.423258, 0.604467),
(-0.421831, 0.569218),
(-0.418763, 0.534240),
(-0.414400, 0.500694),
(-0.409336, 0.469585),
(-0.404218, 0.441423),
(-0.399597, 0.416156),
(-0.395898, 0.393342),
(-0.393434, 0.372369),
(-0.392476, 0.352696),
(-0.393483, 0.333924),
(-0.397386, 0.315480),
(-0.405509, 0.296328),
(-0.418947, 0.275300),
(-0.437935, 0.251599),
(-0.461866, 0.224885),
(-0.489760, 0.195170),
(-0.520507, 0.162832),
(-0.552756, 0.128577),
(-0.584924, 0.093101),
(-0.615569, 0.056780),
(-0.643731, 0.019832),
(-0.668890, -0.017201),
(-0.690665, -0.053339),
(-0.708666, -0.087566),
(-0.722637, -0.119278),
(-0.732580, -0.148400),
(-0.738646, -0.175252),
(-0.740879, -0.200388),
(-0.739086, -0.224410),
(-0.733030, -0.247742),
(-0.722641, -0.270538),
(-0.707865, -0.292858),
(-0.688408, -0.314824),
(-0.664019, -0.336581),
(-0.635120, -0.358214),
(-0.602952, -0.379772),
(-0.568965, -0.401334),
(-0.534101, -0.423151),
(-0.498549, -0.445809),
(-0.462049, -0.470165),
(-0.424405, -0.497090),
(-0.385813, -0.527251),
(-0.346924, -0.560979),
(-0.308731, -0.598174),
(-0.272281, -0.638260),
(-0.238313, -0.680276),
(-0.207016, -0.723041),
(-0.178029, -0.765299),
(-0.150566, -0.805828),
(-0.123633, -0.843497),
(-0.096365, -0.877210),
(-0.068184, -0.905824),
(-0.038594, -0.928254),
(-0.006931, -0.943797),
(0.027568, -0.952385),
(0.065507, -0.954426),
(0.107206, -0.950322),
(0.152667, -0.940310),
(0.201440, -0.924761),
(0.252477, -0.904364),
(0.304274, -0.879868),
(0.355392, -0.851780),
(0.404775, -0.820405),
(0.451595, -0.786077),
(0.494996, -0.749338),
(0.533987, -0.710896),
(0.567599, -0.671377),
(0.595259, -0.631041),
(0.616979, -0.589861),
(0.633231, -0.547853),
(0.644734, -0.505261),
(0.652274, -0.462445),
(0.656581, -0.419673),
(0.658275, -0.377053),
(0.657854, -0.334598),
(0.655729, -0.292359),
(0.652315, -0.250654),
(0.648095, -0.210190),
(0.643589, -0.172033),
(0.639286, -0.137348),
(0.635580, -0.106916),
(0.632635, -0.080842),
(0.630262, -0.058603),
(0.627916, -0.039028),
(0.624672, -0.019547),
(0.619118, 0.004608),
(0.609857, 0.038575),
(0.597351, 0.081816),
)
|
1642212
|
import contextlib
import difflib
import pprint
import pickle
import re
import sys
import logging
import warnings
import weakref
import inspect
from copy import deepcopy
from test import support
import unittest
from unittest.test.support import TestEquality, TestHashing, LoggingResult, LegacyLoggingResult, ResultWithNoStartTestRunStopTestRun
from test.support import captured_stderr
log_foo = logging.getLogger('foo')
log_foobar = logging.getLogger('foo.bar')
log_quux = logging.getLogger('quux')
class Test(object):
"""Keep these TestCase classes out of the main namespace"""
class Foo(unittest.TestCase):
def runTest(self):
pass
def test1(self):
pass
class Bar(Foo):
def test2(self):
pass
class LoggingTestCase(unittest.TestCase):
"""A test case which logs its calls."""
def __init__(self, events):
super(Test.LoggingTestCase, self).__init__('test')
self.events = events
def setUp(self):
self.events.append('setUp')
def test(self):
self.events.append('test')
def tearDown(self):
self.events.append('tearDown')
class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
eq_pairs = [(Test.Foo('test1'), Test.Foo('test1'))]
    ne_pairs = [(Test.Foo('test1'), Test.Foo('runTest')),
                (Test.Foo('test1'), Test.Bar('test1')),
                (Test.Foo('test1'), Test.Bar('test2'))]
def test_init__no_test_name(self):
class Test(unittest.TestCase):
def runTest(self):
raise MyException()
def test(self):
pass
self.assertEqual(Test().id()[-13:], '.Test.runTest')
test = unittest.TestCase()
test.assertEqual(3, 3)
with test.assertRaises(test.failureException):
test.assertEqual(3, 2)
with self.assertRaises(AttributeError):
test.run()
def test_init__test_name__valid(self):
class Test(unittest.TestCase):
def runTest(self):
raise MyException()
def test(self):
pass
self.assertEqual(Test('test').id()[-10:], '.Test.test')
def test_init__test_name__invalid(self):
class Test(unittest.TestCase):
def runTest(self):
raise MyException()
def test(self):
pass
try:
Test('testfoo')
except ValueError:
pass
else:
self.fail('Failed to raise ValueError')
def test_countTestCases(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertEqual(Foo('test').countTestCases(), 1)
def test_defaultTestResult(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
result = Foo().defaultTestResult()
self.assertEqual(type(result), unittest.TestResult)
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'addError', 'stopTest']
self.assertEqual(events, expected)
def test_run_call_order__error_in_setUp_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def setUp(self):
super(Foo, self).setUp()
raise RuntimeError('raised by Foo.setUp')
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'addError',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
def test_run_call_order__error_in_test_default_result(self):
events = []
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
raise RuntimeError('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
Foo(events).run()
self.assertEqual(events, expected)
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addFailure',
'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
def test_run_call_order__failure_in_test_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def test(self):
super(Foo, self).test()
self.fail('raised by Foo.test')
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
events = []
Foo(events).run()
self.assertEqual(events, expected)
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
class Foo(Test.LoggingTestCase):
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
Foo(events).run(result)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
self.assertEqual(events, expected)
def test_run_call_order__error_in_tearDown_default_result(self):
class Foo(Test.LoggingTestCase):
def defaultTestResult(self):
return LoggingResult(self.events)
def tearDown(self):
super(Foo, self).tearDown()
raise RuntimeError('raised by Foo.tearDown')
events = []
Foo(events).run()
expected = ['startTestRun', 'startTest', 'setUp', 'test',
'tearDown', 'addError', 'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
def test_run_call_order_default_result(self):
class Foo(unittest.TestCase):
def defaultTestResult(self):
return ResultWithNoStartTestRunStopTestRun()
def test(self):
pass
Foo('test').run()
def _check_call_order__subtests(self, result, events, expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2, 3]:
with self.subTest(i=i):
if i == 1:
self.fail('failure')
for j in [2, 3]:
with self.subTest(j=j):
if i * j == 6:
raise RuntimeError('raised by Foo.test')
1 / 0
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests(self):
events = []
result = LoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'addSubTestSuccess', 'addSubTestFailure',
'addSubTestFailure', 'addSubTestSuccess', 'addError', 'stopTest']
self._check_call_order__subtests(result, events, expected)
def test_run_call_order__subtests_legacy(self):
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addFailure',
'stopTest']
self._check_call_order__subtests(result, events, expected)
def _check_call_order__subtests_success(self, result, events,
expected_events):
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
for i in [1, 2]:
with self.subTest(i=i):
for j in [2, 3]:
with self.subTest(j=j):
pass
Foo(events).run(result)
self.assertEqual(events, expected_events)
def test_run_call_order__subtests_success(self):
events = []
result = LoggingResult(events)
        expected = (['startTest', 'setUp', 'test', 'tearDown'] +
                    6 * ['addSubTestSuccess'] + ['addSuccess', 'stopTest'])
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_success_legacy(self):
events = []
result = LegacyLoggingResult(events)
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addSuccess',
'stopTest']
self._check_call_order__subtests_success(result, events, expected)
def test_run_call_order__subtests_failfast(self):
events = []
result = LoggingResult(events)
result.failfast = True
class Foo(Test.LoggingTestCase):
def test(self):
super(Foo, self).test()
with self.subTest(i=1):
self.fail('failure')
with self.subTest(i=2):
self.fail('failure')
self.fail('failure')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addSubTestFailure', 'stopTest']
Foo(events).run(result)
self.assertEqual(events, expected)
def test_subtests_failfast(self):
events = []
class Foo(unittest.TestCase):
def test_a(self):
with self.subTest():
events.append('a1')
events.append('a2')
def test_b(self):
with self.subTest():
events.append('b1')
with self.subTest():
self.fail('failure')
events.append('b2')
def test_c(self):
events.append('c')
result = unittest.TestResult()
result.failfast = True
suite = unittest.makeSuite(Foo)
suite.run(result)
expected = ['a1', 'a2', 'b1']
self.assertEqual(events, expected)
def test_failureException__default(self):
class Foo(unittest.TestCase):
def test(self):
pass
self.assertIs(Foo('test').failureException, AssertionError)
def test_failureException__subclassing__explicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
raise RuntimeError()
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
def test_failureException__subclassing__implicit_raise(self):
events = []
result = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
self.fail('foo')
failureException = RuntimeError
self.assertIs(Foo('test').failureException, RuntimeError)
Foo('test').run(result)
expected = ['startTest', 'addFailure', 'stopTest']
self.assertEqual(events, expected)
def test_setUp(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
Foo().setUp()
def test_tearDown(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
Foo().tearDown()
def test_id(self):
class Foo(unittest.TestCase):
def runTest(self):
pass
self.assertIsInstance(Foo().id(), str)
def test_run__uses_defaultTestResult(self):
events = []
defaultResult = LoggingResult(events)
class Foo(unittest.TestCase):
def test(self):
events.append('test')
def defaultTestResult(self):
return defaultResult
result = Foo('test').run()
self.assertIs(result, defaultResult)
expected = ['startTestRun', 'startTest', 'test', 'addSuccess',
'stopTest', 'stopTestRun']
self.assertEqual(events, expected)
def test_run__returns_given_result(self):
class Foo(unittest.TestCase):
def test(self):
pass
result = unittest.TestResult()
retval = Foo('test').run(result)
self.assertIs(retval, result)
def test_call__invoking_an_instance_delegates_to_run(self):
resultIn = unittest.TestResult()
resultOut = unittest.TestResult()
class Foo(unittest.TestCase):
def test(self):
pass
def run(self, result):
self.assertIs(result, resultIn)
return resultOut
retval = Foo('test')(resultIn)
self.assertIs(retval, resultOut)
def testShortDescriptionWithoutDocstring(self):
self.assertIsNone(self.shortDescription())
@unittest.skipIf(sys.flags.optimize >= 2,
'Docstrings are omitted with -O2 and above')
def testShortDescriptionWithOneLineDocstring(self):
"""Tests shortDescription() for a method with a docstring."""
self.assertEqual(self.shortDescription(),
'Tests shortDescription() for a method with a docstring.')
@unittest.skipIf(sys.flags.optimize >= 2,
'Docstrings are omitted with -O2 and above')
def testShortDescriptionWithMultiLineDocstring(self):
"""Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
returned used in the short description, no matter how long the
whole thing is.
"""
self.assertEqual(self.shortDescription(),
'Tests shortDescription() for a method with a longer docstring.')
def testAddTypeEqualityFunc(self):
class SadSnake(object):
"""Dummy class for test_addTypeEqualityFunc."""
s1, s2 = SadSnake(), SadSnake()
self.assertFalse(s1 == s2)
def AllSnakesCreatedEqual(a, b, msg=None):
return type(a) == type(b) == SadSnake
self.addTypeEqualityFunc(SadSnake, AllSnakesCreatedEqual)
self.assertEqual(s1, s2)
def testAssertIs(self):
thing = object()
self.assertIs(thing, thing)
        self.assertRaises(self.failureException, self.assertIs, thing, object())
def testAssertIsNot(self):
thing = object()
self.assertIsNot(thing, object())
        self.assertRaises(self.failureException, self.assertIsNot, thing, thing)
def testAssertIsInstance(self):
thing = []
self.assertIsInstance(thing, list)
self.assertRaises(self.failureException, self.assertIsInstance,
thing, dict)
def testAssertNotIsInstance(self):
thing = []
self.assertNotIsInstance(thing, dict)
self.assertRaises(self.failureException, self.assertNotIsInstance,
thing, list)
def testAssertIn(self):
animals = {'monkey': 'banana', 'cow': 'grass', 'seal': 'fish'}
self.assertIn('a', 'abc')
self.assertIn(2, [1, 2, 3])
self.assertIn('monkey', animals)
self.assertNotIn('d', 'abc')
self.assertNotIn(0, [1, 2, 3])
self.assertNotIn('otter', animals)
self.assertRaises(self.failureException, self.assertIn, 'x', 'abc')
self.assertRaises(self.failureException, self.assertIn, 4, [1, 2, 3])
self.assertRaises(self.failureException, self.assertIn, 'elephant',
animals)
self.assertRaises(self.failureException, self.assertNotIn, 'c', 'abc')
        self.assertRaises(self.failureException, self.assertNotIn, 1, [1, 2, 3])
self.assertRaises(self.failureException, self.assertNotIn, 'cow',
animals)
def testAssertDictContainsSubset(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertDictContainsSubset({}, {})
self.assertDictContainsSubset({}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1})
self.assertDictContainsSubset({'a': 1}, {'a': 1, 'b': 2})
self.assertDictContainsSubset({'a': 1, 'b': 2}, {'a': 1, 'b': 2})
with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({1: 'one'}, {})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 2}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
with self.assertRaises(self.failureException):
self.assertDictContainsSubset({'a': 1, 'c': 1}, {'a': 1})
one = ''.join(chr(i) for i in range(255))
with self.assertRaises(self.failureException):
                self.assertDictContainsSubset({'foo': one}, {'foo': '\uFFFD'})
def testAssertEqual(self):
equal_pairs = [((), ()), ({}, {}), ([], []), (set(), set()), (
frozenset(), frozenset())]
for a, b in equal_pairs:
try:
self.assertEqual(a, b)
except self.failureException:
self.fail('assertEqual(%r, %r) failed' % (a, b))
try:
self.assertEqual(a, b, msg='foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with msg= failed' % (a, b))
try:
self.assertEqual(a, b, 'foo')
except self.failureException:
self.fail('assertEqual(%r, %r) with third parameter failed' %
(a, b))
        unequal_pairs = [((), []), ({}, set()),
                         (set([4, 1]), frozenset([4, 2])),
                         (frozenset([4, 5]), set([2, 3])),
                         (set([3, 4]), set([5, 4]))]
for a, b in unequal_pairs:
self.assertRaises(self.failureException, self.assertEqual, a, b)
self.assertRaises(self.failureException, self.assertEqual, a, b,
'foo')
self.assertRaises(self.failureException, self.assertEqual, a, b,
msg='foo')
def testEquality(self):
self.assertListEqual([], [])
self.assertTupleEqual((), ())
self.assertSequenceEqual([], ())
a = [0, 'a', []]
b = []
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertListEqual, a, b)
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertListEqual, tuple(a), tuple(b))
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertSequenceEqual, a, tuple(b))
b.extend(a)
self.assertListEqual(a, b)
self.assertTupleEqual(tuple(a), tuple(b))
self.assertSequenceEqual(a, tuple(b))
self.assertSequenceEqual(tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, a,
tuple(b))
self.assertRaises(self.failureException, self.assertTupleEqual,
tuple(a), b)
self.assertRaises(self.failureException, self.assertListEqual, None, b)
self.assertRaises(self.failureException, self.assertTupleEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertSequenceEqual,
None, tuple(b))
self.assertRaises(self.failureException, self.assertListEqual, 1, 1)
self.assertRaises(self.failureException, self.assertTupleEqual, 1, 1)
        self.assertRaises(self.failureException, self.assertSequenceEqual, 1, 1)
self.assertDictEqual({}, {})
c = {'x': 1}
d = {}
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertDictEqual, c, d)
d.update(c)
self.assertDictEqual(c, d)
d['x'] = 0
        self.assertRaises(unittest.TestCase.failureException,
                          self.assertDictEqual, c, d, 'These are unequal')
self.assertRaises(self.failureException, self.assertDictEqual, None, d)
self.assertRaises(self.failureException, self.assertDictEqual, [], d)
self.assertRaises(self.failureException, self.assertDictEqual, 1, 1)
def testAssertSequenceEqualMaxDiff(self):
self.assertEqual(self.maxDiff, 80 * 8)
seq1 = 'a' + 'x' * 80 ** 2
seq2 = 'b' + 'x' * 80 ** 2
diff = '\n'.join(difflib.ndiff(pprint.pformat(seq1).splitlines(),
pprint.pformat(seq2).splitlines()))
omitted = unittest.case.DIFF_OMITTED % (len(diff) + 1,)
self.maxDiff = len(diff) // 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertLess(len(msg), len(diff))
self.assertIn(omitted, msg)
self.maxDiff = len(diff) * 2
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
self.maxDiff = None
try:
self.assertSequenceEqual(seq1, seq2)
except self.failureException as e:
msg = e.args[0]
else:
self.fail('assertSequenceEqual did not fail.')
self.assertGreater(len(msg), len(diff))
self.assertNotIn(omitted, msg)
def testTruncateMessage(self):
self.maxDiff = 1
message = self._truncateMessage('foo', 'bar')
omitted = unittest.case.DIFF_OMITTED % len('bar')
self.assertEqual(message, 'foo' + omitted)
self.maxDiff = None
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
self.maxDiff = 4
message = self._truncateMessage('foo', 'bar')
self.assertEqual(message, 'foobar')
def testAssertDictEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
            test.assertDictEqual({}, {1: 0})
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertDictEqual did not fail')
def testAssertMultiLineEqualTruncates(self):
test = unittest.TestCase('assertEqual')
def truncate(msg, diff):
return 'foo'
test._truncateMessage = truncate
try:
test.assertMultiLineEqual('foo', 'bar')
except self.failureException as e:
self.assertEqual(str(e), 'foo')
else:
self.fail('assertMultiLineEqual did not fail')
def testAssertEqual_diffThreshold(self):
self.assertEqual(self._diffThreshold, 2 ** 16)
self.maxDiff = None
old_threshold = self._diffThreshold
self._diffThreshold = 2 ** 5
        self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
s = 'x' * 2 ** 4
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s + 'a', s + 'b')
self.assertIn('^', str(cm.exception))
self.assertEqual(s + 'a', s + 'a')
s = 'x' * 2 ** 6
def explodingTruncation(message, diff):
raise SystemError('this should not be raised')
old_truncate = self._truncateMessage
self._truncateMessage = explodingTruncation
        self.addCleanup(lambda: setattr(self, '_truncateMessage', old_truncate))
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
self.assertNotIn('^', str(cm.exception))
self.assertEqual(str(cm.exception), '%r != %r' % (s1, s2))
self.assertEqual(s + 'a', s + 'a')
def testAssertEqual_shorten(self):
old_threshold = self._diffThreshold
self._diffThreshold = 0
        self.addCleanup(lambda: setattr(self, '_diffThreshold', old_threshold))
s = 'x' * 100
s1, s2 = s + 'a', s + 'b'
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[35 chars]' + 'x' * 61
self.assertEqual(str(cm.exception), "'%sa' != '%sb'" % (c, c))
self.assertEqual(s + 'a', s + 'a')
p = 'y' * 50
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[85 chars]xxxxxxxxxxx'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, p, c, p)
)
p = 'y' * 100
s1, s2 = s + 'a' + p, s + 'b' + p
with self.assertRaises(self.failureException) as cm:
self.assertEqual(s1, s2)
c = 'xxxx[91 chars]xxxxx'
d = 'y' * 40 + '[56 chars]yyyy'
self.assertEqual(str(cm.exception), "'%sa%s' != '%sb%s'" % (c, d, c, d)
)
def testAssertCountEqual(self):
a = object()
self.assertCountEqual([1, 2, 3], [3, 2, 1])
self.assertCountEqual(['foo', 'bar', 'baz'], ['bar', 'baz', 'foo'])
self.assertCountEqual([a, a, 2, 2, 3], (a, 2, 3, a, 2))
self.assertCountEqual([1, '2', 'a', 'a'], ['a', '2', True, 'a'])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, 2] + [3] * 100, [1] * 100 + [2, 3])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, '2', 'a', 'a'], ['a', '2', True, 1])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [10], [10, 11])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [10, 11], [10])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [10, 11, 10], [10, 11])
self.assertCountEqual([[1, 2], [3, 4], 0], [False, [3, 4], [1, 2]])
self.assertCountEqual(iter([1, 2, [], 3, 4]), iter([1, 2, [], 3, 4]))
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [], [divmod, 'x', 1, 5j, 2j, frozenset()])
        self.assertCountEqual([{'a': 1}, {'b': 2}], [{'b': 2}, {'a': 1}])
        self.assertCountEqual([1, 'x', divmod, []], [divmod, [], 'x', 1])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [], [divmod, [], 'x', 1, 5j, 2j, set()])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [[1]], [[2]])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, 1, 2], [2, 1])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, 1, '2', 'a', 'a'], ['2', '2', True, 'a'])
        self.assertRaises(self.failureException, self.assertCountEqual,
                          [1, {'b': 2}, None, True], [{'b': 2}, True, None])
a = [{2, 4}, {1, 2}]
b = a[::-1]
self.assertCountEqual(a, b)
        diffs = set(unittest.util._count_diff_all_purpose('aaabccd', 'abbbcce'))
expected = {(3, 1, 'a'), (1, 3, 'b'), (1, 0, 'd'), (0, 1, 'e')}
self.assertEqual(diffs, expected)
diffs = unittest.util._count_diff_all_purpose([[]], [])
self.assertEqual(diffs, [(1, 0, [])])
diffs = set(unittest.util._count_diff_hashable('aaabccd', 'abbbcce'))
expected = {(3, 1, 'a'), (1, 3, 'b'), (1, 0, 'd'), (0, 1, 'e')}
self.assertEqual(diffs, expected)
def testAssertSetEqual(self):
set1 = set()
set2 = set()
self.assertSetEqual(set1, set2)
self.assertRaises(self.failureException, self.assertSetEqual, None,
set2)
self.assertRaises(self.failureException, self.assertSetEqual, [], set2)
self.assertRaises(self.failureException, self.assertSetEqual, set1,
None)
self.assertRaises(self.failureException, self.assertSetEqual, set1, [])
set1 = set(['a'])
set2 = set()
self.assertRaises(self.failureException, self.assertSetEqual, set1,
set2)
set1 = set(['a'])
set2 = set(['a'])
self.assertSetEqual(set1, set2)
set1 = set(['a'])
set2 = set(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1,
set2)
set1 = set(['a'])
set2 = frozenset(['a', 'b'])
self.assertRaises(self.failureException, self.assertSetEqual, set1,
set2)
set1 = set(['a', 'b'])
set2 = frozenset(['a', 'b'])
self.assertSetEqual(set1, set2)
set1 = set()
set2 = 'foo'
self.assertRaises(self.failureException, self.assertSetEqual, set1,
set2)
self.assertRaises(self.failureException, self.assertSetEqual, set2,
set1)
set1 = set([(0, 1), (2, 3)])
set2 = set([(4, 5)])
self.assertRaises(self.failureException, self.assertSetEqual, set1,
set2)
def testInequality(self):
self.assertGreater(2, 1)
self.assertGreaterEqual(2, 1)
self.assertGreaterEqual(1, 1)
self.assertLess(1, 2)
self.assertLessEqual(1, 2)
self.assertLessEqual(1, 1)
self.assertRaises(self.failureException, self.assertGreater, 1, 2)
self.assertRaises(self.failureException, self.assertGreater, 1, 1)
self.assertRaises(self.failureException, self.assertGreaterEqual, 1, 2)
self.assertRaises(self.failureException, self.assertLess, 2, 1)
self.assertRaises(self.failureException, self.assertLess, 1, 1)
self.assertRaises(self.failureException, self.assertLessEqual, 2, 1)
self.assertGreater(1.1, 1.0)
self.assertGreaterEqual(1.1, 1.0)
self.assertGreaterEqual(1.0, 1.0)
self.assertLess(1.0, 1.1)
self.assertLessEqual(1.0, 1.1)
self.assertLessEqual(1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.1)
self.assertRaises(self.failureException, self.assertGreater, 1.0, 1.0)
self.assertRaises(self.failureException, self.assertGreaterEqual,
1.0, 1.1)
self.assertRaises(self.failureException, self.assertLess, 1.1, 1.0)
self.assertRaises(self.failureException, self.assertLess, 1.0, 1.0)
        self.assertRaises(self.failureException, self.assertLessEqual, 1.1, 1.0)
self.assertGreater('bug', 'ant')
self.assertGreaterEqual('bug', 'ant')
self.assertGreaterEqual('ant', 'ant')
self.assertLess('ant', 'bug')
self.assertLessEqual('ant', 'bug')
self.assertLessEqual('ant', 'ant')
self.assertRaises(self.failureException, self.assertGreater, 'ant',
'bug')
self.assertRaises(self.failureException, self.assertGreater, 'ant',
'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual,
'ant', 'bug')
self.assertRaises(self.failureException, self.assertLess, 'bug', 'ant')
self.assertRaises(self.failureException, self.assertLess, 'ant', 'ant')
self.assertRaises(self.failureException, self.assertLessEqual,
'bug', 'ant')
self.assertGreater(b'bug', b'ant')
self.assertGreaterEqual(b'bug', b'ant')
self.assertGreaterEqual(b'ant', b'ant')
self.assertLess(b'ant', b'bug')
self.assertLessEqual(b'ant', b'bug')
self.assertLessEqual(b'ant', b'ant')
self.assertRaises(self.failureException, self.assertGreater, b'ant',
b'bug')
self.assertRaises(self.failureException, self.assertGreater, b'ant',
b'ant')
self.assertRaises(self.failureException, self.assertGreaterEqual,
b'ant', b'bug')
self.assertRaises(self.failureException, self.assertLess, b'bug',
b'ant')
self.assertRaises(self.failureException, self.assertLess, b'ant',
b'ant')
self.assertRaises(self.failureException, self.assertLessEqual,
b'bug', b'ant')
def testAssertMultiLineEqual(self):
sample_text = """http://www.python.org/doc/2.3/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...]
"""
revised_sample_text = """http://www.python.org/doc/2.4.1/lib/module-unittest.html
test case
A test case is the smallest unit of testing. [...] You may provide your
own implementation that does not subclass from TestCase, of course.
"""
sample_text_error = """- http://www.python.org/doc/2.3/lib/module-unittest.html
? ^
+ http://www.python.org/doc/2.4.1/lib/module-unittest.html
? ^^^
test case
- A test case is the smallest unit of testing. [...]
+ A test case is the smallest unit of testing. [...] You may provide your
? +++++++++++++++++++++
+ own implementation that does not subclass from TestCase, of course.
"""
self.maxDiff = None
try:
self.assertMultiLineEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertEqual(sample_text_error, error)
def testAssertEqualSingleLine(self):
sample_text = 'laden swallows fly slowly'
revised_sample_text = 'unladen swallows fly quickly'
sample_text_error = """- laden swallows fly slowly
? ^^^^
+ unladen swallows fly quickly
? ++ ^^^^^
"""
try:
self.assertEqual(sample_text, revised_sample_text)
except self.failureException as e:
error = str(e).split('\n', 1)[1]
self.assertEqual(sample_text_error, error)
def testEqualityBytesWarning(self):
if sys.flags.bytes_warning:
def bytes_warning():
return self.assertWarnsRegex(BytesWarning,
'Comparison between bytes and string')
else:
def bytes_warning():
return contextlib.ExitStack()
with bytes_warning(), self.assertRaises(self.failureException):
self.assertEqual('a', b'a')
with bytes_warning():
self.assertNotEqual('a', b'a')
a = [0, 'a']
b = [0, b'a']
with bytes_warning(), self.assertRaises(self.failureException):
self.assertListEqual(a, b)
with bytes_warning(), self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), tuple(b))
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSequenceEqual(a, tuple(b))
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSequenceEqual(tuple(a), b)
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSequenceEqual('a', b'a')
with bytes_warning(), self.assertRaises(self.failureException):
self.assertSetEqual(set(a), set(b))
with self.assertRaises(self.failureException):
self.assertListEqual(a, tuple(b))
with self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), b)
a = [0, b'a']
b = [0]
with self.assertRaises(self.failureException):
self.assertListEqual(a, b)
with self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(a, tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(tuple(a), b)
with self.assertRaises(self.failureException):
self.assertSetEqual(set(a), set(b))
a = [0]
b = [0, b'a']
with self.assertRaises(self.failureException):
self.assertListEqual(a, b)
with self.assertRaises(self.failureException):
self.assertTupleEqual(tuple(a), tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(a, tuple(b))
with self.assertRaises(self.failureException):
self.assertSequenceEqual(tuple(a), b)
with self.assertRaises(self.failureException):
self.assertSetEqual(set(a), set(b))
with bytes_warning(), self.assertRaises(self.failureException):
self.assertDictEqual({'a': 0}, {b'a': 0})
with self.assertRaises(self.failureException):
self.assertDictEqual({}, {b'a': 0})
with self.assertRaises(self.failureException):
self.assertDictEqual({b'a': 0}, {})
with self.assertRaises(self.failureException):
self.assertCountEqual([b'a', b'a'], [b'a', b'a', b'a'])
with bytes_warning():
self.assertCountEqual(['a', b'a'], ['a', b'a'])
with bytes_warning(), self.assertRaises(self.failureException):
self.assertCountEqual(['a', 'a'], [b'a', b'a'])
with bytes_warning(), self.assertRaises(self.failureException):
self.assertCountEqual(['a', 'a', []], [b'a', b'a', []])
def testAssertIsNone(self):
self.assertIsNone(None)
self.assertRaises(self.failureException, self.assertIsNone, False)
self.assertIsNotNone('DjZoPloGears on Rails')
self.assertRaises(self.failureException, self.assertIsNotNone, None)
def testAssertRegex(self):
self.assertRegex('asdfabasdf', 'ab+')
self.assertRaises(self.failureException, self.assertRegex, 'saaas',
'aaaa')
def testAssertRaisesCallable(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaises(ExceptionMock, Stub)
self.assertRaises((ValueError, ExceptionMock), Stub)
self.assertRaises(ValueError, int, '19', base=8)
with self.assertRaises(self.failureException):
self.assertRaises(ExceptionMock, lambda : 0)
with self.assertWarns(DeprecationWarning):
self.assertRaises(ExceptionMock, None)
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesContext(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
with self.assertRaises(ExceptionMock):
Stub()
with self.assertRaises((ValueError, ExceptionMock)) as cm:
Stub()
self.assertIsInstance(cm.exception, ExceptionMock)
self.assertEqual(cm.exception.args[0], 'We expect')
with self.assertRaises(ValueError):
int('19', base=8)
with self.assertRaises(self.failureException):
with self.assertRaises(ExceptionMock):
pass
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertRaises(ExceptionMock, msg='foobar'):
pass
        with self.assertWarnsRegex(DeprecationWarning, 'foobar'), self.assertRaises(AssertionError):
with self.assertRaises(ExceptionMock, foobar=42):
pass
with self.assertRaises(ExceptionMock):
self.assertRaises(ValueError, Stub)
def testAssertRaisesNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertRaises()
with self.assertRaises(TypeError):
self.assertRaises(1)
with self.assertRaises(TypeError):
self.assertRaises(object)
with self.assertRaises(TypeError):
self.assertRaises((ValueError, 1))
with self.assertRaises(TypeError):
self.assertRaises((ValueError, object))
def testAssertRaisesRefcount(self):
def func():
try:
raise ValueError
except ValueError:
raise ValueError
refcount = sys.getrefcount(func)
self.assertRaises(ValueError, func)
self.assertEqual(refcount, sys.getrefcount(func))
def testAssertRaisesRegex(self):
class ExceptionMock(Exception):
pass
def Stub():
raise ExceptionMock('We expect')
self.assertRaisesRegex(ExceptionMock, re.compile('expect$'), Stub)
self.assertRaisesRegex(ExceptionMock, 'expect$', Stub)
with self.assertWarns(DeprecationWarning):
self.assertRaisesRegex(ExceptionMock, 'expect$', None)
def testAssertNotRaisesRegex(self):
self.assertRaisesRegex(self.failureException,
'^Exception not raised by <lambda>$', self.assertRaisesRegex,
Exception, re.compile('x'), lambda : None)
self.assertRaisesRegex(self.failureException,
'^Exception not raised by <lambda>$', self.assertRaisesRegex,
Exception, 'x', lambda : None)
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertRaisesRegex(Exception, 'expect', msg='foobar'):
pass
        with self.assertWarnsRegex(DeprecationWarning, 'foobar'), self.assertRaises(AssertionError):
with self.assertRaisesRegex(Exception, 'expect', foobar=42):
pass
def testAssertRaisesRegexInvalidRegex(self):
class MyExc(Exception):
pass
        self.assertRaises(TypeError, self.assertRaisesRegex, MyExc, lambda: True)
def testAssertWarnsRegexInvalidRegex(self):
class MyWarn(Warning):
pass
        self.assertRaises(TypeError, self.assertWarnsRegex, MyWarn, lambda: True)
def testAssertRaisesRegexMismatch(self):
def Stub():
raise Exception('Unexpected')
        self.assertRaisesRegex(self.failureException,
                               '"\\^Expected\\$" does not match "Unexpected"',
                               self.assertRaisesRegex, Exception, '^Expected$', Stub)
        self.assertRaisesRegex(self.failureException,
                               '"\\^Expected\\$" does not match "Unexpected"',
                               self.assertRaisesRegex, Exception, re.compile('^Expected$'), Stub)
def testAssertRaisesExcValue(self):
class ExceptionMock(Exception):
pass
def Stub(foo):
raise ExceptionMock(foo)
v = 'particular value'
ctx = self.assertRaises(ExceptionMock)
with ctx:
Stub(v)
e = ctx.exception
self.assertIsInstance(e, ExceptionMock)
self.assertEqual(e.args[0], v)
def testAssertRaisesRegexNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertRaisesRegex()
with self.assertRaises(TypeError):
self.assertRaisesRegex(ValueError)
with self.assertRaises(TypeError):
self.assertRaisesRegex(1, 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex(object, 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex((ValueError, 1), 'expect')
with self.assertRaises(TypeError):
self.assertRaisesRegex((ValueError, object), 'expect')
def testAssertWarnsCallable(self):
def _runtime_warn():
warnings.warn('foo', RuntimeWarning)
self.assertWarns(RuntimeWarning, _runtime_warn)
self.assertWarns(RuntimeWarning, _runtime_warn)
self.assertWarns((DeprecationWarning, RuntimeWarning), _runtime_warn)
        self.assertWarns(RuntimeWarning, warnings.warn, 'foo', category=RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarns(RuntimeWarning, lambda : 0)
with self.assertWarns(DeprecationWarning):
self.assertWarns(RuntimeWarning, None)
with warnings.catch_warnings():
warnings.simplefilter('default', RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarns(DeprecationWarning, _runtime_warn)
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
with self.assertRaises(RuntimeWarning):
self.assertWarns(DeprecationWarning, _runtime_warn)
def testAssertWarnsContext(self):
def _runtime_warn():
warnings.warn('foo', RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarns(RuntimeWarning) as cm:
_runtime_warn()
with self.assertWarns((DeprecationWarning, RuntimeWarning)) as cm:
_runtime_warn()
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], 'foo')
self.assertIn('test_case.py', cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
with self.assertWarns(RuntimeWarning):
_runtime_warn()
_runtime_warn()
with self.assertWarns(RuntimeWarning):
warnings.warn('foo', category=RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarns(RuntimeWarning):
pass
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertWarns(RuntimeWarning, msg='foobar'):
pass
        with self.assertWarnsRegex(DeprecationWarning, 'foobar'), self.assertRaises(AssertionError):
with self.assertWarns(RuntimeWarning, foobar=42):
pass
with warnings.catch_warnings():
warnings.simplefilter('default', RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
with self.assertRaises(RuntimeWarning):
with self.assertWarns(DeprecationWarning):
_runtime_warn()
def testAssertWarnsNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertWarns()
with self.assertRaises(TypeError):
self.assertWarns(1)
with self.assertRaises(TypeError):
self.assertWarns(object)
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, 1))
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, object))
with self.assertRaises(TypeError):
self.assertWarns((UserWarning, Exception))
def testAssertWarnsRegexCallable(self):
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
self.assertWarnsRegex(RuntimeWarning, 'o+', _runtime_warn, 'foox')
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, 'o+', lambda : 0)
with self.assertWarns(DeprecationWarning):
self.assertWarnsRegex(RuntimeWarning, 'o+', None)
with warnings.catch_warnings():
warnings.simplefilter('default', RuntimeWarning)
with self.assertRaises(self.failureException):
self.assertWarnsRegex(DeprecationWarning, 'o+',
_runtime_warn, 'foox')
with self.assertRaises(self.failureException):
self.assertWarnsRegex(RuntimeWarning, 'o+', _runtime_warn, 'barz')
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
self.assertWarnsRegex(RuntimeWarning, 'o+', _runtime_warn,
'barz')
def testAssertWarnsRegexContext(self):
def _runtime_warn(msg):
warnings.warn(msg, RuntimeWarning)
_runtime_warn_lineno = inspect.getsourcelines(_runtime_warn)[1]
with self.assertWarnsRegex(RuntimeWarning, 'o+') as cm:
_runtime_warn('foox')
self.assertIsInstance(cm.warning, RuntimeWarning)
self.assertEqual(cm.warning.args[0], 'foox')
self.assertIn('test_case.py', cm.filename)
self.assertEqual(cm.lineno, _runtime_warn_lineno + 1)
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, 'o+'):
pass
with self.assertRaisesRegex(self.failureException, 'foobar'):
with self.assertWarnsRegex(RuntimeWarning, 'o+', msg='foobar'):
pass
        with self.assertWarnsRegex(DeprecationWarning, 'foobar'), \
                self.assertRaises(AssertionError):
with self.assertWarnsRegex(RuntimeWarning, 'o+', foobar=42):
pass
with warnings.catch_warnings():
warnings.simplefilter('default', RuntimeWarning)
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(DeprecationWarning, 'o+'):
_runtime_warn('foox')
with self.assertRaises(self.failureException):
with self.assertWarnsRegex(RuntimeWarning, 'o+'):
_runtime_warn('barz')
with warnings.catch_warnings():
warnings.simplefilter('error', RuntimeWarning)
with self.assertRaises((RuntimeWarning, self.failureException)):
with self.assertWarnsRegex(RuntimeWarning, 'o+'):
_runtime_warn('barz')
def testAssertWarnsRegexNoExceptionType(self):
with self.assertRaises(TypeError):
self.assertWarnsRegex()
with self.assertRaises(TypeError):
self.assertWarnsRegex(UserWarning)
with self.assertRaises(TypeError):
self.assertWarnsRegex(1, 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex(object, 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, 1), 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, object), 'expect')
with self.assertRaises(TypeError):
self.assertWarnsRegex((UserWarning, Exception), 'expect')
@contextlib.contextmanager
def assertNoStderr(self):
with captured_stderr() as buf:
yield
self.assertEqual(buf.getvalue(), '')
def assertLogRecords(self, records, matches):
self.assertEqual(len(records), len(matches))
for rec, match in zip(records, matches):
self.assertIsInstance(rec, logging.LogRecord)
for k, v in match.items():
self.assertEqual(getattr(rec, k), v)
def testAssertLogsDefaults(self):
with self.assertNoStderr():
with self.assertLogs() as cm:
log_foo.info('1')
log_foobar.debug('2')
self.assertEqual(cm.output, ['INFO:foo:1'])
self.assertLogRecords(cm.records, [{'name': 'foo'}])
def testAssertLogsTwoMatchingMessages(self):
with self.assertNoStderr():
with self.assertLogs() as cm:
log_foo.info('1')
log_foobar.debug('2')
log_quux.warning('3')
self.assertEqual(cm.output, ['INFO:foo:1', 'WARNING:quux:3'])
            self.assertLogRecords(cm.records,
                                  [{'name': 'foo'}, {'name': 'quux'}])
def checkAssertLogsPerLevel(self, level):
with self.assertNoStderr():
with self.assertLogs(level=level) as cm:
log_foo.warning('1')
log_foobar.error('2')
log_quux.critical('3')
self.assertEqual(cm.output, ['ERROR:foo.bar:2', 'CRITICAL:quux:3'])
            self.assertLogRecords(cm.records,
                                  [{'name': 'foo.bar'}, {'name': 'quux'}])
def testAssertLogsPerLevel(self):
self.checkAssertLogsPerLevel(logging.ERROR)
self.checkAssertLogsPerLevel('ERROR')
def checkAssertLogsPerLogger(self, logger):
with self.assertNoStderr():
with self.assertLogs(level='DEBUG') as outer_cm:
with self.assertLogs(logger, level='DEBUG') as cm:
log_foo.info('1')
log_foobar.debug('2')
log_quux.warning('3')
self.assertEqual(cm.output, ['INFO:foo:1', 'DEBUG:foo.bar:2'])
            self.assertLogRecords(cm.records,
                                  [{'name': 'foo'}, {'name': 'foo.bar'}])
self.assertEqual(outer_cm.output, ['WARNING:quux:3'])
def testAssertLogsPerLogger(self):
self.checkAssertLogsPerLogger(logging.getLogger('foo'))
self.checkAssertLogsPerLogger('foo')
def testAssertLogsFailureNoLogs(self):
with self.assertNoStderr():
with self.assertRaises(self.failureException):
with self.assertLogs():
pass
def testAssertLogsFailureLevelTooHigh(self):
with self.assertNoStderr():
with self.assertRaises(self.failureException):
with self.assertLogs(level='WARNING'):
log_foo.info('1')
def testAssertLogsFailureMismatchingLogger(self):
with self.assertLogs('quux', level='ERROR'):
with self.assertRaises(self.failureException):
with self.assertLogs('foo'):
log_quux.error('1')
def testDeprecatedMethodNames(self):
"""
Test that the deprecated methods raise a DeprecationWarning. See #9424.
"""
        old = (
            (self.failIfEqual, (3, 5)),
            (self.assertNotEquals, (3, 5)),
            (self.failUnlessEqual, (3, 3)),
            (self.assertEquals, (3, 3)),
            (self.failUnlessAlmostEqual, (2.0, 2.0)),
            (self.assertAlmostEquals, (2.0, 2.0)),
            (self.failIfAlmostEqual, (3.0, 5.0)),
            (self.assertNotAlmostEquals, (3.0, 5.0)),
            (self.failUnless, (True,)),
            (self.assert_, (True,)),
            (self.failUnlessRaises, (TypeError, lambda _: 3.14 + 'spam')),
            (self.failIf, (False,)),
            (self.assertDictContainsSubset, (dict(a=1, b=2), dict(a=1, b=2, c=3))),
            (self.assertRaisesRegexp, (KeyError, 'foo', lambda: {}['foo'])),
            (self.assertRegexpMatches, ('bar', 'bar')),
        )
for meth, args in old:
with self.assertWarns(DeprecationWarning):
meth(*args)
def _testDeprecatedFailMethods(self):
"""Test that the deprecated fail* methods get removed in 3.x"""
if sys.version_info[:2] < (3, 3):
return
deprecated_names = ['failIfEqual', 'failUnlessEqual',
'failUnlessAlmostEqual', 'failIfAlmostEqual', 'failUnless',
'failUnlessRaises', 'failIf', 'assertDictContainsSubset']
for deprecated_name in deprecated_names:
with self.assertRaises(AttributeError):
getattr(self, deprecated_name)
def testDeepcopy(self):
class TestableTest(unittest.TestCase):
def testNothing(self):
pass
test = TestableTest('testNothing')
deepcopy(test)
def testPickle(self):
test = unittest.TestCase('run')
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
pickled_test = pickle.dumps(test, protocol=protocol)
unpickled_test = pickle.loads(pickled_test)
self.assertEqual(test, unpickled_test)
unpickled_test.assertEqual(set(), set())
def testKeyboardInterrupt(self):
def _raise(self=None):
raise KeyboardInterrupt
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
with self.assertRaises(KeyboardInterrupt):
klass('test_something').run()
def testSkippingEverywhere(self):
def _skip(self=None):
raise unittest.SkipTest('some reason')
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _skip
class Test2(unittest.TestCase):
setUp = _skip
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _skip
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_skip)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.skipped), 1)
self.assertEqual(result.testsRun, 1)
def testSystemExit(self):
def _raise(self=None):
raise SystemExit
def nothing(self):
pass
class Test1(unittest.TestCase):
test_something = _raise
class Test2(unittest.TestCase):
setUp = _raise
test_something = nothing
class Test3(unittest.TestCase):
test_something = nothing
tearDown = _raise
class Test4(unittest.TestCase):
def test_something(self):
self.addCleanup(_raise)
for klass in (Test1, Test2, Test3, Test4):
result = unittest.TestResult()
klass('test_something').run(result)
self.assertEqual(len(result.errors), 1)
self.assertEqual(result.testsRun, 1)
@support.cpython_only
def testNoCycles(self):
case = unittest.TestCase()
wr = weakref.ref(case)
with support.disable_gc():
del case
self.assertFalse(wr())
def test_no_exception_leak(self):
class MyException(Exception):
ninstance = 0
def __init__(self):
MyException.ninstance += 1
Exception.__init__(self)
def __del__(self):
MyException.ninstance -= 1
class TestCase(unittest.TestCase):
def test1(self):
raise MyException()
@unittest.expectedFailure
def test2(self):
raise MyException()
for method_name in ('test1', 'test2'):
testcase = TestCase(method_name)
testcase.run()
self.assertEqual(MyException.ninstance, 0)
if __name__ == '__main__':
unittest.main()
|
1642233
|
from .item import Item, ItemCreate, ItemInDB, ItemUpdate
from .msg import Msg
from .token import Token, TokenPayload
from .user import *
from .reponse import Response
from .role import *
from .system.menu import *
from .system.dict import *
from .system.department import *
|
1642245
|
import hashlib
import io
import socket
def get_name(uuid):
m = hashlib.sha1()
m.update(uuid)
n = int(m.hexdigest(), 16)
lines = []
with open("names.txt", "r") as f:
lines = f.readlines()
return lines[n % len(lines)].strip().lower()
def get_thing(secret):
"""Get dweet thing name from secret.
"""
m = hashlib.sha1()
m.update(secret)
return m.hexdigest()
pi_cached = None
def is_raspberry_pi(raise_on_errors=False):
"""Checks if Raspberry PI.
Thanks https://raspberrypi.stackexchange.com/a/74541
"""
global pi_cached
    if pi_cached is not None:
        return pi_cached
try:
with io.open('/proc/cpuinfo', 'r') as cpuinfo:
found = False
for line in cpuinfo:
if line.startswith('Hardware'):
found = True
label, value = line.strip().split(':', 1)
value = value.strip()
if value not in (
'BCM2708',
'BCM2709',
'BCM2835',
'BCM2836'
):
if raise_on_errors:
raise ValueError(
'This system does not appear to be a '
'Raspberry Pi.'
)
else:
pi_cached = False
return False
if not found:
if raise_on_errors:
raise ValueError(
'Unable to determine if this system is a Raspberry Pi.'
)
else:
pi_cached = False
return False
except IOError:
if raise_on_errors:
raise ValueError('Unable to open `/proc/cpuinfo`.')
else:
pi_cached = False
return False
pi_cached = True
return True
def get_ip():
    """Gets the IP address as a string.
    ty https://stackoverflow.com/a/1267524
    """
    # Prefer a non-loopback address for the local hostname; otherwise use the
    # source address of an outbound UDP socket (connect() sends no packets).
    ips = [ip for ip in socket.gethostbyname_ex(socket.gethostname())[2]
           if not ip.startswith("127.")]
    if not ips:
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 53))
            ips = [s.getsockname()[0]]
    return (ips + ["no IP found"])[0]
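if __name__ == "__main__":
    # Minimal usage sketch (not in the original module). Both hash helpers
    # update hashlib with raw bytes, so str inputs must be encoded first.
    print(get_thing(b"my-secret"))
    print(get_ip())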
|
1642303
|
import numpy as np
def dice_numpy(targets, outputs, threshold=None, min_area=None,
empty_one: bool = True, eps=1e-6):
if threshold is not None:
# noinspection PyUnresolvedReferences
outputs = (outputs >= threshold).astype(np.uint8)
targets_sum = targets.sum()
outputs_sum = outputs.sum()
if min_area and outputs_sum < min_area:
outputs = np.zeros(outputs.shape, dtype=np.uint8)
outputs_sum = 0
if empty_one and targets_sum == 0 and outputs_sum == 0:
return 1
intersection = (targets * outputs).sum()
union = targets_sum + outputs_sum
dice = 2 * intersection / (union + eps)
return dice
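if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): soft predictions
    # are binarized at the given threshold before the Dice score is computed.
    targets = np.array([[0, 1], [1, 1]], dtype=np.uint8)
    outputs = np.array([[0.2, 0.9], [0.8, 0.7]])
    print(dice_numpy(targets, outputs, threshold=0.5))  # ~1.0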
|
1642321
|
import sys
sys.path.append('..')
import torch
import torch.nn as nn
import numpy as np
from dataclasses import dataclass
from trphysx.transformer import PhysformerGPT2
@dataclass
class PhooConfig:
n_ctx:int = 16
n_embd:int = 16
n_layer:int = 2
n_head:int = 2
activation_function:str = "gelu_new"
resid_pdrop:float = 0.0
embd_pdrop:float = 0.0
attn_pdrop:float = 0.0
layer_norm_epsilon:float = 1e-5
initializer_range:float = 0.1
output_hidden_states:bool = False
output_attentions:bool = True
use_cache:bool = False
model_type:str = "Phoo"
if __name__ == "__main__":
# === GPT2 Tests ===
config = PhooConfig()
model = PhysformerGPT2(config)
# === Forward test ===
batch_size = np.random.randint(1, 10)
n_steps = np.random.randint(1, config.n_ctx)
x = torch.randn(batch_size, n_steps, config.n_embd) # Batch, time-steps, embed
output = model(x, use_cache=False, output_attentions=True)
    # Test output tensor size is correct: the feature dimension is the
    # embedding size (n_embd and n_ctx are both 16 in PhooConfig)
    assert output[0].size() == torch.Size((batch_size, n_steps, config.n_embd))
    # Test attention matrix sizes
    assert isinstance(output[1], tuple)
    assert len(output[1]) == config.n_layer
for i in range(config.n_layer):
assert output[1][i].size() == torch.Size((batch_size, config.n_head, n_steps, n_steps))
# Make sure attention scores at each step are summing up to 1 (approx.)
assert (torch.abs(torch.mean(1.0 - torch.sum(output[1][i], dim=-1))) < 1e-6).item()
# Test generation
n_steps = np.random.randint(config.n_ctx, 2*config.n_ctx)
inputs_embeds = torch.randn(batch_size, 1, config.n_embd)
output = model.generate(inputs_embeds=inputs_embeds, max_length=n_steps, use_cache=False)
assert output[0].size() == torch.Size((batch_size, n_steps, config.n_embd))
|
1642332
|
import unittest
from slack_sdk.http_retry import RateLimitErrorRetryHandler
from slack_sdk.webhook import WebhookClient
from tests.slack_sdk.webhook.mock_web_api_server import (
cleanup_mock_web_api_server,
setup_mock_web_api_server,
)
from ..my_retry_handler import MyRetryHandler
class TestWebhook_HttpRetries(unittest.TestCase):
def setUp(self):
setup_mock_web_api_server(self)
def tearDown(self):
cleanup_mock_web_api_server(self)
def test_send(self):
retry_handler = MyRetryHandler(max_retry_count=2)
client = WebhookClient(
"http://localhost:8888/remote_disconnected",
retry_handlers=[retry_handler],
)
try:
client.send(text="hello!")
self.fail("An exception is expected")
except Exception as _:
pass
self.assertEqual(2, retry_handler.call_count)
def test_ratelimited(self):
client = WebhookClient("http://localhost:8888/ratelimited")
client.retry_handlers.append(RateLimitErrorRetryHandler())
response = client.send(text="hello!")
# Just running retries; no assertions for call count so far
self.assertEqual(429, response.status_code)
|
1642374
|
import threading
from .peer import Peer
class Shortlist(object):
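    """Kademlia-style candidate list: holds up to k peers ordered by XOR
    distance to the target key, tracking which peers have been contacted."""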
def __init__(self, k, key):
self.k = k
self.key = key
self.list = list()
self.lock = threading.Lock()
self.completion_value = None
def set_complete(self, value):
with self.lock:
self.completion_value = value
def completion_result(self):
with self.lock:
return self.completion_value
def update(self, nodes):
for node in nodes:
self._update_one(node)
def _update_one(self, node):
if node.id == self.key or self.completion_value:
return
with self.lock:
for i in range(len(self.list)):
if node.id == self.list[i][0][2]:
break
if node.id ^ self.key < self.list[i][0][2] ^ self.key:
self.list.insert(i, (node.astriple(), False))
self.list = self.list[:self.k]
break
else:
if len(self.list) < self.k:
self.list.append((node.astriple(), False))
def mark(self, node):
with self.lock:
for i in range(len(self.list)):
if node.id == self.list[i][0][2]:
self.list[i] = (node.astriple(), True)
def complete(self):
if self.completion_value:
return True
with self.lock:
for node, completed in self.list:
if not completed:
return False
return True
def get_next_iteration(self, alpha):
if self.completion_value:
return []
next_iteration = []
with self.lock:
for node, completed in self.list:
if not completed:
next_iteration.append(Peer(*node))
if len(next_iteration) >= alpha:
break
return next_iteration
def results(self):
with self.lock:
return [Peer(*node) for (node, completed) in self.list]
|
1642425
|
import abc
import typing
from ..compose import Intersection, Pipeline, Union
__all__ = ["Query"]
class Query(abc.ABC):
"""Abstract class for models working on a query."""
def __init__(self, on: typing.Union[str, list]):
self.on = on if isinstance(on, list) else [on]
@property
def type(self) -> str:
return "query"
    def __repr__(self) -> str:
        return f"Query {self.__class__.__name__}"
@abc.abstractmethod
def __call__(self, q: str, **kwargs) -> str:
return self
def __add__(self, other) -> Pipeline:
"""Pipeline operator."""
if isinstance(other, Pipeline):
return Pipeline(models=[self] + other.models)
return Pipeline(models=[self, other])
def __or__(self, other) -> Union:
"""Union operator."""
raise NotImplementedError("Union not working with a Query model")
def __and__(self, other) -> Intersection:
"""Intersection operator."""
raise NotImplementedError("Intersection not working with a Query model")
|
1642501
|
from __future__ import unicode_literals
from six.moves.urllib.parse import urlparse
from rbtools.api.transport.sync import SyncTransport
class RBClient(object):
"""Entry point for accessing RB resources through the web API.
By default the synchronous transport will be used. To use a
different transport, provide the transport class in the
'transport_cls' parameter.
"""
def __init__(self, url, transport_cls=SyncTransport, *args, **kwargs):
self.url = url
        self.domain = urlparse(url).netloc
self._transport = transport_cls(url, *args, **kwargs)
def get_root(self, *args, **kwargs):
return self._transport.get_root(*args, **kwargs)
def get_path(self, path, *args, **kwargs):
return self._transport.get_path(path, *args, **kwargs)
def get_url(self, url, *args, **kwargs):
return self._transport.get_url(url, *args, **kwargs)
def login(self, *args, **kwargs):
return self._transport.login(*args, **kwargs)
def logout(self, *args, **kwargs):
return self._transport.logout(*args, **kwargs)
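if __name__ == "__main__":
    # Minimal usage sketch (hypothetical server URL; requires a reachable
    # Review Board instance).
    client = RBClient("https://reviews.example.com/")
    print(client.domain)      # reviews.example.com
    root = client.get_root()  # entry point into the web API resources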
|
1642545
|
import sys
if sys.version_info < (3, 5):
print("Python 3.5 or greater required")
sys.exit(1)
import logging
import urllib.request
from zipfile import ZipFile
import os
import os.path
import platform
import struct
TEMP_DIR = 'tmp'
BINARIES_32_URL = "https://github.com/sk89q/Plumeria/releases/download/extras/plumeria_libs_py35_win32_2016-10-02.zip"
BINARIES_64_URL = "https://github.com/sk89q/Plumeria/releases/download/extras/plumeria_libs_py35_win64_2016-10-02.zip"
VIRTUAL_ENV_DIR = ".venv"
def download_file(url, path):
logging.info("Downloading {url} to {path}...".format(url=url, path=path))
with open(path, "wb") as f:
with urllib.request.urlopen(url) as u:
f.write(u.read())
def execute(command):
ret_code = os.system(command)
if ret_code != 0:
logging.error("The command '{}' did not complete successfully (got return code {})".format(command, ret_code))
sys.exit(1)
def main():
is_64bit = struct.calcsize("P") * 8 == 64
logging.info("Is 64-bit? {}".format("yes" if is_64bit else "no"))
if not os.path.exists(TEMP_DIR):
os.makedirs(TEMP_DIR)
bins_url = BINARIES_64_URL if is_64bit else BINARIES_32_URL
bins_path = os.path.join(TEMP_DIR, os.path.basename(bins_url))
if not os.path.exists(bins_path):
download_file(bins_url, bins_path)
else:
logging.info("The file {} already exists so it won't be re-downloaded".format(bins_path))
logging.info("Extracting {path}...".format(path=bins_path))
    with ZipFile(bins_path, 'r') as zip_file:  # avoid shadowing builtin zip()
        zip_file.extractall('.')
logging.info("Making sure that virtualenv is installed globally...")
execute("pip install virtualenv")
if not os.path.isdir(VIRTUAL_ENV_DIR):
logging.info("Creating virtualenv...")
execute("python -m virtualenv {}".format(VIRTUAL_ENV_DIR))
else:
logging.info("The folder {} already exists so no new virtualenv will be created".format(VIRTUAL_ENV_DIR))
logging.info("Installing binary packages...")
execute("{}\\Scripts\\pip install -r packages\\{}.txt".format(VIRTUAL_ENV_DIR, "win64" if is_64bit else "win32"))
logging.info("Installing other packages...")
execute("{}\\Scripts\\pip install -r requirements.txt".format(VIRTUAL_ENV_DIR))
logging.info("")
logging.info("Installation was SUCCESSFUL!")
logging.info("")
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, format="[%(levelname)s] %(message)s")
main()
|
1642555
|
def circle_sort_backend(A, L, R):
    """Circle-sort pass over A[L:R]; returns the number of swaps made."""
    n = R - L
    if n < 2:
        return 0
    swaps = 0
    m = n // 2
    # Compare (and possibly swap) the i-th element from the left with the
    # i-th element from the right, like folding the slice in half.
    for i in range(m):
        if A[R - (i + 1)] < A[L + i]:
            A[R - (i + 1)], A[L + i] = A[L + i], A[R - (i + 1)]
            swaps += 1
    # For odd lengths the middle element is compared with its left neighbour.
    if n & 1 and A[L + m] < A[L + m - 1]:
        A[L + m - 1], A[L + m] = A[L + m], A[L + m - 1]
        swaps += 1
    # Recurse on both halves; the caller repeats whole passes until no swaps occur.
    return swaps + circle_sort_backend(A, L, L + m) + circle_sort_backend(A, L + m, R)
def circle_sort(L):
    '''Sort L in place, returning the number of swaps'''
swaps = 0
s = 1
while s:
s = circle_sort_backend(L, 0, len(L))
swaps += s
return swaps
test = [7, 6, 5, 9, 8, 4, 3, 1, 2, 0]
print(circle_sort(test))
# 12
print(test)
# [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
|
1642566
|
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# torch.manual_seed(1) # reproducible
# np.random.seed(1)
# Hyper Parameters
BATCH_SIZE = 64
LR_G = 0.0001 # learning rate for generator
LR_D = 0.0001 # learning rate for discriminator
N_IDEAS = 5 # think of this as number of ideas for generating an art work(Generator)
ART_COMPONENTS = 15     # the total number of points G can draw on the canvas
PAINT_POINTS = np.vstack([np.linspace(-1, 1, ART_COMPONENTS) for _ in range(BATCH_SIZE)])
def artist_works(): # painting from the famous artist (real target)
#a = np.random.uniform(1, 2, size=BATCH_SIZE)[:, np.newaxis]
r = 0.02 * np.random.randn(1, ART_COMPONENTS)
paintings = np.sin(PAINT_POINTS * np.pi) + r
paintings = torch.from_numpy(paintings).float()
return paintings
G = nn.Sequential( # Generator
nn.Linear(N_IDEAS, 128), # random ideas (could from normal distribution)
nn.ReLU(),
nn.Linear(128, ART_COMPONENTS), # making a painting from these random ideas
)
D = nn.Sequential( # Discriminator
nn.Linear(ART_COMPONENTS, 128), # receive art work either from the famous artist or a newbie like G
nn.ReLU(),
nn.Linear(128, 1),
nn.Sigmoid(), # tell the probability that the art work is made by artist
)
opt_D = torch.optim.Adam(D.parameters(), lr=LR_D)
opt_G = torch.optim.Adam(G.parameters(), lr=LR_G)
plt.ion() # something about continuous plotting
D_loss_history = []
G_loss_history = []
for step in range(10000):
artist_paintings = artist_works() # real painting from artist
G_ideas = torch.randn(BATCH_SIZE, N_IDEAS) # random ideas
G_paintings = G(G_ideas) # fake painting from G (random ideas)
prob_artist0 = D(artist_paintings) # D try to increase this prob
prob_artist1 = D(G_paintings) # D try to reduce this prob
D_loss = - torch.mean(torch.log(prob_artist0) + torch.log(1. - prob_artist1))
G_loss = torch.mean(torch.log(1. - prob_artist1))
    # store detached scalars; appending the loss tensors themselves would keep
    # every step's computation graph alive in memory
    D_loss_history.append(D_loss.item())
    G_loss_history.append(G_loss.item())
opt_D.zero_grad()
D_loss.backward(retain_graph=True) # reusing computational graph
opt_D.step()
opt_G.zero_grad()
G_loss.backward()
opt_G.step()
if step % 50 == 0: # plotting
plt.cla()
plt.plot(PAINT_POINTS[0], G_paintings.data.numpy()[0], c='#4AD631', lw=3, label='Generated painting',)
        plt.plot(PAINT_POINTS[0], np.sin(PAINT_POINTS[0] * np.pi), c='#74BCFF', lw=3, label='target curve')
plt.text(-1, 0.75, 'D accuracy=%.2f (0.5 for D to converge)' % prob_artist0.data.numpy().mean(), fontdict={'size': 13})
plt.text(-1, 0.5, 'D score= %.2f (-1.38 for G to converge)' % -D_loss.data.numpy(), fontdict={'size': 13})
plt.ylim((-1, 1));plt.legend(loc='upper right', fontsize=10);plt.draw();plt.pause(0.01)
plt.ioff()
plt.show()
|
1642572
|
from typing import List, Optional
from tests.typing.models import Test, ProjectionTest
async def find_many() -> List[Test]:
return await Test.find().to_list()
async def find_many_with_projection() -> List[ProjectionTest]:
return await Test.find().project(projection_model=ProjectionTest).to_list()
async def find_many_generator() -> List[Test]:
docs: List[Test] = []
async for doc in Test.find():
docs.append(doc)
return docs
async def find_many_generator_with_projection() -> List[ProjectionTest]:
docs: List[ProjectionTest] = []
async for doc in Test.find().project(projection_model=ProjectionTest):
docs.append(doc)
return docs
async def find_one() -> Optional[Test]:
return await Test.find_one()
async def find_one_with_projection() -> Optional[ProjectionTest]:
return await Test.find_one().project(projection_model=ProjectionTest)
|
1642574
|
from abc import ABC, abstractmethod
from datetime import datetime
import random
from time import sleep
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
import explorerhat
from .models import Watered, Schedule
class BaseSensor(ABC):
'''Base Sensor class for sensors'''
def __init__(self, database_uri: str) -> None:
self.engine = create_engine(database_uri)
@abstractmethod
def read_moisture(self) -> None:
'''function to read the soil moisture and upload results to database'''
...
@abstractmethod
def water(self) -> None:
'''function to water the plant and upload record to database'''
...
class ExplorerHat(BaseSensor):
def __init__(self, database_uri: str, v_min: float, v_max: float):
        self.v_min: float = v_min
        self.v_max: float = v_max
super().__init__(database_uri)
def read_moisture(self, plant_id: int) -> None:
'''function to read the moisture from the explorer hat'''
voltage: float = explorerhat.analog.one.read()
water_level: float = ((voltage - self.v_min) * 100) / (self.v_max - self.v_min) # calculate the % moisture of the soil
obj: Schedule = Schedule(datetime=datetime.now(), water_level=water_level, plant_id=plant_id)
with Session(self.engine) as session:
session.add(obj)
session.commit()
def water(self, plant_id: int, water_time: float = 10.0) -> None:
'''function to toggle the explorer hat's builtin Light and motor controls to water the plant'''
explorerhat.output[0].on()
explorerhat.light[0].on()
sleep(water_time)
explorerhat.output[0].off()
explorerhat.light[0].off()
watered_on: Watered = Watered(date_watered=datetime.now(), plant_id=plant_id)
with Session(self.engine) as session:
session.add(watered_on)
session.commit()
class TestSensor(BaseSensor):
def __init__(self, database_uri) -> None:
super().__init__(database_uri)
def read_moisture(self, plant_id: int) -> None:
'''function to create a database entry mimicking the water level at the current date/ time'''
water_level: Schedule = Schedule(datetime=datetime.now(), water_level=random.randint(0, 100), plant_id=plant_id)
with Session(self.engine) as session:
session.add(water_level)
session.commit()
def water(self, plant_id: int) -> None:
'''function to create a database entry mimicking the plant being watered at the current date/ time'''
watered_on: Watered = Watered(date_watered=datetime.now(), plant_id=plant_id)
with Session(self.engine) as session:
session.add(watered_on)
session.commit()
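if __name__ == "__main__":
    # Minimal usage sketch (assumes a SQLite database whose tables for the
    # Watered and Schedule models already exist, and a plant row with id=1).
    sensor = TestSensor("sqlite:///plants.db")
    sensor.read_moisture(plant_id=1)
    sensor.water(plant_id=1)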
|
1642652
|
class ZigbeeMessage:
def __init__(self, message):
self.raw = message
def get_signal_level(self):
        # map the 0-255 link quality reported by the radio onto a 0-11 scale
        if self.raw.get('linkquality') is not None:
            return int(int(self.raw['linkquality']) * 11 / 255)
        return None
    def get_battery_level(self):
        if self.raw.get('battery') is not None:
            return int(float(self.raw['battery']))
        return None
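if __name__ == "__main__":
    # Minimal usage sketch with a payload shaped like the ones parsed above.
    msg = ZigbeeMessage({'linkquality': 255, 'battery': 87.5})
    print(msg.get_signal_level())   # 11 (0-255 scale mapped to 0-11)
    print(msg.get_battery_level())  # 87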
|
1642670
|
import sys
from collections import defaultdict
features = defaultdict(float)
remap = {
    'CopyDistance': 'CopyDistance',
    'DistanceHuffmanTable': 'CopyDistance',
    'ComplexLiterals': 'ComplexLiterals',
    'CopyLength': 'CopyLength',
    'LiteralHuffmanTable': 'ComplexLiterals',
    'InsertCopyHuffmanTable': 'CopyLength',
    'LiteralContextMode': 'LiteralContextMode',
    'MetablockHeader': 'Misc',
    'BlockTypeMetadata': 'BlockTypeMetadata',
    'DistancContextMode': 'DistanceContextMode',  # key presumably matches a misspelled input label
    'Misc': 'Misc',
}
for line in open(sys.argv[1]):
    # normalize feature labels before splitting
    for key, val in remap.items():  # Python 3: dict.items(), not iteritems()
        if key != val:
            line = line.replace(key, val)
    vals = line.split()
    nbytes = float(vals[1])
    features[vals[2]] += nbytes
maxb = max(len(str(item)) for item in features.values())
maxa = max(len(str(int(item * 8 + .5))) for item in features.values())
for item in sorted(features.keys()):
    bitval = str(int(features[item] * 8 + .5))
    byteval = str(features[item])
    print(bitval + ' ' * (maxa + 2 - len(bitval)) + byteval + ' ' * (maxb + 2 - len(byteval)) + item)
|
1642681
|
import time
import sys
from app.common import values, definitions, utilities
from app.common.utilities import error_exit, save_current_state, load_state, get_source_name_from_slice
from app.tools import evolver, merger
from app.tools import emitter, reader, logger
from app.ast import ast_generator
file_index = 1
backup_file_list = dict()
def safe_exec(function_def, title, *args):
start_time = time.time()
emitter.sub_title("Starting " + title + "...")
description = title[0].lower() + title[1:]
try:
if not args:
result = function_def()
else:
result = function_def(*args)
duration = format((time.time() - start_time) / 60, '.3f')
emitter.success("\n\tSuccessful " + description + ", after " + duration + " minutes.")
except Exception as exception:
duration = format((time.time() - start_time) / 60, '.3f')
emitter.error("Crash during " + description + ", after " + duration + " minutes.")
error_exit(exception, "Unexpected error during " + description + ".")
return result
def evolve_macros():
logger.trace(__name__ + ":" + sys._getframe().f_code.co_name, locals())
if values.missing_macro_list:
header_list, macro_list = evolver.evolve_definitions(values.missing_macro_list)
values.missing_macro_list = macro_list
values.missing_header_list = merger.merge_header_info(values.missing_header_list, header_list)
def evolve_data_types():
logger.trace(__name__ + ":" + sys._getframe().f_code.co_name, locals())
if values.missing_data_type_list:
missing_header_list, missing_macro_list = evolver.evolve_data_type(values.missing_data_type_list)
def evolve_functions():
logger.trace(__name__ + ":" + sys._getframe().f_code.co_name, locals())
if values.missing_function_list:
header_list, macro_list, function_list = evolver.evolve_functions(values.missing_function_list,
values.DEFAULT_TRANSPLANT_DEPTH)
values.missing_macro_list = merger.merge_macro_info(values.missing_macro_list, macro_list)
values.missing_header_list = merger.merge_header_info(values.missing_header_list, header_list)
values.missing_function_list = function_list
def evolve_code():
global file_index
if not values.ast_transformation_info:
error_exit("nothing to evolve")
updated_info = dict()
for list_tuple in values.ast_transformation_info.items():
file_list, generated_data = list_tuple
slice_file_a = file_list[0]
slice_file_b = file_list[1]
slice_file_c = file_list[2]
slice_file_d = slice_file_c.replace(values.CONF_PATH_C, values.Project_D.path)
slice_file_list = (slice_file_a, slice_file_b, slice_file_c, slice_file_d)
vector_source_a = get_source_name_from_slice(slice_file_a)
vector_source_b = get_source_name_from_slice(slice_file_b)
vector_source_c = get_source_name_from_slice(slice_file_c)
vector_source_d = vector_source_c.replace(values.CONF_PATH_C, values.Project_D.path)
source_file_list = (vector_source_a, vector_source_b, vector_source_c, vector_source_d)
ast_tree_global_a = ast_generator.get_ast_json(vector_source_a, values.DONOR_REQUIRE_MACRO, True)
ast_tree_global_b = ast_generator.get_ast_json(vector_source_b, values.DONOR_REQUIRE_MACRO, True)
ast_tree_global_c = ast_generator.get_ast_json(vector_source_c, values.DONOR_REQUIRE_MACRO, True)
utilities.shift_slice_source(slice_file_a, slice_file_c)
segment_code = slice_file_c.replace(vector_source_c + ".", "").split(".")[0]
segment_identifier_a = slice_file_a.split("." + segment_code + ".")[-1].replace(".slice", "")
segment_identifier_c = slice_file_c.split("." + segment_code + ".")[-1].replace(".slice", "")
emitter.sub_sub_title("evolving " + segment_identifier_c + " in " + vector_source_c)
# emitter.highlight("\tOriginal AST script")
original_script = generated_data[1]
# emitter.emit_ast_script(original_script)
# emitter.highlight("\tGenerated AST script")
translated_script = generated_data[0]
# emitter.emit_ast_script(translated_script)
identified_function_list, \
identified_macro_list, evolved_script, \
identified_var_list, identified_data_type_list = evolver.evolve_code(slice_file_list,
source_file_list,
translated_script,
segment_identifier_a,
segment_identifier_c,
segment_code,
ast_tree_global_a,
ast_tree_global_b,
ast_tree_global_c
)
updated_data = (evolved_script, original_script)
updated_info[file_list] = updated_data
emitter.highlight("\tEvolved AST script")
emitter.emit_ast_script(evolved_script)
file_index += 1
if values.missing_function_list:
if identified_function_list:
values.missing_function_list.update(identified_function_list)
else:
values.missing_function_list = identified_function_list
if values.missing_var_list:
if identified_var_list:
values.missing_var_list.update(identified_var_list)
else:
values.missing_var_list = identified_var_list
if values.missing_data_type_list:
if identified_data_type_list:
values.missing_data_type_list.update(identified_data_type_list)
else:
values.missing_data_type_list = identified_data_type_list
if values.missing_macro_list:
if identified_macro_list:
values.missing_macro_list = merger.merge_macro_info(values.missing_macro_list, identified_macro_list)
else:
values.missing_macro_list = identified_macro_list
utilities.replace_file(vector_source_d, slice_file_d)
utilities.restore_slice_source()
values.ast_transformation_info = updated_info
def load_values():
load_state()
if not values.ast_transformation_info:
script_info = dict()
script_list = reader.read_json(definitions.FILE_TRANSLATED_SCRIPT_INFO)
for (path_info, trans_script_info) in script_list:
script_info[(path_info[0], path_info[1], path_info[2])] = trans_script_info
values.ast_transformation_info = script_info
definitions.FILE_SCRIPT_INFO = definitions.DIRECTORY_OUTPUT + "/script-info"
definitions.FILE_TEMP_FIX = definitions.DIRECTORY_TMP + "/temp-fix"
def save_values():
save_current_state()
def start():
emitter.title("Evolve transformation")
load_values()
if values.PHASE_SETTING[definitions.PHASE_EVOLUTION]:
safe_exec(evolve_code, "evaluate code slices")
if values.missing_function_list:
safe_exec(evolve_functions, "evolve function definitions")
if values.missing_data_type_list:
safe_exec(evolve_data_types, "evolve data structures")
if values.missing_macro_list:
safe_exec(evolve_macros, "evolve macros")
save_values()
else:
emitter.special("\n\t-skipping this phase-")
|
1642682
|
import numpy as np
import pytest
from numpy.testing import assert_almost_equal as aae
from spectra import SticksSpectrum
def setup():
pass
def teardown():
pass
def test_init():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities, units="ms", style="IR", y_shift=-5, time=9)
aae(s1.energies, energies)
aae(s1.intensities, intensities)
assert s1.units == "ms"
assert s1.style == "IR"
assert s1.y_shift == -5
assert s1.time == 9
def test_iter():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
assert all(e == i for e, i in s1)
def test_eq():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("S1", energies, intensities)
s2 = SticksSpectrum("S1", energies, intensities)
s3 = SticksSpectrum("S1", energies, intensities, style="MS")
s4 = SticksSpectrum("S4", energies, intensities)
s5 = SticksSpectrum("S5", energies, intensities, y_shift=6)
assert s1 == s2
assert s1 != s3
assert s1 != s4
assert s1 != s5
def test_len():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("S1", energies, intensities)
s2 = SticksSpectrum("S1", energies, intensities)
assert len(s1) == len(energies)
assert len(s2) == len(energies)
def test_str():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
assert str(s1) == "<SticksSpectrum: Hello World>"
def test_add_sub():
energies1, intensities1 = np.arange(10), np.arange(10)
energies2, intensities2 = np.arange(20), np.arange(20)
s1 = SticksSpectrum("Hello World", energies1, intensities1)
s1 + s1
s2 = 1 + s1
s3 = s2 - 1
s4 = 1 - s3
s5 = s1 - s1
s6 = s1 - s2
s7 = SticksSpectrum("Hello Big World", energies2, intensities2)
s1 + s7
s1 - s7
s = s1.copy()
s.energies += 1
s + s1
s - s1
assert s1.name == "Hello World"
assert s2.name == "Hello World + 1"
assert s3.name == "Hello World + 1 – 1"
assert s4.name == "1 – Hello World + 1 – 1"
assert s5.name == "Hello World – Hello World"
assert s6.name == "Hello World – Hello World + 1"
aae(s1.energies, s2.energies)
aae(s1.energies, s3.energies)
aae(s1.energies, s4.energies)
aae(s3.intensities, s1.intensities)
def test_abs():
energies, intensities1, intensities2 = np.arange(10), np.arange(10), np.arange(10)
intensities2[5:] = -intensities2[5:]
s1 = SticksSpectrum("S1", energies, intensities1)
s2 = SticksSpectrum("S2", energies, intensities2)
assert s1 != s2
assert any(s1.intensities != s2.intensities)
aae(s1.intensities, abs(s2).intensities)
def test_mul():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("S1", energies, intensities)
s1 * s1
def test_div():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("S1", energies, intensities)
div = s1 / s1
aae(div.energies, range(10))
aae(div.intensities, [np.nan] + [1] * 9)
def test_copy():
energies, intensities = np.arange(1, 11), np.arange(1, 11)
s1 = SticksSpectrum("Hello World", energies, intensities)
s2 = s1.copy()
assert s1 == s2
assert id(s1) != id(s2)
def test_domain():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
assert s1.domain == (0, 9)
@pytest.mark.xfail(raises=NotImplementedError)
def test_smoothed():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.smoothed()
def test_baseline_subtracted():
energies, intensities = np.arange(1, 11), np.arange(1, 11)
s1 = SticksSpectrum("Hello World", energies, intensities)
s2 = s1.baseline_subtracted()
s3 = s1.baseline_subtracted(9)
aae(s1.intensities - 1, s2.intensities)
aae(s1.intensities - 9, s3.intensities)
@pytest.mark.xfail(raises=NotImplementedError)
def test_set_zero():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.set_zero(99)
def test_sliced():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.sliced()
def test_from_csvs(tmp_path):
test_csv = f"{tmp_path}/test.csv"
with open(test_csv, "w") as f:
f.write("x,A,B\n0,2,4\n1,3,5")
SticksSpectrum.from_csvs(test_csv)
SticksSpectrum.from_csvs("tests/files/xrd.csv")
@pytest.mark.xfail(raises=NotImplementedError)
def test_norm():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.norm()
def test_normed():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.normed()
@pytest.mark.xfail(raises=NotImplementedError)
def test_peaks():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.peaks()
def test_min_max():
s1 = SticksSpectrum.from_csvs("tests/files/spectrum1.csv")[0]
assert min(s1) == (5, 0)
assert max(s1) == (25, 0)
assert s1.min == (16, -10)
assert s1.max == (13, 21)
@pytest.mark.xfail(raises=NotImplementedError)
def test_correlation():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.correlation(s1)
def test_convert():
energies, intensities = np.arange(10), np.arange(10)
s1 = SticksSpectrum("Hello World", energies, intensities)
s1.convert(2, npoints=100)
s1.convert(2, npoints=100, energy_lim=(-5, 50))
|
1642702
|
from enum import Enum
from typing import List
from typing import Optional
import numpy as np
import pandas as pd
from etna.transforms.base import PerSegmentWrapper
from etna.transforms.base import Transform
class ImputerMode(str, Enum):
"""Enum for different imputation strategy."""
zero = "zero"
mean = "mean"
running_mean = "running_mean"
forward_fill = "forward_fill"
seasonal = "seasonal"
class _OneSegmentTimeSeriesImputerTransform(Transform):
"""One segment version of transform to fill NaNs in series of a given dataframe.
- It is assumed that given series begins with first non NaN value.
- This transform can't fill NaNs in the future, only on train data.
- This transform can't fill NaNs if all values are NaNs. In this case exception is raised.
"""
def __init__(self, in_column: str, strategy: str, window: int, seasonality: int, default_value: Optional[float]):
"""
Create instance of _OneSegmentTimeSeriesImputerTransform.
Parameters
----------
in_column:
name of processed column
strategy:
filling value in missing timestamps:
- If "zero", then replace missing dates with zeros
- If "mean", then replace missing dates using the mean in fit stage.
- If "running_mean" then replace missing dates using mean of subset of data
- If "forward_fill" then replace missing dates using last existing value
- If "seasonal" then replace missing dates using seasonal moving average
window:
In case of moving average and seasonality.
* If ``window=-1`` all previous dates are taken in account
* Otherwise only window previous dates
seasonality:
the length of the seasonality
default_value:
value which will be used to impute the NaNs left after applying the imputer with the chosen strategy
Raises
------
ValueError:
if incorrect strategy given
"""
self.in_column = in_column
self.strategy = ImputerMode(strategy)
self.window = window
self.seasonality = seasonality
self.default_value = default_value
        self.fill_value: Optional[float] = None
self.nan_timestamps: Optional[List[pd.Timestamp]] = None
def fit(self, df: pd.DataFrame) -> "_OneSegmentTimeSeriesImputerTransform":
"""
Fit preprocess params.
Parameters
----------
df: pd.DataFrame
dataframe with series to fit preprocess params with
Returns
-------
self: _OneSegmentTimeSeriesImputerTransform
fitted preprocess
"""
raw_series = df[self.in_column]
if np.all(raw_series.isna()):
raise ValueError("Series hasn't non NaN values which means it is empty and can't be filled.")
series = raw_series[raw_series.first_valid_index() :]
self.nan_timestamps = series[series.isna()].index
if self.strategy == ImputerMode.zero:
self.fill_value = 0
elif self.strategy == ImputerMode.mean:
self.fill_value = series.mean()
return self
def transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Transform given series.
Parameters
----------
df: pd.Dataframe
transform ``in_column`` series of given dataframe
Returns
-------
result: pd.DataFrame
dataframe with in_column series with filled gaps
"""
result_df = df.copy()
cur_nans = result_df[result_df[self.in_column].isna()].index
result_df[self.in_column] = self._fill(result_df[self.in_column])
# restore nans not in self.nan_timestamps
restore_nans = cur_nans.difference(self.nan_timestamps)
result_df.loc[restore_nans, self.in_column] = np.nan
return result_df
def inverse_transform(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Inverse transform dataframe.
Parameters
----------
df: pd.Dataframe
inverse transform ``in_column`` series of given dataframe
Returns
-------
result: pd.DataFrame
dataframe with in_column series with initial values
"""
result_df = df.copy()
index = result_df.index.intersection(self.nan_timestamps)
result_df.loc[index, self.in_column] = np.nan
return result_df
def _fill(self, df: pd.Series) -> pd.Series:
"""
Create new Series taking all previous dates and adding missing dates.
Fills missed values for new dates according to ``self.strategy``
Parameters
----------
df: pd.Series
series to fill
Returns
-------
result: pd.Series
"""
if self.nan_timestamps is None:
raise ValueError("Trying to apply the unfitted transform! First fit the transform.")
if self.strategy == ImputerMode.zero or self.strategy == ImputerMode.mean:
df = df.fillna(value=self.fill_value)
elif self.strategy == ImputerMode.forward_fill:
df = df.fillna(method="ffill")
elif self.strategy == ImputerMode.running_mean or self.strategy == ImputerMode.seasonal:
history = self.seasonality * self.window if self.window != -1 else len(df)
timestamps = list(df.index)
for timestamp in self.nan_timestamps:
i = timestamps.index(timestamp)
indexes = np.arange(i - self.seasonality, i - self.seasonality - history, -self.seasonality)
indexes = indexes[indexes >= 0]
df.iloc[i] = np.nanmean(df.iloc[indexes])
        if self.default_value is not None:  # a default value of 0 must not be skipped
            df = df.fillna(value=self.default_value)
return df
class TimeSeriesImputerTransform(PerSegmentWrapper):
"""Transform to fill NaNs in series of a given dataframe.
- It is assumed that given series begins with first non NaN value.
- This transform can't fill NaNs in the future, only on train data.
- This transform can't fill NaNs if all values are NaNs. In this case exception is raised.
Warning
-------
This transform can suffer from look-ahead bias in 'mean' mode. For transforming data at some timestamp
it uses information from the whole train part.
"""
def __init__(
self,
in_column: str = "target",
strategy: str = ImputerMode.zero,
window: int = -1,
seasonality: int = 1,
default_value: Optional[float] = None,
):
"""
Create instance of TimeSeriesImputerTransform.
Parameters
----------
in_column:
name of processed column
strategy:
filling value in missing timestamps:
- If "zero", then replace missing dates with zeros
- If "mean", then replace missing dates using the mean in fit stage.
- If "running_mean" then replace missing dates using mean of subset of data
- If "forward_fill" then replace missing dates using last existing value
- If "seasonal" then replace missing dates using seasonal moving average
window:
In case of moving average and seasonality.
* If ``window=-1`` all previous dates are taken in account
* Otherwise only window previous dates
seasonality:
the length of the seasonality
default_value:
value which will be used to impute the NaNs left after applying the imputer with the chosen strategy
Raises
------
ValueError:
if incorrect strategy given
"""
self.in_column = in_column
self.strategy = strategy
self.window = window
self.seasonality = seasonality
self.default_value = default_value
super().__init__(
transform=_OneSegmentTimeSeriesImputerTransform(
in_column=self.in_column,
strategy=self.strategy,
window=self.window,
seasonality=self.seasonality,
default_value=self.default_value,
)
)
__all__ = ["TimeSeriesImputerTransform"]
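if __name__ == "__main__":
    # Minimal sketch using the one-segment transform directly on a plain
    # DataFrame; the public TimeSeriesImputerTransform expects etna's
    # per-segment wide format instead.
    idx = pd.date_range("2021-01-01", periods=6, freq="D")
    df = pd.DataFrame({"target": [1.0, 2.0, np.nan, 4.0, np.nan, 6.0]}, index=idx)
    one_segment = _OneSegmentTimeSeriesImputerTransform(
        in_column="target", strategy="mean", window=-1, seasonality=1, default_value=None
    )
    print(one_segment.fit(df).transform(df)["target"].tolist())
    # [1.0, 2.0, 3.25, 4.0, 3.25, 6.0]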
|
1642721
|
import os
from _common_search_paths import charm_path_search, grackle_path_search
is_arch_valid = 1
#python_lt_27 = 1
flags_arch = '-O3 -Wall -g'
#flags_arch = '-Wall -g'
flags_link = '-rdynamic'
#optional fortran flag
flags_arch_fortran = '-ffixed-line-length-132'
cc = 'gcc'
f90 = 'gfortran'
#flags_prec_single = '-fdefault-real-4 -fdefault-double-8'
flags_prec_single = ''
flags_prec_double = '-fdefault-real-8 -fdefault-double-8'
libpath_fortran = ''
libs_fortran = ['gfortran']
home = os.environ['HOME']
hdf5_path = os.getenv('HDF5HOME',None)
if hdf5_path is not None:
hdf5_inc = hdf5_path + '/include'
hdf5_lib = hdf5_path + '/lib'
else:
# the following environment variables are set by the hdf5 module
hdf5_inc = os.environ['TACC_HDF5_INC']
hdf5_lib = os.environ['TACC_HDF5_LIB']
boost_path = os.environ['BOOST_ROOT']
boost_inc = boost_path + '/include'
boost_lib = boost_path + '/lib'
#--------------------------------------------------
# CHARM
#
# Change charm_path below to match where your copy is. To compile
# Charm++ on Stampede with GNU compilers, use the following:
#
# ./build charm++ ofi-linux-x86_64 -j8 --with-production --enable-tracing
#
#--------------------------------------------------
if os.path.isdir(home + '/Charm/682/gnu/omni/charm'):
charm_path = home + '/Charm/682/gnu/omni/charm'
else:
charm_path = charm_path_search(home)
if ((os.getenv("TACC_PAPI_LIB", None) is not None) and
(os.getenv("TACC_PAPI_INC", None) is not None)):
papi_inc = os.environ["TACC_PAPI_INC"]
papi_lin = os.environ["TACC_PAPI_LIB"]
else:
papi_inc = home + '/include'
papi_lib = home + '/lib'
png_path = '/usr/lib64'
if os.path.isdir(home + '/public/Grackle/src/clib'):
grackle_path = home + '/public/Grackle/src/clib'
else:
grackle_path = grackle_path_search(home)
|
1642732
|
r"""
Contains a list of constants and user defined units
"""
__author__ = "<NAME>"
__version__ = "0.1"
import yaml
from dataclasses import replace, asdict, is_dataclass, field, dataclass as dat
from typing import Dict, Optional, List, Union, Literal, NamedTuple
import operator
import functools
from pydantic.dataclasses import dataclass
from astropy import units as u
from astropy.table import QTable
from astropy.cosmology import FlatLambdaCDM
from colorama import Fore
AllLines = Literal["all"]
CenterConstraint = Literal["free", "constrained", "fixed"]
class Quantity(u.SpecificTypeQuantity):
"""
Validation of the types of unit for each parameter, to ensure the right type
is being given.
"""
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
return cls(v)
@dat(frozen=True, eq=True, unsafe_hash=True)
class SourceIdentifier:
sample: str
setup: str
pointing: str
source: int
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
sample, setup, pointing, source = v.split('.')
return cls(sample, setup, pointing, int(source))
class Length(Quantity):
_equivalent_unit = u.m
class Frequency(Quantity):
_equivalent_unit = (1 / u.s).unit
class Temperature(Quantity):
_equivalent_unit = u.K
def override(basic, overrides):
"""
Function to override parameters inside a dictionary or a dataclass. Only
items set in overrides will replace their corresponding items in basic.
Function recursively goes inside each item if they are dict and replaces
items inside (element by element).
Input:
basic: reference object which we want to override
overrides: object used to override elements inside basic.
Return:
dict with values from basic replaced with values in overrides, where
they are set.
"""
    # If the reference object is empty, set it to an empty dict. This is
    # needed to correctly override unset parameters.
    if basic is None:
        basic = {}
    # If no overrides are set, just return the reference object.
    if overrides is None:
        return basic
# Convert inputs to dict.
if is_dataclass(basic):
basic = asdict(basic)
if is_dataclass(overrides):
overrides = asdict(overrides)
    # Return the merged dict: items in basic are replaced by their values
    # from overrides whenever those are set (not None); nested dicts are
    # merged recursively.
return {
**basic,
**{
key: value
for key, value in overrides.items()
if value is not None and not isinstance(value, dict)
},
**{
key: override(basic.get(key), value)
for key, value in overrides.items()
if isinstance(value, dict)
},
}
@dataclass
class Cosmology:
"""
Cosmological parameters. Contains sensible default values and checks for
correct types.
"""
H0: Frequency = 70 * u.km / (u.Mpc * u.s)
Om0: float = 0.3
Tcmb0: Temperature = 2.725 * u.K
@property
def cosmo(self):
# Cosmology using Astropy method
return FlatLambdaCDM(H0=self.H0, Om0=self.Om0, Tcmb0=self.Tcmb0)
@dataclass
class FittingParametersOverrides:
"""
Class to hold overrides for fitting parameters. The overrides can come from
setups or individual sources. If no overrides are set, then the fitting
parameters are set to the defaults in the FittingParameters class.
"""
SN_limit: Optional[float] = None
tolerance: Optional[Length] = None
w: Optional[Length] = None
mask_width: Optional[Length] = None
cont_width: Optional[Length] = None
center: Optional[CenterConstraint] = None
@dataclass
class FittingParameters:
"""
Paramters needed for the fitting of the lines. Contains sensible default
values and checks for correct types.
"""
SN_limit: float = 2
tolerance: Length = 26.0 * u.Angstrom
w: Length = 3 * u.Angstrom
mask_width: Length = 20 * u.Angstrom
cont_width: Length = 70 * u.Angstrom
center: CenterConstraint = "free"
@dataclass
class ConfigOverrides:
"""
Different configuration overrides. The overrides can come from setups or
individual sources. If no overrides are set, then the parameters are set to
the defaults and values (if the object is mandatory and no default can be
set) in the Config class.
"""
sky: Optional[str] = None
mask_sky: Optional[str] = None
line_table: Optional[str] = None
resolution: Optional[Length] = None
lines: Union[AllLines, None, List[str]] = None
fitting: Optional[FittingParametersOverrides] = None
@dataclass
class Config:
"""
An entire configuration, customizable per telescope and per source.
"""
line_table: str
resolution: Length
sky: Optional[str] = None
mask_sky: bool = False
lines: Union[AllLines, List[str]] = "all"
fitting: FittingParameters = FittingParameters()
cosmology: Cosmology = Cosmology()
@property
def line_list(self):
"""
Return a QTable containing the lines to be fit for each source.
Input:
self.line_table: Fits table on disk
self.lines: Which lines from the table to select
Output:
QTable
"""
table = QTable.read(self.line_table)
table.sort("wavelength")
if self.lines == "all":
return table
else:
lines = {line.strip() for line in self.lines}
mask = [line.strip() in lines for line in table["line"]]
return table[mask]
@property
def sky_list(self):
"""
Return a sky table of regions contaminated by sky emission/absorption
that should be masked or None if not masking is necessary.
Input:
self.line_table: Fits table on disk
self.mask_sky: Should the wavelengths affected by sky be masked in the
fitting.
Output:
QTable
"""
# If no sky fits table is set, then no sky lines will be masked
if self.sky is None:
            if self.mask_sky:
                print(
                    Fore.YELLOW
                    + "Warning: you asked to mask the sky, but no valid sky catalog was set. Sky will not be masked."
                )
                self.mask_sky = False
return None
else:
            # If the sky fits table exists, decide whether to mask or not
            # based on user preferences.
if self.mask_sky is True:
return QTable.read(self.sky)
if self.mask_sky is False:
return None
@dataclass
class Constants:
"""
Constants pertaining to cosmology, to fitting procedures and other telescope
or source specific parameters. Contains sensible defaults and check for unit
type correctness.
"""
cosmology: Optional[Cosmology] = None
globals: ConfigOverrides = ConfigOverrides()
setups: Dict[str, ConfigOverrides] = field(default_factory=dict)
sources: Dict[SourceIdentifier, ConfigOverrides] = field(default_factory=dict)
def __call__(
self, sample: str, setup_name: str, pointing: str, source_number: int,
) -> Config:
# Override the defaults with values set in the used configuration file.
# First override with any global values, then with telescope specific
# values and then with any source specific values. Cosmology is fixed
# for the entire project.
extra = functools.reduce(
override,
[
{},
self.globals,
self.setups.get(setup_name),
self.sources.get(SourceIdentifier(sample, setup_name, pointing, source_number)),
{"cosmology": self.cosmology},
],
)
return Config(**extra)
def read_config(config_file) -> Constants:
"""
Read YAML configuration file into a class. Not all parameters have to be
set. It not set, a parameter will be set to the default value. The class has
defaults that the config file will override. The unit types will also be
checked for correctness.
Input:
config_file: path to YAML parameter file
Output:
return a Constants dataclass instance with the defaults and config
overrides.
"""
config = yaml.safe_load(open(config_file).read())
return Constants(**config)
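if __name__ == "__main__":
    # Minimal usage sketch (hypothetical file names): build Constants directly
    # instead of reading YAML, then resolve the effective Config for a source.
    consts = Constants(
        globals={"line_table": "lines.fits", "resolution": 1.0 * u.Angstrom},
    )
    cfg = consts("sampleA", "setup1", "pointing1", 7)
    print(cfg.resolution, cfg.fitting.SN_limit)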
|
1642750
|
del_items(0x800A0AE0)
SetType(0x800A0AE0, "void VID_OpenModule__Fv()")
del_items(0x800A0BA0)
SetType(0x800A0BA0, "void InitScreens__Fv()")
del_items(0x800A0C90)
SetType(0x800A0C90, "void MEM_SetupMem__Fv()")
del_items(0x800A0CBC)
SetType(0x800A0CBC, "void SetupWorkRam__Fv()")
del_items(0x800A0D4C)
SetType(0x800A0D4C, "void SYSI_Init__Fv()")
del_items(0x800A0E58)
SetType(0x800A0E58, "void GM_Open__Fv()")
del_items(0x800A0E7C)
SetType(0x800A0E7C, "void PA_Open__Fv()")
del_items(0x800A0EB4)
SetType(0x800A0EB4, "void PAD_Open__Fv()")
del_items(0x800A0EF8)
SetType(0x800A0EF8, "void OVR_Open__Fv()")
del_items(0x800A0F18)
SetType(0x800A0F18, "void SCR_Open__Fv()")
del_items(0x800A0F48)
SetType(0x800A0F48, "void DEC_Open__Fv()")
del_items(0x800A11BC)
SetType(0x800A11BC, "char *GetVersionString__FPc(char *VersionString2)")
del_items(0x800A1290)
SetType(0x800A1290, "char *GetWord__FPc(char *VStr)")
|
1642751
|
import orjson
from rest_framework.renderers import JSONRenderer
from rest_framework.utils.serializer_helpers import ReturnDict, ReturnList
class ORJSONRenderer(JSONRenderer):
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render `data` into JSON, returning a bytestring.
"""
if data is None:
return bytes()
return orjson.dumps(data, default=serialize_arbitrary_type)
def serialize_arbitrary_type(data):
    if isinstance(data, ReturnDict):
        return dict(data)
    if isinstance(data, ReturnList):
        return [dict(item) for item in data]
    # orjson expects default() to raise for types it cannot handle;
    # returning None would silently serialize them as null.
    raise TypeError(f"Type is not JSON serializable: {type(data)}")
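# A minimal usage sketch, assuming a standard DRF settings module (the
# dotted path to this renderer is hypothetical):
#
#   REST_FRAMEWORK = {
#       "DEFAULT_RENDERER_CLASSES": ["app.renderers.ORJSONRenderer"],
#   }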
|
1642817
|
import unittest
from pathlib import Path
import colab_transfer
class TestTransferMethods(unittest.TestCase):
def get_dummy_data_root(self):
data_root_folder_name = 'dummy_data_for_unit_test/'
return data_root_folder_name
def create_dummy_data(self):
input_data_folder_name = self.get_dummy_data_root() + 'input/'
inner_input_data_folder_name = input_data_folder_name + 'inner_folder/'
Path(inner_input_data_folder_name).mkdir(exist_ok=True, parents=True)
deeper_input_data_folder_name = input_data_folder_name + 'second_inner_folder/deeper_folder/'
Path(deeper_input_data_folder_name).mkdir(exist_ok=True, parents=True)
Path(input_data_folder_name + 'dummy_file.txt').touch(exist_ok=True)
Path(inner_input_data_folder_name + 'inner_dummy_file.txt').touch(exist_ok=True)
Path(deeper_input_data_folder_name + 'deep_inner_dummy_file.txt').touch(exist_ok=True)
return
def test_copy_file(self):
self.create_dummy_data()
input_file_name = 'dummy_file.txt'
input_folder = 'dummy_data_for_unit_test/input/'
output_data_folder_name = self.get_dummy_data_root() + 'output/'
colab_transfer.copy_file(
file_name=input_file_name,
source=input_folder,
destination=output_data_folder_name,
)
path_to_output_file = output_data_folder_name + input_file_name
self.assertTrue(Path(path_to_output_file).exists())
def test_copy_folder_structure(self):
self.create_dummy_data()
input_folder = 'dummy_data_for_unit_test/input/'
output_data_folder_name = self.get_dummy_data_root() + 'output/'
colab_transfer.copy_folder_structure(
source=input_folder,
destination=output_data_folder_name,
)
for input_file_name in [
'dummy_file.txt',
'inner_folder/inner_dummy_file.txt',
'second_inner_folder/deeper_folder/deep_inner_dummy_file.txt',
]:
path_to_output_file = output_data_folder_name + input_file_name
self.assertTrue(Path(path_to_output_file).exists())
if __name__ == '__main__':
unittest.main()
|
1642821
|
import sys
import matplotlib
matplotlib.use("Agg")
from pylab import *
base = '../'
sys.path.append(base+"utils/Correlation")
sys.path.append(base+"utils/OptExtract")
sys.path.append(base+"utils/GLOBALutils")
baryc_dir= base+'utils/SSEphem/'
sys.path.append(baryc_dir)
ephemeris='DEc403'
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# ceres modules
import harpsutils
import correlation
import GLOBALutils
import Marsh
# other useful modules
import argparse
import ephem
import jplephem
from math import radians as rad
from astropy.io import fits as pyfits
import pickle
import os
import scipy
import scipy.interpolate
from scipy import interpolate
import statsmodels.api as sm
lowess = sm.nonparametric.lowess
# Receive input parameters
parser = argparse.ArgumentParser()
parser.add_argument('directorio')
parser.add_argument('-avoid_plot', action="store_true", default=False)
parser.add_argument('-dirout',default='default')
parser.add_argument('-do_class', action="store_true", default=False)
parser.add_argument('-just_extract', action="store_true", default=False)
parser.add_argument('-npools', default=1)
parser.add_argument('-o2do',default='all')
parser.add_argument('-reffile',default='default')
parser.add_argument('-mode', default='HARPS')
parser.add_argument('-do_sim', action="store_true", default=False)
args = parser.parse_args()
dirin = args.directorio
avoid_plot = args.avoid_plot
dirout = args.dirout
DoClass = args.do_class
JustExtract = args.just_extract
npools = int(args.npools)
object2do = args.o2do
reffile = args.reffile
mode = args.mode
dosim = args.do_sim
if dirin[-1] != '/':
dirin = dirin + '/'
if dirout == 'default':
dirout = dirin[:-1]+'_red/'
if not os.access(dirout,os.F_OK):
os.system('mkdir '+dirout)
if os.access(dirout+'proc',os.F_OK):
os.system('rm -r '+dirout+'proc')
os.system('mkdir '+dirout+'proc')
f_res = open(dirout+'proc/'+'results.txt','w')
if reffile == 'default':
reffile = dirin+'reffile.txt'
####### GLOBAL VARIABLES #####
force_pre_process = False
force_flat_extract = False
force_thar_extract = False
force_thar_wavcal = False
force_tharxc = False
force_sci_extract = False
force_stellar_pars = False
force_spectral_file_build = True
dumpargon = False
minlines_glob = 1000
minlines_initial = 50
Inverse_m = True
use_cheby = True
MRMS_initial = 50 # max rms in m/s, initial wav solution
MRMS = 20 # max rms in m/s, global wav solution
trace_degree = 6
Marsh_alg = 0
ext_aperture_R = 5
ext_aperture_B = 5
NSigma_Marsh = 5
NCosmic_Marsh = 5
S_Marsh = 0.4
N_Marsh = 3 # polynomial degree
min_extract_col = 0
max_extract_col = 4095
porder = 4
ncoef_x_B = 5
ncoef_m_B = 7
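# npar_wsol_* counts the coefficients of the joint 2-D Chebyshev polynomial
# in pixel (degree ncoef_x) and echelle order (degree ncoef_m) used for the
# global wavelength solution; e.g. ncoef_x_B=5 and ncoef_m_B=7 give
# (5+1)*(2*7-5+2)/2 = 33 free parameters.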
npar_wsol_B = (min(ncoef_x_B,ncoef_m_B) + 1) * (2*max(ncoef_x_B,ncoef_m_B) - min(ncoef_x_B,ncoef_m_B) + 2) / 2
ncoef_x_R = 5
ncoef_m_R = 6
npar_wsol_R = (min(ncoef_x_R,ncoef_m_R) + 1) * (2*max(ncoef_x_R,ncoef_m_R) - min(ncoef_x_R,ncoef_m_R) + 2) / 2
or0_R = 89
or0_B = 116
models_path = base+"data/COELHO_MODELS/R_40000b/"
order_dir = "wavcals/"
final_wav = '.iwdat'
RESI = 120000.
if mode=='EGGS':
RESI = 85000.
MRMS_initial = 100.
MRMS = 50.
final_wav = '_eggs.iwdat'
#############################
print "\n\n\tHARPS ESO3.6m PIPELINE\n"
print "\tRAW data is in ",dirin
print "\tProducts of reduction will be in",dirout
print '\n'
# file containing the log
log = dirout+'night.log'
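# FileClassify sorts the raw frames into biases, flats (combined plus
# object- and comparison-fibre), ThAr reference frames with their dates, and
# science frames with object names, exposure times and comparison-fibre
# types, writing a summary to the night log.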
biases, flats, ob_flats, co_flats, ThAr_ref, sim_sci, ThAr_ref_dates, obnames, exptimes, co_types = harpsutils.FileClassify(dirin,log,mode=mode)
print '\tThis is the log of the night:\n'
f = open(log)
flines = f.readlines()
for line in flines:
print '\t'+line[:-1]
print '\n'
if ( (os.access(dirout+'FlatOb_'+ mode +'.fits',os.F_OK) == False and len(ob_flats)!=0) or \
(os.access(dirout+'FlatCo_'+ mode +'.fits',os.F_OK) == False and len(co_flats)!=0) or \
(os.access(dirout+'Flat_'+ mode +'.fits', os.F_OK) == False and len(flats)!=0) or \
(os.access(dirout+'trace_'+ mode +'.pkl',os.F_OK) == False) or \
(os.access(dirout+'MasterBias_'+ mode +'.fits',os.F_OK) == False and len(biases)!=0) or \
(force_pre_process) ):
print "\tNo previous pre-processing files or found"
pre_process = 1
else:
print "\tPre-processing files found, going straight to extraction"
pre_process = 0
if (pre_process == 1):
# median combine Biases
print "\t\tGenerating Master calibration frames..."
MasterBias, RO_bias, GA_bias = harpsutils.MedianCombine(biases)
hdu = pyfits.PrimaryHDU( MasterBias )
if (os.access(dirout+'MasterBias_'+mode+'.fits',os.F_OK)):
os.remove(dirout+'MasterBias_'+mode+'.fits')
hdu.writeto(dirout+'MasterBias_'+mode+'.fits')
print "\t\t-> Masterbias: done!"
# median combine list of ob flats
Flat_ob, RO_ob, GA_ob = harpsutils.MedianCombine(ob_flats, zero=dirout+'MasterBias_'+mode+'.fits')
# save this file for later reference
hdu = pyfits.PrimaryHDU( Flat_ob )
if (os.access(dirout+'FlatOb_'+ mode +'.fits',os.F_OK)):
os.remove(dirout+'FlatOb_'+ mode +'.fits')
hdu.writeto(dirout+'FlatOb_'+ mode +'.fits')
# median combine list of co flats
Flat_co, RO_co, GA_co = harpsutils.MedianCombine(co_flats, zero=dirout+'MasterBias_'+mode+'.fits')
hdu = pyfits.PrimaryHDU(Flat_co)
if (os.access(dirout+'FlatCo_'+ mode +'.fits',os.F_OK)):
os.remove(dirout+'FlatCo_'+ mode +'.fits')
hdu.writeto(dirout+'FlatCo_'+ mode +'.fits')
# median combine list of flats
Flat, RO_fl, GA_fl = harpsutils.MedianCombine(flats, zero=dirout+'MasterBias_'+mode+'.fits')
hdu = pyfits.PrimaryHDU(Flat)
if (os.access(dirout+'Flat_'+mode+'.fits',os.F_OK)):
os.remove(dirout+'Flat_'+mode+'.fits')
hdu.writeto(dirout+'Flat_'+mode+'.fits')
print "\t\t-> Masterflat: done!"
print "\tTracing echelle orders..."
c_all1,nord_all1 = GLOBALutils.get_them(Flat[:,:,0],ext_aperture_B,trace_degree,mode=1)
c_all1 = c_all1[5:]
nord_all1 = len(c_all1)
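    # Traces alternate object/comparison fibre across the detector: the
    # even-indexed traces are object orders, the odd-indexed ones are
    # comparison orders.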
I = np.arange(0,nord_all1,2).astype('int')
c_ob1 = c_all1[I]
nord_ob1 = len(I)
I = np.arange(1,nord_all1,2).astype('int')
c_co1 = c_all1[I]
nord_co1 = len(I)
c_all2,nord_all2 = GLOBALutils.get_them(Flat[:,:,1],ext_aperture_R,trace_degree,mode=1)
I = np.arange(0,nord_all2,2).astype('int')
c_ob2 = c_all2[I]
nord_ob2 = len(I)
I = np.arange(1,nord_all2,2).astype('int')
c_co2 = c_all2[I]
nord_co2 = len(I)
print "\t\t"+str(nord_ob1)+" object orders found in blue chip..."
print "\t\t"+str(nord_co1)+" comparison orders found in blue chip..."
print "\t\t"+str(nord_ob2)+" object orders found in red chip..."
print "\t\t"+str(nord_co2)+" comparison orders found in red chip..."
# pickle traces
trace_dict = {'c_ob1':c_ob1,
'c_co1':c_co1,
'c_ob2':c_ob2,
'c_co2':c_co2,
'c_all1':c_all1,
'c_all2':c_all2,
'nord_ob1':nord_ob1, 'nord_co1':nord_co1,
'nord_ob2':nord_ob2, 'nord_co2':nord_co2,
'GA_ob': GA_ob, 'RO_ob': RO_ob,
'GA_co': GA_co, 'RO_co': RO_co,
'GA_fl': GA_fl, 'RO_fl': RO_fl}
pickle.dump( trace_dict, open( dirout+"trace_"+mode+".pkl", 'w' ) )
else:
trace_dict = pickle.load( open( dirout+"trace_"+mode+".pkl", 'r' ) )
c_co1 = trace_dict['c_co1']
c_ob1 = trace_dict['c_ob1']
c_co2 = trace_dict['c_co2']
c_ob2 = trace_dict['c_ob2']
nord_ob1 = trace_dict['nord_ob1']
nord_co1 = trace_dict['nord_co1']
nord_ob2 = trace_dict['nord_ob2']
nord_co2 = trace_dict['nord_co2']
# recover GA*, RO*
GA_ob = trace_dict['GA_ob']
RO_ob = trace_dict['RO_ob']
GA_co = trace_dict['GA_co']
RO_co = trace_dict['RO_co']
GA_fl = trace_dict['GA_fl']
RO_fl = trace_dict['RO_fl']
# recover flats & master bias
h = pyfits.open(dirout+'FlatOb_'+ mode +'.fits')
Flat_ob = h[0].data
h = pyfits.open(dirout+'FlatCo_'+ mode +'.fits')
Flat_co = h[0].data
h = pyfits.open(dirout+'Flat_'+mode+'.fits')
Flat = h[0].data
h = pyfits.open(dirout+'MasterBias_'+mode+'.fits')
MasterBias = h[0].data
# mesh all orders
c_all1 = GLOBALutils.Mesh( c_ob1, c_co1)
c_all2 = GLOBALutils.Mesh( c_ob2, c_co2)
# Extract flat spectra, object
print '\n\tExtraction of Flat calibration frames:'
P_ob_B_fits = dirout + 'P_ob_B_'+mode+'.fits'
P_ob_R_fits = dirout + 'P_ob_R_'+mode+'.fits'
B_flat_ob_fits = dirout +'B_flat_ob_'+mode+'.fits'
R_flat_ob_fits = dirout +'R_flat_ob_'+mode+'.fits'
P_ob_B = np.zeros( (Flat_ob.shape[0],Flat_ob.shape[1]) )
P_ob_R = np.zeros( (Flat_ob.shape[0],Flat_ob.shape[1]) )
B_flat_ob = np.zeros((nord_ob1, 3, Flat_ob.shape[1]) )
R_flat_ob = np.zeros((nord_ob2, 3, Flat_ob.shape[1]) )
if ( os.access(P_ob_B_fits,os.F_OK) == False ) or ( os.access(B_flat_ob_fits,os.F_OK) == False ) or ( os.access(P_ob_R_fits,os.F_OK) == False ) or ( os.access(R_flat_ob_fits,os.F_OK) == False ) or (force_flat_extract):
print "\t\tNo extracted flat object spectra found or extraction forced, extracting and saving..."
bacfile = dirout + 'BACR_FLAT_'+mode+'.fits'
if os.access(bacfile,os.F_OK) == False:
CentersR = np.zeros((len(c_ob2),Flat_ob[:,:,1].shape[1]))
for i in range(len(c_ob2)):
CentersR[i,:]=np.polyval(c_ob2[i],np.arange(Flat_ob[:,:,1].shape[1]))
bacR = GLOBALutils.get_scat(Flat_ob[:,:,1],CentersR,span=20)
if (os.access(bacfile,os.F_OK)):
os.remove( bacfile )
hdbac = pyfits.PrimaryHDU( bacR )
hdbac.writeto(bacfile)
else:
bacR = pyfits.getdata(bacfile)
flR = Flat_ob[:,:,1] - bacR
bacfile = dirout + 'BACB_FLAT_'+mode+'.fits'
if os.access(bacfile,os.F_OK) == False:
CentersB = np.zeros((len(c_ob1),Flat_ob[:,:,0].shape[1]))
for i in range(len(c_ob1)):
CentersB[i,:]=np.polyval(c_ob1[i],np.arange(Flat_ob[:,:,0].shape[1]))
bacB = GLOBALutils.get_scat(Flat_ob[:,:,0],CentersB,span=10)
if (os.access(bacfile,os.F_OK)):
os.remove( bacfile )
hdbac = pyfits.PrimaryHDU( bacB )
hdbac.writeto(bacfile)
else:
bacB = pyfits.getdata(bacfile)
flB = Flat_ob[:,:,0] - bacB
P_ob_B = GLOBALutils.obtain_P(flB,c_ob1,ext_aperture_B,RO_fl[0],\
GA_fl[0],NSigma_Marsh, S_Marsh, \
N_Marsh, Marsh_alg, min_extract_col,\
max_extract_col, npools)
P_co_B = GLOBALutils.obtain_P(flB,c_co1,ext_aperture_B,RO_fl[0],\
GA_fl[0],NSigma_Marsh, S_Marsh, \
N_Marsh, Marsh_alg, min_extract_col,\
max_extract_col, npools)
    P_ob_R = GLOBALutils.obtain_P(flR,c_ob2,ext_aperture_R,RO_fl[1],\
            GA_fl[1],NSigma_Marsh, S_Marsh, \
            N_Marsh, Marsh_alg, min_extract_col,\
            max_extract_col, npools)
    P_co_R = GLOBALutils.obtain_P(flR,c_co2,ext_aperture_R,RO_fl[1],\
            GA_fl[1],NSigma_Marsh, S_Marsh, \
            N_Marsh, Marsh_alg, min_extract_col,\
            max_extract_col, npools)
print "\t\t\tWill extract",nord_ob1,"blue orders"
B_flat_ob = GLOBALutils.optimal_extraction(flB,P_ob_B,c_ob1,ext_aperture_B,RO_fl[0],GA_fl[0],\
S_Marsh,NCosmic_Marsh*10,min_extract_col,\
max_extract_col,npools)
B_flat_ob = GLOBALutils.invert(B_flat_ob)
print "\t\t\tWill extract",nord_ob2,"red orders"
R_flat_ob = GLOBALutils.optimal_extraction(flR,P_ob_R,c_ob2,ext_aperture_R,RO_fl[1],GA_fl[1],\
S_Marsh,NCosmic_Marsh*10,min_extract_col,\
max_extract_col,npools)
R_flat_ob = GLOBALutils.invert(R_flat_ob)
B_flat_ob,R_flat_ob = B_flat_ob[::-1],R_flat_ob[::-1]
# write P_on and S_flat_ob as fits files
if (os.access(P_ob_B_fits,os.F_OK)):
os.remove( P_ob_B_fits )
if (os.access(B_flat_ob_fits,os.F_OK)):
os.remove( B_flat_ob_fits )
if (os.access(P_ob_R_fits,os.F_OK)):
os.remove( P_ob_R_fits )
if (os.access(R_flat_ob_fits,os.F_OK)):
os.remove( R_flat_ob_fits )
hdu = pyfits.PrimaryHDU( P_ob_B )
hdu.writeto( P_ob_B_fits )
hdu = pyfits.PrimaryHDU( P_ob_R )
hdu.writeto( P_ob_R_fits )
hdu = pyfits.PrimaryHDU( B_flat_ob )
hdu.writeto( B_flat_ob_fits )
hdu = pyfits.PrimaryHDU( R_flat_ob )
hdu.writeto( R_flat_ob_fits )
else:
print "\t\tExtracted flat object spectra found, loading..."
P_ob_B = pyfits.getdata( P_ob_B_fits )
P_ob_R = pyfits.getdata( P_ob_R_fits )
B_flat_ob = pyfits.getdata( B_flat_ob_fits )
R_flat_ob = pyfits.getdata( R_flat_ob_fits )
# Extract flat spectra, comparison
P_co_B_fits = dirout + 'P_co_B_' + mode + '.fits'
P_co_R_fits = dirout + 'P_co_R_' + mode + '.fits'
B_flat_co_fits = dirout + 'B_flat_co_' + mode + '.fits'
R_flat_co_fits = dirout + 'R_flat_co_' + mode + '.fits'
P_co_B = np.zeros( (Flat_co.shape[0],Flat_co.shape[1]) )
P_co_R = np.zeros( (Flat_co.shape[0],Flat_co.shape[1]) )
B_flat_co = np.zeros((nord_co1, 3, Flat_co.shape[1]) )
R_flat_co = np.zeros((nord_co2, 3, Flat_co.shape[1]) )
if ( os.access(P_co_B_fits,os.F_OK) == False ) or ( os.access(B_flat_co_fits,os.F_OK) == False ) or ( os.access(P_co_R_fits,os.F_OK) == False ) or ( os.access(R_flat_co_fits,os.F_OK) == False ) or (force_flat_extract):
print "\t\tNo extracted flat comparison spectra found or extraction forced, extracting and saving..."
bacfile = dirout + 'BACR_FLAT_CO_'+mode+'.fits'
if os.access(bacfile,os.F_OK) == False:
CentersR = np.zeros((len(c_co2),Flat_co[:,:,1].shape[1]))
for i in range(len(c_co2)):
CentersR[i,:]=np.polyval(c_co2[i],np.arange(Flat_co[:,:,1].shape[1]))
bacR = GLOBALutils.get_scat(Flat_co[:,:,1],CentersR,span=20)
if (os.access(bacfile,os.F_OK)):
os.remove( bacfile )
hdbac = pyfits.PrimaryHDU( bacR )
hdbac.writeto(bacfile)
else:
bacR = pyfits.getdata(bacfile)
flR = Flat_co[:,:,1] - bacR
bacfile = dirout + 'BACB_FLAT_CO_'+mode+'.fits'
if os.access(bacfile,os.F_OK) == False:
CentersB = np.zeros((len(c_co1),Flat_co[:,:,0].shape[1]))
for i in range(len(c_co1)):
CentersB[i,:]=np.polyval(c_co1[i],np.arange(Flat_co[:,:,0].shape[1]))
bacB = GLOBALutils.get_scat(Flat_co[:,:,0],CentersB,span=10)
if (os.access(bacfile,os.F_OK)):
os.remove( bacfile )
hdbac = pyfits.PrimaryHDU( bacB )
hdbac.writeto(bacfile)
else:
bacB = pyfits.getdata(bacfile)
flB = Flat_co[:,:,0] - bacB
print "\t\t\tWill extract",nord_co1,"blue orders"
for i in range(nord_co1):
P_marsh = GLOBALutils.PCoeff( flB, c_co1[i,:], ext_aperture_B, RO_co[0], GA_co[0], NSigma_Marsh,\
S_Marsh, N_Marsh, Marsh_alg, min_extract_col,max_extract_col )
P_co_B += P_marsh
B_flat_co = GLOBALutils.optimal_extraction(Flat[:,:,0],P_co_B,c_co1,ext_aperture_B,RO_fl[0],GA_fl[0],\
S_Marsh,NCosmic_Marsh*10,min_extract_col,\
max_extract_col,npools)
B_flat_co = GLOBALutils.invert(B_flat_co)
print "\t\t\tWill extract",nord_co2,"red orders"
for i in range(nord_co2):
P_marsh = GLOBALutils.PCoeff( flR, c_co2[i,:], ext_aperture_R, RO_co[1], GA_co[1], NSigma_Marsh,\
S_Marsh, N_Marsh, Marsh_alg, min_extract_col,max_extract_col )
P_co_R += P_marsh
R_flat_co = GLOBALutils.optimal_extraction(Flat[:,:,1],P_co_R,c_co2,ext_aperture_R,RO_fl[1],GA_fl[1],\
S_Marsh,NCosmic_Marsh*10,min_extract_col,\
max_extract_col,npools)
R_flat_co = GLOBALutils.invert(R_flat_co)
B_flat_co,R_flat_co = B_flat_co[::-1],R_flat_co[::-1]
# write P_on and S_flat_co as fits files
if (os.access(P_co_B_fits,os.F_OK)):
os.remove( P_co_B_fits )
if (os.access(B_flat_co_fits,os.F_OK)):
os.remove( B_flat_co_fits )
if (os.access(P_co_R_fits,os.F_OK)):
os.remove( P_co_R_fits )
if (os.access(R_flat_co_fits,os.F_OK)):
os.remove( R_flat_co_fits )
hdu = pyfits.PrimaryHDU( P_co_B )
hdu.writeto( P_co_B_fits )
hdu = pyfits.PrimaryHDU( P_co_R )
hdu.writeto( P_co_R_fits )
hdu = pyfits.PrimaryHDU( B_flat_co )
hdu.writeto( B_flat_co_fits )
hdu = pyfits.PrimaryHDU( R_flat_co )
hdu.writeto( R_flat_co_fits )
else:
print "\t\tExtracted flat comparison spectra found, loading..."
P_co_B = pyfits.getdata( P_co_B_fits )
P_co_R = pyfits.getdata( P_co_R_fits )
B_flat_co = pyfits.getdata( B_flat_co_fits )
R_flat_co = pyfits.getdata( R_flat_co_fits )
# Normalize flat field spectra.
B_flat_ob_n,Bnorms = GLOBALutils.FlatNormalize_single( B_flat_ob, mid=2048)
R_flat_ob_n,Rnorms = GLOBALutils.FlatNormalize_single( R_flat_ob, mid=2048)
print '\n\tExtraction of ThAr calibration frames:'
# Extract all ThAr files
for fsim in ThAr_ref:
print "\t\tWorking on ThAr+Ne file ", fsim, "..."
hthar = pyfits.open( fsim )
dtharB = harpsutils.OverscanTrim( hthar[1].data ) - MasterBias[:,:,0]
dtharR = harpsutils.OverscanTrim( hthar[2].data ) - MasterBias[:,:,1]
    print '\t\t\tComputing scattered-light background (red chip)...'
bacfile = dirout + 'BACR_' + fsim.split('/')[-1][:-4]+'fits'
if os.access(bacfile,os.F_OK) == False:
CentersR = np.zeros((len(c_all2),dtharR.shape[1]))
for i in range(len(c_all2)):
CentersR[i,:]=np.polyval(c_all2[i],np.arange(dtharR.shape[1]))
bacR = GLOBALutils.get_scat(dtharR,CentersR,span=15)
if (os.access(bacfile,os.F_OK)):
os.remove( bacfile )
hdbac = pyfits.PrimaryHDU( bacR )
hdbac.writeto(bacfile)
else:
bacR = pyfits.getdata(bacfile)
    print '\t\t\tComputing scattered-light background (blue chip)...'
bacfile = dirout + 'BACB_' + fsim.split('/')[-1][:-4]+'fits'
if os.access(bacfile,os.F_OK) == False:
CentersB = np.zeros((len(c_all1),dtharB.shape[1]))
for i in range(len(c_all1)):
CentersB[i,:]=np.polyval(c_all1[i],np.arange(dtharB.shape[1]))
bacB = GLOBALutils.get_scat(dtharB,CentersB,span=7)
if (os.access(bacfile,os.F_OK)):
os.remove( bacfile )
hdbac = pyfits.PrimaryHDU( bacB )
hdbac.writeto(bacfile)
else:
bacB = pyfits.getdata(bacfile)
dtharR = dtharR - bacR
dtharB = dtharB - bacB
thar_fits_ob_R = dirout + fsim.split('/')[-1][:-4]+'spec.ob.R.fits.S'
thar_fits_co_R = dirout + fsim.split('/')[-1][:-4]+'spec.co.R.fits.S'
thar_fits_ob_B = dirout + fsim.split('/')[-1][:-4]+'spec.ob.B.fits.S'
thar_fits_co_B = dirout + fsim.split('/')[-1][:-4]+'spec.co.B.fits.S'
if ( os.access(thar_fits_ob_B,os.F_OK) == False ) or ( os.access(thar_fits_co_B,os.F_OK) == False ) \
or ( os.access(thar_fits_ob_R,os.F_OK) == False ) or ( os.access(thar_fits_co_R,os.F_OK) == False ) \
or ( force_thar_extract ):
print "\t\tNo previous extraction or extraction forced for ThAr file", fsim, "extracting..."
tR,tG = hthar[1].header['HIERARCH ESO DET OUT1 RON'],hthar[1].header['HIERARCH ESO DET OUT1 GAIN']
thar_S_ob_B = GLOBALutils.optimal_extraction(dtharB,P_ob_B,c_ob1,ext_aperture_B,tR,tG,\
S_Marsh,0*NCosmic_Marsh,min_extract_col,max_extract_col,npools)
for i in range(nord_ob1):
thar_S_ob_B[i,1,:] = thar_S_ob_B[i,1,:][::-1]
thar_S_ob_B[i,2,:] = thar_S_ob_B[i,2,:][::-1]
thar_S_co_B = GLOBALutils.optimal_extraction(dtharB,P_co_B,c_co1,ext_aperture_B,tR,tG,\
S_Marsh,0*NCosmic_Marsh,min_extract_col,max_extract_col,npools)
for i in range(nord_co1):
thar_S_co_B[i,1,:] = thar_S_co_B[i,1,:][::-1]
thar_S_co_B[i,2,:] = thar_S_co_B[i,2,:][::-1]
thar_S_ob_B,thar_S_co_B = thar_S_ob_B[::-1],thar_S_co_B[::-1]
tR,tG = hthar[2].header['HIERARCH ESO DET OUT1 RON'],hthar[2].header['HIERARCH ESO DET OUT1 GAIN']
thar_S_ob_R = GLOBALutils.optimal_extraction(dtharR,P_ob_R,c_ob2,ext_aperture_R,tR,tG,\
S_Marsh,0*NCosmic_Marsh,min_extract_col,max_extract_col,npools)
for i in range(nord_ob2):
thar_S_ob_R[i,1,:] = thar_S_ob_R[i,1,:][::-1]
thar_S_ob_R[i,2,:] = thar_S_ob_R[i,2,:][::-1]
thar_S_co_R = GLOBALutils.optimal_extraction(dtharR,P_co_R,c_co2,ext_aperture_R,tR,tG,\
S_Marsh,0*NCosmic_Marsh,min_extract_col,max_extract_col,npools)
for i in range(nord_co2):
thar_S_co_R[i,1,:] = thar_S_co_R[i,1,:][::-1]
thar_S_co_R[i,2,:] = thar_S_co_R[i,2,:][::-1]
thar_S_ob_R,thar_S_co_R = thar_S_ob_R[::-1],thar_S_co_R[::-1]
# save as fits file
if (os.access(thar_fits_ob_R,os.F_OK)):
os.remove( thar_fits_ob_R )
if (os.access(thar_fits_ob_B,os.F_OK)):
os.remove( thar_fits_ob_B )
if (os.access(thar_fits_co_R,os.F_OK)):
os.remove( thar_fits_co_R )
if (os.access(thar_fits_co_B,os.F_OK)):
os.remove( thar_fits_co_B )
hdu = pyfits.PrimaryHDU( thar_S_ob_B )
hdu.writeto( thar_fits_ob_B )
hdu = pyfits.PrimaryHDU( thar_S_ob_R )
hdu.writeto( thar_fits_ob_R )
hdu = pyfits.PrimaryHDU( thar_S_co_B )
hdu.writeto( thar_fits_co_B )
hdu = pyfits.PrimaryHDU( thar_S_co_R )
hdu.writeto( thar_fits_co_R )
else:
print "\t\tThAr file", fsim, "all ready extracted, loading..."
# create wavelength calibration files
print "\n\tWavelength solution of ThAr calibration spectra:"
sorted_ThAr_dates = np.argsort( ThAr_ref_dates )
for i in range(len(ThAr_ref_dates)):
index = sorted_ThAr_dates[i]
wavsol_pkl = dirout + ThAr_ref[index].split('/')[-1][:-4]+'wavsolpars.pkl'
wavsol_fits = dirout + ThAr_ref[index].split('/')[-1][:-4]+'spec.fits'
#force_thar_wavcal = True
if ( os.access(wavsol_pkl,os.F_OK) == False ) or (force_thar_wavcal):
print "\t\tWorking on initial ThAr file", ThAr_ref[index]
hthar = pyfits.open( ThAr_ref[index] )
mjd, mjd0 = harpsutils.mjd_fromheader( hthar )
thar_fits_ob_B = dirout + ThAr_ref[index].split('/')[-1][:-4]+'spec.ob.B.fits.S'
thar_fits_co_B = dirout + ThAr_ref[index].split('/')[-1][:-4]+'spec.co.B.fits.S'
thar_fits_ob_R = dirout + ThAr_ref[index].split('/')[-1][:-4]+'spec.ob.R.fits.S'
thar_fits_co_R = dirout + ThAr_ref[index].split('/')[-1][:-4]+'spec.co.R.fits.S'
thar_S_ob_B = pyfits.getdata( thar_fits_ob_B )
thar_S_co_B = pyfits.getdata( thar_fits_co_B )
thar_S_ob_R = pyfits.getdata( thar_fits_ob_R )
thar_S_co_R = pyfits.getdata( thar_fits_co_R )
lines_thar_ob_B = thar_S_ob_B[:,1,:]
iv_thar_ob_B = thar_S_ob_B[:,2,:]
lines_thar_co_B = thar_S_co_B[:,1,:]
iv_thar_co_B = thar_S_co_B[:,2,:]
lines_thar_ob_R = thar_S_ob_R[:,1,:]
iv_thar_ob_R = thar_S_ob_R[:,2,:]
lines_thar_co_R = thar_S_co_R[:,1,:]
iv_thar_co_R = thar_S_co_R[:,2,:]
c_p2w_ob_B = np.zeros((nord_ob1,porder+1))
c_p2w_ob_R = np.zeros((nord_ob2,porder+1))
c_p2w_co_B = np.zeros((nord_co1,porder+1))
c_p2w_co_R = np.zeros((nord_co2,porder+1))
spec_thar_ob = np.zeros((2,nord_ob1+nord_ob2,thar_S_ob_B.shape[2]))
All_Pixel_Centers_R = np.array([])
All_Wavelengths_R = np.array([])
All_Orders_R = np.array([])
All_Centroids_R = np.array([])
All_Sigmas_R = np.array([])
All_Intensities_R = np.array([])
counter = 0
temp_pix = np.array([])
temp_res = np.array([])
meds,ords = [],[]
for order in range(nord_ob2):
order_s = str(order)
if (order < 10):
order_s = '0' + str(order)
thar_order_orig = lines_thar_ob_R[order,:]
IV = iv_thar_ob_R[order,:]
wei = np.sqrt( IV )
#bkg = CoralieUtils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig #- bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms,\
residuals, centroids,sigmas, intensities = \
            GLOBALutils.Initial_Wav_Calibration( order_dir+'R_order_'+order_s+\
            final_wav, thar_order, order, wei, rmsmax=MRMS_initial, minlines=minlines_initial,\
            FixEnds=False,Dump_Argon=dumpargon, Dump_AllLines=True, Cheby=use_cheby, porder=porder)
if (order == int(np.around(0.5*nord_ob2))):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, int(np.around(0.5*len(thar_order))), len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers_R = np.append( All_Pixel_Centers_R, pixel_centers )
All_Wavelengths_R = np.append( All_Wavelengths_R, wavelengths )
All_Orders_R = np.append( All_Orders_R, np.zeros( len(pixel_centers) ) + order )
All_Centroids_R = np.append( All_Centroids_R, centroids)
All_Sigmas_R = np.append( All_Sigmas_R, sigmas)
All_Intensities_R = np.append( All_Intensities_R, intensities )
meds.append(GLOBALutils.Cheby_eval(coeffs_pix2wav,0.5*len(thar_order),len(thar_order)))
ords.append(order)
spec_thar_ob[0,counter] = GLOBALutils.Cheby_eval(coeffs_pix2wav,np.arange(len(thar_order)),len(thar_order))
spec_thar_ob[1,counter] = thar_order
counter += 1
pp1 = GLOBALutils.Cheby_eval(coeffs_pix2wav,pixel_centers + sigmas,len(thar_order))
pm1 = GLOBALutils.Cheby_eval(coeffs_pix2wav,pixel_centers - sigmas,len(thar_order))
wavsigmas = .5*(pp1 - pm1)
reses = wavelengths/(wavsigmas*2.355)
jji = 0
vecp,vecm = [],[]
while jji < len(reses):
if jji + 5 < len(reses):
argm = np.argmax(reses[jji:jji+5])
vecp.append(wavelengths[jji+argm])
vecm.append(reses[jji+argm])
jji += 5
vecp,vecm = np.array(vecp),np.array(vecm)
coef_res = np.polyfit(vecp,vecm,2)
#plot(wavelengths,np.polyval(coef_res,wavelengths))
#plot(vecp,vecm,'o')
#print order, rms_ms/np.sqrt(float(len(wavelengths))), rms_ms, len(residuals)
c_p2w_ob_R[order] = coeffs_pix2wav
isz = pixel_centers - sigmas
der = pixel_centers + sigmas
isz = GLOBALutils.Cheby_eval( coeffs_pix2wav, isz,len(thar_order))
der = GLOBALutils.Cheby_eval( coeffs_pix2wav, der,len(thar_order))
sig = 0.5*(der-isz)
fwhm = 2.35 * sig
resol = wavelengths / fwhm
temp_pix = np.hstack((temp_pix,pixel_centers))
temp_res = np.hstack((temp_res,resol))
#plot(pixel_centers,resol,'o')
p0 = np.zeros( npar_wsol_R )
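        # Initial guess for the global fit: the leading coefficient
        # approximates the roughly constant echelle product m*lambda,
        # evaluated at the central order.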
p0[0] = (int(np.around(0.5*nord_ob2))+or0_R) * Global_ZP
#GLOBALutils.get_zero_order_number(ords,meds)
p1_R, G_pix_R, G_ord_R, G_wav_R, II_R, rms_ms_R, G_res_R = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers_R, All_Wavelengths_R, All_Orders_R,\
np.ones(All_Intensities_R.shape), p0, Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob,\
order0=or0_R,ntotal=nord_ob2,npix=len(thar_order),nx=ncoef_x_R,nm=ncoef_m_R)
I = np.argsort(temp_pix)
temp_pix,temp_res = temp_pix[I],temp_res[I]
jj = 0
xx,yy = [],[]
while jj<len(thar_order):
I = np.where((temp_pix>=jj) & (temp_pix<jj+50))[0]
if len(I)>0:
xt,yt = temp_pix[I],temp_res[I]
I = np.argmax(yt)
xx.append(xt[I])
yy.append(yt[I])
jj+=50
xx = np.array(xx)
yy = np.array(yy)
coefs_m = np.polyfit(ords,meds,5)
#plot(ords, meds - np.polyval(coefs_m,ords),'ro')
#show()
#plot(xx,yy)
coef = np.polyfit(xx,yy,3)
#print 'Resolution coefs',coef
#plot(np.arange(4096),np.polyval(coef,np.arange(4096)),linewidth=2.0)
#show()
All_Pixel_Centers_B = np.array([])
All_Wavelengths_B = np.array([])
All_Orders_B = np.array([])
All_Centroids_B = np.array([])
All_Sigmas_B = np.array([])
All_Intensities_B = np.array([])
meds,ords = [],[]
for order in range(nord_ob1):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_ob_B[order,:]
IV = iv_thar_ob_B[order,:]
wei = np.sqrt( IV )
#bkg = CoralieUtils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig #- bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, \
centroids_B, sigmas, intensities = GLOBALutils.Initial_Wav_Calibration( order_dir\
+'B_order_'+order_s+final_wav, thar_order, order, wei, rmsmax=MRMS_initial,\
minlines=minlines_initial, FixEnds=False,Dump_Argon=dumpargon, Dump_AllLines=True,\
Cheby=use_cheby, porder=porder)
if (order == int(np.around(0.5*nord_ob1))):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, int(np.around(0.5*len(thar_order))), len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers_B = np.append( All_Pixel_Centers_B, pixel_centers )
All_Wavelengths_B = np.append( All_Wavelengths_B, wavelengths )
All_Orders_B = np.append( All_Orders_B, np.zeros( len(pixel_centers) ) + order )
All_Centroids_B = np.append( All_Centroids_B, centroids)
All_Sigmas_B = np.append( All_Sigmas_B, sigmas)
All_Intensities_B = np.append( All_Intensities_B, intensities )
meds.append(GLOBALutils.Cheby_eval(coeffs_pix2wav,0.5*len(thar_order),len(thar_order)))
ords.append(order)
spec_thar_ob[0,counter] = GLOBALutils.Cheby_eval(coeffs_pix2wav,np.arange(len(thar_order)),len(thar_order))
spec_thar_ob[1,counter] = thar_order
counter += 1
pp1 = GLOBALutils.Cheby_eval(coeffs_pix2wav,pixel_centers + sigmas,len(thar_order))
pm1 = GLOBALutils.Cheby_eval(coeffs_pix2wav,pixel_centers - sigmas,len(thar_order))
wavsigmas = .5*(pp1 - pm1)
reses = wavelengths/(wavsigmas*2.355)
jji = 0
vecp,vecm = [],[]
while jji < len(reses):
if jji + 5 < len(reses):
argm = np.argmax(reses[jji:jji+5])
vecp.append(wavelengths[jji+argm])
vecm.append(reses[jji+argm])
jji += 5
vecp,vecm = np.array(vecp),np.array(vecm)
coef_res = np.polyfit(vecp,vecm,2)
#plot(wavelengths,np.polyval(coef_res,wavelengths))
#plot(vecp,vecm,'o')
c_p2w_ob_B[order] = coeffs_pix2wav
#print order, rms_ms/np.sqrt(float(len(wavelengths))), rms_ms, len(residuals)
p0 = np.zeros( npar_wsol_B )
p0[0] = (int(np.around(0.5*nord_ob1))+or0_B) * Global_ZP
#GLOBALutils.get_zero_order_number(ords,meds)
p1_B, G_pix_B, G_ord_B, G_wav_B, II_B, rms_ms_B, G_res_B = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers_B, All_Wavelengths_B, All_Orders_B,\
np.ones(All_Intensities_B.shape), p0, Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob,\
order0=or0_B,ntotal=nord_ob1,npix=len(thar_order),nx=ncoef_x_B,nm=ncoef_m_B)
nhdu = pyfits.PrimaryHDU(spec_thar_ob)
if os.access(wavsol_fits,os.F_OK):
os.system('rm '+wavsol_fits)
nhdu.writeto(wavsol_fits)
#plot(ords,meds,'ro')
#coefs_m = np.polyfit(ords,meds,6)
#plot(ords, meds - np.polyval(coefs_m,ords),'ro')
#show()
All_Pixel_Centers_co_R = np.array([])
All_Wavelengths_co_R = np.array([])
All_Orders_co_R = np.array([])
All_Centroids_co_R = np.array([])
All_Sigmas_co_R = np.array([])
All_Intensities_co_R = np.array([])
meds,ords = [],[]
for order in range(nord_co2):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_co_R[order,:]
IV = iv_thar_co_R[order,:]
wei = np.sqrt( IV )
#bkg = CoralieUtils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig #- bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'R_order_'+order_s+final_wav, thar_order, order, wei, rmsmax=MRMS_initial, minlines=minlines_initial,\
FixEnds=True,Dump_Argon=dumpargon, Dump_AllLines=True, Cheby=use_cheby, porder=porder)
c_p2w_co_R[order] = coeffs_pix2wav
meds.append(GLOBALutils.Cheby_eval(coeffs_pix2wav,0.5*len(thar_order),len(thar_order)))
ords.append(order)
if (order == int(np.around(0.5*nord_co2))):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, int(np.around(0.5*len(thar_order))), len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers_co_R = np.append( All_Pixel_Centers_co_R, pixel_centers )
All_Wavelengths_co_R = np.append( All_Wavelengths_co_R, wavelengths )
All_Orders_co_R = np.append( All_Orders_co_R, np.zeros( len(pixel_centers) ) + order )
All_Centroids_co_R = np.append( All_Centroids_co_R, centroids)
All_Sigmas_co_R = np.append( All_Sigmas_co_R, sigmas)
All_Intensities_co_R = np.append( All_Intensities_co_R, intensities )
#print order, rms_ms/np.sqrt(float(len(wavelengths))), rms_ms, len(residuals)
p0 = np.zeros( npar_wsol_R )
p0[0] = (int(np.around(0.5*nord_co2))+or0_R) * Global_ZP
#GLOBALutils.get_zero_order_number(ords,meds)
p1_co_R, G_pix_co_R, G_ord_co_R, G_wav_co_R, II_co_R, rms_ms_co_R, G_res_co_R = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers_co_R, All_Wavelengths_co_R, All_Orders_co_R,\
np.ones(All_Intensities_co_R.shape), p0, Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob,\
order0=or0_R,ntotal=nord_co2,npix=len(thar_order),nx=ncoef_x_R,nm=ncoef_m_R)
All_Pixel_Centers_co_B = np.array([])
All_Wavelengths_co_B = np.array([])
All_Orders_co_B = np.array([])
All_Centroids_co_B = np.array([])
All_Sigmas_co_B = np.array([])
All_Intensities_co_B = np.array([])
meds,ords = [],[]
for order in range(nord_co1):
order = order + 1
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_co_B[order-1,:]
IV = iv_thar_co_B[order-1,:]
wei = np.sqrt( IV )
#bkg = CoralieUtils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig #- bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals,\
centroids, sigmas, intensities = GLOBALutils.Initial_Wav_Calibration( order_dir+\
'B_order_'+order_s+final_wav, thar_order, order, wei, rmsmax=MRMS_initial, \
minlines=minlines_initial, FixEnds=True,Dump_Argon=dumpargon, Dump_AllLines=True,\
Cheby=use_cheby, porder=porder)
c_p2w_co_B[order-1] = coeffs_pix2wav
meds.append(GLOBALutils.Cheby_eval(coeffs_pix2wav,0.5*len(thar_order),len(thar_order)))
ords.append(order)
if (order == int(np.around(0.5*nord_co1))):
if (use_cheby):
Global_ZP = GLOBALutils.Cheby_eval( coeffs_pix2wav, int(np.around(0.5*len(thar_order))), len(thar_order) )
else:
Global_ZP = scipy.polyval( coeffs_pix2wav, 0.0 )
All_Pixel_Centers_co_B = np.append( All_Pixel_Centers_co_B, pixel_centers )
All_Wavelengths_co_B = np.append( All_Wavelengths_co_B, wavelengths )
All_Orders_co_B = np.append( All_Orders_co_B, np.zeros( len(pixel_centers) ) + order )
All_Centroids_co_B = np.append( All_Centroids_co_B, centroids)
All_Sigmas_co_B = np.append( All_Sigmas_co_B, sigmas)
All_Intensities_co_B = np.append( All_Intensities_co_B, intensities )
#print order, rms_ms/np.sqrt(float(len(wavelengths))), rms_ms, len(residuals)
p0 = np.zeros( npar_wsol_B )
p0[0] = (int(np.around(0.5*nord_co1))+or0_B) * Global_ZP
#GLOBALutils.get_zero_order_number(ords,meds)
p1_co_B, G_pix_co_B, G_ord_co_B, G_wav_co_B, II_co_B, rms_ms_co_B, G_res_co_B = \
GLOBALutils.Fit_Global_Wav_Solution(All_Pixel_Centers_co_B, All_Wavelengths_co_B, All_Orders_co_B,\
np.ones(All_Intensities_co_B.shape), p0, Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob,\
order0=or0_B,ntotal=nord_co1,npix=len(thar_order),nx=ncoef_x_B,nm=ncoef_m_B)
#pdict = {'p2w_ob_R':c_p2w_ob_R, 'p2w_ob_B':c_p2w_ob_B, 'p2w_co_R':c_p2w_co_R, 'p2w_co_B':c_p2w_co_B}
pdict = {'p1_R':p1_R,'p1_B':p1_B,'p1_co_R':p1_co_R,'p1_co_B':p1_co_B,\
'G_pix_R':G_pix_R, 'G_pix_B':G_pix_B,'G_pix_co_R':G_pix_co_R, 'G_pix_co_B':G_pix_co_B,\
'G_ord_R':G_ord_R, 'G_ord_B':G_ord_B,'G_ord_co_R':G_ord_co_R, 'G_ord_co_B':G_ord_co_B,\
'G_wav_R':G_wav_R, 'G_wav_B':G_wav_B,'G_wav_co_R':G_wav_co_R, 'G_wav_co_B':G_wav_co_B,\
'II_R':II_R,'II_B':II_B,'II_co_R':II_co_R,'II_co_B':II_co_B,\
'rms_ms_R':rms_ms_R,'rms_ms_B':rms_ms_B,'rms_ms_co_R':rms_ms_co_R,'rms_ms_co_B':rms_ms_co_B,\
'G_res_R':G_res_R, 'G_res_B':G_res_B,'G_res_co_R':G_res_co_R, 'G_res_co_B':G_res_co_B,\
'All_Centroids_R':All_Centroids_R,'All_Centroids_B':All_Centroids_B,\
'All_Centroids_co_R':All_Centroids_co_R,'All_Centroids_co_B':All_Centroids_co_B,\
'All_Orders_R':All_Orders_R,'All_Orders_B':All_Orders_B,\
'All_Orders_co_R':All_Orders_co_R,'All_Orders_co_B':All_Orders_co_B,\
'All_Sigmas_R':All_Sigmas_R,'All_Sigmas_B':All_Sigmas_B,\
'All_Sigmas_co_R':All_Sigmas_co_R,'All_Sigmas_co_B':All_Sigmas_co_B,\
'mjd':mjd,'npix':len(thar_order)}
pickle.dump( pdict, open( wavsol_pkl, 'w' ) )
else:
print "\t\tUsing previously computed wavelength solution in file",wavsol_pkl
xc_fout_f = dirout+'ThAr_XCor+DeltaL.dat'
if ( (os.access(xc_fout_f,os.F_OK) == False) or (force_tharxc)):
xc_fout = open(xc_fout_f,'w')
for i in range(len(sorted_ThAr_dates)):
index = sorted_ThAr_dates[i]
fsim = ThAr_ref[index]
hthar = pyfits.open( fsim )
mjd, mjd0 = harpsutils.mjd_fromheader( hthar )
pdict1 = pickle.load( open(dirout + fsim.split('/')[-1][:-4]+'wavsolpars.pkl','r' ) )
for ii in range(len(sorted_ThAr_dates)):
index2 = sorted_ThAr_dates[ii]
fsim2 = ThAr_ref[index2]
hthar2 = pyfits.open( fsim2 )
mjd2, mjd02 = harpsutils.mjd_fromheader( hthar2 )
pdict2 = pickle.load( open(dirout + fsim2.split('/')[-1][:-4]+'wavsolpars.pkl','r' ) )
p_shift_R, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(pdict2['G_pix_R'], pdict2['G_wav_R'], pdict2['G_ord_R'],\
np.ones(pdict2['G_wav_R'].shape), pdict1['p1_R'],\
Cheby=True,Inv=True,maxrms=MRMS,minlines=minlines_glob,\
order0=or0_R,ntotal=nord_ob2,npix=pdict2['npix'],nx=ncoef_x_R,nm=ncoef_m_R)
p_shift_B, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(pdict2['G_pix_B'], pdict2['G_wav_B'], pdict2['G_ord_B'],\
np.ones(pdict2['G_wav_B'].shape), pdict1['p1_B'],\
Cheby=True,Inv=True,maxrms=MRMS,minlines=minlines_glob,\
order0=or0_B,ntotal=nord_ob1,npix=pdict2['npix'],nx=ncoef_x_B,nm=ncoef_m_B)
p_shift_co_R, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(pdict2['G_pix_co_R'], pdict2['G_wav_co_R'], pdict2['G_ord_co_R'],\
np.ones(pdict2['G_wav_co_R'].shape), pdict1['p1_co_R'],\
Cheby=True,Inv=True,maxrms=MRMS,minlines=minlines_glob,\
order0=or0_R,ntotal=nord_co2,npix=pdict2['npix'],nx=ncoef_x_R,nm=ncoef_m_R)
p_shift_co_B, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(pdict2['G_pix_co_B'], pdict2['G_wav_co_B'], pdict2['G_ord_co_B'],\
np.ones(pdict2['G_wav_co_B'].shape), pdict1['p1_co_B'],\
Cheby=True,Inv=True,maxrms=MRMS,minlines=minlines_glob,\
order0=or0_B,ntotal=nord_co1,npix=pdict2['npix'],nx=ncoef_x_B,nm=ncoef_m_B)
# write out products
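            # Columns: MJDs of the two ThAr frames, the object- and
            # comparison-fibre shifts for the red and blue chips converted
            # from ppm to m/s, and the two file basenames.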
line_out = "%20.12f %20.12f %12.6f %12.6f %12.6f %12.6f %s %s\n" % (mjd, mjd2,(1e-6*p_shift_R)*299792458.0,(1e-6*p_shift_B)*299792458.0,(1e-6*p_shift_co_R)*299792458.0,(1e-6*p_shift_co_B)*299792458.0,fsim.split('/')[-1][:-4],fsim2.split('/')[-1][:-4])
xc_fout.write(line_out)
#xc_fout.flush()
xc_fout.close()
### start of science frame reductions ###
new_list = []
new_list_obnames = []
new_list_texp = []
new_list_cotypes = []
for i in range(len(sim_sci)):
fsim = sim_sci[i]
obname = obnames[i]
texp = exptimes[i]
co_type = co_types[i]
if (object2do == 'all'):
new_list.append(fsim)
new_list_obnames.append( obname )
new_list_texp.append( texp )
new_list_cotypes.append( co_type )
elif object2do == 'new':
h = pyfits.open(fsim)
fout = 'proc/'+ obname + '_' + h[0].header['DATE-OBS'] + 'sp.fits'
if not os.access(dirout + fout,os.F_OK):
new_list.append(fsim)
new_list_obnames.append( obname )
new_list_texp.append( texp )
new_list_cotypes.append( co_type )
else:
if (obname == object2do):
new_list.append(fsim)
new_list_obnames.append( obname )
new_list_texp.append( texp )
new_list_cotypes.append( co_type )
if os.access(dirin + 'moon_corr.txt', os.F_OK):
fmoon = open(dirin + 'moon_corr.txt','r')
moon_lns = fmoon.readlines()
spec_moon = []
use_moon = []
for line in moon_lns:
spec_moon.append(line.split()[0])
if line.split()[1] == '0':
use_moon.append(False)
else:
use_moon.append(True)
else:
spec_moon = []
use_moon = []
spec_moon = np.array(spec_moon)
use_moon = np.array(use_moon)
print '\n\tThe following targets will be processed:\n'
for nlisti in range(len(new_list)):
print '\t'+new_list_obnames[nlisti]
for nlisti in range(len(new_list)):
fsim = new_list[ nlisti ]
obname = new_list_obnames[nlisti]
TEXP = new_list_texp[nlisti]
cotype = new_list_cotypes[nlisti]
know_moon = False
here_moon = False
if fsim.split('/')[-1] in spec_moon:
I = np.where(fsim.split('/')[-1] == spec_moon)[0]
know_moon = True
here_moon = use_moon[I]
h = pyfits.open(fsim)
print '\n'
print "\t--> Working on image: ", fsim
print "\t\tObject name:",obname
mjd,mjd0 = harpsutils.mjd_fromheader(h)
# Open file, trim, overscan subtract and MasterBias subtract
dataB = h[1].data
dataB = harpsutils.OverscanTrim(dataB)
dataB -= MasterBias[:,:,0]
dataR = h[2].data
dataR = harpsutils.OverscanTrim(dataR)
dataR -= MasterBias[:,:,1]
if cotype != 'WAVE':
spanR,spanB = 20,10
c1_temp = c_ob1
c2_temp = c_ob2
else:
spanR,spanB = 10,7
c1_temp = c_all1
c2_temp = c_all2
bacfile = dirout + 'BACR_' + fsim.split('/')[-1][:-4]+'fits'
if os.access(bacfile,os.F_OK) == False:
CentersR = np.zeros((len(c2_temp),dataR.shape[1]))
for i in range(len(c2_temp)):
CentersR[i,:]=np.polyval(c2_temp[i],np.arange(dataR.shape[1]))
        bacR = GLOBALutils.get_scat(dataR,CentersR,span=spanR)
if (os.access(bacfile,os.F_OK)):
os.remove( bacfile )
hdbac = pyfits.PrimaryHDU( bacR )
hdbac.writeto(bacfile)
else:
bacR = pyfits.getdata(bacfile)
bacfile = dirout + 'BACB_' + fsim.split('/')[-1][:-4]+'fits'
if os.access(bacfile,os.F_OK) == False:
CentersB = np.zeros((len(c1_temp),dataB.shape[1]))
for i in range(len(c1_temp)):
CentersB[i,:]=np.polyval(c1_temp[i],np.arange(dataB.shape[1]))
tcen = CentersB[-1]
CentersB = np.vstack((CentersB,tcen+16.))
        bacB = GLOBALutils.get_scat(dataB,CentersB,span=spanB)
if (os.access(bacfile,os.F_OK)):
os.remove( bacfile )
hdbac = pyfits.PrimaryHDU( bacB )
hdbac.writeto(bacfile)
else:
bacB = pyfits.getdata(bacfile)
#plot(dataB[:,100])
#plot(bacB[:,100])
#show()
#print gfd
dataR = dataR - bacR
dataB = dataB - bacB
ron1,gain1 = h[1].header['HIERARCH ESO DET OUT1 RON'],h[1].header['HIERARCH ESO DET OUT1 GAIN']
ron2,gain2 = h[2].header['HIERARCH ESO DET OUT1 RON'],h[2].header['HIERARCH ESO DET OUT1 GAIN']
halfcounts = h[0].header['HIERARCH ESO INS DET1 TMMEAN']
# Find lambda_bary/lambda_topo using baryc
altitude = h[0].header['HIERARCH ESO TEL GEOELEV']
latitude = h[0].header['HIERARCH ESO TEL GEOLAT']
longitude = h[0].header['HIERARCH ESO TEL GEOLON']
ra = h[0].header['RA']
dec = h[0].header['DEC']
epoch = h[0].header['HIERARCH ESO TEL TARG EQUINOX']
ra2,dec2 = GLOBALutils.getcoords(obname,mjd,filen=reffile)
if ra2 !=0 and dec2 != 0:
ra = ra2
dec = dec2
else:
print 'Using the coordinates found in the image header.'
iers = GLOBALutils.JPLiers( baryc_dir, mjd-999.0, mjd+999.0 )
obsradius, R0 = GLOBALutils.JPLR0( latitude, altitude)
obpos = GLOBALutils.obspos( longitude, obsradius, R0 )
jplephem.set_ephemeris_dir( baryc_dir , ephemeris )
jplephem.set_observer_coordinates( obpos[0], obpos[1], obpos[2] )
res = jplephem.doppler_fraction(ra/15.0, dec, int(mjd), mjd%1, 1, 0.0)
lbary_ltopo = 1.0 + res['frac'][0]
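    # Convert the barycentric/topocentric wavelength ratio into a
    # radial-velocity correction in km/s.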
bcvel_baryc = ( lbary_ltopo - 1.0 ) * 2.99792458E5
print "Barycentric velocity:", bcvel_baryc, mjd
res = jplephem.pulse_delay(ra/15.0, dec, int(mjd), mjd%1, 1, 0.0)
mbjd = mjd + res['delay'][0] / (3600.0 * 24.0)
# Moon Phase Calculations
gobs = ephem.Observer()
gobs.name='Eso3.6'
gobs.lat = rad(latitude) # lat/long in decimal degrees
gobs.long = rad(longitude)
DDATE = h[0].header['DATE-OBS'][:10]
HHOUR = h[0].header['DATE-OBS'][11:]
gobs.date = str(DDATE[:4]) + '-' + str(DDATE[5:7]) + '-' + str(DDATE[8:]) + ' ' + HHOUR[:2] + ':' + HHOUR[3:5] +':' + str(float(HHOUR[6:]) + halfcounts * TEXP )
mephem = ephem.Moon()
mephem.compute(gobs)
Mcoo = jplephem.object_track("Moon", int(mjd), float(mjd%1), 1, 0.0)
Mp = jplephem.barycentric_object_track("Moon", int(mjd), float(mjd%1), 1, 0.0)
Sp = jplephem.barycentric_object_track("Sun", int(mjd), float(mjd%1), 1, 0.0)
res = jplephem.object_doppler("Moon", int(mjd), mjd%1, 1, 0.0)
lunation,moon_state,moonsep,moonvel = GLOBALutils.get_lunar_props(ephem,gobs,Mcoo,Mp,Sp,res,ra,dec)
refvel = bcvel_baryc + moonvel
    print '\t\tRadial velocity of scattered moonlight:',refvel
sorted_indices = np.argsort( np.abs( np.array(ThAr_ref_dates) - mjd ) )
# optimally and simply extract spectra
sci_fits_ob_B = dirout + fsim.split('/')[-1][:-4]+'spec.ob.B.fits.S'
sci_fits_co_B = dirout + fsim.split('/')[-1][:-4]+'spec.co.B.fits.S'
sci_fits_ob_R = dirout + fsim.split('/')[-1][:-4]+'spec.ob.R.fits.S'
sci_fits_co_R = dirout + fsim.split('/')[-1][:-4]+'spec.co.R.fits.S'
#force_sci_extract = True
if ( os.access(sci_fits_ob_B,os.F_OK) == False ) or ( os.access(sci_fits_co_B,os.F_OK) == False ) \
or ( os.access(sci_fits_ob_R,os.F_OK) == False ) or ( os.access(sci_fits_co_R,os.F_OK) == False ) \
or (force_sci_extract):
print "No previous extraction or extraction forced for science file", fsim, "extracting..."
sci_S_ob_B = np.zeros( (nord_ob1,3,dataB.shape[1]) )
sci_S_co_B = np.zeros( (nord_co1,3,dataB.shape[1]) )
sci_S_ob_R = np.zeros( (nord_ob2,3,dataR.shape[1]) )
sci_S_co_R = np.zeros( (nord_co2,3,dataR.shape[1]) )
tpars1,tpars2 = [],[]
sci_S_ob_B = GLOBALutils.optimal_extraction(dataB,P_ob_B,c_ob1,ext_aperture_B,\
ron1,gain1,S_Marsh,NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
sci_S_co_B = GLOBALutils.optimal_extraction(dataB,P_co_B,c_co1,ext_aperture_B,\
ron1,gain1,S_Marsh,100.*NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
sci_S_ob_R = GLOBALutils.optimal_extraction(dataR,P_ob_R,c_ob2,ext_aperture_R,\
ron2,gain2,S_Marsh,NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
sci_S_co_R = GLOBALutils.optimal_extraction(dataR,P_co_R,c_co2,ext_aperture_R,\
ron2,gain2,S_Marsh,100.*NCosmic_Marsh,\
min_extract_col,max_extract_col,npools)
sci_S_ob_B = GLOBALutils.invert(sci_S_ob_B)
sci_S_co_B = GLOBALutils.invert(sci_S_co_B)
sci_S_ob_R = GLOBALutils.invert(sci_S_ob_R)
sci_S_co_R = GLOBALutils.invert(sci_S_co_R)
sci_S_ob_B,sci_S_co_B,sci_S_ob_R,sci_S_co_R = sci_S_ob_B[::-1],sci_S_co_B[::-1],sci_S_ob_R[::-1],sci_S_co_R[::-1]
# save as fits file
if (os.access(sci_fits_ob_B,os.F_OK)):
os.remove( sci_fits_ob_B )
if (os.access(sci_fits_co_B,os.F_OK)):
os.remove( sci_fits_co_B )
if (os.access(sci_fits_ob_R,os.F_OK)):
os.remove( sci_fits_ob_R )
if (os.access(sci_fits_co_R,os.F_OK)):
os.remove( sci_fits_co_R )
hdu = pyfits.PrimaryHDU( sci_S_ob_B )
hdu.writeto( sci_fits_ob_B )
hdu = pyfits.PrimaryHDU( sci_S_co_B )
hdu.writeto( sci_fits_co_B )
hdu = pyfits.PrimaryHDU( sci_S_ob_R )
hdu.writeto( sci_fits_ob_R )
hdu = pyfits.PrimaryHDU( sci_S_co_R )
hdu.writeto( sci_fits_co_R )
else:
print fsim, "has already been extracted, reading in product fits files..."
sci_S_ob_B = pyfits.getdata( sci_fits_ob_B )
sci_S_co_B = pyfits.getdata( sci_fits_co_B )
sci_S_ob_R = pyfits.getdata( sci_fits_ob_R )
sci_S_co_R = pyfits.getdata( sci_fits_co_R )
fout = 'proc/'+ obname + '_' + h[0].header['DATE-OBS'] + 'sp.fits'
    dateobs = h[0].header['DATE-OBS'][:4] + h[0].header['DATE-OBS'][5:7] + h[0].header['DATE-OBS'][8:10] # YYYYMMDD
#Build spectra
if ( os.access(dirout+fout ,os.F_OK) == False ) or (force_spectral_file_build):
# initialize file that will have the spectra
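        # spec planes, as filled below: 0 vacuum wavelength, 1 extracted
        # flux, 2 its inverse variance, 3 flat-fielded flux, 4 its inverse
        # variance, 5 continuum-normalized flux, 6 its inverse variance,
        # 7 the continuum ratio, 8 estimated S/N, and 9/10 the normalized
        # flux and inverse variance corrected for the local
        # pixel-to-wavelength dispersion.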
spec = np.zeros((11, nord_ob2 + nord_ob1, dataB.shape[1]))
hdu = pyfits.PrimaryHDU( spec )
hdu = GLOBALutils.update_header(hdu,'HIERARCH MJD', mjd)
hdu = GLOBALutils.update_header(hdu,'HIERARCH MBJD', mbjd)
hdu = GLOBALutils.update_header(hdu,'HIERARCH SHUTTER START DATE', dateobs )
hdu = GLOBALutils.update_header(hdu,'HIERARCH SHUTTER START UT', h[0].header['UTC'] / 3600.)
hdu = GLOBALutils.update_header(hdu,'HIERARCH TEXP (S)',h[0].header['EXPTIME'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH FLUX WEIGHTED MEAN F ',h[0].header['HIERARCH ESO INS DET1 TMMEAN'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH BARYCENTRIC CORRECTION (KM/S)', bcvel_baryc)
hdu = GLOBALutils.update_header(hdu,'HIERARCH (LAMBDA_BARY / LAMBDA_TOPO)', lbary_ltopo)
hdu = GLOBALutils.update_header(hdu,'HIERARCH TARGET NAME', obname)
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA',h[0].header['RA'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC',h[0].header['DEC'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH RA BARY',ra)
hdu = GLOBALutils.update_header(hdu,'HIERARCH DEC BARY',dec)
hdu = GLOBALutils.update_header(hdu,'HIERARCH EQUINOX',h[0].header['HIERARCH ESO TEL TARG EQUINOX'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS LATITUDE',h[0].header['HIERARCH ESO TEL GEOLAT'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS LONGITUDE',h[0].header['HIERARCH ESO TEL GEOLON'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH OBS ALTITUDE',h[0].header['HIERARCH ESO TEL GEOELEV'])
hdu = GLOBALutils.update_header(hdu,'HIERARCH TARG AIRMASS',h[0].header['HIERARCH ESO TEL AIRM START'])
print '\t\tWavelength calibration:'
        print '\t\t\tComparison fibre is '+ cotype
indice = sorted_indices[0]
thar_fits_ob_R = dirout + ThAr_ref[indice].split('/')[-1][:-4]+'spec.ob.R.fits.S'
thar_fits_co_R = dirout + ThAr_ref[indice].split('/')[-1][:-4]+'spec.co.R.fits.S'
thar_fits_ob_B = dirout + ThAr_ref[indice].split('/')[-1][:-4]+'spec.ob.B.fits.S'
thar_fits_co_B = dirout + ThAr_ref[indice].split('/')[-1][:-4]+'spec.co.B.fits.S'
pkl_wsol = dirout + ThAr_ref[indice].split('/')[-1][:-4]+'wavsolpars.pkl'
print "\t\t\tUnpickling wavelength solution from", pkl_wsol, " ..."
wsol_dict = pickle.load(open(pkl_wsol,'r'))
#cotype = 'SKY'
if cotype == 'WAVE':
# Extract thAr lines from comparison orders
lines_thar_co_R = sci_S_co_R[:,1,:]
iv_thar_co_R = sci_S_co_R[:,2,:]
lines_thar_co_B = sci_S_co_B[:,1,:]
iv_thar_co_B = sci_S_co_B[:,2,:]
Red_Pixel_Centers_co = np.array([])
Red_Wavelengths_co = np.array([])
Red_Orders_co = np.array([])
Red_Centroids_co = np.array([])
Red_Sigmas_co = np.array([])
Red_Intensities_co = np.array([])
for order in range(nord_co2):
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_co_R[order,:]
IV = iv_thar_co_R[order,:]
wei = np.sqrt( IV )
#bkg = CoralieUtils.Lines_mBack(thar_order_orig, IV, thres_rel=3)
thar_order = thar_order_orig #- bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
= GLOBALutils.Initial_Wav_Calibration( order_dir+'R_order_'+order_s+final_wav, thar_order, order, wei, rmsmax=MRMS_initial, minlines=minlines_initial, \
FixEnds=True,Dump_Argon=dumpargon, Dump_AllLines=True, Cheby=use_cheby, porder=porder)
Red_Pixel_Centers_co = np.append( Red_Pixel_Centers_co, pixel_centers )
Red_Wavelengths_co = np.append( Red_Wavelengths_co, wavelengths )
Red_Orders_co = np.append( Red_Orders_co, np.zeros( len(pixel_centers) ) + order )
Red_Centroids_co = np.append( Red_Centroids_co, centroids)
Red_Sigmas_co = np.append( Red_Sigmas_co, sigmas)
Red_Intensities_co = np.append( Red_Intensities_co, intensities )
p1_co_R, G_pix_co_R, G_ord_co_R, G_wav_co_R, II_co_R, rms_ms_co_R, G_res_co_R = \
GLOBALutils.Fit_Global_Wav_Solution(Red_Pixel_Centers_co, Red_Wavelengths_co, Red_Orders_co,\
np.ones(Red_Intensities_co.shape), wsol_dict['p1_co_R'], Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob,\
order0=or0_R,ntotal=nord_co2,npix=len(thar_order),nx=ncoef_x_R,nm=ncoef_m_R)
p_shift_co_R, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(G_pix_co_R, G_wav_co_R, G_ord_co_R,\
np.ones(G_wav_co_R.shape), wsol_dict['p1_co_R'],\
Cheby=True,Inv=True,maxrms=MRMS,minlines=minlines_glob,\
order0=or0_R,ntotal=nord_co2,npix=len(thar_order),nx=ncoef_x_R,nm=ncoef_m_R)
weight_R = (np.sqrt(len(orders)) / rms_ms)**2
Blue_Pixel_Centers_co = np.array([])
Blue_Wavelengths_co = np.array([])
Blue_Orders_co = np.array([])
Blue_Centroids_co = np.array([])
Blue_Sigmas_co = np.array([])
Blue_Intensities_co = np.array([])
for order in range(nord_co1):
order = order + 1
order_s = str(order)
if (order < 10):
order_s = '0'+str(order)
thar_order_orig = lines_thar_co_B[order-1,:]
IV = iv_thar_co_B[order-1,:]
wei = np.sqrt( IV )
thar_order = thar_order_orig #- bkg
coeffs_pix2wav, coeffs_pix2sigma, pixel_centers, wavelengths, rms_ms, residuals, centroids, sigmas, intensities \
            = GLOBALutils.Initial_Wav_Calibration( order_dir+'B_order_'+order_s+final_wav, thar_order, order, wei, rmsmax=MRMS_initial, minlines=minlines_initial, \
FixEnds=True,Dump_Argon=dumpargon, Dump_AllLines=True, Cheby=use_cheby, porder=porder)
Blue_Pixel_Centers_co = np.append( Blue_Pixel_Centers_co, pixel_centers )
Blue_Wavelengths_co = np.append( Blue_Wavelengths_co, wavelengths )
Blue_Orders_co = np.append( Blue_Orders_co, np.zeros( len(pixel_centers) ) + order )
Blue_Centroids_co = np.append( Blue_Centroids_co, centroids)
Blue_Sigmas_co = np.append( Blue_Sigmas_co, sigmas)
Blue_Intensities_co = np.append( Blue_Intensities_co, intensities )
p1_co_B, G_pix_co_B, G_ord_co_B, G_wav_co_B, II_co_B, rms_ms_co_B, G_res_co_B = \
GLOBALutils.Fit_Global_Wav_Solution(Blue_Pixel_Centers_co, Blue_Wavelengths_co, Blue_Orders_co,\
np.ones(Blue_Intensities_co.shape), wsol_dict['p1_co_B'], Cheby=use_cheby,\
maxrms=MRMS, Inv=Inverse_m, minlines=minlines_glob,\
order0=or0_B,ntotal=nord_co1,npix=len(thar_order),nx=ncoef_x_B,nm=ncoef_m_B)
p_shift_co_B, pix_centers, orders, wavelengths, I, rms_ms, residuals = \
GLOBALutils.Global_Wav_Solution_vel_shift(G_pix_co_B, G_wav_co_B, G_ord_co_B,\
np.ones(G_wav_co_B.shape), wsol_dict['p1_co_B'],\
Cheby=True,Inv=True,maxrms=MRMS,minlines=minlines_glob,\
order0=or0_B,ntotal=nord_co1,npix=len(thar_order),nx=ncoef_x_B,nm=ncoef_m_B)
weight_B = (np.sqrt(len(orders)) / rms_ms)**2
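            # Combine the red- and blue-chip comparison shifts with
            # inverse-variance weights (each weight ~ N_lines / rms**2).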
            shift = (p_shift_co_R[0]*weight_R + p_shift_co_B[0]*weight_B) / (weight_R + weight_B)
            print '\t\t\tComparison-fibre shifts (R, B) and weighted mean:',p_shift_co_R,p_shift_co_B,shift
else:
p_shift_co_R = [0.]
p_shift_co_B = [0.]
p_shift = 0.
shift = 0.
good_quality = True
hdu = GLOBALutils.update_header(hdu,'HIERARCH THAR SHIFT_R',p_shift_co_R[0])
hdu = GLOBALutils.update_header(hdu,'HIERARCH THAR SHIFT_B',p_shift_co_B[0])
hdu = GLOBALutils.update_header(hdu,'HIERARCH THAR SHIFT',shift)
hdu = GLOBALutils.update_header(hdu,'HIERARCH THAR SHIFT APPLIED',dosim)
# Apply new wavelength solution including barycentric correction
equis = np.arange( dataB.shape[1] )
for order in range(nord_ob2):
m = order + or0_R
chebs = GLOBALutils.Calculate_chebs(equis, m, Inverse=Inverse_m,order0=or0_R,ntotal=nord_ob2,npix=len(equis),nx=ncoef_x_R,nm=ncoef_m_R)
if dosim:
WavSol = lbary_ltopo * (1.0 + 1.0e-6*shift) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_R'],chebs,ncoef_x_R,ncoef_m_R)
else:
WavSol = lbary_ltopo * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_R'],chebs,ncoef_x_R,ncoef_m_R)
spec[0,order,:] = GLOBALutils.ToVacuum(WavSol)
spec[1,order,:] = sci_S_ob_R[order,1, :]
spec[2,order,:] = sci_S_ob_R[order,2, :]
fn = R_flat_ob[order,1,:]
L = np.where( fn > 0 )[0]
spec[3,order,:][L] = sci_S_ob_R[order,1,:][L] / R_flat_ob[order,1,:][L]
spec[4,order,:][L] = sci_S_ob_R[order,2,:][L] * ( R_flat_ob[order,1,:][L] ** 2 )
for order in range(nord_ob1):
m = order + or0_B
chebs = GLOBALutils.Calculate_chebs(equis, m, Inverse=Inverse_m,order0=or0_B,ntotal=nord_ob1,npix=len(equis),nx=ncoef_x_B,nm=ncoef_m_B)
if dosim:
WavSol = lbary_ltopo * (1.0 + 1.0e-6*shift) * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_B'],chebs,ncoef_x_B,ncoef_m_B)
else:
WavSol = lbary_ltopo * (1.0/m) * GLOBALutils.Joint_Polynomial_Cheby(wsol_dict['p1_B'],chebs,ncoef_x_B,ncoef_m_B)
spec[0,order + nord_ob2,:] = GLOBALutils.ToVacuum(WavSol)
spec[1,order + nord_ob2,:] = sci_S_ob_B[order,1, :]
spec[2,order + nord_ob2,:] = sci_S_ob_B[order,2, :]
fn = B_flat_ob[order,1,:]
L = np.where( fn > 0 )[0]
spec[3,order + nord_ob2,:][L] = sci_S_ob_B[order,1,:][L] / B_flat_ob[order,1,:][L]
spec[4,order + nord_ob2,:][L] = sci_S_ob_B[order,2,:][L] * ( B_flat_ob[order,1,:][L] ** 2 )
ccoefs = GLOBALutils.get_cont(spec[0],spec[3])
for order in range(nord_ob2):
fn = R_flat_ob_n[order,1,:]
L = np.where( (spec[1,order,:] != 0) & (fn > 0) )[0]
spec[5,order,:][L] = spec[3,order,:][L] / np.polyval(ccoefs[order],spec[0,order,:][L])
ratio = np.polyval(ccoefs[order],spec[0,order,:][L])*Rnorms[order]
spec[3,order,:][L] = sci_S_ob_R[order,1,:][L] / R_flat_ob_n[order,1,:][L]
spec[4,order,:][L] = sci_S_ob_R[order,2,:][L] * ( R_flat_ob_n[order,1,:][L] ** 2 )
spec[6,order,:][L] = spec[4,order,:][L] * (ratio ** 2 )
spec[7,order,:][L] = ratio
spec[8,order,:][L] = ratio * R_flat_ob_n[order,1,:][L] / np.sqrt( ratio * R_flat_ob_n[order,1,:][L] / gain2 + (ron2/gain2)**2 )
spl = scipy.interpolate.splrep(np.arange(WavSol.shape[0]), WavSol,k=3)
dlambda_dx = scipy.interpolate.splev(np.arange(WavSol.shape[0]), spl, der=1)
NN = np.average(dlambda_dx)
dlambda_dx /= NN
LL = np.where(spec[5,order,:] > 1 + 10. / scipy.signal.medfilt(spec[8,order,:],21))[0]
spec[5,order,LL] = 1.
spec[9,order,:][L] = spec[5,order,:][L] * (dlambda_dx[L] ** 1)
spec[10,order,:][L] = spec[6,order,:][L] / (dlambda_dx[L] ** 2)
for order in range(nord_ob1):
fn = B_flat_ob_n[order,1,:]
L = np.where( (spec[1,order + nord_ob2,:] != 0) & (fn > 0) )[0]
spec[5,order + nord_ob2,:][L] = spec[3,order + nord_ob2,:][L] / np.polyval(ccoefs[order + nord_ob2],spec[0,order + nord_ob2,:][L])
ratio = np.polyval(ccoefs[order + nord_ob2],spec[0,order + nord_ob2,:][L])*Bnorms[order]
#L = np.where( fn > 0 )
spec[3,order + nord_ob2,:][L] = sci_S_ob_B[order,1,:][L] / B_flat_ob_n[order,1,:][L]
spec[4,order + nord_ob2,:][L] = sci_S_ob_B[order,2,:][L] * ( B_flat_ob_n[order,1,:][L] ** 2 )
spec[6,order + nord_ob2,:][L] = spec[4,order + nord_ob2,:][L] * (ratio ** 2 )
spec[7,order + nord_ob2,:][L] = ratio
spec[8,order + nord_ob2,:][L] = ratio * B_flat_ob_n[order,1,:][L] / np.sqrt( ratio * B_flat_ob_n[order,1,:][L] / gain1 + (ron1/gain1)**2 )
spl = scipy.interpolate.splrep(np.arange(WavSol.shape[0]), WavSol,k=3)
dlambda_dx = scipy.interpolate.splev(np.arange(WavSol.shape[0]), spl, der=1)
NN = np.average(dlambda_dx)
dlambda_dx /= NN
LL = np.where(spec[5,order + nord_ob2,:] > 1 + 20./scipy.signal.medfilt(spec[8,order + nord_ob2,:],21))[0]
spec[5,order + nord_ob2,LL] = 1.
spec[9,order + nord_ob2,:][L] = spec[5,order + nord_ob2,:][L] * (dlambda_dx[L] ** 1)
spec[10,order + nord_ob2,:][L] = spec[6,order + nord_ob2,:][L] / (dlambda_dx[L] ** 2)
JustExtract = False
if (not JustExtract):
if DoClass:
print '\t\tSpectral Analysis:'
query_success,sp_type_query = False,'None'
# spectral analysis
#query_success,sp_type_query = GLOBALutils.simbad_query_obname(obname)
# Now, query SIMBAD by coordinates if above not successful
#if (not query_success):
# query_success,sp_type_query = GLOBALutils.simbad_query_coords('12:00:00','00:00:00')
#print "\t\t\tSpectral type returned by SIMBAD query:",sp_type_query
hdu = GLOBALutils.update_header(hdu,'HIERARCH SIMBAD SPTYP', sp_type_query)
pars_file = dirout + fsim.split('/')[-1][:-4]+'_stellar_pars.txt'
if os.access(pars_file,os.F_OK) == False or force_stellar_pars:
print "\t\t\tEstimating atmospheric parameters:"
Rx = np.around(1./np.sqrt(1./40000.**2 - 1./RESI**2))
spec2 = spec.copy()
for i in range(spec.shape[1]):
IJ = np.where(spec[5,i]!=0.)[0]
spec2[5,i,IJ] = GLOBALutils.convolve(spec[0,i,IJ],spec[5,i,IJ],Rx)
T_eff, logg, Z, vsini, vel0, ccf = correlation.CCF(spec2,model_path=models_path,npools=npools)
line = "%6d %4.1f %4.1f %8.1f %8.1f\n" % (T_eff,logg, Z, vsini, vel0)
f = open(pars_file,'w')
f.write(line)
f.close()
else:
print "\t\t\tAtmospheric parameters loaded from file:"
T_eff, logg, Z, vsini, vel0 = np.loadtxt(pars_file,unpack=True)
print "\t\t\t\tT_eff=",T_eff,"log(g)=",logg,"Z=",Z,"vsin(i)=",vsini,"vel0",vel0
else:
T_eff, logg, Z, vsini, vel0 = -999,-999,-999,-999,-999
# store the parameters measured for this epoch
T_eff_epoch = T_eff
logg_epoch = logg
Z_epoch = Z
vsini_epoch = vsini
vel0_epoch = vel0
hdu = GLOBALutils.update_header(hdu,'HIERARCH TEFF', float(T_eff))
hdu = GLOBALutils.update_header(hdu,'HIERARCH LOGG', float(logg))
hdu = GLOBALutils.update_header(hdu,'HIERARCH Z', Z)
hdu = GLOBALutils.update_header(hdu,'HIERARCH VSINI', vsini)
hdu = GLOBALutils.update_header(hdu,'HIERARCH VEL0', vel0)
print "\t\tRadial Velocity analysis:"
# assign mask
sp_type, mask = GLOBALutils.get_mask_reffile(obname,reffile=reffile,base='../data/xc_masks/')
print "\t\t\tWill use",sp_type,"mask for CCF."
# Read in mask
ml, mh, weight = np.loadtxt(mask,unpack=True)
ml_v = GLOBALutils.ToVacuum( ml )
mh_v = GLOBALutils.ToVacuum( mh )
av_m = 0.5*( ml_v + mh_v )
mask_hw_kms = (GLOBALutils.Constants.c/1e3) * 0.5*(mh_v - ml_v) / av_m
disp = GLOBALutils.get_disp(obname, reffile=reffile)
if disp == 0:
known_sigma = False
if vsini != -999 and vsini != 0.:
disp = vsini
else:
disp = 3.
else:
known_sigma = True
mask_hw_wide = av_m * disp / (GLOBALutils.Constants.c/1.0e3)
ml_v = av_m - mask_hw_wide
mh_v = av_m + mask_hw_wide
print '\t\t\tComputing the CCF...'
cond = True
if sp_type == 'M5':
moon_sig = 4.5
elif sp_type == 'K5':
moon_sig = 4.2
else:
moon_sig = 4.0
while (cond):
# first rough correlation to find the minimum
vels, xc_full, sn, nlines_ccf, W_ccf = \
GLOBALutils.XCor(spec, ml_v, mh_v, weight,\
0, lbary_ltopo, vel_width=300, vel_step=3,\
spec_order=9, iv_order=10, sn_order=8,max_vel_rough=300)
xc_av = GLOBALutils.Average_CCF(xc_full, sn, sn_min=3.0, Simple=True, W=W_ccf)
# Normalize the continuum of the CCF robustly with lowess
yy = scipy.signal.medfilt(xc_av,11)
pred = lowess(yy, vels,frac=0.4,it=10,return_sorted=False)
tck1 = scipy.interpolate.splrep(vels,pred,k=1)
xc_av_orig = xc_av.copy()
xc_av /= pred
vel0_xc = vels[ np.argmin( xc_av ) ]
rvels, rxc_av, rpred, rxc_av_orig, rvel0_xc = \
vels.copy(), xc_av.copy(), pred.copy(),\
xc_av_orig.copy(), vel0_xc
xc_av_rough = xc_av
vels_rough = vels
vel_width = np.maximum( 20.0, 6*disp )
vels, xc_full, sn, nlines_ccf, W_ccf =\
GLOBALutils.XCor(spec, ml_v, mh_v, weight,\
vel0_xc, lbary_ltopo, vel_width=vel_width,\
vel_step=0.1, spec_order=9, iv_order=10, sn_order=8,max_vel_rough=300)
xc_av = GLOBALutils.Average_CCF(xc_full, sn, sn_min=3.0, Simple=True, W=W_ccf)
pred = scipy.interpolate.splev(vels,tck1)
xc_av /= pred
p1,XCmodel,p1gau,XCmodelgau,Ls2 = \
GLOBALutils.XC_Final_Fit( vels, xc_av, sigma_res = 4,\
horder=8, moonv=refvel, moons=moon_sig, moon=False)
moonmatters = False
if (know_moon and here_moon):
moonmatters = True
ismoon = True
confused = False
p1_m,XCmodel_m,p1gau_m,XCmodelgau_m,Ls2_m = GLOBALutils.XC_Final_Fit( vels, xc_av, \
sigma_res = 4, horder=8, moonv = refvel, moons = moon_sig, moon = True)
moon_flag = 1
else:
confused = False
ismoon = False
p1_m,XCmodel_m,p1gau_m,XCmodelgau_m,Ls2_m = p1,XCmodel,p1gau,XCmodelgau,Ls2
moon_flag = 0
SP = GLOBALutils.calc_bss2(vels,xc_av,p1gau)
#SP = bspan[0]
if (not known_sigma):
disp = np.floor(p1gau[2])
if (disp < 3.0):
disp = 3.0
mask_hw_wide = av_m * disp / (GLOBALutils.Constants.c/1.0e3)
ml_v = av_m - mask_hw_wide
mh_v = av_m + mask_hw_wide
known_sigma = True
else:
cond = False
xc_dict = {'vels':vels,'xc_av':xc_av,'XCmodelgau':XCmodelgau,'Ls2':Ls2,'refvel':refvel,\
'rvels':rvels,'rxc_av':rxc_av,'rpred':rpred,'rxc_av_orig':rxc_av_orig,\
'rvel0_xc':rvel0_xc,'xc_full':xc_full, 'p1':p1, 'sn':sn, 'p1gau':p1gau,\
'p1_m':p1_m,'XCmodel_m':XCmodel_m,'p1gau_m':p1gau_m,'Ls2_m':Ls2_m,\
'XCmodelgau_m':XCmodelgau_m}
moon_dict = {'moonmatters':moonmatters,'moon_state':moon_state,'moonsep':moonsep,\
'lunation':lunation,'mephem':mephem,'texp':h[0].header['EXPTIME']}
pkl_xc = dirout + fsim.split('/')[-1][:-4]+obname+'_XC_'+sp_type+'.pkl'
pickle.dump( xc_dict, open( pkl_xc, 'w' ) )
ccf_pdf = dirout + 'proc/' + fsim.split('/')[-1][:-4] + obname + '_XCs_' + sp_type + '.pdf'
if not avoid_plot:
GLOBALutils.plot_CCF(xc_dict,moon_dict,path=ccf_pdf)
SNR_5130 = np.median(spec[8,28,1900:2101] )
airmass = h[0].header['HIERARCH ESO TEL AIRM START']
seeing = h[0].header['HIERARCH ESO TEL AMBI FWHM START']
B,A = -0.00257864,0.07765779
RVerr = B + ( 1.6 + 0.2 * p1gau[2] ) * A / np.round(SNR_5130)
depth_fact = 1. + p1gau[0]/(p1gau[2]*np.sqrt(2*np.pi))
if depth_fact < 0.6:
depth_fact = 0.6
depth_fact = (1 - 0.6) / (1 - depth_fact)
RVerr *= depth_fact
if RVerr < 0.002:
RVerr = .002
B,A = -0.00348879, 0.10220848
BSerr = B + ( 1.6 + 0.2 * p1gau[2] ) * A / np.round(SNR_5130)
if BSerr<0.002:
BSerr = .002
RV = np.around(p1gau_m[1],4)
BS = np.around(SP,4)
RVerr2 = np.around(RVerr,4)
BSerr = np.around(BSerr,4)
print '\t\t\tRV = '+str(RV)+' +- '+str(RVerr2)
print '\t\t\tBS = '+str(BS)+' +- '+str(BSerr)
bjd_out = 2400000.5 + mbjd
T_eff_err = 100
logg_err = 0.5
Z_err = 0.5
vsini_err = 2
XC_min = np.abs(np.around(np.min(XCmodel),2))
SNR_5130 = np.around(SNR_5130)
SNR_5130_R = np.around(SNR_5130*np.sqrt(3.0))
disp_epoch = np.around(p1gau_m[2],1)
hdu = GLOBALutils.update_header(hdu,'RV', RV)
hdu = GLOBALutils.update_header(hdu,'RV_E', RVerr2)
hdu = GLOBALutils.update_header(hdu,'BS', BS)
hdu = GLOBALutils.update_header(hdu,'BS_E', BSerr)
hdu = GLOBALutils.update_header(hdu,'DISP', disp_epoch)
hdu = GLOBALutils.update_header(hdu,'SNR', SNR_5130)
hdu = GLOBALutils.update_header(hdu,'SNR_R', SNR_5130_R)
hdu = GLOBALutils.update_header(hdu,'INST', 'HARPS')
hdu = GLOBALutils.update_header(hdu,'RESOL', RESI)
hdu = GLOBALutils.update_header(hdu,'PIPELINE', 'CERES')
hdu = GLOBALutils.update_header(hdu,'XC_MIN', XC_min)
hdu = GLOBALutils.update_header(hdu,'BJD_OUT', bjd_out)
line_out = "%-15s %18.8f %9.4f %7.4f %9.3f %5.3f harps ceres %8d %6d %5.2f %5.2f %5.1f %4.2f %5.2f %6.1f %4d %s\n"%\
(obname, bjd_out, RV, RVerr2, BS, BSerr, RESI, T_eff_epoch, logg_epoch, Z_epoch, vsini_epoch, XC_min, disp_epoch,\
TEXP, SNR_5130_R, ccf_pdf)
f_res.write(line_out)
if (os.access( dirout + fout,os.F_OK)):
os.remove( dirout + fout)
hdu.writeto( dirout + fout )
else:
print "\t\tReading spectral file from", fout
spec = pyfits.getdata( fout )
f_res.close()
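# --- Illustrative sketch (not part of the original pipeline) ---
# The RV uncertainty above follows an empirical noise model,
#   RVerr = B + (1.6 + 0.2*sigma_CCF) * A / SNR,
# inflated for shallow CCFs and floored at 2 m/s. A minimal standalone
# version of that recipe, assuming the same fitted constants A and B:
def estimate_rv_error(snr_5130, ccf_sigma, ccf_amplitude,
                      A=0.07765779, B=-0.00257864):
    rverr = B + (1.6 + 0.2 * ccf_sigma) * A / np.round(snr_5130)
    depth = 1. + ccf_amplitude / (ccf_sigma * np.sqrt(2 * np.pi))
    if depth < 0.6:
        depth = 0.6
    # same shallow-CCF inflation as above (depth -> 1 is a division hazard)
    rverr *= (1 - 0.6) / (1 - depth)
    return max(rverr, 0.002)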
|
1642849
|
import dominate.tags as dt
import dominate.util as du
from io import BytesIO
from IPython.display import HTML, display
from ipywidgets import Box, Output
from nbconvert.filters.pandoc import convert_pandoc
from ..image import Image
_p = print
def _html(text, color=""):
"""print in html"""
text = convert_pandoc(text, "markdown+tex_math_double_backslash", "html")
if color:
d = dt.div()
d.attributes["style"] = "color: {};".format(color)
d.appendChild(du.raw(text))
else:
d = du.raw(text)
return HTML(str(d))
def print(text, **kwargs):
"""wrapper around printing"""
if not isinstance(text, str):
display(text)
return
display(_html(text, **kwargs))
def hr():
"""horizontal rule"""
return HTML(str(dt.hr()))
def newpage():
"""make a new page. in html, this just does a horizontal rule"""
p = dt.p()
p.attributes["style"] = "page-break-before: always;"
return _html(str(p))
def table(df, title="", footnote=""):
"""helper to display a table"""
ret = ""
if title:
ret += "### {}\n".format(title)
ret += df.to_html()
if footnote:
ret += "\n" + footnote + "\n"
return _html(ret)
def pagenum():
"""display a page number (latex only)"""
# TODO
return "[pagenum]"
def _make(text, h_type, **kwargs):
h = getattr(dt, h_type)(text)
h.attributes.update(**kwargs)
return h
def p(text, **kwargs):
return HTML(str(_make(text, "p", **kwargs)))
def h1(text, **kwargs):
return HTML(str(_make(text, "h1", **kwargs)))
def h2(text, **kwargs):
return HTML(str(_make(text, "h2", **kwargs)))
def h3(text, **kwargs):
return HTML(str(_make(text, "h3", **kwargs)))
def h4(text, **kwargs):
return HTML(str(_make(text, "h4", **kwargs)))
def h5(text, **kwargs):
return HTML(str(_make(text, "h5", **kwargs)))
def _grid(items_and_weights):
d = dt.div()
d.attributes["style"] = "display: flex; flex-direction: row;"
for val, width in items_and_weights:
if isinstance(val, Image):
sd = dt.img()
sd.set_attribute("src", "data:image/png;base64,{}".format(val._repr_png_()))
else:
raw_html = val._repr_html_()
sd = dt.div(du.raw(raw_html))
sd.attributes["style"] = "flex: {};".format(width)
d.appendChild(sd)
return HTML(str(d))
def grid(items_and_weights):
children = []
for val, width in items_and_weights:
out = Output(layout={"flex": str(width)})
children.append(out)
with out:
print(val)
return Box(children, layout={"display": "flex", "flex-direction": "row"})
def plot(fig):
imgdata = BytesIO()
    fig.savefig(imgdata, format="png")  # the Image wrapper below expects PNG bytes
imgdata.seek(0)
return Image(imgdata.read())
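# --- Hypothetical usage sketch (df and fig are assumed, not defined here) ---
#   import pandas as pd
#   df = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
#   print(table(df, title='Demo table', footnote='illustrative data'))
#   print(hr())
#   print(plot(fig))   # embed a matplotlib figure
#   print(newpage())   # page break when printed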
|
1642934
|
import unittest
import numpy as np
import numpy.testing as npt
from scipy.linalg import toeplitz
from doatools.model.array_elements import CustomNonisotropicSensor
from doatools.model.perturbations import LocationErrors, GainErrors, \
PhaseErrors, MutualCoupling
from doatools.model.arrays import GridBasedArrayDesign
from doatools.model.arrays import UniformLinearArray, CoPrimeArray, \
NestedArray, MinimumRedundancyLinearArray, \
UniformCircularArray, UniformRectangularArray
from doatools.model.sources import FarField1DSourcePlacement
class Test1DArrayDesigns(unittest.TestCase):
def setUp(self):
self.wavelength = 1
def test_ula(self):
d0 = 2.
custom_name = 'TestULA'
ula = UniformLinearArray(6, d0, custom_name)
self.assertEqual(ula.size, 6)
self.assertEqual(ula.ndim, 1)
self.assertEqual(ula.name, custom_name)
npt.assert_allclose(ula.d0, np.array([d0]))
npt.assert_allclose(ula.bases, np.array([[d0]]))
npt.assert_array_equal(
ula.element_indices,
np.array([0, 1, 2, 3, 4, 5]).reshape((-1, 1))
)
npt.assert_array_equal(
ula.element_locations,
np.array([0., 2., 4., 6., 8., 10.]).reshape((-1, 1))
)
def test_nested(self):
d0 = 1.
nea = NestedArray(4, 3, d0)
self.assertEqual(nea.n1, 4)
self.assertEqual(nea.n2, 3)
self.assertEqual(nea.size, 7)
self.assertEqual(nea.ndim, 1)
npt.assert_allclose(nea.d0, np.array([d0]))
npt.assert_allclose(nea.bases, np.array([[d0]]))
npt.assert_array_equal(
nea.element_indices,
np.array([0, 1, 2, 3, 4, 9, 14]).reshape((-1, 1))
)
npt.assert_array_equal(
nea.element_locations,
np.array([0., 1., 2., 3., 4., 9., 14.]).reshape((-1, 1))
)
def test_coprime(self):
d0 = self.wavelength / 2
# M
cpa1 = CoPrimeArray(3, 5, d0, 'm')
self.assertEqual(cpa1.coprime_pair, (3, 5))
self.assertEqual(cpa1.mode, 'm')
self.assertEqual(cpa1.size, 7)
self.assertEqual(cpa1.ndim, 1)
npt.assert_array_equal(cpa1.d0, np.array([d0]))
npt.assert_array_equal(cpa1.bases, np.array([[d0]]))
npt.assert_array_equal(
cpa1.element_indices,
np.array([0, 3, 6, 9, 12, 5, 10]).reshape((-1, 1))
)
npt.assert_allclose(
cpa1.element_locations,
np.array([0., 1.5, 3., 4.5, 6., 2.5, 5.]).reshape((-1, 1))
)
# 2M
cpa2 = CoPrimeArray(3, 5, d0, '2m')
self.assertEqual(cpa2.coprime_pair, (3, 5))
self.assertEqual(cpa2.mode, '2m')
self.assertEqual(cpa2.size, 10)
self.assertEqual(cpa2.ndim, 1)
npt.assert_array_equal(cpa2.d0, np.array([d0]))
npt.assert_array_equal(cpa2.bases, np.array([[d0]]))
npt.assert_array_equal(
cpa2.element_indices,
np.array([0, 3, 6, 9, 12, 5, 10, 15, 20, 25]).reshape((-1, 1))
)
npt.assert_allclose(
cpa2.element_locations,
np.array([0., 1.5, 3., 4.5, 6., 2.5, 5., 7.5, 10., 12.5]).reshape((-1, 1))
)
def test_mra(self):
custom_name = 'TestMRA'
d0 = self.wavelength / 2
mra = MinimumRedundancyLinearArray(5, d0, custom_name)
self.assertEqual(mra.size, 5)
self.assertEqual(mra.ndim, 1)
npt.assert_array_equal(mra.d0, np.array([d0]))
npt.assert_array_equal(mra.bases, np.array([[d0]]))
npt.assert_array_equal(
mra.element_indices,
np.array([0, 1, 4, 7, 9]).reshape((-1, 1))
)
npt.assert_allclose(
mra.element_locations,
np.array([0.0, 0.5, 2.0, 3.5, 4.5]).reshape((-1, 1))
)
class Test2DArrayDesigns(unittest.TestCase):
def setUp(self):
self.wavelength = 1
def test_uca(self):
custom_name = 'TestUCA'
n = 4
r = 2.0
uca = UniformCircularArray(n, r, custom_name)
self.assertEqual(uca.size, n)
self.assertEqual(uca.ndim, 2)
self.assertEqual(uca.name, custom_name)
self.assertEqual(uca.radius, r)
locations_expected = np.array([
[2., 0.], [0., 2.], [-2., 0.], [0., -2.]
])
npt.assert_allclose(uca.element_locations, locations_expected, atol=1e-8)
def test_ura(self):
custom_name = 'TestURA'
m, n = 3, 4
indices_expected = np.array([
[0, 0], [0, 1], [0, 2], [0, 3],
[1, 0], [1, 1], [1, 2], [1, 3],
[2, 0], [2, 1], [2, 2], [2, 3]
])
# Square cells
d0 = self.wavelength / 2
ura1 = UniformRectangularArray(m, n, d0, custom_name)
self.assertEqual(ura1.size, m * n)
self.assertEqual(ura1.ndim, 2)
self.assertEqual(ura1.name, custom_name)
self.assertEqual(ura1.shape, (m, n))
npt.assert_allclose(ura1.d0, np.array([d0, d0]))
npt.assert_allclose(ura1.bases, np.eye(2) * d0)
npt.assert_array_equal(ura1.element_indices, indices_expected)
npt.assert_allclose(ura1.element_locations, indices_expected * d0)
# Rectangular cells
d0 = (self.wavelength / 2, self.wavelength / 3)
ura2 = UniformRectangularArray(m, n, d0, custom_name)
self.assertEqual(ura2.size, m * n)
self.assertEqual(ura2.ndim, 2)
self.assertEqual(ura2.name, custom_name)
self.assertEqual(ura2.shape, (m, n))
npt.assert_allclose(ura2.d0, np.array(d0))
npt.assert_allclose(ura2.bases, np.diag(d0))
npt.assert_array_equal(ura2.element_indices, indices_expected)
npt.assert_allclose(
ura2.element_locations,
indices_expected * np.array(d0)
)
class TestGeneralGridBasedArrays(unittest.TestCase):
def test_3d(self):
bases = np.array([
[0., 0.5, 0.],
[1., 0., 0.],
[0., 0., 2.]
])
indices = np.array([
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[1, 1, 1]
])
locations_expected = indices @ bases
array = GridBasedArrayDesign(indices, bases=bases)
self.assertEqual(array.size, indices.shape[0])
self.assertEqual(array.ndim, bases.shape[1])
npt.assert_allclose(array.d0, np.linalg.norm(bases, ord=2, axis=1))
npt.assert_allclose(array.element_indices, indices)
npt.assert_allclose(array.bases, bases)
npt.assert_allclose(array.element_locations, locations_expected)
class TestSteeringMatrix(unittest.TestCase):
def setUp(self):
self.wavelength = 1.0
def test_without_perturbations(self):
cpa = CoPrimeArray(2, 3, self.wavelength / 2)
sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/3, 3))
A, DA = cpa.steering_matrix(sources, self.wavelength, True)
A_expected = np.array([
[ 1.000000+0.000000j, 1.000000+0.000000j, 1.000000+0.000000j],
[ 0.666131+0.745835j, 1.000000+0.000000j, 0.666131-0.745835j],
[-0.112539+0.993647j, 1.000000+0.000000j, -0.112539-0.993647j],
[-0.303263-0.952907j, 1.000000+0.000000j, -0.303263+0.952907j],
[-0.816063+0.577964j, 1.000000+0.000000j, -0.816063-0.577964j],
[ 0.798227+0.602356j, 1.000000+0.000000j, 0.798227-0.602356j]
])
DA_expected = np.array([
[ 0.000000+ 0.000000j, 0.000000+ 0.000000j, 0.000000+ 0.000000j],
[-2.343109+ 2.092712j, 0.000000+ 6.283185j, 2.343109+ 2.092712j],
[-6.243270- 0.707105j, 0.000000+12.566371j, 6.243270- 0.707105j],
[ 4.490467- 1.429095j, 0.000000+ 9.424778j, -4.490467- 1.429095j],
[-5.447178- 7.691209j, 0.000000+18.849556j, 5.447178- 7.691209j],
[-8.515612+11.284673j, 0.000000+28.274334j, 8.515612+11.284673j]
])
npt.assert_allclose(A, A_expected, rtol=1e-6)
npt.assert_allclose(DA, DA_expected, rtol=1e-6)
def test_with_perturbations(self):
pass
def test_custom_nonisotropic_1d(self):
# Sine response for azimuth angles (cosine for broadside angles)
f_sr = lambda r, az, el, pol: np.sin(az)
element = CustomNonisotropicSensor(f_sr)
ula = UniformLinearArray(5, self.wavelength / 2, element=element)
sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 3))
A_expected = np.array([
[ 5.000000e-1+0.000000e+0j, 9.914449e-1+0.000000e+0j, 7.071068e-1+0.000000e+0j],
[-4.563621e-1-2.042881e-1j, 9.092510e-1-3.952538e-1j, -4.282945e-1+5.626401e-1j],
[ 3.330655e-1+3.729174e-1j, 6.762975e-1-7.249721e-1j, -1.882710e-1-6.815820e-1j],
[-1.516317e-1-4.764534e-1j, 3.312097e-1-9.344854e-1j, 6.563659e-1+2.630282e-1j],
[-5.626959e-2+4.968236e-1j, -6.879475e-2-9.890552e-1j, -6.068505e-1+3.629497e-1j]
])
A = ula.steering_matrix(sources, self.wavelength)
npt.assert_allclose(A, A_expected, rtol=1e-6)
def test_custom_vector_sensor_1d(self):
# Each sensor has three outputs with different gains.
gains = [1.0, 0.5, 0.1]
output_size = len(gains)
def f_sr(r, az, el, pol):
# Sine response.
res = np.sin(az)
return np.stack([res * g for g in gains])
element = CustomNonisotropicSensor(f_sr, output_size=output_size)
ula = UniformLinearArray(4, self.wavelength / 2, element=element)
sources = FarField1DSourcePlacement(np.linspace(-np.pi/3, np.pi/4, 3))
A_expected = np.array([
[ 5.000000e-1+0.000000e+0j, 9.914449e-1+0.000000e+0j, 7.071068e-1+0.000000e+0j],
[-4.563621e-1-2.042881e-1j, 9.092510e-1-3.952538e-1j, -4.282945e-1+5.626401e-1j],
[ 3.330655e-1+3.729174e-1j, 6.762975e-1-7.249721e-1j, -1.882710e-1-6.815820e-1j],
[-1.516317e-1-4.764534e-1j, 3.312097e-1-9.344854e-1j, 6.563659e-1+2.630282e-1j],
[ 2.500000e-1+0.000000e+0j, 4.957224e-1+0.000000e+0j, 3.535534e-1+0.000000e+0j],
[-2.281810e-1-1.021441e-1j, 4.546255e-1-1.976269e-1j, -2.141472e-1+2.813200e-1j],
[ 1.665327e-1+1.864587e-1j, 3.381488e-1-3.624861e-1j, -9.413548e-2-3.407910e-1j],
[-7.581586e-2-2.382267e-1j, 1.656049e-1-4.672427e-1j, 3.281829e-1+1.315141e-1j],
[ 5.000000e-2+0.000000e+0j, 9.914449e-2+0.000000e+0j, 7.071068e-2+0.000000e+0j],
[-4.563621e-2-2.042881e-2j, 9.092510e-2-3.952538e-2j, -4.282945e-2+5.626401e-2j],
[ 3.330655e-2+3.729174e-2j, 6.762975e-2-7.249721e-2j, -1.882710e-2-6.815820e-2j],
[-1.516317e-2-4.764534e-2j, 3.312097e-2-9.344854e-2j, 6.563659e-2+2.630282e-2j]
])
A = ula.steering_matrix(sources, self.wavelength)
npt.assert_allclose(A, A_expected, rtol=1e-6)
class TestArrayPerturbations(unittest.TestCase):
def setUp(self):
self.wavelength = 1
def test_array_perturbations(self):
d0 = self.wavelength / 2
ula = UniformLinearArray(5, d0)
ptype2str = {
LocationErrors: 'location_errors',
GainErrors: 'gain_errors',
PhaseErrors: 'phase_errors',
MutualCoupling: 'mutual_coupling'
}
str2ptype = {v: k for k, v in ptype2str.items()}
# No perturbations yet.
self.assertFalse(ula.is_perturbed)
for ptype in ptype2str.keys():
self.assertFalse(ula.has_perturbation(ptype))
# Now we add perturbations.
gain_errors = np.random.uniform(-0.5, 0.5, (ula.size,))
phase_errors = np.random.uniform(-np.pi, np.pi, (ula.size,))
mutual_coupling = toeplitz([1.0, 0.4+0.2j, 0.0, 0.0, 0.0])
perturbed_name = 'PerturbedULA'
# Test for 1D, 2D, 3D location errors.
for ndim in [1, 2, 3]:
location_errors = np.random.uniform(-0.1 * d0, 0.1 * d0, (ula.size, ndim))
perturb_defs = {
'gain_errors': (gain_errors, True),
'phase_errors': (phase_errors, True),
'location_errors': (location_errors, False),
'mutual_coupling': (mutual_coupling, True)
}
ula_perturbed = ula.get_perturbed_copy(perturb_defs, perturbed_name)
self.assertEqual(ula_perturbed.name, perturbed_name)
self.assertTrue(ula_perturbed.is_perturbed)
for k, v in perturb_defs.items():
cur_ptype = str2ptype[k]
self.assertEqual(ula_perturbed.has_perturbation(cur_ptype), True)
npt.assert_allclose(ula_perturbed.get_perturbation_params(cur_ptype), v[0])
self.assertEqual(ula_perturbed.is_perturbation_known(cur_ptype), v[1])
# Verify location error calculations.
self.assertEqual(ula_perturbed.actual_ndim, ndim)
npt.assert_allclose(
ula_perturbed.actual_element_locations,
np.pad(ula.element_locations, ((0, 0), (0, ndim - 1)), 'constant') + location_errors
)
# The `perturbation` property should return a list of perturbations.
perturbs_actual = ula_perturbed.perturbations
self.assertEqual(len(perturbs_actual), len(perturb_defs))
for perturb in perturbs_actual:
params_expected, known_expected = perturb_defs[ptype2str[perturb.__class__]]
npt.assert_allclose(perturb.params, params_expected)
self.assertEqual(perturb.is_known, known_expected)
# Perturbation-free copies should not have perturbations.
self.assertFalse(ula_perturbed.get_perturbation_free_copy().is_perturbed)
def test_perturbation_updates(self):
d0 = self.wavelength / 2
ula = UniformLinearArray(5, d0)
gain_errors = np.random.uniform(-0.5, 0.5, (ula.size,))
ula_perturbed = ula.get_perturbed_copy([
GainErrors(gain_errors, True)
])
for known in [False, True, True, False]:
phase_errors = np.random.uniform(-np.pi, np.pi, (ula.size, ))
ula_perturbed = ula_perturbed.get_perturbed_copy([
PhaseErrors(phase_errors, known)
])
# The gain errors should remain there.
self.assertTrue(ula_perturbed.has_perturbation(GainErrors))
self.assertTrue(ula_perturbed.is_perturbation_known(GainErrors))
npt.assert_allclose(
ula_perturbed.get_perturbation_params(GainErrors),
gain_errors
)
# The phase errors should be updated.
self.assertTrue(ula_perturbed.has_perturbation(PhaseErrors))
self.assertEqual(ula_perturbed.is_perturbation_known(PhaseErrors), known)
npt.assert_allclose(
ula_perturbed.get_perturbation_params(PhaseErrors),
phase_errors
)
if __name__ == '__main__':
unittest.main()
|
1642939
|
import collections
import datetime
import pytz
from django.contrib.auth.decorators import user_passes_test
from django.shortcuts import render
from standup.status.models import Status, Team, StandupUser
def require_superuser(fun):
def authorize_user(user):
return user.is_active and user.is_superuser
return user_passes_test(authorize_user)(fun)
@require_superuser
def errormenow_view(request):
# This is an intentional error designed to kick up the error page because
# otherwise it's difficult to test.
1 / 0 # noqa
@require_superuser
def statistics_view(request):
"""Show health statistics for the system
.. Note::
This is an "admin" view, so it uses Django templates.
"""
hours_24 = datetime.datetime.now(tz=pytz.UTC) - datetime.timedelta(hours=24)
week = datetime.datetime.now(tz=pytz.UTC) - datetime.timedelta(days=7)
groups = collections.OrderedDict()
groups['Standup users'] = collections.OrderedDict([
('Team count', Team.objects.count()),
('User count', StandupUser.objects.count()),
('New users in last 24 hours', StandupUser.objects.filter(user__date_joined__gte=hours_24).count()),
('Active users (posted in last week)',
StandupUser.objects.filter(id__in=Status.objects.filter(created__gte=week).values('user__id')).count()),
])
groups['Standup status'] = collections.OrderedDict([
('Status count', Status.objects.count()),
('Status in last 24 hours', Status.objects.filter(created__gte=hours_24).count()),
('Status in last week', Status.objects.filter(created__gte=week).count()),
])
context = {
'title': 'Site statistics',
'statsitems': groups
}
return render(request, 'admin/statistics.html', context)
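# Hypothetical URL wiring for the views above (module path assumed, not
# part of the original file):
#
#   from django.urls import path
#   from . import views
#
#   urlpatterns = [
#       path('statistics/', views.statistics_view),
#       path('errormenow/', views.errormenow_view),
#   ]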
|
1642940
|
import numpy as np
import copy
import cv2
from parameter import *
class BoundBox:
def __init__(self, class_num):
self.x, self.y, self.w, self.h, self.c = 0., 0., 0., 0., 0.
self.probs = np.zeros((class_num,))
def iou(self, box):
intersection = self.intersect(box)
union = self.w * self.h + box.w * box.h - intersection
return intersection / union
def intersect(self, box):
width = self.__overlap([self.x - self.w / 2, self.x + self.w / 2], [box.x - box.w / 2, box.x + box.w / 2])
height = self.__overlap([self.y - self.h / 2, self.y + self.h / 2], [box.y - box.h / 2, box.y + box.h / 2])
return width * height
def __overlap(self, interval_a, interval_b):
x1, x2 = interval_a
x3, x4 = interval_b
if x3 < x1:
if x4 < x1:
return 0
else:
return min(x2, x4) - x1
else:
if x2 < x3:
return 0
else:
                return min(x2, x4) - x3  # overlap starts at x3 when x1 <= x3
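# Quick sanity sketch (not in the original): two unit boxes offset by half
# a width intersect over area 0.5, giving IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
#   a = BoundBox(1); a.x, a.y, a.w, a.h = 0.0, 0.0, 1.0, 1.0
#   b = BoundBox(1); b.x, b.y, b.w, b.h = 0.5, 0.0, 1.0, 1.0
#   a.iou(b)  # -> 0.333...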
class WeightReader:
def __init__(self, weight_file):
self.offset = 4
self.all_weights = np.fromfile(weight_file, dtype='float32')
def read_bytes(self, size):
self.offset = self.offset + size
return self.all_weights[self.offset - size:self.offset]
def reset(self):
self.offset = 4
def interpret_netout(image, netout):
boxes = []
# interpret the output by the network
for row in range(GRID_H):
for col in range(GRID_W):
for b in range(BOX):
box = BoundBox(CLASS)
# first 5 weights for x, y, w, h and confidence
box.x, box.y, box.w, box.h, box.c = netout[row, col, b, :5]
box.x = (col + sigmoid(box.x)) / GRID_W
box.y = (row + sigmoid(box.y)) / GRID_H
box.w = ANCHORS[2 * b + 0] * np.exp(box.w) / GRID_W
box.h = ANCHORS[2 * b + 1] * np.exp(box.h) / GRID_H
box.c = sigmoid(box.c)
                    # remaining CLASS weights are the per-class likelihoods
classes = netout[row, col, b, 5:]
box.probs = softmax(classes) * box.c
box.probs *= box.probs > THRESHOLD
boxes.append(box)
# suppress non-maximal boxes
for c in range(CLASS):
sorted_indices = list(reversed(np.argsort([box.probs[c] for box in boxes])))
for i in range(len(sorted_indices)):
index_i = sorted_indices[i]
if boxes[index_i].probs[c] == 0:
continue
else:
for j in range(i + 1, len(sorted_indices)):
index_j = sorted_indices[j]
if boxes[index_i].iou(boxes[index_j]) >= 0.4:
boxes[index_j].probs[c] = 0
# draw the boxes using a threshold
mark = []
for box in boxes:
max_indx = np.argmax(box.probs)
max_prob = box.probs[max_indx]
thresh = THRESHOLD
#if LABELS[max_indx] == 'traffic_light':
# thresh = 0.6
if max_prob > thresh:
xmin = int((box.x - box.w / 2) * image.shape[1])
xmax = int((box.x + box.w / 2) * image.shape[1])
ymin = int((box.y - box.h / 2) * image.shape[0])
ymax = int((box.y + box.h / 2) * image.shape[0])
#if LABELS[max_indx] == 'traffic_light':
# ymin = int(ymax * 0.7)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), COLORS[max_indx], 5)
cv2.putText(image, LABELS[max_indx]+" "+str(round(max_prob,2)), (xmin, ymin - 12), 0, 1e-3 * image.shape[0], (0, 255, 0), 2)
mark.append({'label' : LABELS[max_indx], 'prob' : max_prob, 'xmin' : xmin,
'ymin' : ymin, 'xmax' : xmax, 'ymax' : ymax})
return image, mark
def parse_annotation(ann_dir):
    with open(ann_dir, 'r') as f:
        f_content = f.read().split('\n')
all_img = []
current = ""
for ann in f_content:
img_data = ann.split(' ')
if img_data == ['']:
break
file_name, width, height, xmin, ymin, xmax, ymax, label = img_data
        if current != file_name:
            img = {'height': float(height), 'width': float(width), 'object': [], 'filename': file_name}
current = file_name
all_img.append(img)
img['object'].append({'xmin': float(xmin), 'ymin': float(ymin),
'name': label, 'xmax': float(xmax),
'ymax': float(ymax)})
return all_img
def aug_img(train_instance):
path = train_instance['filename']
all_obj = copy.deepcopy(train_instance['object'][:])
img = cv2.imread(img_dir + path)
h, w, c = img.shape
# scale the image
scale = np.random.uniform() / 10. + 1.
img = cv2.resize(img, (0, 0), fx=scale, fy=scale)
# translate the image
max_offx = (scale - 1.) * w
max_offy = (scale - 1.) * h
offx = int(np.random.uniform() * max_offx)
offy = int(np.random.uniform() * max_offy)
img = img[offy: (offy + h), offx: (offx + w)]
# flip the image
flip = np.random.binomial(1, .5)
if flip > 0.5: img = cv2.flip(img, 1)
# re-color
t = [np.random.uniform()]
t += [np.random.uniform()]
t += [np.random.uniform()]
t = np.array(t)
img = img * (1 + t)
img = img / (255. * 2.)
# resize the image to standard size
img = cv2.resize(img, (NORM_H, NORM_W))
img = img[:, :, ::-1]
# fix object's position and size
for obj in all_obj:
for attr in ['xmin', 'xmax']:
obj[attr] = int(obj[attr] * scale - offx)
obj[attr] = int(obj[attr] * float(NORM_W) / w)
obj[attr] = max(min(obj[attr], NORM_W), 0)
for attr in ['ymin', 'ymax']:
obj[attr] = int(obj[attr] * scale - offy)
obj[attr] = int(obj[attr] * float(NORM_H) / h)
obj[attr] = max(min(obj[attr], NORM_H), 0)
if flip > 0.5:
xmin = obj['xmin']
obj['xmin'] = NORM_W - obj['xmax']
obj['xmax'] = NORM_W - xmin
return img, all_obj
def data_gen(all_img, batch_size):
num_img = len(all_img)
shuffled_indices = np.random.permutation(np.arange(num_img))
l_bound = 0
r_bound = batch_size if batch_size < num_img else num_img
while True:
if l_bound == r_bound:
l_bound = 0
r_bound = batch_size if batch_size < num_img else num_img
shuffled_indices = np.random.permutation(np.arange(num_img))
batch_size = r_bound - l_bound
currt_inst = 0
x_batch = np.zeros((batch_size, NORM_W, NORM_H, 3))
y_batch = np.zeros((batch_size, GRID_W, GRID_H, BOX, 5 + CLASS))
for index in shuffled_indices[l_bound:r_bound]:
train_instance = all_img[index]
# augment input image and fix object's position and size
img, all_obj = aug_img(train_instance)
# for obj in all_obj:
# cv2.rectangle(img[:,:,::-1], (obj['xmin'],obj['ymin']), (obj['xmax'],obj['ymax']), (1,1,0), 3)
# plt.imshow(img); plt.show()
# construct output from object's position and size
for obj in all_obj:
box = []
center_x = .5 * (obj['xmin'] + obj['xmax']) # xmin, xmax
center_x = center_x / (float(NORM_W) / GRID_W)
center_y = .5 * (obj['ymin'] + obj['ymax']) # ymin, ymax
center_y = center_y / (float(NORM_H) / GRID_H)
grid_x = int(np.floor(center_x))
grid_y = int(np.floor(center_y))
if grid_x < GRID_W and grid_y < GRID_H:
obj_indx = LABELS.index(obj['name'])
box = [obj['xmin'], obj['ymin'], obj['xmax'], obj['ymax']]
y_batch[currt_inst, grid_y, grid_x, :, 0:4] = BOX * [box]
y_batch[currt_inst, grid_y, grid_x, :, 4] = BOX * [1.]
y_batch[currt_inst, grid_y, grid_x, :, 5:] = BOX * [[0.] * CLASS]
y_batch[currt_inst, grid_y, grid_x, :, 5 + obj_indx] = 1.0
# concatenate batch input from the image
x_batch[currt_inst] = img
currt_inst += 1
del img, all_obj
yield x_batch, y_batch
l_bound = r_bound
r_bound = r_bound + batch_size
if r_bound > num_img: r_bound = num_img
def sigmoid(x):
return 1. / (1. + np.exp(-x))
def softmax(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
def Rotate(src, degrees):
    if degrees == 90:
        dst = cv2.transpose(src)
        dst = cv2.flip(dst, 1)
    elif degrees == 180:
        dst = cv2.flip(src, -1)
    elif degrees == 270:
        dst = cv2.transpose(src)
        dst = cv2.flip(dst, 0)
    else:
        dst = src  # unsupported angle: return the input unchanged
    return dst
def get_Object(image, mark, Check):
label, prob, xmin, ymin, xmax, ymax = mark['label'], mark['prob'], mark['xmin'], \
mark['ymin'], mark['xmax'], mark['ymax']
#print("ymax : ",ymax)
#print("label : ", label, "prob : ", prob, "xmin : ", xmin, "ymin : ", ymin, "xmax : ", xmax, "ymax : ", ymax)
try:
if label == 'traffic_light':
Object = image[ymin:ymax, xmin:xmax, :]
b, g, r = 0, 0, 0
for y in range(ymax - ymin):
for x in range(xmax - xmin):
try:
b += Object[y, x, 0]
g += Object[y, x, 1]
r += Object[y, x, 2]
except:
continue
h, s, v = rgb2hsv(r,g,b)
if h < 120:
label = "red"
print("red ", h)
elif h >= 120:
label = "green"
print("green ", h)
            # when the traffic light is detected
Check[label][2] = True
Check[label][0] += 1
Check[label][1] = 0
except:
print("error in color extracting")
return None, 0, 0, 0, 0
return label, int(xmin), int(xmax), int(ymin), int(ymax)
def ccw(line, p2):  # counter-clockwise orientation test
p0 = [line[0], line[1]]
p1 = [line[2], line[3]]
    dx1 = p1[0] - p0[0]
    dy1 = p1[1] - p0[1]
    dx2 = p2[0] - p0[0]
    dy2 = p2[1] - p0[1]
if (dx1 * dy2 > dy1 * dx2):
return 1 #right
if (dx1 * dy2 < dy1 * dx2) :
return -1 #left
return 0
def rgb2hsv(r, g, b):
r, g, b = r/255.0, g/255.0, b/255.0
mx = max(r, g, b)
mn = min(r, g, b)
df = mx-mn
if mx == mn:
h = 0
elif mx == r:
h = (60 * ((g-b)/df) + 360) % 360
elif mx == g:
h = (60 * ((b-r)/df) + 120) % 360
elif mx == b:
h = (60 * ((r-g)/df) + 240) % 360
if mx == 0:
s = 0
else:
s = df/mx
v = mx
return h, s, v
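# Worked examples (values follow from the formulas above):
#   rgb2hsv(255, 0, 0) -> (0.0, 1.0, 1.0)    # pure red
#   rgb2hsv(0, 255, 0) -> (120.0, 1.0, 1.0)  # pure green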
|
1642949
|
from setuptools import setup
from distutils.core import Extension
from glob import glob
SOURCES = glob('source/*.c') + ['platforms/python/m3module.c']
setup(
name='wasm3',
version='0.0.1',
ext_modules=[
Extension('m3', sources=SOURCES, include_dirs=['source'],
extra_compile_args=['-g', '-O0'])]
)
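# Hypothetical local build/run (assumes a C toolchain and CPython headers):
#   python setup.py build_ext --inplace
#   python -c "import m3; print(m3)"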
|
1642959
|
import numpy as np
from collections import OrderedDict
from misc.math_utils import find
from osu.local.beatmap.beatmap import Beatmap
from osu.local.hitobject.hitobject import Hitobject
from osu.local.hitobject.std.std import Std
from osu.local.hitobject.taiko.taiko import Taiko
from osu.local.hitobject.catch.catch import Catch
from osu.local.hitobject.mania.mania import Mania
from osu.local.hitobject.std.std_singlenote_io import StdSingleNoteIO
from osu.local.hitobject.std.std_holdnote_io import StdHoldNoteIO
from osu.local.hitobject.std.std_spinner_io import StdSpinnerIO
from osu.local.hitobject.taiko.taiko_singlenote_hitobject import TaikoSingleNoteHitobject
from osu.local.hitobject.taiko.taiko_holdnote_hitobject import TaikoHoldNoteHitobject
from osu.local.hitobject.taiko.taiko_spinner_hitobject import TaikoSpinnerHitobject
from osu.local.hitobject.catch.catch_singlenote_hitobject import CatchSingleNoteHitobject
from osu.local.hitobject.catch.catch_holdnote_hitobject import CatchHoldNoteHitobject
from osu.local.hitobject.catch.catch_spinner_hitobject import CatchSpinnerHitobject
from osu.local.hitobject.mania.mania_singlenote_io import ManiaSingleNoteIO
from osu.local.hitobject.mania.mania_holdnote_io import ManiaHoldNoteIO
'''
Handles beatmap loading

Input:
    load_beatmap - load the specified beatmap

Output:
    metadata     - information about the beatmap
    hitobjects   - list of hitobjects present in the map
    timingpoints - list of timing points present in the map
'''
class BeatmapIO():
class Section():
SECTION_NONE = 0
SECTION_GENERAL = 1
SECTION_EDITOR = 2
SECTION_METADATA = 3
SECTION_DIFFICULTY = 4
SECTION_EVENTS = 5
SECTION_TIMINGPOINTS = 6
SECTION_COLOURS = 7
SECTION_HITOBJECTS = 8
@staticmethod
def init():
BeatmapIO.SECTION_MAP = {
BeatmapIO.Section.SECTION_GENERAL : BeatmapIO.__parse_general_section,
BeatmapIO.Section.SECTION_EDITOR : BeatmapIO.__parse_editor_section,
BeatmapIO.Section.SECTION_METADATA : BeatmapIO.__parse_metadata_section,
BeatmapIO.Section.SECTION_DIFFICULTY : BeatmapIO.__parse_difficulty_section,
BeatmapIO.Section.SECTION_EVENTS : BeatmapIO.__parse_events_section,
BeatmapIO.Section.SECTION_TIMINGPOINTS : BeatmapIO.__parse_timingpoints_section,
BeatmapIO.Section.SECTION_COLOURS : BeatmapIO.__parse_colour_section,
BeatmapIO.Section.SECTION_HITOBJECTS : BeatmapIO.__parse_hitobjects_section
}
"""
Opens a beatmap file and reads it
Args:
filepath: (string) filepath to the beatmap file to load
"""
@staticmethod
def open_beatmap(filepath=None):
with open(filepath, 'rt', encoding='utf-8') as beatmap_file:
beatmap = BeatmapIO.load_beatmap(beatmap_file)
return beatmap
"""
Loads beatmap data
Args:
        beatmap_data: (file object) the opened beatmap file to parse
"""
@staticmethod
def load_beatmap(beatmap_data):
beatmap = Beatmap()
BeatmapIO.__parse_beatmap_data(beatmap_data, beatmap)
BeatmapIO.__process_timing_points(beatmap)
        if beatmap.gamemode == Beatmap.GAMEMODE_OSU or beatmap.gamemode is None:
BeatmapIO.__process_slider_timings(beatmap)
BeatmapIO.__process_hitobject_end_times(beatmap)
BeatmapIO.__process_slider_tick_times(beatmap)
if beatmap.gamemode == Beatmap.GAMEMODE_MANIA:
BeatmapIO.__process_columns(beatmap)
BeatmapIO.__validate(beatmap)
beatmap.set_cs_val(beatmap.difficulty.cs)
beatmap.set_ar_val(beatmap.difficulty.ar)
beatmap.set_od_val(beatmap.difficulty.od)
return beatmap
"""
Saves beatmap file data
Args:
filepath: (string) what to save the beatmap as
"""
@staticmethod
def save_beatmap(beatmap_data, filepath):
with open(filepath, 'wt', encoding='utf-8') as f:
f.write(beatmap_data)
"""
Returns:
MD5 checksum of the beatmap file
"""
@staticmethod
def get_md5(beatmap):
pass
@staticmethod
def __process_hitobject_end_times(beatmap):
beatmap.end_times = {}
for i in range(len(beatmap.hitobjects)):
if not beatmap.hitobjects[i].is_hitobject_type(Hitobject.CIRCLE):
beatmap.end_times[beatmap.hitobjects[i].end_time] = i
else:
beatmap.end_times[beatmap.hitobjects[i].time] = i
beatmap.end_times = OrderedDict(sorted(beatmap.end_times.items(), key=lambda x: x[0]))
# Validates beatmap data
@staticmethod
def __validate(beatmap):
        if beatmap.difficulty.ar is None:
            beatmap.difficulty.ar = beatmap.difficulty.od
        if beatmap.difficulty.hp is None:
            beatmap.difficulty.hp = beatmap.difficulty.od
        if beatmap.gamemode is None:
            beatmap.gamemode = Beatmap.GAMEMODE_OSU
@staticmethod
def __parse_beatmap_data(beatmap_data, beatmap):
BeatmapIO.__parse_beatmap_file_format(beatmap_data, beatmap)
BeatmapIO.__parse_beatmap_content(beatmap_data, beatmap)
beatmap.metadata.name = beatmap.metadata.artist + ' - ' + beatmap.metadata.title + ' (' + beatmap.metadata.creator + ') ' + '[' + beatmap.metadata.version + ']'
@staticmethod
def __parse_beatmap_file_format(beatmap_data, beatmap):
line = beatmap_data.readline()
data = line.split('osu file format v')
try: beatmap.metadata.beatmap_format = int(data[1])
except: return
@staticmethod
def __parse_beatmap_content(beatmap_data, beatmap):
if beatmap.metadata.beatmap_format == -1: return
section = BeatmapIO.Section.SECTION_NONE
line = ''
while True:
line = beatmap_data.readline()
if line.strip() == '[General]': section = BeatmapIO.Section.SECTION_GENERAL
elif line.strip() == '[Editor]': section = BeatmapIO.Section.SECTION_EDITOR
elif line.strip() == '[Metadata]': section = BeatmapIO.Section.SECTION_METADATA
elif line.strip() == '[Difficulty]': section = BeatmapIO.Section.SECTION_DIFFICULTY
elif line.strip() == '[Events]': section = BeatmapIO.Section.SECTION_EVENTS
elif line.strip() == '[TimingPoints]': section = BeatmapIO.Section.SECTION_TIMINGPOINTS
elif line.strip() == '[Colours]': section = BeatmapIO.Section.SECTION_COLOURS
elif line.strip() == '[HitObjects]': section = BeatmapIO.Section.SECTION_HITOBJECTS
elif line == '':
return
else:
BeatmapIO.__parse_section(section, line, beatmap)
@staticmethod
def __parse_section(section, line, beatmap):
if section != BeatmapIO.Section.SECTION_NONE:
BeatmapIO.SECTION_MAP[section](line, beatmap)
@staticmethod
def __parse_general_section(line, beatmap):
data = line.split(':', 1)
if len(data) < 2: return
data[0] = data[0].strip()
if data[0] == 'PreviewTime':
# ignore
return
if data[0] == 'Countdown':
# ignore
return
if data[0] == 'SampleSet':
# ignore
return
if data[0] == 'StackLeniency':
# ignore
return
if data[0] == 'Mode':
beatmap.gamemode = int(data[1])
return
if data[0] == 'LetterboxInBreaks':
# ignore
return
if data[0] == 'SpecialStyle':
# ignore
return
if data[0] == 'WidescreenStoryboard':
# ignore
return
@staticmethod
def __parse_editor_section(line, beatmap):
data = line.split(':', 1)
if len(data) < 2: return
if data[0] == 'DistanceSpacing':
# ignore
return
if data[0] == 'BeatDivisor':
# ignore
return
if data[0] == 'GridSize':
# ignore
return
if data[0] == 'TimelineZoom':
# ignore
return
@staticmethod
def __parse_metadata_section(line, beatmap):
data = line.split(':', 1)
if len(data) < 2: return
data[0] = data[0].strip()
if data[0] == 'Title':
beatmap.metadata.title = data[1].strip()
return
if data[0] == 'TitleUnicode':
# ignore
return
if data[0] == 'Artist':
beatmap.metadata.artist = data[1].strip()
return
if data[0] == 'ArtistUnicode':
# ignore
return
if data[0] == 'Creator':
beatmap.metadata.creator = data[1].strip()
return
if data[0] == 'Version':
beatmap.metadata.version = data[1].strip()
return
if data[0] == 'Source':
# ignore
return
if data[0] == 'Tags':
# ignore
return
if data[0] == 'BeatmapID':
beatmap.metadata.beatmap_id = data[1].strip()
return
if data[0] == 'BeatmapSetID':
beatmap.metadata.beatmapset_id = data[1].strip()
return
@staticmethod
def __parse_difficulty_section(line, beatmap):
data = line.split(':', 1)
if len(data) < 2: return
data[0] = data[0].strip()
if data[0] == 'HPDrainRate':
beatmap.difficulty.hp = float(data[1])
return
if data[0] == 'CircleSize':
beatmap.difficulty.cs = float(data[1])
return
if data[0] == 'OverallDifficulty':
beatmap.difficulty.od = float(data[1])
return
if data[0] == 'ApproachRate':
beatmap.difficulty.ar = float(data[1])
return
if data[0] == 'SliderMultiplier':
beatmap.difficulty.sm = float(data[1])
return
if data[0] == 'SliderTickRate':
beatmap.difficulty.st = float(data[1])
return
@staticmethod
def __parse_events_section(line, beatmap):
# ignore
return
@staticmethod
def __parse_timingpoints_section(line, beatmap):
data = line.split(',')
if len(data) < 2: return
timing_point = Beatmap.TimingPoint()
timing_point.offset = float(data[0])
timing_point.beat_interval = float(data[1])
        # Old maps don't have meters
        if len(data) > 2: timing_point.meter = int(data[2])
        else: timing_point.meter = 4
        if len(data) > 6: timing_point.inherited = (int(data[6]) != 1)
        else: timing_point.inherited = False
beatmap.timing_points.append(timing_point)
@staticmethod
    def __parse_colour_section(line, beatmap):
# ignore
return
@staticmethod
def __parse_hitobjects_section(line, beatmap):
data = line.split(',')
if len(data) < 2: return
hitobject_type = int(data[3])
        if beatmap.gamemode == Beatmap.GAMEMODE_OSU or beatmap.gamemode is None:
if Std.is_hitobject_type(hitobject_type, Hitobject.CIRCLE):
beatmap.hitobjects.append(StdSingleNoteIO.load_singlenote(data, beatmap.difficulty))
return
if Std.is_hitobject_type(hitobject_type, Hitobject.SLIDER):
beatmap.hitobjects.append(StdHoldNoteIO.load_holdnote(data, beatmap.difficulty))
return
if Std.is_hitobject_type(hitobject_type, Hitobject.SPINNER):
beatmap.hitobjects.append(StdSpinnerIO.load_spinner(data, beatmap.difficulty))
return
if beatmap.gamemode == Beatmap.GAMEMODE_TAIKO:
''' TODO: Fix
if Taiko.is_hitobject_type(hitobject_type, Hitobject.CIRCLE):
beatmap.hitobjects.append(TaikoSingleNoteHitobject(data))
return
if Taiko.is_hitobject_type(hitobject_type, Hitobject.SLIDER):
beatmap.hitobjects.append(TaikoHoldNoteHitobject(data))
return
if Taiko.is_hitobject_type(hitobject_type, Hitobject.SPINNER):
beatmap.hitobjects.append(TaikoSpinnerHitobject(data))
return
'''
return
if beatmap.gamemode == Beatmap.GAMEMODE_CATCH:
''' TODO: Fix
if Catch.is_hitobject_type(hitobject_type, Hitobject.CIRCLE):
beatmap.hitobjects.append(CatchSingleNoteHitobject(data))
return
if Catch.is_hitobject_type(hitobject_type, Hitobject.SLIDER):
beatmap.hitobjects.append(CatchHoldNoteHitobject(data))
return
if Catch.is_hitobject_type(hitobject_type, Hitobject.SPINNER):
beatmap.hitobjects.append(CatchSpinnerHitobject(data))
return
'''
return
if beatmap.gamemode == Beatmap.GAMEMODE_MANIA:
if Mania.is_hitobject_type(hitobject_type, Hitobject.CIRCLE):
beatmap.hitobjects.append(ManiaSingleNoteIO.load_singlenote(data, beatmap.difficulty))
return
if Mania.is_hitobject_type(hitobject_type, Hitobject.MANIALONG):
beatmap.hitobjects.append(ManiaHoldNoteIO.load_holdnote(data, beatmap.difficulty))
return
@staticmethod
def __process_timing_points(beatmap):
beatmap.bpm_min = float('inf')
beatmap.bpm_max = float('-inf')
bpm = 0
slider_multiplier = -100
old_beat = -100
base = 0
for timing_point in beatmap.timing_points:
if timing_point.inherited:
timing_point.beat_length = base
if timing_point.beat_interval < 0:
slider_multiplier = timing_point.beat_interval
old_beat = timing_point.beat_interval
else:
slider_multiplier = old_beat
else:
slider_multiplier = -100
bpm = 60000 / timing_point.beat_interval
timing_point.beat_length = timing_point.beat_interval
base = timing_point.beat_interval
beatmap.bpm_min = min(beatmap.bpm_min, bpm)
beatmap.bpm_max = max(beatmap.bpm_max, bpm)
timing_point.bpm = bpm
timing_point.slider_multiplier = slider_multiplier
@staticmethod
def __process_slider_timings(beatmap):
for hitobject in beatmap.hitobjects:
if not hitobject.is_hitobject_type(Hitobject.SLIDER):
continue
try: idx_timing_point = find(beatmap.timing_points, hitobject.time, lambda timing_point: timing_point.offset)
except:
print(beatmap.timing_points)
raise
timing_point = beatmap.timing_points[idx_timing_point]
hitobject.to_repeat_time = round(((-600.0/timing_point.bpm) * hitobject.pixel_length * timing_point.slider_multiplier) / (100.0 * beatmap.difficulty.sm))
hitobject.end_time = hitobject.time + hitobject.to_repeat_time*hitobject.repeat
@staticmethod
def __process_slider_tick_times(beatmap):
beatmap.slider_tick_times = []
for hitobject in beatmap.hitobjects:
if not hitobject.is_hitobject_type(Hitobject.SLIDER):
continue
ms_per_beat = (100.0 * beatmap.difficulty.sm)/(hitobject.get_velocity() * beatmap.difficulty.st)
hitobject.tick_times = []
for beat_time in np.arange(hitobject.time, hitobject.end_time, ms_per_beat):
hitobject.tick_times.append(beat_time)
if hitobject.tick_times[-1] != hitobject.end_time:
hitobject.tick_times.append(hitobject.end_time)
@staticmethod
def __process_columns(beatmap):
hitobjects = beatmap.hitobjects
beatmap.hitobjects = []
for column in range(int(beatmap.difficulty.cs)):
beatmap.hitobjects.append([])
for hitobject in hitobjects:
column = Mania.get_column(hitobject.pos.x, beatmap.difficulty.cs)
beatmap.hitobjects[column].append(hitobject)
'''
for column in range(len(beatmap.hitobjects)):
beatmap.hitobjects[column] = sorted(beatmap.hitobjects[column], key=lambda hitobject: hitobject.time)
'''
BeatmapIO.init()
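# Hypothetical usage (the .osu path is illustrative):
#   beatmap = BeatmapIO.open_beatmap('song.osu')
#   print(beatmap.metadata.name)
#   print(beatmap.bpm_min, beatmap.bpm_max)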
|
1642995
|
import os
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
class Flatten(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
        return x.view(x.size(0), -1)  # flatten everything except the batch dimension
class PerceptualLoss(nn.Module):
def __init__(self, weight, vgg_weights_dir, net='caffe', normalize_grad=False):
super().__init__()
self.weight = weight
self.normalize_grad = normalize_grad
if net == 'pytorch':
model = torchvision.models.vgg19(pretrained=True).features
mean = torch.tensor([0.485, 0.456, 0.406])
std = torch.tensor([0.229, 0.224, 0.225])
num_layers = 30
elif net == 'caffe':
vgg_weights = torch.load(os.path.join(vgg_weights_dir, 'vgg19-d01eb7cb.pth'))
map = {'classifier.6.weight': u'classifier.7.weight', 'classifier.6.bias': u'classifier.7.bias'}
vgg_weights = OrderedDict([(map[k] if k in map else k, v) for k, v in vgg_weights.items()])
model = torchvision.models.vgg19()
model.classifier = nn.Sequential(Flatten(), *model.classifier._modules.values())
model.load_state_dict(vgg_weights)
model = model.features
mean = torch.tensor([103.939, 116.779, 123.680]) / 255.
std = torch.tensor([1., 1., 1.]) / 255.
num_layers = 30
elif net == 'face':
# Load caffe weights for VGGFace, converted from
# https://media.githubusercontent.com/media/yzhang559/vgg-face/master/VGG_FACE.caffemodel.pth
# The base model is VGG16, not VGG19.
model = torchvision.models.vgg16().features
model.load_state_dict(torch.load(os.path.join(vgg_weights_dir, 'vgg_face_weights.pth')))
mean = torch.tensor([103.939, 116.779, 123.680]) / 255.
std = torch.tensor([1., 1., 1.]) / 255.
num_layers = 30
else:
raise ValueError(f"Unknown type of PerceptualLoss: expected '{{pytorch,caffe,face}}', got '{net}'")
self.register_buffer('mean', mean[None, :, None, None])
self.register_buffer('std' , std[None, :, None, None])
layers_avg_pooling = []
for weights in model.parameters():
weights.requires_grad = False
for module in model.modules():
if module.__class__.__name__ == 'Sequential':
continue
elif module.__class__.__name__ == 'MaxPool2d':
layers_avg_pooling.append(nn.AvgPool2d(kernel_size=2, stride=2, padding=0))
else:
layers_avg_pooling.append(module)
if len(layers_avg_pooling) >= num_layers:
break
layers_avg_pooling = nn.Sequential(*layers_avg_pooling)
self.model = layers_avg_pooling
def normalize_inputs(self, x):
return (x - self.mean) / self.std
def forward(self, input, target):
input = (input + 1) / 2
target = (target.detach() + 1) / 2
loss = 0
features_input = self.normalize_inputs(input)
features_target = self.normalize_inputs(target)
for layer in self.model:
features_input = layer(features_input)
features_target = layer(features_target)
if layer.__class__.__name__ == 'ReLU':
                if self.normalize_grad:
                    pass  # gradient-normalized variant not implemented here
else:
loss = loss + F.l1_loss(features_input, features_target)
return loss * self.weight
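# Hypothetical usage sketch (shapes and paths are assumed; inputs are
# expected in [-1, 1] since forward() maps them to [0, 1]):
#   criterion = PerceptualLoss(weight=1.0, vgg_weights_dir='weights/', net='pytorch')
#   x = torch.rand(1, 3, 224, 224) * 2 - 1
#   y = torch.rand(1, 3, 224, 224) * 2 - 1
#   loss = criterion(x, y)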
|
1643040
|
from typing import Dict, Iterable, List, Tuple, Union
import pydantic
import ujson
from server import db_dir
from tarkov.exceptions import NotFoundError
from .models import Item, ItemTemplate, NodeTemplate
from .types import TemplateId
AnyTemplate = Union[ItemTemplate, NodeTemplate]
class ItemTemplatesRepository:
def __init__(self) -> None:
items, nodes = self.__read_templates()
self._item_templates: Dict[TemplateId, ItemTemplate] = items
self._node_templates: Dict[TemplateId, NodeTemplate] = nodes
self._item_categories: dict = self.__read_item_categories()
@staticmethod
def __read_templates() -> Tuple[
Dict[TemplateId, ItemTemplate],
Dict[TemplateId, NodeTemplate],
]:
item_templates: List[ItemTemplate] = []
node_templates: List[NodeTemplate] = []
# Read every file from db/items
for item_file_path in db_dir.joinpath("items").glob("*"):
file_data: List[dict] = ujson.load(
item_file_path.open("r", encoding="utf8")
)
item_templates.extend(
pydantic.parse_obj_as(
List[ItemTemplate],
(item for item in file_data if item["_type"] == "Item"),
)
)
node_templates.extend(
pydantic.parse_obj_as(
List[NodeTemplate],
(item for item in file_data if item["_type"] == "Node"),
)
)
return (
{tpl.id: tpl for tpl in item_templates},
{tpl.id: tpl for tpl in node_templates},
)
@staticmethod
def __read_item_categories() -> dict:
items = ujson.load(
db_dir.joinpath("templates", "items.json").open("r", encoding="utf8")
)
items = {item["Id"]: item for item in items}
return items
@property
def templates(self) -> Dict[TemplateId, ItemTemplate]:
return self._item_templates
@property
def client_items_view(self) -> Dict[TemplateId, AnyTemplate]:
return {**self._item_templates, **self._node_templates}
def get_template(self, item: Union[Item, TemplateId]) -> ItemTemplate:
"""
Returns template of item
"""
item_template = self.get_any_template(item)
if isinstance(item_template, NodeTemplate):
raise NotFoundError(
f"Can not found ItemTemplate with id {item_template.id}, however NodeTemplate was found."
)
return item_template
def get_any_template(
self, item: Union[Item, TemplateId]
) -> Union[NodeTemplate, ItemTemplate]:
if isinstance(item, Item):
template_id = item.tpl
else:
template_id = item
if template_id in self._item_templates:
return self._item_templates[template_id]
if template_id in self._node_templates:
return self._node_templates[template_id]
raise NotFoundError(f"Can not found any template with id {template_id}")
def iter_template_children(
self, template_id: TemplateId
) -> Iterable[Union[NodeTemplate, ItemTemplate]]:
templates: List[Union[NodeTemplate, ItemTemplate]] = [
self.get_any_template(template_id)
]
while templates:
template = templates.pop()
yield template
            for child_item in self._item_templates.values():
                if child_item.parent == template.id:
                    templates.append(child_item)
            for child_node in self._node_templates.values():
                if child_node.parent == template.id:
                    templates.append(child_node)
def get_template_items(self, template_id: TemplateId) -> List[ItemTemplate]:
"""
Returns all items of given category (all barter items for example)
:param template_id:
:return: All items of a category
"""
template = self.get_any_template(template_id)
if isinstance(template, ItemTemplate):
return [template]
return [
tpl
for tpl in self.iter_template_children(template_id)
if isinstance(tpl, ItemTemplate)
]
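# Hypothetical usage (the template id is illustrative, and the db files
# under server.db_dir are assumed to be present):
#   repository = ItemTemplatesRepository()
#   template = repository.get_any_template(TemplateId('some_template_id'))
#   children = list(repository.iter_template_children(template.id))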
|
1643071
|
import requests
import os
import json
import logging
from wrapanapi.exceptions import RestClientException
requests.packages.urllib3.disable_warnings()
class BearerTokenAuth(requests.auth.AuthBase):
"""Attaches a bearer token to the given request object"""
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers['Authorization'] = 'Bearer {}'.format(self.token)
return r
class ContainerClient(object):
def __init__(self, hostname, auth, protocol="https", port=6443, entry='api/v1', verify=False):
"""Simple REST API client for container management systems
Args:
hostname: String with the hostname or IP address of the server (e.g. '10.11.12.13')
auth: Either a (user, pass) sequence or a string with token
protocol: Protocol to use for communication with the server
port: Port to use
entry: Entry point of the REST API
verify: 'True' if we want to verify SSL, 'False' otherwise
"""
self._logger = logging.getLogger(__name__)
self.api_entry = "{}://{}:{}/{}".format(protocol, hostname, port, entry)
self.verify = verify
        if isinstance(auth, (list, set, tuple)):
self.auth = auth
elif isinstance(auth, str):
self.auth = BearerTokenAuth(auth)
else:
raise RestClientException('Invalid auth object')
def entity_path(self, entity_type, name=None, namespace=None):
"""Processing the entity path according to the type, name and namespace"""
path = '{}s'.format(entity_type)
if namespace is not None:
path = os.path.join('namespaces/{}'.format(namespace), path)
if name is not None:
path = os.path.join(path, '{}'.format(name))
return path
def get(self, entity_type, name=None, namespace=None, convert=None):
"""Sends a request to fetch an entity of specific type
        Fetches a single entity if its name is provided, or all entities of the given type if name is omitted.
Note:
Some entities are tied to namespaces (projects).
To fetch these by name, namespace has to be provided as well.
convert: The convert method to use for the json content (e.g. eval_strings).
Return:
Tuple containing status code and json response with requested entity/entities.
"""
path = self.entity_path(entity_type, name, namespace)
r = self.raw_get(path)
json_content = r.json()
if json_content and convert:
json_content = convert(json_content)
return (r.status_code, json_content)
def post(self, entity_type, data, name=None, namespace=None, convert=None):
"""Sends a POST request to an entity specified by the method parameters"""
path = self.entity_path(entity_type, name, namespace)
r = self.raw_post(path, data)
json_content = r.json()
if json_content and convert:
json_content = convert(json_content)
return (r.status_code, json_content)
    def patch(self, entity_type, data, name=None, namespace=None, convert=None,
              headers=None):
        """Sends a PATCH request to an entity specified by the method parameters"""
        if headers is None:
            headers = {'Content-Type': 'application/strategic-merge-patch+json'}
        path = self.entity_path(entity_type, name, namespace)
        r = self.raw_patch(path, data, headers)
json_content = r.json()
if json_content and convert:
json_content = convert(json_content)
return (r.status_code, json_content)
def delete(self, entity_type, name, namespace=None, convert=None):
"""Sends a DELETE request to an entity specified by the method parameters
(In simple words - delete the entity)"""
path = self.entity_path(entity_type, name, namespace)
r = self.raw_delete(path)
json_content = r.json()
if json_content and convert:
json_content = convert(json_content)
return (r.status_code, json_content)
def get_json(self, path, headers=None, params=None):
return self.raw_get(path, headers, params).json()
def put_status(self, path, data, headers=None):
r = self.raw_put(path, data, headers)
return r.ok
def post_status(self, path, data, headers=None):
r = self.raw_post(path, data, headers)
return r.ok
def delete_status(self, path, headers=None):
r = self.raw_delete(path, headers)
return r.ok
def raw_get(self, path, headers=None, params=None):
self._logger.debug('GET %s;', path)
return requests.get(
os.path.join(self.api_entry, path),
auth=self.auth,
verify=self.verify,
headers=headers,
params=params)
def raw_put(self, path, data, headers=None):
self._logger.debug('PUT %s; data=%s;', path, data)
return requests.put(
os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
headers=headers, data=json.dumps(data))
def raw_post(self, path, data, headers=None):
self._logger.debug('POST %s; data=%s;', path, data)
return requests.post(
os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
headers=headers, data=json.dumps(data))
def raw_patch(self, path, data, headers=None):
self._logger.debug('PATCH %s; data=%s;', path, data)
return requests.patch(
os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
headers=headers, data=json.dumps(data))
def raw_delete(self, path, headers=None):
self._logger.debug('DELETE %s;', path)
return requests.delete(
os.path.join(self.api_entry, path), auth=self.auth, verify=self.verify,
headers=headers)
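# Minimal end-to-end sketch (hypothetical protocol, host, port and token; the
# class name is assumed from the RestClientException raised in the constructor):
#   client = RestClient('https', 'api.example.com', 6443, 'api/v1', auth='my-token')
#   status, items = client.get('pod', namespace='default')
#   ok = client.delete_status('namespaces/default/pods/web')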
|
1643077
|
import os
import sys
from math import sqrt
from simple_oss import SimpleOss
INSTANCE_ID = int(os.environ.get('ALI_DIKU_INSTANCE_ID'))
TASK_ID = os.environ.get('ALI_DIKU_TASK_ID')
INSTANCE_COUNT = int(os.environ.get('INSTANCE_COUNT'))
OSS_HOST = os.environ.get('ALI_DIKU_OSS_HOST')
ID = 'P4c9wtscfsH4rxeT'
KEY = '<KEY>'
BUCKET = 'vp02'
FIND_OUTPUT_PATH = 'batch-python-sdk/output/find_task_result_%s.txt'
COUNT_OUTPUT_PATH = 'batch-python-sdk/output/count_task_result.txt'
start_num = 2
end_num = 10000
instance_count = 2
oss_clnt = SimpleOss(OSS_HOST, ID, KEY)
print(os.environ)
def get_range(start, end, instance_count, instance_id):
    '''
    Split the numbers in [start, end) into 'instance_count' parts and
    return the start and end of the part assigned to 'instance_id'.
    '''
total = end - start
    step = total // instance_count
    assert step, 'total numbers should be bigger than instance_count.'
    residue = total % instance_count
    parts = [step + 1 if i < residue else step for i in range(instance_count)]
    s = start + sum(parts[:instance_id])
    e = s + parts[instance_id]
    return s, e
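# Worked example of the split above (a hedged sketch, using the module defaults):
#   get_range(2, 10000, 2, 0) -> (2, 5001) and get_range(2, 10000, 2, 1) -> (5001, 10000),
#   since total = 9998, step = 9998 // 2 = 4999 and residue = 0.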
def find_task():
    def is_prime(x):
        return all(x % d for d in range(2, int(sqrt(x)) + 1))
    s, e = get_range(start_num, end_num, INSTANCE_COUNT, INSTANCE_ID)
    with open('result.txt', 'w') as f:
        for num in range(s, e):
            if is_prime(num):
                f.write(str(num) + '\n')
oss_clnt.upload(BUCKET, 'result.txt', FIND_OUTPUT_PATH%INSTANCE_ID)
return 0
def count_task():
prime_list = []
for instance_id in range(INSTANCE_COUNT):
instance_result = oss_clnt.download_str(BUCKET, FIND_OUTPUT_PATH%instance_id)
prime_list += instance_result.splitlines()
count = len(prime_list)
oss_clnt.upload_str(BUCKET, str(count), COUNT_OUTPUT_PATH)
def main():
if TASK_ID == 'Find':
find_task()
else:
count_task()
return 0
if __name__ == '__main__':
sys.exit(main())
|
1643080
|
from dcard.manager import Downloader
class TestDownloader:
    def test_download_with_bundles_but_no_urls(self):
downloader = Downloader()
metas = dict(test='some data')
urls = []
bundles = [(metas, urls)]
downloader.resource_bundles = bundles
cnt, _ = downloader.download()
assert cnt == 0
|
1643089
|
from app.errors.handlers import bad_request
from flask import jsonify
from app import db
from app.tasks import bp
from app.schemas import TasksSchema
from flask_jwt_extended import jwt_required, current_user
tasks_schema = TasksSchema(many=True)
@bp.get("/background-task/count-seconds/<int:number>")
@jwt_required()
def background_worker_count_seconds(number: int) -> tuple:
"""
Spawn a background task via RQ to perform a long running task
Parameters
----------
number : int
The number of seconds the background tasks needs to count
Returns
-------
    tuple
A JSON object containing either the success message or an error message
"""
if current_user.get_task_in_progress("count_seconds"):
return bad_request("Task already in progress")
else:
current_user.launch_task("count_seconds", "Counting seconds...", number=number)
db.session.commit()
return jsonify({"msg": "Launched background task"}), 200
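# Hedged request sketch (the path assumes no extra blueprint url_prefix and a
# valid JWT attached as flask_jwt_extended expects):
#   GET /background-task/count-seconds/30 -> {"msg": "Launched background task"}, HTTP 200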
@bp.get("/get/active-background-tasks")
@jwt_required()
def active_background_tasks() -> tuple:
"""
Endpoint to retrieve all the active background tasks
Returns
-------
    tuple
A JSON object containing the active tasks
"""
tasks = current_user.get_tasks_in_progress()
return tasks_schema.jsonify(tasks), 200
@bp.get("/get/finished-background-tasks")
@jwt_required()
def finished_background_tasks() -> tuple:
"""
Endpoint to retrieve the finished background tasks
Returns
-------
    tuple
A JSON object containing the finished tasks
"""
tasks = current_user.get_completed_tasks()
return tasks_schema.jsonify(tasks), 200
|