hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c3efe56717d90a8d09328f57a5bc578273d39ac | 9,089 | py | Python | vpc-flow-logs/enable-vpc-flowlogs.py | adamgilman/aws-fast-fixes | ace2ee78f19ea9555d4e2314c049a0df741b406a | [
"Apache-2.0"
] | null | null | null | vpc-flow-logs/enable-vpc-flowlogs.py | adamgilman/aws-fast-fixes | ace2ee78f19ea9555d4e2314c049a0df741b406a | [
"Apache-2.0"
] | null | null | null | vpc-flow-logs/enable-vpc-flowlogs.py | adamgilman/aws-fast-fixes | ace2ee78f19ea9555d4e2314c049a0df741b406a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import boto3
from botocore.exceptions import ClientError
import logging
def main(args, logger):
    """Entry point: build a boto3 session and enable flow logs region by region.

    Args:
        args: parsed argparse namespace (see do_args()).
        logger: configured logging.Logger for all output.
    """
    # Honour an explicit CLI profile; otherwise fall back to boto3's default
    # credential chain (env vars, shared config, instance role, ...).
    session = (
        boto3.Session(profile_name=args.profile)
        if args.profile
        else boto3.Session()
    )
    # Walk every region for this account (or just the one given via --region).
    for region in get_regions(session, args):
        process_region(args, region, session, logger)
    return
def process_region(args, region, session, logger):
    """Find VPCs in one region and enable flow logs for those that have ENIs.

    Honours the optional --vpc-id filter; VPCs without any network
    interface are skipped (nothing to log).
    """
    logger.info(f"Processing region {region}")
    ec2_client = session.client('ec2', region_name=region)

    # Gather candidate VPC ids, applying the --vpc-id filter when present.
    vpc_ids = []
    for page in ec2_client.get_paginator('describe_vpcs').paginate():
        for vpc in page['Vpcs']:
            if not args.vpc_id or args.vpc_id == vpc['VpcId']:
                vpc_ids.append(vpc['VpcId'])

    if not vpc_ids:
        logger.debug(" No VPCs to enable flow logs in region:{}".format(region))
        return

    for vpc_id in vpc_ids:
        # Only enable flow logs when the VPC actually contains at least one ENI.
        logger.debug(f"  Processing VpcId {vpc_id}")
        enis = ec2_client.describe_network_interfaces(
            Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]
        )['NetworkInterfaces']
        if enis:
            logger.debug(f"    ENI found in VpcId {vpc_id}")
            enable_flowlogs(vpc_id, ec2_client, args, region)
        else:
            logger.debug(f"    No ENI found in VpcId {vpc_id}, skipped.")
    return
def enable_flowlogs(VpcId,client,args,region):
    """Ensure a flow log shipping to args.flowlog_bucket exists for VpcId.

    Walks the VPC's existing S3-destined flow logs. If one already targets
    our bucket, it is compared against the requested settings and — after
    user confirmation, unless --force — deleted and recreated when they
    differ.

    NOTE(review): this function relies on the module-global ``logger``
    created in the ``__main__`` section; calling it from an importing
    module would raise NameError — confirm intended usage.
    """
    # checking for existing flow logs
    bucket = 'arn:aws:s3:::{}'.format(args.flowlog_bucket)
    paginator = client.get_paginator('describe_flow_logs')
    for page in paginator.paginate(
        Filters=[
            {
                'Name': 'resource-id',
                'Values': [VpcId]
            },
            {
                'Name': 'log-destination-type',
                'Values': ['s3']
            }
        ]
    ):
        for FlowLog in page['FlowLogs']:
            if FlowLog['LogDestination'] == bucket:
                accept_destructive_update=False
                logger.debug(" Flow Log ({}) already exist, region:{}, VPC:{}".format(FlowLog['FlowLogId'],region,VpcId))
                # A failed delivery status usually means a permissions or
                # bucket-policy problem; bail out rather than churn it.
                if FlowLog['DeliverLogsStatus'] == 'FAILED':
                    logger.error("Flow Log ({}) failed, region:{}, VPC:{}, please check it".format(FlowLog['FlowLogId'],region,VpcId))
                    return
                logger.debug("Flow Log ({}) is {} on {}\n traffic type: {}\n destination type: {}\n destination: {}\n log format: \n {}".format(
                    FlowLog['FlowLogId'],
                    FlowLog['FlowLogStatus'],
                    FlowLog['ResourceId'],
                    FlowLog['TrafficType'],
                    FlowLog['LogDestinationType'],
                    FlowLog['LogDestination'],
                    FlowLog['LogFormat']
                ))
                # Collect human-readable descriptions of every difference
                # between the existing flow log and the requested settings.
                difflist = []
                if FlowLog['TrafficType'] != args.traffic_type:
                    difflist.append("Traffic type will change from {} to {}.".format(FlowLog['TrafficType'],args.traffic_type))
                if FlowLog['LogDestination'] != bucket:
                    # NOTE(review): this branch looks unreachable — it only
                    # runs when LogDestination == bucket; confirm whether a
                    # different comparison was intended.
                    difflist.append("Log Destination will change from {} to {}.".format(FlowLog['LogDestination'],bucket))
                if difflist == []:
                    # No actions to perform here
                    continue
                logger.info("Existing flow log will be terminated and new flow log created with these changes:\n\t{}\n".format(difflist))
                if args.force:
                    accept_destructive_update='y'
                else:
                    accept_destructive_update = input(f'Do you wish to continue? [y/N] ').lower()
                if accept_destructive_update[:1] == 'y':
                    # Flow logs cannot be modified in place: delete + recreate.
                    delete_flowlog(VpcId,FlowLog['FlowLogId'],True,client,args,region)
                    create_flowlog(VpcId,bucket,client,args,region)
                else:
                    logger.info("User declined replacement of flow log {}".format(FlowLog['FlowLogId']))
            else:
                # Existing flow log points at a different S3 bucket: create
                # ours alongside it.
                # NOTE(review): if the VPC has *no* S3 flow logs at all,
                # neither branch runs and nothing is created — confirm
                # whether a for/else ("create when not found") was intended.
                create_flowlog(VpcId,bucket,client,args,region)
    return
def delete_flowlog(VpcId, FlowLogId, actually_do_it, client, args, region):
    """Delete one VPC flow log, or only log the intent when not in
    --actually-do-it mode.

    Note: ``actually_do_it`` (the parameter) feeds the API DryRun flag,
    while ``args.actually_do_it`` gates whether the API is called at all.
    """
    if not args.actually_do_it:
        # Dry-run from the CLI's point of view: report and stop.
        logger.info("Would delete Flow Log:{}, region:{}, VPC:{}".format(FlowLogId, region, VpcId))
        return

    logger.debug("  deleting Flow Log:{}, region:{}, VPC:{}".format(FlowLogId, region, VpcId))
    response = client.delete_flow_logs(
        FlowLogIds=[FlowLogId],
        DryRun=not actually_do_it
    )
    failures = response.get('Unsuccessful')
    if failures:
        # Surface each per-item error returned by the API.
        for failure in failures:
            if failure.get('Error'):
                logger.error("Flow Log deletion failed, error:{}".format(failure['Error'].get('Message')))
    else:
        logger.info("Successfully deleted Flow Log:{}, region:{}, VPC:{}".format(FlowLogId, region, VpcId))
    return
def create_flowlog(VpcId, bucket, client, args, region):
    """Create an S3-destined flow log for VpcId, or only log the intent when
    not in --actually-do-it mode."""
    if not args.actually_do_it:
        # Dry-run: report what would happen and stop.
        logger.info("Would Enable Flow Log region:{}, VPC:{}".format(region, VpcId))
        return

    # creating flow logs
    logger.debug("enabling Flow Log region:{}, VPC:{}".format(region, VpcId))
    response = client.create_flow_logs(
        ResourceIds=[VpcId],
        ResourceType='VPC',
        TrafficType=args.traffic_type,
        LogDestinationType='s3',
        LogDestination=bucket
    )
    if response.get('Unsuccessful'):
        # Surface each per-item error returned by the API.
        for unsuccess in response['Unsuccessful']:
            if unsuccess.get('Error'):
                logger.error("Flow Log creation failed, error:{}".format(unsuccess['Error'].get('Message')))
    elif response.get('FlowLogIds'):
        logger.info("Successfully created Flow Logs:{}, region:{}, VPC:{}".format(response['FlowLogIds'][0], region, VpcId))
    return
def get_regions(session, args):
    """Return the list of regions to process.

    If --region was given, return just that region; otherwise return every
    region for the account, with us-east-1 always first.
    """
    # An explicit --region short-circuits the describe_regions API call.
    if args.region:
        return [args.region]

    ec2 = session.client('ec2')
    regions = ec2.describe_regions()['Regions']
    # us-east-1 is pinned to the front; skip it in the main pass so it is
    # not listed twice.
    ordered = ['us-east-1']
    ordered.extend(
        r['RegionName'] for r in regions if r['RegionName'] != "us-east-1"
    )
    return ordered
def do_args():
    """Define and parse the command line for this tool."""
    import argparse
    p = argparse.ArgumentParser()
    # Logging / output controls
    p.add_argument("--debug", help="print debugging info", action='store_true')
    p.add_argument("--error", help="print error info only", action='store_true')
    p.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true')
    # Scope selection
    p.add_argument("--region", help="Only Process Specified Region")
    p.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)")
    p.add_argument("--vpc-id", help="Only Process Specified VPC")
    # Behaviour
    p.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true')
    p.add_argument("--flowlog-bucket", help="S3 bucket to deposit logs to", required=True)
    p.add_argument("--traffic-type", help="The type of traffic to log", default='ALL', choices=['ACCEPT', 'REJECT', 'ALL'])
    p.add_argument("--force", help="Perform flowlog replacement without prompt", action='store_true')
    return p.parse_args()
if __name__ == '__main__':
args = do_args()
# Logging idea stolen from: https://docs.python.org/3/howto/logging.html#configuring-logging
# create console handler and set level to debug
logger = logging.getLogger('enable-vpc-flowlogs')
ch = logging.StreamHandler()
if args.debug:
logger.setLevel(logging.DEBUG)
elif args.error:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.INFO)
# Silence Boto3 & Friends
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
# create formatter
if args.timestamp:
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
else:
formatter = logging.Formatter('%(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
try:
main(args, logger)
except KeyboardInterrupt:
exit(1) | 40.395556 | 154 | 0.601276 |
import boto3
from botocore.exceptions import ClientError
import logging
def main(args, logger):
if args.profile:
session = boto3.Session(profile_name=args.profile)
else:
session = boto3.Session()
all_regions = get_regions(session, args)
for region in all_regions:
process_region(args, region, session, logger)
return
def process_region(args, region, session, logger):
logger.info(f"Processing region {region}")
ec2_client = session.client('ec2', region_name=region)
vpcs = []
paginator = ec2_client.get_paginator('describe_vpcs')
for page in paginator.paginate():
for vpc in page['Vpcs']:
if args.vpc_id:
if args.vpc_id == vpc['VpcId']:
vpcs.append(vpc['VpcId'])
else:
vpcs.append(vpc['VpcId'])
if vpcs:
for VpcId in vpcs:
logger.debug(f" Processing VpcId {VpcId}")
network_interfaces = ec2_client.describe_network_interfaces(Filters=[{'Name':'vpc-id','Values':[VpcId]}])['NetworkInterfaces']
if network_interfaces:
logger.debug(f" ENI found in VpcId {VpcId}")
enable_flowlogs(VpcId, ec2_client, args, region)
else:
logger.debug(f" No ENI found in VpcId {VpcId}, skipped.")
else:
logger.debug(" No VPCs to enable flow logs in region:{}".format(region))
return
def enable_flowlogs(VpcId,client,args,region):
bucket = 'arn:aws:s3:::{}'.format(args.flowlog_bucket)
paginator = client.get_paginator('describe_flow_logs')
for page in paginator.paginate(
Filters=[
{
'Name': 'resource-id',
'Values': [VpcId]
},
{
'Name': 'log-destination-type',
'Values': ['s3']
}
]
):
for FlowLog in page['FlowLogs']:
if FlowLog['LogDestination'] == bucket:
accept_destructive_update=False
logger.debug(" Flow Log ({}) already exist, region:{}, VPC:{}".format(FlowLog['FlowLogId'],region,VpcId))
if FlowLog['DeliverLogsStatus'] == 'FAILED':
logger.error("Flow Log ({}) failed, region:{}, VPC:{}, please check it".format(FlowLog['FlowLogId'],region,VpcId))
return
logger.debug("Flow Log ({}) is {} on {}\n traffic type: {}\n destination type: {}\n destination: {}\n log format: \n {}".format(
FlowLog['FlowLogId'],
FlowLog['FlowLogStatus'],
FlowLog['ResourceId'],
FlowLog['TrafficType'],
FlowLog['LogDestinationType'],
FlowLog['LogDestination'],
FlowLog['LogFormat']
))
difflist = []
if FlowLog['TrafficType'] != args.traffic_type:
difflist.append("Traffic type will change from {} to {}.".format(FlowLog['TrafficType'],args.traffic_type))
if FlowLog['LogDestination'] != bucket:
difflist.append("Log Destination will change from {} to {}.".format(FlowLog['LogDestination'],bucket))
if difflist == []:
continue
logger.info("Existing flow log will be terminated and new flow log created with these changes:\n\t{}\n".format(difflist))
if args.force:
accept_destructive_update='y'
else:
accept_destructive_update = input(f'Do you wish to continue? [y/N] ').lower()
if accept_destructive_update[:1] == 'y':
delete_flowlog(VpcId,FlowLog['FlowLogId'],True,client,args,region)
create_flowlog(VpcId,bucket,client,args,region)
else:
logger.info("User declined replacement of flow log {}".format(FlowLog['FlowLogId']))
else:
create_flowlog(VpcId,bucket,client,args,region)
return
def delete_flowlog(VpcId, FlowLogId, actually_do_it, client, args, region):
if args.actually_do_it:
logger.debug(" deleting Flow Log:{}, region:{}, VPC:{}".format(FlowLogId,region,VpcId))
response = client.delete_flow_logs(
DryRun=not actually_do_it,
FlowLogIds=[FlowLogId]
)
if response.get('Unsuccessful'):
for failure in response['Unsuccessful']:
if failure.get('Error'):
logger.error("Flow Log deletion failed, error:{}".format(failure['Error'].get('Message')))
else:
logger.info("Successfully deleted Flow Log:{}, region:{}, VPC:{}".format(FlowLogId,region,VpcId))
else:
logger.info("Would delete Flow Log:{}, region:{}, VPC:{}".format(FlowLogId,region,VpcId))
return
def create_flowlog(VpcId,bucket,client,args,region):
if args.actually_do_it:
logger.debug("enabling Flow Log region:{}, VPC:{}".format(region,VpcId))
response = client.create_flow_logs(
ResourceIds=[VpcId],
ResourceType='VPC',
TrafficType=args.traffic_type,
LogDestinationType='s3',
LogDestination=bucket
)
if response.get('Unsuccessful'):
for unsuccess in response['Unsuccessful']:
if unsuccess.get('Error'):
logger.error("Flow Log creation failed, error:{}".format(unsuccess['Error'].get('Message')))
elif response.get('FlowLogIds'):
logger.info("Successfully created Flow Logs:{}, region:{}, VPC:{}".format(response['FlowLogIds'][0],region,VpcId))
else:
logger.info("Would Enable Flow Log region:{}, VPC:{}".format(region,VpcId))
return
def get_regions(session, args):
if args.region:
return([args.region])
ec2 = session.client('ec2')
response = ec2.describe_regions()
output = ['us-east-1']
for r in response['Regions']:
if r['RegionName'] == "us-east-1":
continue
output.append(r['RegionName'])
return(output)
def do_args():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--debug", help="print debugging info", action='store_true')
parser.add_argument("--error", help="print error info only", action='store_true')
parser.add_argument("--timestamp", help="Output log with timestamp and toolname", action='store_true')
parser.add_argument("--region", help="Only Process Specified Region")
parser.add_argument("--profile", help="Use this CLI profile (instead of default or env credentials)")
parser.add_argument("--vpc-id", help="Only Process Specified VPC")
parser.add_argument("--actually-do-it", help="Actually Perform the action", action='store_true')
parser.add_argument("--flowlog-bucket", help="S3 bucket to deposit logs to", required=True)
parser.add_argument("--traffic-type", help="The type of traffic to log", default='ALL', choices=['ACCEPT','REJECT','ALL'])
parser.add_argument("--force", help="Perform flowlog replacement without prompt", action='store_true')
args = parser.parse_args()
return(args)
if __name__ == '__main__':
args = do_args()
ogging.getLogger('enable-vpc-flowlogs')
ch = logging.StreamHandler()
if args.debug:
logger.setLevel(logging.DEBUG)
elif args.error:
logger.setLevel(logging.ERROR)
else:
logger.setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.WARNING)
logging.getLogger('boto3').setLevel(logging.WARNING)
logging.getLogger('urllib3').setLevel(logging.WARNING)
if args.timestamp:
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
else:
formatter = logging.Formatter('%(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
try:
main(args, logger)
except KeyboardInterrupt:
exit(1) | true | true |
1c3efe5ec2a1831555eaf795850e6be08dadf58f | 844 | bzl | Python | build/go_binary.bzl | xybots/cert-manager | cdccb752ff98219a1995fce2c6f797c450437805 | [
"Apache-2.0"
] | 1 | 2021-04-01T04:14:36.000Z | 2021-04-01T04:14:36.000Z | build/go_binary.bzl | xybots/cert-manager | cdccb752ff98219a1995fce2c6f797c450437805 | [
"Apache-2.0"
] | 1 | 2021-02-24T00:42:10.000Z | 2021-02-24T00:42:10.000Z | build/go_binary.bzl | xybots/cert-manager | cdccb752ff98219a1995fce2c6f797c450437805 | [
"Apache-2.0"
] | 3 | 2020-06-17T19:04:26.000Z | 2021-02-11T14:29:09.000Z | # Copyright 2020 The Jetstack cert-manager contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(":version.bzl", "version_x_defs")
load("@io_bazel_rules_go//go:def.bzl", real_go_binary = "go_binary")
def go_binary(name, **kwargs):
    """Wrapper around rules_go's go_binary that stamps version x_defs.

    Every binary built through this macro gets the linker-injected version
    variables from version_x_defs(); all other attributes are forwarded
    verbatim to the underlying rule.

    Args:
      name: Name of the binary target.
      **kwargs: Forwarded unchanged to the real go_binary rule.
    """
    real_go_binary(
        name = name,
        x_defs = version_x_defs(),
        **kwargs,
    )
| 35.166667 | 74 | 0.726303 |
load(":version.bzl", "version_x_defs")
load("@io_bazel_rules_go//go:def.bzl", real_go_binary = "go_binary")
def go_binary(name, **kwargs):
real_go_binary(
name = name,
x_defs = version_x_defs(),
**kwargs,
)
| true | true |
1c3efe945ce0be96d2fe3fb29a37f0bac9cd0d9d | 6,302 | py | Python | idm_lp/database/database.py | hfek/hfek | ecafcd177bf5a4af1f499180a2230985bd953863 | [
"MIT"
] | null | null | null | idm_lp/database/database.py | hfek/hfek | ecafcd177bf5a4af1f499180a2230985bd953863 | [
"MIT"
] | null | null | null | idm_lp/database/database.py | hfek/hfek | ecafcd177bf5a4af1f499180a2230985bd953863 | [
"MIT"
] | null | null | null | import asyncio
import json
import os
import typing
from typing import List
from pydantic import BaseModel, validator, Field
from idm_lp import const
from . import (
Alias,
ChatEnterModel,
IgnoredMembers,
IgnoredGlobalMembers,
MutedMembers,
ContextInstanceMixin,
RegexDeleter,
RolePlayCommand,
TrustedUser,
SlouMo,
DatabaseError,
Timer
)
class Database(BaseModel, ContextInstanceMixin):
    """Userbot configuration, backed by a local JSON file and partially
    synchronised with the IDM server.

    Each pydantic ``Field`` carries two custom extras:
      - ``to_server``: ``'include'`` if the value is uploaded to the server,
        ``'exclude'`` if it stays local (see ``get_to_server``);
      - ``from_server``: whether the value may be refreshed from the server.
    """

    # Not sent to the server; obtained either from it (except for tokens and
    # service prefixes) or from the local config file.
    tokens: List[str] = Field([], to_server='exclude', from_server='exclude')
    secret_code: str = Field("", to_server='exclude', from_server='include')
    ru_captcha_key: typing.Optional[str] = Field("", to_server='exclude', from_server='include')
    service_prefixes: List[str] = Field([".слп", "!слп"], to_server='exclude', from_server='exclude')

    # Obtained exclusively from the server.
    repeater_word: str = Field("..", to_server='include', from_server='include')
    dd_prefix: str = Field("дд", to_server='include', from_server='include')
    timers: typing.List[Timer] = Field([], to_server='include', from_server='include')
    auto_infection: bool = Field(False, to_server='include', from_server='include')
    auto_infection_interval: int = Field(3600, to_server='include', from_server='include')
    auto_infection_peer_id: int = Field(-174105461, to_server='include', from_server='include')
    auto_infection_argument: str = Field("р", to_server='include', from_server='include')
    bio_reply: bool = Field(False, to_server='include', from_server='include')
    repeater_active: bool = Field(False, to_server='include', from_server='include')
    delete_all_notify: bool = Field(False, to_server='include', from_server='include')
    auto_exit_from_chat: bool = Field(False, to_server='include', from_server='include')
    auto_exit_from_chat_delete_chat: bool = Field(False, to_server='include', from_server='include')
    auto_exit_from_chat_add_to_black_list: bool = Field(False, to_server='include', from_server='include')
    disable_notifications: bool = Field(False, to_server='include', from_server='include')
    nometa_enable: bool = Field(False, to_server='include', from_server='include')
    nometa_message: str = Field("nometa.xyz", to_server='include', from_server='include')
    nometa_attachments: List[str] = Field([], to_server='include', from_server='include')
    nometa_delay: float = Field(5 * 60, to_server='include', from_server='include')
    self_prefixes: List[str] = Field([".л", "!л"], to_server='include', from_server='include')
    duty_prefixes: List[str] = Field([".лд", "!лд"], to_server='include', from_server='include')
    ignored_members: List[IgnoredMembers] = Field([], to_server='include', from_server='include')
    ignored_global_members: List[IgnoredGlobalMembers] = Field([], to_server='include', from_server='include')
    muted_members: List[MutedMembers] = Field([], to_server='include', from_server='include')
    aliases: List[Alias] = Field([], to_server='include', from_server='include')
    role_play_commands: List[RolePlayCommand] = Field([], to_server='include', from_server='include')
    trusted: List[TrustedUser] = Field([], to_server='include', from_server='include')
    add_to_friends_on_chat_enter: List[ChatEnterModel] = Field([], to_server='include', from_server='include')
    sloumo: List[SlouMo] = Field([], to_server='include', from_server='include')
    regex_deleter: List[RegexDeleter] = Field([], to_server='include', from_server='include')

    # Callbacks scheduled (as asyncio tasks) every time save() runs.
    __on_save_listeners: typing.List[typing.Callable] = []

    def __enter__(self) -> "Database":
        # Context-manager support: `with db: ...` persists changes on exit.
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Save unconditionally, even when the block raised.
        self.save()

    @validator('tokens')
    def name_must_contain_space(cls, v):
        # NOTE(review): the validator name is misleading — it actually
        # rejects an empty token list, nothing about whitespace.
        if not v:
            raise DatabaseError(
                name='Нет токенов',
                description='Укажите токены в файле конфигурации'
            )
        return v

    @staticmethod
    def get_path() -> str:
        """Return the absolute path of the config file.

        On Windows, when const.USE_APP_DATA is set, the file lives under
        %APPDATA%/IDM/<CONFIG_PATH>; otherwise CONFIG_PATH is resolved
        relative to the current working directory.
        """
        if const.USE_APP_DATA:
            local_data_path = os.environ["APPDATA"]
            return os.path.abspath(
                os.path.join(
                    local_data_path,
                    "IDM",
                    const.CONFIG_PATH
                )
            )
        return os.path.abspath(const.CONFIG_PATH)

    @staticmethod
    def load() -> 'Database':
        """Parse the on-disk config file into a Database.

        Raises:
            DatabaseError: if the file is missing or contains no tokens.
        """
        path_to_file = Database.get_path()
        try:
            db = Database.parse_file(path_to_file)
        except FileNotFoundError:
            db = None
        if not db:
            raise DatabaseError(
                'Конфиг не найден',
                f"Конфиг не найден по пути {path_to_file}"
            )
        if not db.tokens:
            raise DatabaseError(
                'Нет токенов',
                f"Укажите токены в файле конфигурации по пути {path_to_file}"
            )
        return db

    @classmethod
    def add_on_save(cls, func):
        """Decorator: register a callback to be scheduled on every save().

        The callback is invoked via asyncio.create_task(func(db)), so it is
        expected to be a coroutine function.
        """
        cls.__on_save_listeners.append(func)
        return func

    def load_from_server(self):
        """Build a new Database from the server-side config.

        Local-only values (tokens, service prefixes, secret code) are kept
        from this instance; everything else comes from the server response.
        Uses a blocking (synchronous) API call.
        """
        from ..idm_api import IDMAPI
        new_config = IDMAPI.get_current().get_lp_info_sync(self.tokens[0])['config']
        new_database = {
            "tokens": self.tokens,
            "service_prefixes": self.service_prefixes,
            "secret_code": self.secret_code,
            **new_config
        }
        return Database.parse_obj(new_database)

    def get_to_server(self):
        """Serialize the model for upload, dropping every field whose
        ``to_server`` extra is ``'exclude'`` (and any key without field
        metadata)."""
        to_server = {}
        for key, value in json.loads(self.json()).items():
            try:
                field = self.__fields__[key]
                extra = field.field_info.extra
                if extra['to_server'] == 'exclude':
                    continue
                to_server[key] = value
            except KeyError:
                pass
        return to_server

    def save(self):
        """Write the config to disk as UTF-8 JSON and schedule every
        registered on-save listener.

        NOTE(review): asyncio.create_task requires a running event loop, so
        save() will raise if called outside async context — confirm callers.
        """
        path_to_file = Database.get_path()
        for __on_save_listener in self.__on_save_listeners:
            asyncio.create_task(__on_save_listener(self))
        with open(path_to_file, 'w', encoding='utf-8') as file:
            file.write(
                self.json(exclude={'__on_save_listeners'}, **{"ensure_ascii": False, "indent": 2})
            )
| 39.142857 | 110 | 0.643923 | import asyncio
import json
import os
import typing
from typing import List
from pydantic import BaseModel, validator, Field
from idm_lp import const
from . import (
Alias,
ChatEnterModel,
IgnoredMembers,
IgnoredGlobalMembers,
MutedMembers,
ContextInstanceMixin,
RegexDeleter,
RolePlayCommand,
TrustedUser,
SlouMo,
DatabaseError,
Timer
)
class Database(BaseModel, ContextInstanceMixin):
tokens: List[str] = Field([], to_server='exclude', from_server='exclude')
secret_code: str = Field("", to_server='exclude', from_server='include')
ru_captcha_key: typing.Optional[str] = Field("", to_server='exclude', from_server='include')
service_prefixes: List[str] = Field([".слп", "!слп"], to_server='exclude', from_server='exclude')
repeater_word: str = Field("..", to_server='include', from_server='include')
dd_prefix: str = Field("дд", to_server='include', from_server='include')
timers: typing.List[Timer] = Field([], to_server='include', from_server='include')
auto_infection: bool = Field(False, to_server='include', from_server='include')
auto_infection_interval: int = Field(3600, to_server='include', from_server='include')
auto_infection_peer_id: int = Field(-174105461, to_server='include', from_server='include')
auto_infection_argument: str = Field("р", to_server='include', from_server='include')
bio_reply: bool = Field(False, to_server='include', from_server='include')
repeater_active: bool = Field(False, to_server='include', from_server='include')
delete_all_notify: bool = Field(False, to_server='include', from_server='include')
auto_exit_from_chat: bool = Field(False, to_server='include', from_server='include')
auto_exit_from_chat_delete_chat: bool = Field(False, to_server='include', from_server='include')
auto_exit_from_chat_add_to_black_list: bool = Field(False, to_server='include', from_server='include')
disable_notifications: bool = Field(False, to_server='include', from_server='include')
nometa_enable: bool = Field(False, to_server='include', from_server='include')
nometa_message: str = Field("nometa.xyz", to_server='include', from_server='include')
nometa_attachments: List[str] = Field([], to_server='include', from_server='include')
nometa_delay: float = Field(5 * 60, to_server='include', from_server='include')
self_prefixes: List[str] = Field([".л", "!л"], to_server='include', from_server='include')
duty_prefixes: List[str] = Field([".лд", "!лд"], to_server='include', from_server='include')
ignored_members: List[IgnoredMembers] = Field([], to_server='include', from_server='include')
ignored_global_members: List[IgnoredGlobalMembers] = Field([], to_server='include', from_server='include')
muted_members: List[MutedMembers] = Field([], to_server='include', from_server='include')
aliases: List[Alias] = Field([], to_server='include', from_server='include')
role_play_commands: List[RolePlayCommand] = Field([], to_server='include', from_server='include')
trusted: List[TrustedUser] = Field([], to_server='include', from_server='include')
add_to_friends_on_chat_enter: List[ChatEnterModel] = Field([], to_server='include', from_server='include')
sloumo: List[SlouMo] = Field([], to_server='include', from_server='include')
regex_deleter: List[RegexDeleter] = Field([], to_server='include', from_server='include')
__on_save_listeners: typing.List[typing.Callable] = []
def __enter__(self) -> "Database":
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.save()
@validator('tokens')
def name_must_contain_space(cls, v):
if not v:
raise DatabaseError(
name='Нет токенов',
description='Укажите токены в файле конфигурации'
)
return v
@staticmethod
def get_path() -> str:
if const.USE_APP_DATA:
local_data_path = os.environ["APPDATA"]
return os.path.abspath(
os.path.join(
local_data_path,
"IDM",
const.CONFIG_PATH
)
)
return os.path.abspath(const.CONFIG_PATH)
@staticmethod
def load() -> 'Database':
path_to_file = Database.get_path()
try:
db = Database.parse_file(path_to_file)
except FileNotFoundError:
db = None
if not db:
raise DatabaseError(
'Конфиг не найден',
f"Конфиг не найден по пути {path_to_file}"
)
if not db.tokens:
raise DatabaseError(
'Нет токенов',
f"Укажите токены в файле конфигурации по пути {path_to_file}"
)
return db
@classmethod
def add_on_save(cls, func):
cls.__on_save_listeners.append(func)
return func
def load_from_server(self):
from ..idm_api import IDMAPI
new_config = IDMAPI.get_current().get_lp_info_sync(self.tokens[0])['config']
new_database = {
"tokens": self.tokens,
"service_prefixes": self.service_prefixes,
"secret_code": self.secret_code,
**new_config
}
return Database.parse_obj(new_database)
def get_to_server(self):
to_server = {}
for key, value in json.loads(self.json()).items():
try:
field = self.__fields__[key]
extra = field.field_info.extra
if extra['to_server'] == 'exclude':
continue
to_server[key] = value
except KeyError:
pass
return to_server
def save(self):
path_to_file = Database.get_path()
for __on_save_listener in self.__on_save_listeners:
asyncio.create_task(__on_save_listener(self))
with open(path_to_file, 'w', encoding='utf-8') as file:
file.write(
self.json(exclude={'__on_save_listeners'}, **{"ensure_ascii": False, "indent": 2})
)
| true | true |
1c3efe98da4095058df9a4d3135d99eecccd4a74 | 575 | py | Python | config/celery_app.py | Dm1tryD/estore_api | 1b944d2c3c47303e312581c3fc1a8af658eb3d06 | [
"MIT"
] | null | null | null | config/celery_app.py | Dm1tryD/estore_api | 1b944d2c3c47303e312581c3fc1a8af658eb3d06 | [
"MIT"
] | null | null | null | config/celery_app.py | Dm1tryD/estore_api | 1b944d2c3c47303e312581c3fc1a8af658eb3d06 | [
"MIT"
] | null | null | null | import os
from celery import Celery
# Django settings must be configured before the Celery app is created, since
# config_from_object below reads them.
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
app = Celery("estore_api")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
#   should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules (tasks.py) from all registered Django app configs.
app.autodiscover_tasks()
| 31.944444 | 72 | 0.782609 | import os
from celery import Celery
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
app = Celery("estore_api")
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
| true | true |
1c3efea36189ce256d13e4e745168bf3a12a3f29 | 779 | py | Python | Validation/CTPPS/python/simu_config/year_2018_postTS2_cff.py | rishabhCMS/cmssw | 77d83fe564dd8f598d0bb09da8388445d6f4126e | [
"Apache-2.0"
] | 1 | 2020-10-08T06:48:26.000Z | 2020-10-08T06:48:26.000Z | Validation/CTPPS/python/simu_config/year_2018_postTS2_cff.py | rishabhCMS/cmssw | 77d83fe564dd8f598d0bb09da8388445d6f4126e | [
"Apache-2.0"
] | null | null | null | Validation/CTPPS/python/simu_config/year_2018_postTS2_cff.py | rishabhCMS/cmssw | 77d83fe564dd8f598d0bb09da8388445d6f4126e | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
from Validation.CTPPS.simu_config.year_2018_cff import *
# alignment
from CalibPPS.ESProducers.ctppsRPAlignmentCorrectionsDataESSourceXML_cfi import *
alignmentFile = "Validation/CTPPS/alignment/2018_postTS2.xml"
ctppsRPAlignmentCorrectionsDataESSourceXML.MisalignedFiles = [alignmentFile]
ctppsRPAlignmentCorrectionsDataESSourceXML.RealFiles = [alignmentFile]
# timing resolution
ctppsDirectProtonSimulation.timeResolutionDiamonds45 = "2 * (-0.0031 * (x - 3) + 0.16)"
ctppsDirectProtonSimulation.timeResolutionDiamonds56 = "2 * ( (x<10)*(-0.0057*(x-10) + 0.110) + (x>=10)*(-0.0022*(x-10) + 0.110) )"
# xangle distribution
def UseCrossingAngleDistribution(process, f):
UseCrossingAngleHistgoram(process, f, "h_xangle_2018_postTS2")
| 43.277778 | 131 | 0.802311 | import FWCore.ParameterSet.Config as cms
from Validation.CTPPS.simu_config.year_2018_cff import *
from CalibPPS.ESProducers.ctppsRPAlignmentCorrectionsDataESSourceXML_cfi import *
alignmentFile = "Validation/CTPPS/alignment/2018_postTS2.xml"
ctppsRPAlignmentCorrectionsDataESSourceXML.MisalignedFiles = [alignmentFile]
ctppsRPAlignmentCorrectionsDataESSourceXML.RealFiles = [alignmentFile]
ctppsDirectProtonSimulation.timeResolutionDiamonds45 = "2 * (-0.0031 * (x - 3) + 0.16)"
ctppsDirectProtonSimulation.timeResolutionDiamonds56 = "2 * ( (x<10)*(-0.0057*(x-10) + 0.110) + (x>=10)*(-0.0022*(x-10) + 0.110) )"
def UseCrossingAngleDistribution(process, f):
UseCrossingAngleHistgoram(process, f, "h_xangle_2018_postTS2")
| true | true |
1c3f000beb71bb40696ed111f073504de119b2eb | 12,313 | py | Python | assignment3/cs231n/classifiers/rnn.py | kandluis/cs231n | 88afdbc37189f54803f361b9812f48843357349e | [
"MIT"
] | null | null | null | assignment3/cs231n/classifiers/rnn.py | kandluis/cs231n | 88afdbc37189f54803f361b9812f48843357349e | [
"MIT"
] | null | null | null | assignment3/cs231n/classifiers/rnn.py | kandluis/cs231n | 88afdbc37189f54803f361b9812f48843357349e | [
"MIT"
] | null | null | null | from builtins import range
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
    """
    A CaptioningRNN produces captions from image features using a recurrent
    neural network.

    The RNN receives input vectors of size D, has a vocab size of V, works on
    sequences of length T, has an RNN hidden dimension of H, uses word vectors
    of dimension W, and operates on minibatches of size N.

    Note that we don't use any regularization for the CaptioningRNN.
    """

    def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
                 hidden_dim=128, cell_type='rnn', dtype=np.float32):
        """
        Construct a new CaptioningRNN instance.

        Inputs:
        - word_to_idx: A dictionary giving the vocabulary. It contains V entries,
          and maps each string to a unique integer in the range [0, V).
        - input_dim: Dimension D of input image feature vectors.
        - wordvec_dim: Dimension W of word vectors.
        - hidden_dim: Dimension H for the hidden state of the RNN.
        - cell_type: What type of RNN to use; either 'rnn' or 'lstm'.
        - dtype: numpy datatype to use; use float32 for training and float64 for
          numeric gradient checking.

        Raises:
        - ValueError: if cell_type is neither 'rnn' nor 'lstm'.
        """
        if cell_type not in {'rnn', 'lstm'}:
            raise ValueError('Invalid cell_type "%s"' % cell_type)

        self.cell_type = cell_type
        self.dtype = dtype
        self.word_to_idx = word_to_idx
        self.idx_to_word = {i: w for w, i in word_to_idx.items()}
        self.params = {}

        vocab_size = len(word_to_idx)

        # Special-token indices; <START>/<END> may be absent from the vocab.
        self._null = word_to_idx['<NULL>']
        self._start = word_to_idx.get('<START>', None)
        self._end = word_to_idx.get('<END>', None)

        # Word embedding matrix, shape (V, W).
        self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
        self.params['W_embed'] /= 100

        # CNN feature -> initial hidden state projection, shape (D, H).
        self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
        self.params['W_proj'] /= np.sqrt(input_dim)
        self.params['b_proj'] = np.zeros(hidden_dim)

        # RNN weights; an LSTM packs its 4 gates into one matrix, hence dim_mul.
        dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
        self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
        self.params['Wx'] /= np.sqrt(wordvec_dim)
        self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
        self.params['Wh'] /= np.sqrt(hidden_dim)
        self.params['b'] = np.zeros(dim_mul * hidden_dim)

        # Hidden state -> vocabulary score projection, shape (H, V).
        self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
        self.params['W_vocab'] /= np.sqrt(hidden_dim)
        self.params['b_vocab'] = np.zeros(vocab_size)

        # Cast all parameters to the working dtype.
        for k, v in self.params.items():
            self.params[k] = v.astype(self.dtype)

    def loss(self, features, captions):
        """
        Compute training-time loss for the RNN. We input image features and
        ground-truth captions for those images, and use an RNN (or LSTM) to
        compute loss and gradients on all parameters.

        Inputs:
        - features: Input image features, of shape (N, D)
        - captions: Ground-truth captions; an integer array of shape (N, T) where
          each element is in the range 0 <= y[i, t] < V

        Returns a tuple of:
        - loss: Scalar loss
        - grads: Dictionary of gradients parallel to self.params
        """
        # captions_in feeds the RNN (starts with the <START> token); captions_out
        # is what the RNN should predict, i.e. captions shifted left by one step.
        captions_in = captions[:, :-1]
        captions_out = captions[:, 1:]

        # Ignore loss contributions at <NULL> padding positions.
        mask = (captions_out != self._null)

        W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
        W_embed = self.params['W_embed']
        Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
        W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']

        loss, grads = 0.0, {}

        # Select the full-sequence forward/backward pair once, up front.
        if self.cell_type == 'rnn':
            layer_forward_fn, layer_backward_fn = rnn_forward, rnn_backward
        elif self.cell_type == 'lstm':
            layer_forward_fn, layer_backward_fn = lstm_forward, lstm_backward
        else:
            raise ValueError('Invalid cell_type "%s"' % self.cell_type)

        # Forward pass: features -> h0 -> embeddings -> hidden states -> scores.
        h0 = np.dot(features, W_proj) + b_proj                        # (N, H)
        embedding, embedding_cache = word_embedding_forward(
            captions_in, W_embed)                                     # (N, T, W)
        hidden, layer_cache = layer_forward_fn(embedding, h0, Wx, Wh, b)  # (N, T, H)
        scores, affine_cache = temporal_affine_forward(
            hidden, W_vocab, b_vocab)                                 # (N, T, V)
        loss, dscores = temporal_softmax_loss(scores, captions_out, mask)

        # Backward pass, mirroring the forward pass in reverse order.
        dhidden, grads['W_vocab'], grads['b_vocab'] = temporal_affine_backward(
            dscores, affine_cache)
        dembedding, dh0, grads['Wx'], grads['Wh'], grads['b'] = layer_backward_fn(
            dhidden, layer_cache)
        grads['W_embed'] = word_embedding_backward(dembedding, embedding_cache)
        # Backprop through the initial affine projection by hand.
        grads['W_proj'] = np.dot(features.T, dh0)
        grads['b_proj'] = np.sum(dh0, axis=0)

        return loss, grads

    def sample(self, features, max_length=30):
        """
        Run a test-time forward pass for the model, sampling captions greedily.

        At each timestep we embed the current word, take one RNN/LSTM step to
        get the next hidden state, score all vocab words from that hidden
        state, and pick the argmax as the next word. The initial hidden state
        is an affine transform of the image features; the initial word is the
        <START> token. For LSTMs the initial cell state is zero.

        Inputs:
        - features: Array of input image features of shape (N, D).
        - max_length: Maximum length T of generated captions.

        Returns:
        - captions: Array of shape (N, max_length) giving sampled captions,
          where each element is an integer in the range [0, V). The first
          element of captions is the first sampled word, not the <START> token.
        """
        N = features.shape[0]
        captions = np.full((N, max_length), self._null, dtype=np.int32)

        W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
        W_embed = self.params['W_embed']
        Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
        W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']

        # Initial hidden state from the image features. Validate cell_type once,
        # outside the loop (previously re-checked on every timestep).
        hidden = np.dot(features, W_proj) + b_proj
        if self.cell_type == 'lstm':
            cell_state = np.zeros(hidden.shape)
        elif self.cell_type != 'rnn':
            raise ValueError('Invalid cell_type "%s"' % self.cell_type)

        # Every row starts from the <START> token.
        tokens = np.full(N, self._start, dtype=np.int32)
        for t in range(max_length):
            words, _ = word_embedding_forward(tokens, W_embed)
            if self.cell_type == 'rnn':
                hidden, _ = rnn_step_forward(words, hidden, Wx, Wh, b)
            else:  # lstm
                hidden, cell_state, _ = lstm_step_forward(
                    words, hidden, cell_state, Wx, Wh, b)
            scores = np.dot(hidden, W_vocab) + b_vocab
            # Greedy decoding: highest-scoring word at each step.
            tokens = np.argmax(scores, axis=1)
            captions[:, t] = tokens

        return captions
| 51.304167 | 84 | 0.56282 | from builtins import range
from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.rnn_layers import *
class CaptioningRNN(object):
def __init__(self, word_to_idx, input_dim=512, wordvec_dim=128,
hidden_dim=128, cell_type='rnn', dtype=np.float32):
if cell_type not in {'rnn', 'lstm'}:
raise ValueError('Invalid cell_type "%s"' % cell_type)
self.cell_type = cell_type
self.dtype = dtype
self.word_to_idx = word_to_idx
self.idx_to_word = {i: w for w, i in word_to_idx.items()}
self.params = {}
vocab_size = len(word_to_idx)
self._null = word_to_idx['<NULL>']
self._start = word_to_idx.get('<START>', None)
self._end = word_to_idx.get('<END>', None)
self.params['W_embed'] = np.random.randn(vocab_size, wordvec_dim)
self.params['W_embed'] /= 100
self.params['W_proj'] = np.random.randn(input_dim, hidden_dim)
self.params['W_proj'] /= np.sqrt(input_dim)
self.params['b_proj'] = np.zeros(hidden_dim)
dim_mul = {'lstm': 4, 'rnn': 1}[cell_type]
self.params['Wx'] = np.random.randn(wordvec_dim, dim_mul * hidden_dim)
self.params['Wx'] /= np.sqrt(wordvec_dim)
self.params['Wh'] = np.random.randn(hidden_dim, dim_mul * hidden_dim)
self.params['Wh'] /= np.sqrt(hidden_dim)
self.params['b'] = np.zeros(dim_mul * hidden_dim)
self.params['W_vocab'] = np.random.randn(hidden_dim, vocab_size)
self.params['W_vocab'] /= np.sqrt(hidden_dim)
self.params['b_vocab'] = np.zeros(vocab_size)
for k, v in self.params.items():
self.params[k] = v.astype(self.dtype)
def loss(self, features, captions):
captions_in = captions[:, :-1]
captions_out = captions[:, 1:]
mask = (captions_out != self._null)
# Weight and bias for the affine transform from image features to initial
# hidden state
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
# Word embedding matrix
W_embed = self.params['W_embed']
# Input-to-hidden, hidden-to-hidden, and biases for the RNN
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
# Weight and bias for the hidden-to-vocab transformation.
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
loss, grads = 0.0, {}
############################################################################
# TODO: Implement the forward and backward passes for the CaptioningRNN. #
# In the forward pass you will need to do the following: #
# (1) Use an affine transformation to compute the initial hidden state #
# from the image features. This should produce an array of shape (N, H)#
# (2) Use a word embedding layer to transform the words in captions_in #
# from indices to vectors, giving an array of shape (N, T, W). #
# (3) Use either a vanilla RNN or LSTM (depending on self.cell_type) to #
# process the sequence of input word vectors and produce hidden state #
# vectors for all timesteps, producing an array of shape (N, T, H). #
# (4) Use a (temporal) affine transformation to compute scores over the #
# vocabulary at every timestep using the hidden states, giving an #
# array of shape (N, T, V). #
# (5) Use (temporal) softmax to compute loss using captions_out, ignoring #
# the points where the output word is <NULL> using the mask above. #
# #
# In the backward pass you will need to compute the gradient of the loss #
# with respect to all model parameters. Use the loss and grads variables #
# defined above to store loss and gradients; grads[k] should give the #
# gradients for self.params[k]. #
############################################################################
h0 = np.dot(features, W_proj) + b_proj
embedding, embedding_cache = word_embedding_forward(captions_in, W_embed)
if self.cell_type == "rnn":
layer_forward_fn, layer_backward_fn = rnn_forward, rnn_backward
elif self.cell_type == "lstm":
layer_forward_fn, layer_backward_fn = lstm_forward, lstm_backward
else:
raise ValueError('Invalid cell_type "%s"' % self.cell_type)
hidden, layer_cache = layer_forward_fn(embedding, h0, Wx, Wh, b)
scores, affine_cache = temporal_affine_forward(hidden, W_vocab, b_vocab)
loss, dscores = temporal_softmax_loss(scores, captions_out, mask)
dhidden, grads['W_vocab'], grads['b_vocab'] = temporal_affine_backward(
dscores, affine_cache)
dembedding, dh0, grads['Wx'], grads['Wh'], grads['b'] = layer_backward_fn(
dhidden, layer_cache)
grads['W_embed'] = word_embedding_backward(dembedding, embedding_cache)
grads['W_proj'] = np.dot(features.T, dh0)
grads['b_proj'] = np.sum(dh0, axis=0)
############################################################################
# END OF YOUR CODE #
############################################################################
return loss, grads
def sample(self, features, max_length=30):
N = features.shape[0]
captions = self._null * np.ones((N, max_length), dtype=np.int32)
# Unpack parameters
W_proj, b_proj = self.params['W_proj'], self.params['b_proj']
W_embed = self.params['W_embed']
Wx, Wh, b = self.params['Wx'], self.params['Wh'], self.params['b']
W_vocab, b_vocab = self.params['W_vocab'], self.params['b_vocab']
###########################################################################
# TODO: Implement test-time sampling for the model. You will need to #
# initialize the hidden state of the RNN by applying the learned affine #
# transform to the input image features. The first word that you feed to #
# the RNN should be the <START> token; its value is stored in the #
# variable self._start. At each timestep you will need to do to: #
# (1) Embed the previous word using the learned word embeddings #
# (2) Make an RNN step using the previous hidden state and the embedded #
# current word to get the next hidden state. #
# (3) Apply the learned affine transformation to the next hidden state to #
# get scores for all words in the vocabulary #
# (4) Select the word with the highest score as the next word, writing it #
# to the appropriate slot in the captions variable #
# #
# For simplicity, you do not need to stop generating after an <END> token #
# is sampled, but you can if you want to. #
# #
# HINT: You will not be able to use the rnn_forward or lstm_forward #
# functions; you'll need to call rnn_step_forward or lstm_step_forward in
| true | true |
1c3f01950869cfecf6380046a2eb24ec7d0d2cea | 6,929 | py | Python | research/delf/delf/python/feature_io.py | hamediramin/ObjectDetectionAPI | 38638ce126ab708b1eb22a3cf40d4c7713cc535f | [
"Apache-2.0"
] | 3,326 | 2018-01-26T22:42:25.000Z | 2022-02-16T13:16:39.000Z | research/delf/delf/python/feature_io.py | lianlengyunyu/models | 984fbc754943c849c55a57923f4223099a1ff88c | [
"Apache-2.0"
] | 150 | 2017-08-28T14:59:36.000Z | 2022-03-11T23:21:35.000Z | research/delf/delf/python/feature_io.py | lianlengyunyu/models | 984fbc754943c849c55a57923f4223099a1ff88c | [
"Apache-2.0"
] | 1,474 | 2018-02-01T04:33:18.000Z | 2022-03-08T07:02:20.000Z | # Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python interface for DelfFeatures proto.
Support read and write of DelfFeatures from/to numpy arrays and file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from delf import feature_pb2
from delf import datum_io
import numpy as np
from six.moves import xrange
import tensorflow as tf
def ArraysToDelfFeatures(locations,
                         scales,
                         descriptors,
                         attention,
                         orientations=None):
  """Packs per-keypoint DELF arrays into a DelfFeatures proto.

  Args:
    locations: [N, 2] float array with the selected keypoint (y, x)
      locations; N is the number of features.
    scales: [N] float array with feature scales.
    descriptors: [N, depth] float array with DELF descriptors.
    attention: [N] float array with attention scores.
    orientations: [N] float array with orientations; when None, all
      orientations are set to zero.

  Returns:
    DelfFeatures proto holding the N features.
  """
  num_features = len(attention)
  # All per-feature arrays must agree on the feature count N.
  assert num_features == locations.shape[0]
  assert num_features == len(scales)
  assert num_features == descriptors.shape[0]

  if orientations is None:
    orientations = np.zeros([num_features], dtype=np.float32)
  else:
    assert num_features == len(orientations)

  delf_features = feature_pb2.DelfFeatures()
  for i in range(num_features):
    feature = delf_features.feature.add()
    feature.y = locations[i, 0]
    feature.x = locations[i, 1]
    feature.scale = scales[i]
    feature.orientation = orientations[i]
    feature.strength = attention[i]
    feature.descriptor.CopyFrom(datum_io.ArrayToDatum(descriptors[i,]))
  return delf_features
def DelfFeaturesToArrays(delf_features):
  """Converts data saved in DelfFeatures to numpy arrays.

  If there are no features, the function returns five empty arrays.

  Args:
    delf_features: DelfFeatures object.

  Returns:
    locations: [N, 2] float array which denotes the selected keypoint
      locations. N is the number of features.
    scales: [N] float array with feature scales.
    descriptors: [N, depth] float array with DELF descriptors.
    attention: [N] float array with attention scores.
    orientations: [N] float array with orientations.
  """
  num_features = len(delf_features.feature)
  if num_features == 0:
    # Bug fix: previously only four empty arrays were returned here, while
    # the normal path (and the documented contract of callers such as
    # ParseFromString/ReadFromFile) yields a five-tuple, so unpacking the
    # result crashed on empty input. Return five empties for consistency.
    return (np.array([]), np.array([]), np.array([]), np.array([]),
            np.array([]))

  # Figure out descriptor dimensionality by parsing the first one.
  descriptor_dim = len(
      datum_io.DatumToArray(delf_features.feature[0].descriptor))
  locations = np.zeros([num_features, 2])
  scales = np.zeros([num_features])
  descriptors = np.zeros([num_features, descriptor_dim])
  attention = np.zeros([num_features])
  orientations = np.zeros([num_features])

  for i, delf_feature in enumerate(delf_features.feature):
    locations[i, 0] = delf_feature.y
    locations[i, 1] = delf_feature.x
    scales[i] = delf_feature.scale
    descriptors[i,] = datum_io.DatumToArray(delf_feature.descriptor)
    attention[i] = delf_feature.strength
    orientations[i] = delf_feature.orientation

  return locations, scales, descriptors, attention, orientations
def SerializeToString(locations,
                      scales,
                      descriptors,
                      attention,
                      orientations=None):
  """Converts numpy arrays to serialized DelfFeatures.

  Args:
    locations: [N, 2] float array with the selected keypoint locations;
      N is the number of features.
    scales: [N] float array with feature scales.
    descriptors: [N, depth] float array with DELF descriptors.
    attention: [N] float array with attention scores.
    orientations: [N] float array with orientations; when None, all
      orientations are set to zero.

  Returns:
    Serialized DelfFeatures string.
  """
  # Build the proto, then delegate serialization to protobuf.
  proto = ArraysToDelfFeatures(locations, scales, descriptors, attention,
                               orientations)
  return proto.SerializeToString()
def ParseFromString(string):
  """Converts serialized DelfFeatures string to numpy arrays.

  Args:
    string: Serialized DelfFeatures string.

  Returns:
    locations: [N, 2] float array with the selected keypoint locations;
      N is the number of features.
    scales: [N] float array with feature scales.
    descriptors: [N, depth] float array with DELF descriptors.
    attention: [N] float array with attention scores.
    orientations: [N] float array with orientations.
  """
  parsed = feature_pb2.DelfFeatures()
  parsed.ParseFromString(string)
  return DelfFeaturesToArrays(parsed)
def ReadFromFile(file_path):
  """Helper function to load data from a DelfFeatures format in a file.

  Args:
    file_path: Path to file containing data.

  Returns:
    locations: [N, 2] float array with the selected keypoint locations;
      N is the number of features.
    scales: [N] float array with feature scales.
    descriptors: [N, depth] float array with DELF descriptors.
    attention: [N] float array with attention scores.
    orientations: [N] float array with orientations.
  """
  # Read the full serialized payload, then parse it into arrays.
  with tf.gfile.FastGFile(file_path, 'r') as f:
    serialized = f.read()
  return ParseFromString(serialized)
def WriteToFile(file_path,
                locations,
                scales,
                descriptors,
                attention,
                orientations=None):
  """Helper function to write data to a file in DelfFeatures format.

  Args:
    file_path: Path to file that will be written.
    locations: [N, 2] float array with the selected keypoint locations;
      N is the number of features.
    scales: [N] float array with feature scales.
    descriptors: [N, depth] float array with DELF descriptors.
    attention: [N] float array with attention scores.
    orientations: [N] float array with orientations; when None, all
      orientations are set to zero.
  """
  # Serialize first so any conversion error surfaces before the file is opened.
  payload = SerializeToString(locations, scales, descriptors, attention,
                              orientations)
  with tf.gfile.FastGFile(file_path, 'w') as f:
    f.write(payload)
| 34.994949 | 80 | 0.693895 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from delf import feature_pb2
from delf import datum_io
import numpy as np
from six.moves import xrange
import tensorflow as tf
def ArraysToDelfFeatures(locations,
scales,
descriptors,
attention,
orientations=None):
num_features = len(attention)
assert num_features == locations.shape[0]
assert num_features == len(scales)
assert num_features == descriptors.shape[0]
if orientations is None:
orientations = np.zeros([num_features], dtype=np.float32)
else:
assert num_features == len(orientations)
delf_features = feature_pb2.DelfFeatures()
for i in xrange(num_features):
delf_feature = delf_features.feature.add()
delf_feature.y = locations[i, 0]
delf_feature.x = locations[i, 1]
delf_feature.scale = scales[i]
delf_feature.orientation = orientations[i]
delf_feature.strength = attention[i]
delf_feature.descriptor.CopyFrom(datum_io.ArrayToDatum(descriptors[i,]))
return delf_features
def DelfFeaturesToArrays(delf_features):
num_features = len(delf_features.feature)
if num_features == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
descriptor_dim = len(
datum_io.DatumToArray(delf_features.feature[0].descriptor))
locations = np.zeros([num_features, 2])
scales = np.zeros([num_features])
descriptors = np.zeros([num_features, descriptor_dim])
attention = np.zeros([num_features])
orientations = np.zeros([num_features])
for i in xrange(num_features):
delf_feature = delf_features.feature[i]
locations[i, 0] = delf_feature.y
locations[i, 1] = delf_feature.x
scales[i] = delf_feature.scale
descriptors[i,] = datum_io.DatumToArray(delf_feature.descriptor)
attention[i] = delf_feature.strength
orientations[i] = delf_feature.orientation
return locations, scales, descriptors, attention, orientations
def SerializeToString(locations,
scales,
descriptors,
attention,
orientations=None):
delf_features = ArraysToDelfFeatures(locations, scales, descriptors,
attention, orientations)
return delf_features.SerializeToString()
def ParseFromString(string):
delf_features = feature_pb2.DelfFeatures()
delf_features.ParseFromString(string)
return DelfFeaturesToArrays(delf_features)
def ReadFromFile(file_path):
with tf.gfile.FastGFile(file_path, 'r') as f:
return ParseFromString(f.read())
def WriteToFile(file_path,
locations,
scales,
descriptors,
attention,
orientations=None):
serialized_data = SerializeToString(locations, scales, descriptors, attention,
orientations)
with tf.gfile.FastGFile(file_path, 'w') as f:
f.write(serialized_data)
| true | true |
1c3f0197b9aad6e08ff29278749e9495d7b8f1f4 | 846 | py | Python | apis/alembic/versions/d10a9ad9f863_add_restart_number_for_deploy_.py | iii-org/devops-system | 71f938c9e225ac24ab9102a8221dc5341a01889c | [
"Apache-2.0"
] | 4 | 2021-07-15T15:59:01.000Z | 2022-02-24T02:58:52.000Z | apis/alembic/versions/d10a9ad9f863_add_restart_number_for_deploy_.py | iii-org/devops-system | 71f938c9e225ac24ab9102a8221dc5341a01889c | [
"Apache-2.0"
] | 4 | 2020-06-12T04:05:46.000Z | 2021-11-09T03:53:13.000Z | apis/alembic/versions/d10a9ad9f863_add_restart_number_for_deploy_.py | iii-org/devops-system | 71f938c9e225ac24ab9102a8221dc5341a01889c | [
"Apache-2.0"
] | 2 | 2020-09-29T05:39:28.000Z | 2021-11-26T09:52:17.000Z | """add restart number for deploy application
Revision ID: d10a9ad9f863
Revises: 90a8f40d4f2c
Create Date: 2021-09-06 10:33:18.670376
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd10a9ad9f863'
down_revision = '90a8f40d4f2c'
branch_labels = None
depends_on = None
def upgrade():
    """Add restart-tracking columns (restart_number, restarted_at) to application."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column in (
        sa.Column('restart_number', sa.Integer(), nullable=True),
        sa.Column('restarted_at', sa.DateTime(), nullable=True),
    ):
        op.add_column('application', column)
    # ### end Alembic commands ###
def downgrade():
    """Drop the restart-tracking columns added by this revision."""
    # ### commands auto generated by Alembic - please adjust! ###
    for column_name in ('restarted_at', 'restart_number'):
        op.drop_column('application', column_name)
    # ### end Alembic commands ###
| 27.290323 | 90 | 0.710402 | from alembic import op
import sqlalchemy as sa
revision = 'd10a9ad9f863'
down_revision = '90a8f40d4f2c'
branch_labels = None
depends_on = None
def upgrade():
)
| true | true |
1c3f024bb5c2bb49fd2bbfa7cfe9f781f205d339 | 47,928 | py | Python | Algorithm.Python/PL_Stat6_fx/hp3.py | pasztorlacos/Lean | ca204c07d9bb390f853eb2f3da0ebc08150fef36 | [
"Apache-2.0"
] | null | null | null | Algorithm.Python/PL_Stat6_fx/hp3.py | pasztorlacos/Lean | ca204c07d9bb390f853eb2f3da0ebc08150fef36 | [
"Apache-2.0"
] | null | null | null | Algorithm.Python/PL_Stat6_fx/hp3.py | pasztorlacos/Lean | ca204c07d9bb390f853eb2f3da0ebc08150fef36 | [
"Apache-2.0"
] | null | null | null | ### <summary>
### Helpers
###
### </summary>
from QuantConnect.Orders import *
from QuantConnect.Orders.Fills import *
from QuantConnect.Orders.Fees import *
import tensorflow as tf
from QuantConnect.Orders import OrderStatus
from QuantConnect import Resolution, SecurityType
#import math
from math import log
#import random
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import tensorflow
import json
import pickle
import codecs
import tempfile
import io
import torch
import operator
#from pm3 import MyPositionManager
#from pmB3 import MyPositionManagerB
from var3 import MyVaR
class MyHelpers:
'''
Commonly used functionality
'''
file = __file__
'''
SYMBOL LISTS
'''
#a) For quick Equity Debug (AAPL R735QTJ8XC9X)
# ["AAPL" ,"AES", "WMT"]
#b) DOW30 (29 excl. DOW) and 1/2 and 1/2
# ["IBM", "MSFT", "XOM", "MMM", "CVX", "PG", "GS", "HD", "CSCO", "INTC", "PFE", "WBA", "V", "WMT", "UTX", "MCD", "JPM", "NKE", "VZ", "KO", "DIS", "JNJ", "AAPL", "UNH", "MRK", "TRV", "CAT", "AXP", "BA"]
# ["IBM", "MSFT", "XOM", "MMM", "CVX", "PG", "GS", "HD", "CSCO", "INTC", "PFE", "WBA", "V", "WMT", "UTX"]
# ["MCD", "JPM","NKE", "VZ", "KO", "DIS", "JNJ", "AAPL", "UNH", "MRK", "TRV", "CAT", "AXP", "BA"]
#c) SP100 (100) and 1/2 and 1/2
# ["AAPL", "ABBV", "ABT", "ACN", "ADBE", "AGN", "AIG", "ALL", "AMGN", "AMZN", "AXP", "BA", "BAC", "BIIB", "BK", "BKNG", "BLK", "BMY", "BRK.B", "C", "CAT", "CELG", "CHTR", "CL", "CMCSA", "COF", "COP", "COST", "CSCO", "CVS", "CVX", "DD", "DHR", "DIS", "DUK", "EMR", "EXC", "F", "FB", "FDX", "GD", "GE", "GILD", "GM", "GOOG", "GOOGL", "GS", "HD", "HON", "IBM", "INTC", "JNJ", "JPM", "KHC", "KMI", "KO", "LLY", "LMT", "LOW", "MA", "MCD", "MDLZ", "MDT", "MET", "MMM", "MO", "MRK", "MS", "MSFT", "NEE", "NFLX", "NKE", "NVDA", "ORCL", "OXY", "PEP", "PFE", "PG", "PM", "PYPL", "QCOM", "RTN", "SBUX", "SLB", "SO", "SPG", "T", "TGT", "TXN", "UNH", "UNP", "UPS", "USB", "UTX", "V", "VZ", "WBA", "WFC", "WMT", "XOM"]
# ["AAPL", "ABBV", "ABT", "ACN", "ADBE", "AGN", "AIG", "ALL", "AMGN", "AMZN", "AXP", "BA", "BAC", "BIIB", "BK", "BKNG", "BLK", "BMY", "BRK.B", "C", "CAT", "CELG", "CHTR", "CL", "CMCSA", "COF", "COP", "COST", "CSCO", "CVS", "CVX", "DD", "DHR"]
# ["DIS", "DUK", "EMR", "EXC", "F", "FB", "FDX", "GD", "GE", "GILD", "GM", "GOOG", "GS", "HD", "HON", "IBM", "INTC", "JNJ", "JPM", "KHC", "KMI", "KO", "LLY", "LMT", "LOW", "MA", "MCD", "MDLZ", "MDT", "MET", "MMM", "MO", "MRK", "MS", "MSFT", "NEE", "NFLX", "NKE", "NVDA", "ORCL", "OXY", "PEP", "PFE", "PG", "PM", "PYPL", "QCOM", "RTN", "SBUX", "SLB", "SO", "SPG", "T", "TGT", "TXN", "UNH", "UNP", "UPS", "USB", "UTX", "V", "VZ", "WBA", "WFC", "WMT", "XOM"]
#d) NQ100 (107)
# ["ATVI", "ADBE", "AMD", "ALXN", "ALGN", "GOOG", "AMZN", "AAL", "AMGN", "ADI", "AAPL", "AMAT", "ASML", "ADSK", "ADP", "BIDU", "BIIB", "BMRN", "BKNG", "AVGO", "CDNS", "CELG", "CERN", "CHTR", "CHKP", "CTAS", "CSCO", "CTXS", "CTSH", "CMCSA", "COST", "CSX", "CTRP", "DLTR", "EBAY", "EA", "EXPE", "FB", "FAST", "FISV", "FOX", "FOXA", "GILD", "HAS", "HSIC", "IDXX", "ILMN", "INCY", "INTC", "INTU", "ISRG", "JBHT", "JD", "KLAC", "LRCX", "LBTYA", "LBTYK", "LULU", "MAR", "MXIM", "MELI", "MCHP", "MU", "MSFT", "MDLZ", "MNST", "MYL", "NTAP", "NTES", "NFLX", "NVDA", "NXPI", "ORLY", "PCAR", "PAYX", "PYPL", "PEP", "QCOM", "REGN", "ROST", "SIRI", "SWKS", "SBUX", "SYMC", "SNPS", "TMUS", "TTWO", "TSLA", "TXN", "KHC", "ULTA", "UAL", "VRSN", "VRSK", "VRTX", "WBA", "WDAY", "WDC", "WLTW", "WYNN", "XEL", "XLNX", "STX", "TSLA", "VRSK", "WYNN", "XLNX"]
#e) SP&NQ (180) and 1/2 and 1/2
# ["AAPL", "ABBV", "ABT", "ACN", "ADBE", "AGN", "AIG", "ALL", "AMGN", "AMZN", "AXP", "BA", "BAC", "BIIB", "BK", "BKNG", "BLK", "BMY", "BRK.B", "C", "CAT", "CELG", "CHTR", "CL", "CMCSA", "COF", "COP", "COST", "CSCO", "CVS", "CVX", "DD", "DHR", "DIS", "DUK", "EMR", "EXC", "F", "FB", "FDX", "GD", "GE", "GILD", "GM", "GOOG", "GS", "HD", "HON", "IBM", "INTC", "JNJ", "JPM", "KHC", "KMI", "KO", "LLY", "LMT", "LOW", "MA", "MCD", "MDLZ", "MDT", "MET", "MMM", "MO", "MRK", "MS", "MSFT", "NEE", "NFLX", "NKE", "NVDA", "ORCL", "OXY", "PEP", "PFE", "PG", "PM", "PYPL", "QCOM", "RTN", "SBUX", "SLB", "SO", "SPG", "T", "TGT", "TXN", "UNH", "UNP", "UPS", "USB", "UTX", "V", "VZ", "WBA", "WFC", "WMT", "XOM", "ATVI", "AMD", "ALXN", "ALGN", "AAL", "ADI", "AMAT", "ASML", "ADSK", "ADP", "BIDU", "BMRN", "AVGO", "CDNS", "CERN", "CHKP", "CTAS", "CTXS", "CTSH", "CSX", "CTRP", "DLTR", "EBAY", "EA", "EXPE", "FAST", "FISV", "FOX", "FOXA", "HAS", "HSIC", "IDXX", "ILMN", "INCY", "INTU", "ISRG", "JBHT", "JD", "KLAC", "LRCX", "LBTYA", "LBTYK", "LULU", "MAR", "MXIM", "MELI", "MCHP", "MU", "MNST", "MYL", "NTAP", "NTES", "NXPI", "ORLY", "PCAR", "PAYX", "REGN", "ROST", "SIRI", "SWKS", "SYMC", "SNPS", "TMUS", "TTWO", "TSLA", "ULTA", "UAL", "VRSN", "VRSK", "VRTX", "WDAY", "WDC", "WLTW", "WYNN", "XEL", "XLNX", "STX", "TSLA", "VRSK", "WYNN", "XLNX"]
# ["AAPL", "ABBV", "ABT", "ACN", "ADBE", "AGN", "AIG", "ALL", "AMGN", "AMZN", "AXP", "BA", "BAC", "BIIB", "BK", "BKNG", "BLK", "BMY", "BRK.B", "C", "CAT", "CELG", "CHTR", "CL", "CMCSA", "COF", "COP", "COST", "CSCO", "CVS", "CVX", "DD", "DHR", "DIS", "DOW", "DUK", "EMR", "EXC", "F", "FB", "FDX", "GD", "GE", "GILD", "GM", "GOOG", "GOOGL", "GS", "HD", "HON", "IBM", "INTC", "JNJ", "JPM", "KHC", "KMI", "KO", "LLY", "LMT", "LOW", "MA", "MCD", "MDLZ", "MDT", "MET", "MMM", "MO", "MRK", "MS", "MSFT", "NEE", "NFLX", "NKE", "NVDA", "ORCL", "OXY", "PEP", "PFE", "PG", "PM", "PYPL", "QCOM", "RTN", "SBUX", "SLB", "SO", "SPG", "T", "TGT", "TXN", "UNH", "UNP"]
# ["UPS", "USB", "UTX", "V", "VZ", "WBA", "WFC", "WMT", "XOM", "ATVI", "AMD", "ALXN", "ALGN", "AAL", "ADI", "AMAT", "ASML", "ADSK", "ADP", "BIDU", "BMRN", "AVGO", "CDNS", "CERN", "CHKP", "CTAS", "CTXS", "CTSH", "CSX", "CTRP", "DLTR", "EBAY", "EA", "EXPE", "FAST", "FISV", "FOX", "FOXA", "HAS", "HSIC", "IDXX", "ILMN", "INCY", "INTU", "ISRG", "JBHT", "JD", "KLAC", "LRCX", "LBTYA", "LBTYK", "LULU", "MAR", "MXIM", "MELI", "MCHP", "MU", "MNST", "MYL", "NTAP", "NTES", "NXPI", "ORLY", "PCAR", "PAYX", "REGN", "ROST", "SIRI", "SWKS", "SYMC", "SNPS", "TMUS", "TTWO", "TSLA", "ULTA", "UAL", "VRSN", "VRSK", "VRTX", "WDAY", "WDC", "WLTW", "WYNN", "XEL", "XLNX", "STX", "TSLA", "VRSK", "WYNN", "XLNX"]
# SP500-ES&NQ/100_1
# ["CRM", "TMO", "LIN", "AMT", "FIS", "CME", "CB", "BDX", "SYK", "TJX", "ANTM", "SPGI", "NOC", "D", "CCI", "ZTS", "BSX", "PNC", "CI", "PLD", "ECL", "ICE", "MMC", "DE", "APD", "KMB", "LHX", "EQIX", "WM", "NSC", "AON", "EW", "SCHW", "EL", "AEP", "ITW", "PGR", "EOG", "SHW", "BAX", "PSX", "DG", "PSA", "SRE", "TRV", "ROP", "HUM", "AFL", "WELL", "BBT", "YUM", "MCO", "SYY", "DAL", "STZ", "JCI", "ETN", "NEM", "PRU", "MPC", "HCA", "GIS", "VLO", "EQR", "TEL", "TWTR", "PEG", "WEC", "MSI", "SBAC", "AVB", "OKE", "IR", "ED", "WMB", "ZBH", "AZO", "HPQ", "VTR", "VFC", "TSN", "STI", "HLT", "BLL", "APH", "MCK", "TROW", "PPG", "DFS", "GPN", "ES", "TDG", "FLT", "LUV", "DLR", "EIX", "IQV", "DTE", "INFO", "O"]
# SP500-ES&NQ/100_2
# ["FE", "AWK", "A", "CTVA", "HSY", "TSS", "GLW", "APTV", "CMI", "ETR", "PPL", "HIG", "PH", "ADM", "ESS", "FTV", "PXD", "LYB", "SYF", "CMG", "CLX", "SWK", "MTB", "MKC", "MSCI", "RMD", "BXP", "CHD", "AME", "WY", "RSG", "STT", "FITB", "KR", "CNC", "NTRS", "AEE", "VMC", "HPE", "KEYS", "ROK", "CMS", "RCL", "EFX", "ANSS", "CCL", "AMP", "CINF", "TFX", "ARE", "OMC", "HCP", "DHI", "LH", "KEY", "AJG", "MTD", "COO", "CBRE", "HAL", "EVRG", "AMCR", "MLM", "HES", "K", "EXR", "CFG", "IP", "CPRT", "FANG", "BR", "CBS", "NUE", "DRI", "FRC", "MKTX", "BBY", "LEN", "WAT", "RF", "AKAM", "CXO", "MAA", "MGM", "CE", "HBAN", "CAG", "CNP", "KMX", "PFG", "XYL", "DGX", "WCG", "UDR", "DOV", "CBOE", "FCX", "HOLX", "GPC", "L"]
# FX (16)
# ["EURUSD", "GBPUSD", "AUDUSD", "NZDUSD", "USDJPY", "USDCHF", "USDCAD", "USDCNH", "EURJPY", "EURSEK", "EURNOK","USDMXN", "USDZAR", "USDSEK", "USDNOK", "EURHUF"]
#PiTrading_All
#["A", "AA", "AABA", "AAL", "AAXN", "ABBV", "ACIA", "ADM", "ADT", "AIG", "AKAM", "AKS", "ALLY", "ALTR", "AMAT", "AMC", "AMCX", "AMD", "AMGN", "AMZN", "AN", "ANF", "ANTM", "AOBC", "APO", "APRN", "ARLO", "ATUS", "ATVI", "AUY", "AVGO", "AVTR", "AWK", "BABA", "BAC", "BAH", "BB", "BBBY", "BBH", "BBY", "BIDU", "BJ", "BKNG", "BLK", "BOX", "BP", "BRK-B", "BSX", "BTU", "BURL", "BX", "BYND", "C", "CAKE", "CARS", "CBOE", "CCJ", "CDLX", "CELG", "CHK", "CHWY", "CIEN", "CLDR", "CLF", "CLNE", "CMCSA", "CME", "CMG", "CMI", "CNDT", "COP", "COST", "COUP", "CPB", "CREE", "CRM", "CRSP", "CRUS", "CRWD", "CSX", "CTRP", "CTSH", "CVS", "DBI", "DBX", "DD", "DE", "DECK", "DELL", "DG", "DIA", "DKS", "DLTR", "DNKN", "DNN", "DO", "DOCU", "DRYS", "DT", "DUK", "EA", "EBAY", "EEM", "ELAN", "EOG", "EQT", "ESTC", "ET", "ETFC", "ETRN", "ETSY", "EWJ", "EXC", "F", "FANG", "FAS", "FAZ", "FB", "FCX", "FDX", "FEYE", "FISV", "FIT", "FIVE", "FLR", "FLT", "FMCC", "FNMA", "FSCT", "FSLR", "FTCH", "FXE", "FXI", "GDDY", "GDX", "GE", "GH", "GLBR", "GLD", "GLW", "GM", "GME", "GNRC", "GOLD", "GOOGL", "GOOS", "GPRO", "GPS", "GRPN", "GRUB", "GSK", "GSKY", "HAL", "HCA", "HCAT", "HIG", "HLF", "HLT", "HOG", "HON", "HPE", "HPQ", "HRI", "HTZ", "IBKR", "ICE", "INFO", "INMD", "IQ", "IQV", "ISRG", "IWM", "IYR", "JBLU", "JCP", "JMIA", "JNPR", "KBR", "KLAC", "KMI", "KMX", "KNX", "KSS", "LC", "LEVI", "LHCG", "LLY", "LN", "LOW", "LULU", "LVS", "LYFT", "MA", "MDLZ", "MDR", "MDY", "MGM", "MLCO", "MNK", "MO", "MOMO", "MRNA", "MRVL", "MS", "MSI", "MU", "MXIM", "NAVI", "NEM", "NET", "NFLX", "NIO", "NOK", "NOV", "NOW", "NTNX", "NTR", "NUAN", "NUE", "NVDA", "NVR", "NVS", "NWSA", "NXPI", "OAS", "OIH", "OKTA", "OPRA", "ORCL", "OXY", "PANW", "PAYX", "PBR", "PCG", "PDD", "PE", "PEP", "PHM", "PINS", "PIR", "PM", "PPH", "PRGO", "PS", "PSTG", "PTON", "PVTL", "PYPL", "QCOM", "QQQ", "QRTEA", "QRVO", "RACE", "RAD", "REEMF", "RGR", "RIG", "RIO", "RMBS", "ROKU", "RRC", "RSX", "RTH", "S", "SAVE", "SBUX", "SCCO", "SCHW", "SD", "SDC", "SDS", "SHAK", 
"SHLDQ", "SHOP", "SINA", "SIRI", "SLB", "SLV", "SMH", "SNAP", "SOHU", "SONO", "SPLK", "SPOT", "SPY", "SQ", "STNE", "STX", "SU", "SWAV", "SWCH", "SWI", "SWN", "SYMC", "T", "TAL", "TDC", "TEVA", "TGT", "TIF", "TLRY", "TLT", "TM", "TME", "TNA", "TOL", "TPR", "TPTX", "TRU", "TRUE", "TSLA", "TTD", "TW", "TWLO", "TWTR", "TXN", "TZA", "UAA", "UBER", "UNG", "UPS", "UPWK", "USFD", "USO", "UUUU", "VICI", "VLO", "VMW", "VRSN", "VVV", "VXX", "W", "WB", "WDAY", "WDC", "WFC", "WFTIQ", "WHR", "WORK", "WYNN", "X", "XLC", "XLE", "XLF", "XLU", "XLV", "YELP", "YETI", "YNDX", "YRD", "YUM", "YUMC", "ZAYO", "ZEUS", "ZG", "ZM", "ZNGA", "ZS", "ZUO"]
#PiTrading_1
#["A", "AA", "AABA", "AAL", "AAXN", "ABBV", "ACIA", "ADM", "ADT", "AIG", "AKAM", "AKS", "ALLY", "ALTR", "AMAT", "AMC", "AMCX", "AMD", "AMGN", "AMZN", "AN", "ANF", "ANTM", "AOBC", "APO", "APRN", "ARLO", "ATUS", "ATVI", "AUY", "AVGO", "AVTR", "AWK", "BABA", "BAC", "BAH", "BB", "BBBY", "BBH", "BBY", "BIDU", "BJ", "BKNG", "BLK", "BOX", "BP", "BRK-B", "BSX", "BTU", "BURL", "BX", "BYND", "C", "CAKE", "CARS", "CBOE", "CCJ", "CDLX", "CELG", "CHK", "CHWY", "CIEN", "CLDR", "CLF", "CLNE", "CMCSA", "CME", "CMG", "CMI", "CNDT", "COP", "COST", "COUP", "CPB", "CREE", "CRM", "CRSP", "CRUS", "CRWD", "CSX", "CTRP", "CTSH", "CVS", "DBI", "DBX", "DD", "DE", "DECK", "DELL", "DG", "DIA", "DKS", "DLTR", "DNKN", "DNN", "DO", "DOCU", "DRYS", "DT", "DUK", "EA", "EBAY", "EEM", "ELAN", "EOG", "EQT", "ESTC", "ET", "ETFC", "ETRN", "ETSY", "EWJ", "EXC", "F", "FANG", "FAS", "FAZ", "FB", "FCX", "FDX", "FEYE", "FISV", "FIT", "FIVE", "FLR", "FLT", "FMCC", "FNMA", "FSCT", "FSLR", "FTCH", "FXE", "FXI", "GDDY", "GDX", "GE", "GH", "GLBR", "GLD", "GLW", "GM", "GME", "GNRC", "GOLD", "GOOGL", "GOOS", "GPRO", "GPS", "GRPN", "GRUB", "GSK", "GSKY", "HAL", "HCA", "HCAT", "HIG", "HLF", "HLT", "HOG", "HON", "HPE", "HPQ", "HRI", "HTZ", "IBKR", "ICE", "INFO", "INMD", "IQ", "IQV", "ISRG", "IWM", "IYR", "JBLU", "JCP", "JMIA", "JNPR", "KBR", "KLAC", "KMI"]
#PiTrading_2
#["KMX", "KNX", "KSS", "LC", "LEVI", "LHCG", "LLY", "LN", "LOW", "LULU", "LVS", "LYFT", "MA", "MDLZ", "MDR", "MDY", "MGM", "MLCO", "MNK", "MO", "MOMO", "MRNA", "MRVL", "MS", "MSI", "MU", "MXIM", "NAVI", "NEM", "NET", "NFLX", "NIO", "NOK", "NOV", "NOW", "NTNX", "NTR", "NUAN", "NUE", "NVDA", "NVR", "NVS", "NWSA", "NXPI", "OAS", "OIH", "OKTA", "OPRA", "ORCL", "OXY", "PANW", "PAYX", "PBR", "PCG", "PDD", "PE", "PEP", "PHM", "PINS", "PIR", "PM", "PPH", "PRGO", "PS", "PSTG", "PTON", "PVTL", "PYPL", "QCOM", "QQQ", "QRTEA", "QRVO", "RACE", "RAD", "REEMF", "RGR", "RIG", "RIO", "RMBS", "ROKU", "RRC", "RSX", "RTH", "S", "SAVE", "SBUX", "SCCO", "SCHW", "SD", "SDC", "SDS", "SHAK", "SHLDQ", "SHOP", "SINA", "SIRI", "SLB", "SLV", "SMH", "SNAP", "SOHU", "SONO", "SPLK", "SPOT", "SPY", "SQ", "STNE", "STX", "SU", "SWAV", "SWCH", "SWI", "SWN", "SYMC", "T", "TAL", "TDC", "TEVA", "TGT", "TIF", "TLRY", "TLT", "TM", "TME", "TNA", "TOL", "TPR", "TPTX", "TRU", "TRUE", "TSLA", "TTD", "TW", "TWLO", "TWTR", "TXN", "TZA", "UAA", "UBER", "UNG", "UPS", "UPWK", "USFD", "USO", "UUUU", "VICI", "VLO", "VMW", "VRSN", "VVV", "VXX", "W", "WB", "WDAY", "WDC", "WFC", "WFTIQ", "WHR", "WORK", "WYNN", "X", "XLC", "XLE", "XLF", "XLU", "XLV", "YELP", "YETI", "YNDX", "YRD", "YUM", "YUMC", "ZAYO", "ZEUS", "ZG", "ZM", "ZNGA", "ZS", "ZUO"]
#BenchMarks
#IWV iShares Russell 3000 ETF
#IWB Russell 1000: 1,000 large-cap American companies in the Russell 3000 Index
#IWM Russell 2000: 2,000 smallest-cap American companies in the Russell 3000 Index
'''Global Variables
'''
# Class-level counter of symbols registered via AddSymbolDict; reported in
# the "STRATEGY INSTALLED" debug message.
_totalSymbolsAdded = 0
def __init__(self, caller):
    # Shorthand to the class object so class-level attributes can be read as
    # self.CL.<attr> (convention used throughout this file).
    self.CL = self.__class__
    # The owning QCAlgorithm instance that created this helper.
    self.algo = caller
    # Mirror the algorithm's debug flag locally.
    self.debug = self.algo.debug
'''
AFTER WARMUP
'''
def MyOnWarmupFinished(self):
    """Post-warmup hook.

    Verifies every tracked symbol finished warming up, in live mode syncs
    orders/positions with the broker (TWS) and checks consistency, then
    registers the recurring scheduled tasks and refreshes VaR statistics.
    """
    #Check Warmup Status for each Symbol
    for sd, value in self.algo.mySymbolDict.items():
        if not value.IsReady():
            self.algo.MyDebug(" Symbol: {}({}) is NOT READY AFTER WARMUP!".format(str(value.symbol), str(value.CL.strategyCode)))
    '''
    IN LIVE MODE: Syncs Orders with Broker, Checks Order Consistency, Lists Order and Portfolio Items
    '''
    self.PortfolioCheckSymbolDict()
    # Backtests skip the broker sync below, so mark the sync flag done up front.
    if not self.algo.LiveMode: self.algo.twsSynced = True
    if self.algo.LiveMode or False:
        self.algo.MyDebug(" ---- WarmUp Finished Startup Sync Started:" )
        # NOTE(review): PortfolioCheckSymbolDict() was already called just
        # above unconditionally — this second call looks redundant; confirm
        # before removing.
        self.PortfolioCheckSymbolDict()
        #Sync TWS orders
        totalOrdersAdded = self.algo.myPositionManager.TWS_Sync()
        #List Active Orders
        if totalOrdersAdded != 0:
            self.algo.myVaR.OrderList()
        #Check consistency for all symbols
        self.algo.myPositionManagerB.AllOrdersConsistency()
        self.algo.MyDebug(" ---- Initial TWS Sync and Consistency Check Finished")
        #List Portfolio Items
        self.algo.myVaR.PortfolioList(True) #True if position only
    #Freeze consistency as things could mess up at startup due to sync with IB
    self.algo.consistencyStartUpReleaseTime = self.algo.Time + timedelta(seconds=120)
    #SET SCHEDULED TASKS
    #AllOrdersConsistency so it is run regularly not only in onData
    self.algo.Schedule.On(self.algo.DateRules.EveryDay(), self.algo.TimeRules.Every(self.algo.myVaR.CL.consistencyCheckSec), \
        Action(self.algo.myPositionManagerB.AllOrdersConsistency))
    #VaR Calculations so it is updated regularly in LiveMode not only in onData
    self.algo.Schedule.On(self.algo.DateRules.EveryDay(), self.algo.TimeRules.Every(timedelta(seconds=68.123456789)), Action(self.algo.myVaR.Update))
    #Pending Entries
    #self.algo.myPositionManagerB.AllOrdersConsistency() cannot call it due to RECURSIVE LOOP as CheckPendingEntry->EnterPosition->VaR->AllOrdersConsistency->CheckPendingEntry
    self.algo.Schedule.On(self.algo.DateRules.EveryDay(), self.algo.TimeRules.Every(timedelta(seconds=196.80625)), Action(self.algo.myPositionManager.CheckPendingEntry))
    if self.algo.updateSettings:
        #Update Setting For the first time
        self.algo.strategySettings.UpdateSettings()
        self.algo.MyDebug(" ---- UPDATE SETTINGS IS ON! First update is completed.")
        self.algo.Schedule.On(self.algo.DateRules.EveryDay(), self.algo.TimeRules.Every(timedelta(minutes=6.251968)), Action(self.algo.strategySettings.UpdateSettings))
    #Update VaR and Order Statistics on DashBoard
    self.algo.myVaR.Update()
    self.algo.MyDebug(" ---- OnWarmupFinished Total mySymbolDict:" + str(len(self.algo.mySymbolDict)) \
        + " Portfolio Holdings Value:" + str(round(self.algo.Portfolio.TotalHoldingsValue)))
    return
'''
ON DATA
'''
def MyOnData(self, data):
    """Per-bar driver.

    Skips work during warmup / the post-startup consistency freeze and when
    no symbol has fresh consolidated data; otherwise runs, in order:
    order-consistency check, stop/target trailing, stale-order cleanup,
    VaR update and pending-entry processing.
    """
    #EXIT HERE IF WarmingUp or initial consistency blocked at Startup
    # none of the consolidators have new data
    if self.algo.IsWarmingUp or self.algo.Time < self.algo.consistencyStartUpReleaseTime: return
    #Only if at least one symbol is ready to speed up backtest
    isReady = False
    for sd, value in self.algo.mySymbolDict.items():
        if value.IsReady() and value.WasJustUpdated(self.algo.Time): isReady = True
    if not isReady: return
    #ORDER CONSISTENCY Check for all Symbols not only Portfolio
    self.algo.myPositionManagerB.AllOrdersConsistency()
    #TRAIL STOPS
    self.algo.myPositionManager.TrailStops()
    #TRAIL TARGETS
    self.algo.myPositionManager.TrailTargets()
    #REMOVE OLD ORDERS
    self.algo.myPositionManagerB.ClearOrderList()
    #EXPOSURE and VaR Calculation
    self.algo.myVaR.Update()
    #PENDING ENTRIES
    #self.algo.myPositionManagerB.AllOrdersConsistency() cannot call it due to RECURSIVE LOOP as CheckPendingEntry->EnterPosition->VaR->AllOrdersConsistency->CheckPendingEntry
    self.algo.myPositionManager.CheckPendingEntry()
    # #STRESS TEST
    # if self.algo.Time.minute == 10 or self.algo.Time.minute == 30 or self.algo.Time.minute == 50:
    #     for x in self.algo.Portfolio:
    #         if self.algo.Portfolio[x.Key].Quantity != 0:
    #             self.algo.myPositionManagerB.LiquidatePosition(x.Key, "STest", " --- STRESS TEST")
    return
'''
INSTALLING NEW strategy
'''
def InstallStrategy (self, strategy, myAllocation=-1):
    """Register a strategy class with the algorithm.

    strategy: a strategy *class* exposing class-level configuration
        (enabled, strategyAllocation, myTickers/mySymbols, resolutionMinutes,
        isEquity, customFillModel, customSlippageModel, ...). It is
        instantiated per symbol in AddSymbolDict.
    myAllocation: optional override of strategy.strategyAllocation
        (-1 keeps the class default).

    Side effects: creates the shared/foreign VaR modules on first install,
    subscribes every ticker, wires optional custom fill/slippage models and
    validates that the combined allocation does not exceed 1.00 (raises and
    disables the algo otherwise).
    """
    if not strategy.enabled or myAllocation==0 or (myAllocation==-1 and strategy.strategyAllocation==0):
        self.algo.MyDebug(" STARTEGY: {} IS NOT INSTALLED! Enabled:{}, Allocation:{}/{}".format(str(strategy.strategyCode),str(strategy.enabled),str(myAllocation),str(strategy.strategyAllocation)))
        return
    #OverWrite strategyAllocation if needed
    if myAllocation != -1: strategy.strategyAllocation = myAllocation
    #If this is the first strategy, set up the shared VaR modules
    if not self.algo.myStrategyClassList:
        #Setup VaR for benchmark and Chartsymbol
        self.algo.myVaR = MyVaR(self.algo, strategy)
        self.algo.myVaRList.append(self.algo.myVaR)
        #Setup VaR for TWS and Chartsymbol
        self.algo.foreignVaR = MyVaR(self.algo, strategy)
        self.algo.myVaRList.append(self.algo.foreignVaR)
        self.algo.foreignVaR.icnludeinTotalVaR = self.algo.myVaR.CL.manageTWSSymbols
    #Add VaR module to strategy
    self.algo.myStrategyClassList.append(strategy)
    strategy.mainVaR = MyVaR(self.algo, strategy)
    self.algo.myVaRList.append(strategy.mainVaR)
    #Tickers
    tickerlist = strategy.myTickers if hasattr(strategy, 'myTickers') else strategy.mySymbols #keep mySymbols for compatibility reasons
    #Check for ticker duplication across already-installed strategies
    for ticker in tickerlist:
        for symbol in self.algo.mySymbolDict:
            if ticker == symbol.Value:
                self.algo.MyDebug(" SYMBOL DUPLICATION IN STRATEGIES: "+str(ticker)+" IS IN: "+str(strategy.strategyCode)+" AND IS ALREADY IN: "+str(self.algo.mySymbolDict[symbol].CL.strategyCode))
    #Resolution: map the strategy's bar size (minutes) onto a QC Resolution
    resolution = Resolution.Daily
    if strategy.resolutionMinutes < 60:
        resolution = Resolution.Minute
    elif strategy.resolutionMinutes < 60*24:
        resolution = Resolution.Hour
    #Add tickers/symbols/securities
    for ticker in tickerlist:
        if strategy.isEquity:
            self.algo.AddEquity(ticker, resolution)
            self.algo.Securities[ticker].SetDataNormalizationMode(self.algo.myDataNormalizationMode)
        else:
            self.algo.AddForex(ticker, resolution)
        symbol = self.algo.Securities[ticker].Symbol
        security = self.algo.Securities[ticker]
        self.AddSymbolDict(symbol, strategy, strategy.mainVaR)
        if strategy.customFillModel != 0:
            security.SetFillModel(MyFillModel(self.algo, symbol))
        if strategy.customSlippageModel != 0:
            security.SetSlippageModel(MySlippageModel(self.algo, symbol))
    #Checking allocation breach
    # BUG FIX: the summing loop previously reused the name `strategy`,
    # clobbering the parameter, so the "STRATEGY INSTALLED" message reported
    # the last strategy in the list instead of the one just installed.
    totalAllocation = 0
    for st in self.algo.myStrategyClassList:
        totalAllocation += st.strategyAllocation
    self.algo.MyDebug(" STRATEGY INSTALLED: {} Strategy Allocation:{} Total Allocation:{}, Total Symbols:{}, Resolution(min):{}".format(str(strategy.strategyCode),str(strategy.strategyAllocation),str(round(totalAllocation,2)),str(self.CL._totalSymbolsAdded),str(strategy.resolutionMinutes)))
    if totalAllocation > 1:
        self.algo.MyDebug(" TOTAL ALLOCATION IS GREATER THAN 1.00: {} ALGO IS DISABLED!".format(str(round(totalAllocation,2))))
        self.algo.enabled = False
        raise Exception(" TOTAL ALLOCATION IS GREATER THAN 1.00: {} ALGO IS DISABLED!".format(str(round(totalAllocation,2))))
    return
'''
SETTING RESOLUTION
'''
def MyResolution (self):
    """Return the coarsest QC Resolution that satisfies the finest-grained
    enabled strategy, and store the minimum in algo.minResolutionMinutes.

    Mapping (consistent with InstallStrategy):
      < 60 min -> Minute, < 60*24 min -> Hour, otherwise Daily.
    BUG FIX: the hourly threshold was `6*24` (144 minutes) instead of
    `60*24` (one day), which wrongly mapped resolutions between 144 and
    1440 minutes to Daily.
    """
    minResolutionMinutes = 60*24
    for st in self.algo.myStrategyClassList:
        if st.enabled and st.resolutionMinutes < minResolutionMinutes:
            minResolutionMinutes = st.resolutionMinutes
    self.algo.minResolutionMinutes = minResolutionMinutes
    resolution = Resolution.Daily
    if minResolutionMinutes < 60:
        resolution = Resolution.Minute
    elif minResolutionMinutes < 60*24:
        resolution = Resolution.Hour
    return resolution
'''
WARMUP IN DAYS
'''
def WarUpDays (self):
    """Return the warm-up period as a timedelta.

    Takes the longest warmupcalendardays among enabled strategies (never
    less than 1), adds one extra safety day, logs the result.
    """
    extraDays = 1
    enabledDays = [st.warmupcalendardays for st in self.algo.myStrategyClassList if st.enabled]
    # Floor of 1 calendar day even when no strategy is enabled.
    warmupcalendardays = max([1] + enabledDays)
    warmupdays = timedelta(days=warmupcalendardays+extraDays)
    self.algo.MyDebug(" WarmUp Calendar Days: {} ({} Extra Days Added) ".format(str(warmupdays.days), str(extraDays)))
    return warmupdays
'''
ADDING NEW SYMBOL
'''
def AddSymbolDict (self, symbol, strategy, var):
    """Create a per-symbol instance of `strategy` bound to VaR module `var`
    and register it in algo.mySymbolDict; no-op if already tracked."""
    if symbol not in self.algo.mySymbolDict:
        # `strategy` is a class here; instantiate it for this symbol.
        self.algo.mySymbolDict[symbol] = strategy(self.algo, symbol, var)
        # Class-wide tally used in the "STRATEGY INSTALLED" debug message.
        self.CL._totalSymbolsAdded +=1
    #if self.algo.LiveMode: self.algo.MyDebug(" Added to mySymbolDict:" + str(symbol))
'''
CHECK PORTFOLIO SYMBOLS
'''
def PortfolioCheckSymbolDict (self):
    '''Need this check if conversion rate currency is added
    '''
    # Ensure every symbol in the Portfolio is also tracked in mySymbolDict
    # (symbols can appear e.g. via currency-conversion pairs or positions
    # that already exist at the broker).
    for x in self.algo.Portfolio:
        if x.Key not in self.algo.mySymbolDict:
            #Subscribe to Data
            if x.Key.SecurityType == SecurityType.Equity:
                self.algo.AddEquity(x.Key.Value, self.algo.mainResolution)
            elif x.Key.SecurityType == SecurityType.Forex:
                self.algo.AddForex(self.algo.Securities[x.Key].Symbol.Value, self.algo.mainResolution)
            #Add to mySymbolDict
            # Foreign symbols are attached to the first installed strategy
            # and to the dedicated foreignVaR bucket, with new entries disabled.
            self.AddSymbolDict(x.Key, self.algo.myStrategyClassList[0], self.algo.foreignVaR)
            self.algo.mySymbolDict[x.Key].posEnabled = False
            # A non-zero quantity means the position originated at the broker (TWS).
            if self.algo.Portfolio[x.Key].Quantity != 0: self.algo.mySymbolDict[x.Key].fromTWS = True
            if self.algo.LiveMode or self.debug: self.algo.MyDebug(" PORTFOLIO SYMBOL ADDED Symbol:{}, Position Quantity:{}"
                .format(str(x.Key),
                str(self.algo.Portfolio[x.Key].Quantity)))
'''
Check if History Download was successful
NOT USED
'''
def AssertHistoryCount(self, tradeBarHistory, expected):
    """Return True when the downloaded history has exactly `expected` rows.

    `tradeBarHistory` is expected to expose a pandas-style `.index`.
    (Idiomatic simplification of the original if/else returning booleans.)
    """
    return len(tradeBarHistory.index) == expected
'''
SECURITY CHANGE EVENT HANDLER
NOT USED
'''
def OnSecuritiesChanged (self, changes):
    '''This is not called during Warmup even if self.AddEquity is used! History data download can be put here
    '''
    # Handler intentionally disabled: the unconditional return below makes
    # everything after it unreachable (header marks this as NOT USED).
    return
    # NOTE(review): dead code below. If ever revived, note AddSymbolDict is
    # called here with two arguments while its signature is
    # (symbol, strategy, var) — this call would raise TypeError.
    for security in changes.AddedSecurities:
        if security.Symbol not in self.algo.mySymbolDict:
            self.AddSymbolDict(security.Symbol, self.algo.myVaR)
            if self.algo.LiveMode: self.algo.MyDebug(" " + str(security.Symbol) + "Added OnSecuritiesChanged")
    for security in changes.RemovedSecurities:
        if security.Symbol in self.algo.mySymbolDict:
            del self.algo.mySymbolDict[security.Symbol]
            if self.algo.LiveMode: self.algo.MyDebug(" " + str(security.Symbol) + " Removed OnSecuritiesChanged")
'''
FEATURES TO PANDAS
'''
#slicer must be a slice object: slice(start, stop, step) or slice(stop) (https://data-flair.training/blogs/python-slice/) example: slice(0, 400, None)
def UnpackFeatures (self, features, featureType=1, featureRegex='Feat', reshapeTuple=None, mySlicer=None):
    """Flatten a feature list (or list of lists) into a single-row pandas
    DataFrame, keep columns matching `featureRegex`, optionally slice with
    `mySlicer`, and convert to the requested container.

    featureType: 1=DataFrame, 2=transposed DataFrame, 3=python list,
                 4=numpy array, 5=numpy array reshaped to `reshapeTuple`.
    Raises ValueError for any other featureType (previously this left
    `convertedFeatures` unbound and raised a confusing NameError).
    """
    useSingleFeatureList = False  # manual override: treat nested lists as a flat list
    rawDataHeader = []
    rawData = []
    if isinstance(features[0], list) and not useSingleFeatureList:
        #features is a list of lists -> column names "Feat<i>_<j>"
        for i in range(0, len(features)):
            for j in range(0, len(features[i])):
                rawDataHeader.append("Feat"+str(i)+'_'+str(j))
                rawData.append(features[i][j])
    else:
        #features is a single flat list -> column names "Feat<i>"
        for i in range(len(features)):
            rawDataHeader.append("Feat"+str(i))
            rawData.append(features[i])
    df = pd.DataFrame([rawData], columns=rawDataHeader)
    #SELECTING FEATURES with featureRegex and SLICING with mySlicer
    if mySlicer is None:
        df_filtered = df.filter(regex = featureRegex)[:]
    else:
        df_filtered = df.filter(regex = featureRegex)[mySlicer]
    #Convert to the requested container type
    if featureType==1:
        #keep original Pandas
        convertedFeatures = df_filtered
    elif featureType==2:
        #original Pandas Transposed
        convertedFeatures = df_filtered.T
    elif featureType==3:
        #converted to list (single row)
        convertedFeatures = df_filtered.values.tolist()[0]
    elif featureType==4:
        #numpy Array
        convertedFeatures = np.asarray(df_filtered)
    elif featureType==5:
        #numpy Array Reshaped (for CNN)
        convertedFeatures = np.reshape(np.asarray(df_filtered), reshapeTuple)
    else:
        raise ValueError("UnpackFeatures: unknown featureType {}".format(featureType))
    return convertedFeatures
#CUSTOM FILTERS with customColumnFilters
#expect one row on df
#returns true if at least one row meets the conditions
def FeatureCustomColumnFilter(self, df, customColumnFilters):
    """Apply threshold filters to `df` and report whether any row survives.

    customColumnFilters: list of (column, operator, threshold) tuples, e.g.
    [('Feat8_15', '>', 0.55)]; supported operators: > < >= <= =.
    Filters referencing columns not present in `df` are skipped.
    Returns True if at least one row meets all applicable conditions.
    """
    myOperators = {'>': operator.gt,
                   '<': operator.lt,
                   '>=': operator.ge,
                   '<=': operator.le,
                   '=': operator.eq}
    # Renamed loop variable (was `filter`, shadowing the builtin).
    for colFilter in customColumnFilters:
        opFilteredCol, opRelate, opTreshold = colFilter
        if opFilteredCol in df.columns:
            df = df.loc[myOperators[opRelate](df[opFilteredCol], opTreshold)]
    #df.reset_index(inplace=True,drop=True)
    return not df.empty
'''
Custom Fill Model Class
'''
class MyFillModel(FillModel):
    """Custom fill model that recalculates stop-market fill prices.

    Per the author's notes below, QC's default model applies no slippage and
    is too conservative when price walks through the stop, so fill prices
    are adjusted here using the security's slippage model.
    """
    def __init__(self, algo, symbol):
        self.CL = self.__class__
        self.algo = algo
        self.symbol = symbol
        self.debug = False
        #super().__init__(self, algo)
        if self.debug: self.algo.MyDebug(" MyFillModel __init__ Symbol: " + str(symbol))
    #It look as QC doesn't use slippage so all the fill prices to be recalculated
    #QC is too conservative if price walks through the stop
    def StopMarketFill(self, asset, order):
        # Start from QC's default stop-market fill and then adjust FillPrice.
        fill = super().StopMarketFill(asset, order)
        prices = super().GetPrices(asset, order.Direction)
        slippage = asset.SlippageModel.GetSlippageApproximation(asset, order)
        oldfillprice = fill.FillPrice
        if self.debug: self.algo.MyDebug(" {} Quantity:{} oldFillPrice:{} StopPrice:{} Open:{} High:{} Low:{}".format(str(asset.Symbol), str(order.Quantity), str(oldfillprice), str(order.StopPrice), str(prices.Open), str(prices.High), str(prices.Low)))
        # The next two branches are deliberate no-ops: forcing fill status /
        # quantity was tried and left disabled (see commented lines).
        if order.Direction == OrderDirection.Sell and prices.Low <= order.StopPrice:
            #fill.Status = OrderStatus.Filled
            #fill.FillQuantity = order.Quantity
            #if self.debug: self.algo.MyDebug(" {} StopMarket Fill".format(str(asset.Symbol)))
            pass
        elif order.Direction == OrderDirection.Buy and prices.High >= order.StopPrice:
            #fill.Status = OrderStatus.Filled
            #fill.FillQuantity = order.Quantity
            #if self.debug: self.algo.MyDebug(" {} StopMarket Fill".format(str(asset.Symbol)))
            pass
        # Only adjust the price when the default model actually filled.
        if fill.Status == OrderStatus.Filled or fill.Status == OrderStatus.PartiallyFilled:
            if order.Direction == OrderDirection.Sell:
                #Price walks through the Stop
                if prices.Open > order.StopPrice and prices.Close < order.StopPrice:
                    fill.FillPrice = order.StopPrice - slippage
                #Stops and reverses
                elif prices.Open > order.StopPrice and prices.Low <= order.StopPrice and prices.Close > order.StopPrice:
                    fill.FillPrice = order.StopPrice - slippage
                #Gaps Down
                elif prices.Open <= order.StopPrice:
                    fill.FillPrice = prices.Open - slippage
                if self.debug: self.algo.MyDebug(" StopMarketFill({}): Fill Price Modidied from:{} to:{} StopPrice:{} bar.Open:{} bar.High:{} bar.Low:{} bar.Close:{}".format(str(asset.Symbol), str(oldfillprice), str(fill.FillPrice), str(order.StopPrice), str(prices.Open), str(prices.High), str(prices.Low), str(prices.Close)))
            elif order.Direction == OrderDirection.Buy:
                #Price walks through the Stop
                if prices.Open < order.StopPrice and prices.Close > order.StopPrice:
                    fill.FillPrice = order.StopPrice + slippage
                #Stops and reverses
                elif prices.Open < order.StopPrice and prices.High >= order.StopPrice and prices.Close < order.StopPrice:
                    fill.FillPrice = order.StopPrice + slippage
                #Gaps Up
                elif prices.Open >= order.StopPrice:
                    fill.FillPrice = prices.Open + slippage
                if self.debug: self.algo.MyDebug(" StopMarketFill({}): Fill Price Modidied from:{} to:{} StopPrice:{} bar.Open:{} bar.High:{} bar.Low:{} bar.Close:{}".format(str(asset.Symbol), str(oldfillprice), str(fill.FillPrice), str(order.StopPrice), str(prices.Open), str(prices.High), str(prices.Low), str(prices.Close)))
        return fill
    #For market orders the slippage is correct
    def MarketFill(self, asset, order):
        # Delegates entirely to the default model; `prices`/`slippage` are
        # fetched only for the optional debug output.
        fill = super().MarketFill(asset, order)
        prices = super().GetPrices(asset, order.Direction)
        slippage = asset.SlippageModel.GetSlippageApproximation(asset, order)
        oldfillprice = fill.FillPrice
        if self.debug: self.algo.MyDebug(" {} oldFillPrice:{} OpenPrice:{}".format(str(asset.Symbol), str(oldfillprice), str(prices.Open)))
        return fill
'''
Custom Slippage Model Class
'''
class MySlippageModel:
    """Custom slippage models selected per symbol via the strategy's
    customSlippageModel class attribute (1=percent-of-price, 2=ATR-based).

    Refactor: the min-price-variation / rounding tail was duplicated in both
    models and is now shared in _ApplyVariationAndRounding (behavior unchanged).
    """
    # Never report slippage below the instrument's minimum price variation.
    applyMinVariation = True
    # Optionally round the slippage to the instrument's tick precision.
    roundSlippage = False
    def __init__(self, algo, symbol):
        self.CL = self.__class__
        self.algo = algo
        self.symbol = symbol
        self.debug = False
    def GetSlippageApproximation(self, asset, order):
        """Dispatch to the configured slippage model; 0 if none configured."""
        slippage = 0
        #Percent Based Slippage Model
        if self.algo.mySymbolDict[self.symbol].CL.customSlippageModel == 1:
            slippage = self.PercentSlippage1 (asset, order)
        #ATR Based Slippage Model
        elif self.algo.mySymbolDict[self.symbol].CL.customSlippageModel == 2:
            slippage = self.ATRSlippage1 (asset, order)
        if self.debug: self.algo.MyDebug(" {} CustomSlippageModel:{} ".format(str(asset.Symbol), str(slippage)))
        return slippage
    def _ApplyVariationAndRounding(self, baseSlippage):
        """Shared tail of both models: enforce the minimum price variation
        floor and (optionally) round to tick precision."""
        minPriceVariation = self.algo.Securities[self.symbol].SymbolProperties.MinimumPriceVariation
        priceRoundingDigits = round(-1*log(minPriceVariation,10))
        if self.CL.applyMinVariation: baseSlippage = max(baseSlippage, minPriceVariation)
        if self.CL.roundSlippage:
            return round(baseSlippage, priceRoundingDigits)
        return baseSlippage
    def PercentSlippage1 (self, asset, order):
        """Price-proportional slippage: 10 bps for equities, 1 bp otherwise (FX)."""
        slippageRatioEq = 0.001
        slippageRatioFX = 0.0001
        #slippage = asset.Price * 0.0001 * np.log10(2*float(order.AbsoluteQuantity))
        if self.symbol.SecurityType == SecurityType.Equity:
            slippageRatio = slippageRatioEq
        else:
            slippageRatio = slippageRatioFX
        return self._ApplyVariationAndRounding(asset.Price * slippageRatio)
    def ATRSlippage1 (self, asset, order):
        """ATR-proportional slippage: 10% of the symbol's current ATR."""
        slippageRatioEq = 0.1
        slippageRatioFX = 0.1
        atr = self.algo.mySymbolDict[self.symbol].atr1.Current.Value
        if self.symbol.SecurityType == SecurityType.Equity:
            slippageRatio = slippageRatioEq
        else:
            slippageRatio = slippageRatioFX
        return self._ApplyVariationAndRounding(atr * slippageRatio)
'''
AI Model Loader
'''
class MyModelLoader:
    """Loads serialized ML models through the algorithm's Download facility.

    Supported constructor `loadtype` values:
      1 - pickled model (e.g. sklearn), base64 text
      2 - keras: architecture json (url1) + pickled weights (url2)
      3 - keras: architecture json (url1) + h5 weights via keras get_file (url2)
      4 - keras: whole h5 model, base64 text, loaded via a temp file
      5 - keras: whole h5 model, base64 text, loaded via BytesIO with an
          explicit TF graph/session (TF1-style, for multithreaded predicts)
    """
    session = 0  # NOTE(review): appears unused within this class; kept for compatibility
    @classmethod
    def LoadModelTorch(cls, caller, url, existingmodel=None):
        """Download a base64-encoded torch artifact and return the model in eval mode.

        With `existingmodel` given, the artifact is treated as a state_dict
        and loaded into that model; otherwise it is a whole pickled model.
        """
        algo = caller.algo
        response = algo.Download(url)
        decoded = codecs.decode(response.encode(), "base64")
        stream = io.BytesIO(decoded)
        if existingmodel is None:
            model = torch.load(stream, map_location='cpu')
        else:
            model = existingmodel
            model.load_state_dict(torch.load(stream, map_location='cpu'))
        if False:
            algo.Debug(str(model))
            algo.Debug(str(model.state_dict()))
        model.eval()
        # algo.Debug(' MODEL LOADED: '+str(url))
        return model
    @classmethod
    def LoadModelPickled(cls, caller, url):
        """Download and unpickle a base64-encoded model.

        BUG FIX: the original referenced `self.algo` / `self.url1` inside a
        classmethod (NameError at call time); use the passed-in arguments.
        """
        response = caller.algo.Download(url)
        model = pickle.loads(codecs.decode(response.encode(), "base64"))
        return model
    def __init__(self, algo, loadtype, url1, url2=None, printSummary=False):
        self.algo=algo
        self.loadtype=loadtype
        self.url1=url1
        self.url2=url2
        self.printSummary= printSummary
        self.model=None
        self.stream=None
        if self.loadtype in [2,3,4,5]:
            # Keras/TF loaders need an explicit graph and session (TF1 API).
            self.tfGraph = tensorflow.Graph() #tensorflow.Graph() #tensorflow.get_default_graph()
            #self.tfSession = tensorflow.keras.backend.get_session() #tensorflow.Session(graph=self.tfGraph)
            self.tfConfig = tensorflow.ConfigProto()
            self.tfConfig.operation_timeout_in_ms = 10000
            self.tfConfig.allow_soft_placement = True
        self.LoadModel()
        return
    def LoadModel(self):
        """Fetch and deserialize the model according to self.loadtype."""
        #Pickle the whole model. Works for sklearn
        if self.loadtype==1:
            response = self.algo.Download(self.url1)
            self.model = pickle.loads(codecs.decode(response.encode(), "base64"))
        #keras only: load model from json and pickle weights
        #model.set_weights(weights) expects the same shapes as get_weights()
        #https://keras.io/models/about-keras-models/
        elif self.loadtype==2:
            #get the model first (BUG FIX: was self.Download, which does not exist on this class)
            response = self.algo.Download(self.url1)
            model_json = json.loads(response)
            self.model = tensorflow.keras.models.model_from_json(model_json)
            #get the pickled weights (BUG FIX: was self.Download)
            response = self.algo.Download(self.url2)
            weights = pickle.loads(codecs.decode(response.encode(), "base64"))
            self.model.set_weights(weights)
            self.model._make_predict_function()
        #keras only: load model from json and h5 weights. Works if keras.get_file whitelisted on QC proxy
        elif self.loadtype==3:
            #get the model first (BUG FIX: was self.Download, and the json was
            #stored as self.model_json but read back as bare model_json -> NameError)
            response = self.algo.Download(self.url1)
            model_json = json.loads(response)
            self.model = tensorflow.keras.models.model_from_json(model_json)
            #get the weights in h5 format
            weights_path = tensorflow.keras.utils.get_file('model.h5',self.url2)
            self.model.load_weights(weights_path)
            self.model._make_predict_function()
        #keras only: load whole model from h5 using tempfile
        elif self.loadtype==4:
            response = self.algo.Download(self.url1)
            h5file_fromtxt = codecs.decode(response.encode(), "base64")
            with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=False) as fd:
                fd.write(h5file_fromtxt)
                fd.flush()
                self.model = tensorflow.keras.models.load_model(fd.name)
                self.model._make_predict_function()
            # Best effort cleanup of the temp file.
            try:
                fd.close()
                os.unlink(fd.name)
            except:
                pass
            if self.printSummary:
                self.algo.MyDebug("Summary of the loaded model: " + self.url1)
                #BUG FIX: was `model.summary(...)` on an undefined local name
                self.model.summary(print_fn=lambda x: self.algo.MyDebug(x))
        #keras only: load whole model from base64 h5 text using BytesIO
        elif self.loadtype==5:
            dummyImput = [np.random.rand(1,400), np.random.rand(1,100)]
            response = self.algo.Download(self.url1)
            decoded = codecs.decode(response.encode(), "base64")
            stream = io.BytesIO(decoded)
            self.stream = stream
            with self.tfGraph.as_default():
                self.tfSession = tensorflow.Session(config=self.tfConfig, graph=self.tfGraph)
                tensorflow.keras.backend.set_session(self.tfSession)
                with self.tfSession.as_default():
                    self.model = tensorflow.keras.models.load_model(stream)
                    #self.model.predict(dummyImput)
                    self.model._make_predict_function()
                    #self.tfSession.run(tensorflow.global_variables_initializer())
                    #self.tfSession.run(tensorflow.local_variables_initializer())
                    #self.tfGraph.finalize()
            if self.printSummary:
                self.algo.MyDebug("Summary of the loaded model: " + self.url1)
                self.model.summary(print_fn=lambda x: self.algo.MyDebug(x))
        self.algo.MyDebug(' MODEL LOADED: '+str(self.url1))
        return
    def tfPredict(self, features):
        """Run model.predict inside the stored TF graph/session.

        Returns (argmax class index, raw prediction array).
        """
        #Brute force solution to the multithread problem: always rebind the
        #stored graph/session before predicting. Properly managing Graphs and
        #Sessions is still to be investigated.
        if self.algo.LiveMode or True:
            with self.tfGraph.as_default():
                #self.model = tensorflow.keras.models.load_model(self.stream)
                #self.model._make_predict_function()
                tensorflow.keras.backend.set_session(self.tfSession)
                with self.tfSession.as_default():
                    prediction = self.model.predict(features)
                #tensorflow.keras.backend.clear_session()
                #tensorflow.reset_default_graph()
        else:
            with self.tfGraph.as_default():
                # self.tfSession = tensorflow.Session(graph=self.tfGraph)
                # with self.tfSession.as_default():
                prediction = self.model.predict(features)
        return (np.argmax(prediction), prediction)
'''
Strategy Settings from Cloud
'''
class MyStrategySettings():
    """Pulls algo/strategy settings from a remote CSV and applies validated changes.

    The CSV (downloaded from ``algo.settingsURL``) has property names as the row
    index and one column per target: ``'algo'`` for the algorithm itself plus one
    column per strategy code. Only properties listed in ``dataValidation`` whose
    value changed, matches the existing attribute's type (bool/float), and falls
    within the allowed bounds are applied.
    """
    debugChanges = True   # log every setting that is actually applied
    debug = False         # verbose per-cell logging of every comparison
    # prop -> (expected type, allowed values): for bool the two legal literals,
    # for float the inclusive [low, high] range.
    dataValidation = {
        'enabled': (bool, True, False),
        'debug': (bool, True, False),
        'strategyAllocation': (float, 0.00, 1.00),
        'enableLong': (bool, True, False),
        'enableShort': (bool, True, False),
        'liquidateLong': (bool, True, False),
        'liquidateShort': (bool, True, False),
        'riskperLongTrade': (float, 0.00, 0.02),
        'riskperShortTrade': (float, 0.00, 0.02),
        'maxAbsExposure' : (float, 0.00, 4.00),
        'maxLongExposure' : (float, 0.00, 4.00),
        'maxNetLongExposure' : (float, 0.00, 4.00),
        'maxShortExposure' : (float, -4.00, 0.00),
        'maxNetShortExposure' : (float, -4.00, 0.00),
        'maxSymbolAbsExposure' : (float, 0.00, 2.00),
        'maxLongVaR' : (float, 0.00, 0.20),
        'maxShortVaR' : (float, 0.00, 0.20),
        'maxTotalVaR' : (float, 0.00, 0.20)
    }

    def __init__(self, algo):
        self.CL = self.__class__
        self.algo = algo

    def ReadSettings(self):
        """Download the settings CSV and return it as a type-converted DataFrame.

        Returns None (after logging) on any download/parse failure.
        """
        try:
            file_str = self.algo.Download(self.algo.settingsURL)
            csv_stream = io.StringIO(file_str)
            df = pd.read_csv(csv_stream, sep=',', index_col=0, header=0)
            df = self.ConvertDataType_pd(df)
            return df
        except Exception:  # narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt
            self.algo.MyDebug('--- SETTING READ ERROR!')
            return None

    def UpdateSettings(self):
        """Fetch the latest settings and apply validated changes to the algo and strategies."""
        df = self.ReadSettings()
        if df is None:
            return
        if self.CL.debug: self.algo.MyDebug('Settings Up')
        #Update algo Settings
        if 'algo' in df:
            for row in range(df.shape[0]):
                prop = df.index[row]
                value = df.loc[df.index[row], 'algo']
                if hasattr(self.algo, prop) and not pd.isna(value):
                    oldvalue = getattr(self.algo, prop)
                    # Apply only when changed, same primitive type, and within bounds.
                    if value!=oldvalue and ((isinstance(value, float) and isinstance(oldvalue, float)) or (isinstance(value, bool) and isinstance(oldvalue, bool))) and self.ValidateData(value, prop):
                        setattr(self.algo, prop, value)
                        if self.CL.debugChanges: self.algo.MyDebug(' ---- SETTING CHANGED! algo.{} = {}, oldvalue:{}, equal:{}'.format(prop, str(getattr(self.algo, prop)), str(oldvalue), getattr(self.algo, prop)==df.loc[df.index[row], 'algo']))
                    if self.CL.debug: self.algo.MyDebug('algo.{} value:{} csv_value:{} equal:{}'.format(prop, str(getattr(self.algo, prop)),df.loc[df.index[row], 'algo'], getattr(self.algo, prop)==df.loc[df.index[row], 'algo']))
        #Update Strategies
        for strategy in self.algo.myStrategyClassList:
            # Settings columns are keyed by the strategy's original code when one exists.
            if hasattr(strategy, "strategyCodeOriginal"):
                strCode = strategy.strategyCodeOriginal
            else:
                strCode = strategy.strategyCode
            if strCode in df:
                for row in range(df.shape[0]):
                    prop = df.index[row]
                    value = df.loc[df.index[row], strCode]
                    if hasattr(strategy, prop) and not pd.isna(value):
                        oldvalue = getattr(strategy, prop)
                        if value!=oldvalue and ((isinstance(value, float) and isinstance(oldvalue, float)) or (isinstance(value, bool) and isinstance(oldvalue, bool))) and self.ValidateData(value, prop):
                            setattr(strategy, prop, value)
                            if self.CL.debugChanges: self.algo.MyDebug(' ---- SETTING CHANGED! {}.CL.{} = {}, oldvalue:{}, equal:{}'.format(strCode, prop, str(getattr(strategy, prop)), str(oldvalue), getattr(strategy, prop)==df.loc[df.index[row], strCode]))
                        if self.CL.debug: self.algo.MyDebug('{}.CL.{} value:{} csv_value:{}, equal:{}'.format(strCode, prop, str(getattr(strategy, prop)), df.loc[df.index[row], strCode], getattr(strategy, prop)==df.loc[df.index[row], strCode]))
        return

    def ValidateData(self, value, prop):
        """Return True iff *value* is an acceptable setting for *prop*.

        Bool props must equal one of the two allowed literals; float props must
        lie within the inclusive [low, high] range. Unknown props are rejected.
        """
        if prop not in self.CL.dataValidation:
            return False
        kind, low, high = self.CL.dataValidation[prop]
        if kind == bool:
            # For bools, (low, high) are the two permitted literals.
            return value == low or value == high
        if kind == float:
            return low <= value <= high
        return False

    def ConvertDataType_pd (self, df):
        """Normalize CSV cells in place: yes/no-style strings -> bool, numeric strings -> float."""
        for row in range(df.shape[0]):
            for col in range(df.shape[1]):
                #Check if string is Boolean
                cell = df.iloc[row, col]
                cellStr = str(cell).lower()
                if cellStr in ("yes", "true", "t"):
                    df.iloc[row, col] = True
                elif cellStr in ("no", "false", "f"):
                    df.iloc[row, col] = False
                #Check if string is Float (skip the bools just written and NaNs)
                cell = df.iloc[row, col]
                if cell!=True and cell!=False and not pd.isna(cell):
                    try:
                        df.iloc[row, col] = float(cell)  # convert once instead of twice
                    except (ValueError, TypeError):
                        pass
        return df
from QuantConnect.Orders.Fees import *
import tensorflow as tf
from QuantConnect.Orders import OrderStatus
from QuantConnect import Resolution, SecurityType
from math import log
import pandas as pd
import numpy as np
from datetime import datetime, timedelta
import tensorflow
import json
import pickle
import codecs
import tempfile
import io
import torch
import operator
from var3 import MyVaR
class MyHelpers:
file = __file__
_totalSymbolsAdded = 0
def __init__(self, caller):
self.CL = self.__class__
self.algo = caller
self.debug = self.algo.debug
def MyOnWarmupFinished(self):
for sd, value in self.algo.mySymbolDict.items():
if not value.IsReady():
self.algo.MyDebug(" Symbol: {}({}) is NOT READY AFTER WARMUP!".format(str(value.symbol), str(value.CL.strategyCode)))
self.PortfolioCheckSymbolDict()
if not self.algo.LiveMode: self.algo.twsSynced = True
if self.algo.LiveMode or False:
self.algo.MyDebug(" ---- WarmUp Finished Startup Sync Started:" )
self.PortfolioCheckSymbolDict()
totalOrdersAdded = self.algo.myPositionManager.TWS_Sync()
if totalOrdersAdded != 0:
self.algo.myVaR.OrderList()
self.algo.myPositionManagerB.AllOrdersConsistency()
self.algo.MyDebug(" ---- Initial TWS Sync and Consistency Check Finished")
self.algo.myVaR.PortfolioList(True)
self.algo.consistencyStartUpReleaseTime = self.algo.Time + timedelta(seconds=120)
self.algo.Schedule.On(self.algo.DateRules.EveryDay(), self.algo.TimeRules.Every(self.algo.myVaR.CL.consistencyCheckSec), \
Action(self.algo.myPositionManagerB.AllOrdersConsistency))
self.algo.Schedule.On(self.algo.DateRules.EveryDay(), self.algo.TimeRules.Every(timedelta(seconds=68.123456789)), Action(self.algo.myVaR.Update))
self.algo.Schedule.On(self.algo.DateRules.EveryDay(), self.algo.TimeRules.Every(timedelta(seconds=196.80625)), Action(self.algo.myPositionManager.CheckPendingEntry))
if self.algo.updateSettings:
self.algo.strategySettings.UpdateSettings()
self.algo.MyDebug(" ---- UPDATE SETTINGS IS ON! First update is completed.")
self.algo.Schedule.On(self.algo.DateRules.EveryDay(), self.algo.TimeRules.Every(timedelta(minutes=6.251968)), Action(self.algo.strategySettings.UpdateSettings))
self.algo.myVaR.Update()
self.algo.MyDebug(" ---- OnWarmupFinished Total mySymbolDict:" + str(len(self.algo.mySymbolDict)) \
+ " Portfolio Holdings Value:" + str(round(self.algo.Portfolio.TotalHoldingsValue)))
return
def MyOnData(self, data):
if self.algo.IsWarmingUp or self.algo.Time < self.algo.consistencyStartUpReleaseTime: return
isReady = False
for sd, value in self.algo.mySymbolDict.items():
if value.IsReady() and value.WasJustUpdated(self.algo.Time): isReady = True
if not isReady: return
self.algo.myPositionManagerB.AllOrdersConsistency()
self.algo.myPositionManager.TrailStops()
self.algo.myPositionManager.TrailTargets()
self.algo.myPositionManagerB.ClearOrderList()
self.algo.myVaR.Update()
self.algo.myPositionManager.CheckPendingEntry()
return
def InstallStrategy (self, strategy, myAllocation=-1):
if not strategy.enabled or myAllocation==0 or (myAllocation==-1 and strategy.strategyAllocation==0):
self.algo.MyDebug(" STARTEGY: {} IS NOT INSTALLED! Enabled:{}, Allocation:{}/{}".format(str(strategy.strategyCode),str(strategy.enabled),str(myAllocation),str(strategy.strategyAllocation)))
return
if myAllocation !=-1: strategy.strategyAllocation = myAllocation
if not self.algo.myStrategyClassList:
self.algo.myVaR = MyVaR(self.algo, strategy)
self.algo.myVaRList.append(self.algo.myVaR)
self.algo.foreignVaR = MyVaR(self.algo, strategy)
self.algo.myVaRList.append(self.algo.foreignVaR)
self.algo.foreignVaR.icnludeinTotalVaR = self.algo.myVaR.CL.manageTWSSymbols
self.algo.myStrategyClassList.append(strategy)
strategy.mainVaR = MyVaR(self.algo, strategy)
self.algo.myVaRList.append(strategy.mainVaR)
tickerlist = strategy.myTickers if hasattr(strategy, 'myTickers') else strategy.mySymbols
for ticker in tickerlist:
for symbol in self.algo.mySymbolDict:
if ticker == symbol.Value:
self.algo.MyDebug(" SYMBOL DUPLICATION IN STRATEGIES: "+str(ticker)+" IS IN: "+str(strategy.strategyCode)+" AND IS ALREADY IN: "+str(self.algo.mySymbolDict[symbol].CL.strategyCode))
resolution = Resolution.Daily
if strategy.resolutionMinutes < 60:
resolution = Resolution.Minute
elif strategy.resolutionMinutes < 60*24:
resolution = Resolution.Hour
for ticker in tickerlist:
if strategy.isEquity:
self.algo.AddEquity(ticker, resolution)
self.algo.Securities[ticker].SetDataNormalizationMode(self.algo.myDataNormalizationMode)
else:
self.algo.AddForex(ticker, resolution)
symbol = self.algo.Securities[ticker].Symbol
security = self.algo.Securities[ticker]
self.AddSymbolDict(symbol, strategy, strategy.mainVaR)
if strategy.customFillModel != 0:
security.SetFillModel(MyFillModel(self.algo, symbol))
if strategy.customSlippageModel != 0:
security.SetSlippageModel(MySlippageModel(self.algo, symbol))
totalAllocation = 0
for strategy in self.algo.myStrategyClassList:
totalAllocation += strategy.strategyAllocation
self.algo.MyDebug(" STRATEGY INSTALLED: {} Strategy Allocation:{} Total Allocation:{}, Total Symbols:{}, Resolution(min):{}".format(str(strategy.strategyCode),str(strategy.strategyAllocation),str(round(totalAllocation,2)),str(self.CL._totalSymbolsAdded),str(strategy.resolutionMinutes)))
if totalAllocation > 1:
self.algo.MyDebug(" TOTAL ALLOCATION IS GREATER THAN 1.00: {} ALGO IS DISABLED!".format(str(round(totalAllocation,2))))
self.algo.enabled = False
raise Exception(" TOTAL ALLOCATION IS GREATER THAN 1.00: {} ALGO IS DISABLED!".format(str(round(totalAllocation,2))))
return
def MyResolution (self):
resolution = Resolution.Daily
minResolutionMinites = 60*24
for st in self.algo.myStrategyClassList:
if st.resolutionMinutes < minResolutionMinites and st.enabled: minResolutionMinites = st.resolutionMinutes
self.algo.minResolutionMinutes = minResolutionMinites
if minResolutionMinites < 60:
resolution = Resolution.Minute
elif minResolutionMinites < 6*24:
resolution = Resolution.Hour
return resolution
def WarUpDays (self):
warmupcalendardays = 1
extraDays = 1
for strategy in self.algo.myStrategyClassList:
if strategy.enabled and strategy.warmupcalendardays > warmupcalendardays:
warmupcalendardays = strategy.warmupcalendardays
warmupdays = timedelta(days=warmupcalendardays+extraDays)
self.algo.MyDebug(" WarmUp Calendar Days: {} ({} Extra Days Added) ".format(str(warmupdays.days), str(extraDays)))
return warmupdays
def AddSymbolDict (self, symbol, strategy, var):
if symbol not in self.algo.mySymbolDict:
self.algo.mySymbolDict[symbol] = strategy(self.algo, symbol, var)
self.CL._totalSymbolsAdded +=1
def PortfolioCheckSymbolDict (self):
for x in self.algo.Portfolio:
if x.Key not in self.algo.mySymbolDict:
if x.Key.SecurityType == SecurityType.Equity:
self.algo.AddEquity(x.Key.Value, self.algo.mainResolution)
elif x.Key.SecurityType == SecurityType.Forex:
self.algo.AddForex(self.algo.Securities[x.Key].Symbol.Value, self.algo.mainResolution)
self.AddSymbolDict(x.Key, self.algo.myStrategyClassList[0], self.algo.foreignVaR)
self.algo.mySymbolDict[x.Key].posEnabled = False
if self.algo.Portfolio[x.Key].Quantity != 0: self.algo.mySymbolDict[x.Key].fromTWS = True
if self.algo.LiveMode or self.debug: self.algo.MyDebug(" PORTFOLIO SYMBOL ADDED Symbol:{}, Position Quantity:{}"
.format(str(x.Key),
str(self.algo.Portfolio[x.Key].Quantity)))
def AssertHistoryCount(self, tradeBarHistory, expected):
count = len(tradeBarHistory.index)
if count == expected:
return True
else:
return False
def OnSecuritiesChanged (self, changes):
return
for security in changes.AddedSecurities:
if security.Symbol not in self.algo.mySymbolDict:
self.AddSymbolDict(security.Symbol, self.algo.myVaR)
if self.algo.LiveMode: self.algo.MyDebug(" " + str(security.Symbol) + "Added OnSecuritiesChanged")
for security in changes.RemovedSecurities:
if security.Symbol in self.algo.mySymbolDict:
del self.algo.mySymbolDict[security.Symbol]
if self.algo.LiveMode: self.algo.MyDebug(" " + str(security.Symbol) + " Removed OnSecuritiesChanged")
def UnpackFeatures (self, features, featureType=1, featureRegex='Feat', reshapeTuple=None, mySlicer=None):
useSingleFeatureList = False
dataBase = []
rawDataHeader = []
rawData = []
if isinstance(features[0], list) and not useSingleFeatureList:
for i in range(0, len(features)):
for j in range(0, len(features[i])):
rawDataHeader.append("Feat"+str(i)+'_'+str(j))
rawData.append(features[i][j])
else:
for i in range(len(features)):
rawDataHeader.append("Feat"+str(i))
rawData.append(features[i])
dataBase.append(rawDataHeader)
dataBase.append(rawData)
df = pd.DataFrame(dataBase[1:], columns=dataBase[0])
if mySlicer==None:
df_filtered = df.filter(regex = featureRegex)[:]
else:
df_filtered = df.filter(regex = featureRegex)[mySlicer]
if featureType==1:
convertedFeatures = df_filtered
if featureType==2:
convertedFeatures = df_filtered.T
elif featureType==3:
convertedFeatures = df_filtered.values.tolist()[0]
elif featureType==4:
convertedFeatures = np.asarray(df_filtered)
elif featureType==5:
convertedFeatures = np.asarray(df_filtered)
convertedFeatures = np.reshape(convertedFeatures, reshapeTuple)
return convertedFeatures
def FeatureCustomColumnFilter(self, df, customColumnFilters):
myOperators = {'>': operator.gt,
'<': operator.lt,
'>=': operator.ge,
'<=': operator.le,
'=': operator.eq}
for filter in customColumnFilters:
opFilteredCol = filter[0]
opRelate = filter[1]
opTreshold = filter[2]
if opFilteredCol in df.columns:
df = df.loc[myOperators[opRelate](df[opFilteredCol], opTreshold)]
if df.empty:
return False
else:
return True
class MyFillModel(FillModel):
def __init__(self, algo, symbol):
self.CL = self.__class__
self.algo = algo
self.symbol = symbol
self.debug = False
if self.debug: self.algo.MyDebug(" MyFillModel __init__ Symbol: " + str(symbol))
#QC is too conservative if price walks through the stop
def StopMarketFill(self, asset, order):
fill = super().StopMarketFill(asset, order)
prices = super().GetPrices(asset, order.Direction)
slippage = asset.SlippageModel.GetSlippageApproximation(asset, order)
oldfillprice = fill.FillPrice
if self.debug: self.algo.MyDebug(" {} Quantity:{} oldFillPrice:{} StopPrice:{} Open:{} High:{} Low:{}".format(str(asset.Symbol), str(order.Quantity), str(oldfillprice), str(order.StopPrice), str(prices.Open), str(prices.High), str(prices.Low)))
if order.Direction == OrderDirection.Sell and prices.Low <= order.StopPrice:
#fill.Status = OrderStatus.Filled
#fill.FillQuantity = order.Quantity
#if self.debug: self.algo.MyDebug(" {} StopMarket Fill".format(str(asset.Symbol)))
pass
elif order.Direction == OrderDirection.Buy and prices.High >= order.StopPrice:
#fill.Status = OrderStatus.Filled
#fill.FillQuantity = order.Quantity
#if self.debug: self.algo.MyDebug(" {} StopMarket Fill".format(str(asset.Symbol)))
pass
if fill.Status == OrderStatus.Filled or fill.Status == OrderStatus.PartiallyFilled:
if order.Direction == OrderDirection.Sell:
#Price walks through the Stop
if prices.Open > order.StopPrice and prices.Close < order.StopPrice:
fill.FillPrice = order.StopPrice - slippage
#Stops and reverses
elif prices.Open > order.StopPrice and prices.Low <= order.StopPrice and prices.Close > order.StopPrice:
fill.FillPrice = order.StopPrice - slippage
#Gaps Down
elif prices.Open <= order.StopPrice:
fill.FillPrice = prices.Open - slippage
if self.debug: self.algo.MyDebug(" StopMarketFill({}): Fill Price Modidied from:{} to:{} StopPrice:{} bar.Open:{} bar.High:{} bar.Low:{} bar.Close:{}".format(str(asset.Symbol), str(oldfillprice), str(fill.FillPrice), str(order.StopPrice), str(prices.Open), str(prices.High), str(prices.Low), str(prices.Close)))
elif order.Direction == OrderDirection.Buy:
#Price walks through the Stop
if prices.Open < order.StopPrice and prices.Close > order.StopPrice:
fill.FillPrice = order.StopPrice + slippage
#Stops and reverses
elif prices.Open < order.StopPrice and prices.High >= order.StopPrice and prices.Close < order.StopPrice:
fill.FillPrice = order.StopPrice + slippage
#Gaps Up
elif prices.Open >= order.StopPrice:
fill.FillPrice = prices.Open + slippage
if self.debug: self.algo.MyDebug(" StopMarketFill({}): Fill Price Modidied from:{} to:{} StopPrice:{} bar.Open:{} bar.High:{} bar.Low:{} bar.Close:{}".format(str(asset.Symbol), str(oldfillprice), str(fill.FillPrice), str(order.StopPrice), str(prices.Open), str(prices.High), str(prices.Low), str(prices.Close)))
return fill
#For market orders the slippage is correct
def MarketFill(self, asset, order):
fill = super().MarketFill(asset, order)
prices = super().GetPrices(asset, order.Direction)
slippage = asset.SlippageModel.GetSlippageApproximation(asset, order)
oldfillprice = fill.FillPrice
if self.debug: self.algo.MyDebug(" {} oldFillPrice:{} OpenPrice:{}".format(str(asset.Symbol), str(oldfillprice), str(prices.Open)))
return fill
class MySlippageModel:
applyMinVariation = True
roundSlippage = False
def __init__(self, algo, symbol):
self.CL = self.__class__
self.algo = algo
self.symbol = symbol
self.debug = False
def GetSlippageApproximation(self, asset, order):
slippage = 0
#Percent Based Slippage Model
if self.algo.mySymbolDict[self.symbol].CL.customSlippageModel == 1:
slippage = self.PercentSlippage1 (asset, order)
#ATR Based Slippage Model
elif self.algo.mySymbolDict[self.symbol].CL.customSlippageModel == 2:
slippage = self.ATRSlippage1 (asset, order)
if self.debug: self.algo.MyDebug(" {} CustomSlippageModel:{} ".format(str(asset.Symbol), str(slippage)))
return slippage
def PercentSlippage1 (self, asset, order):
slippageRatioEq = 0.001
slippageRatioFX = 0.0001
minPriceVariation = self.algo.Securities[self.symbol].SymbolProperties.MinimumPriceVariation
priceRoundingDigits = round(-1*log(minPriceVariation,10))
#slippage = asset.Price * 0.0001 * np.log10(2*float(order.AbsoluteQuantity))
if self.symbol.SecurityType == SecurityType.Equity:
slippageRatio = slippageRatioEq
else:
slippageRatio = slippageRatioFX
baseSlippage = asset.Price * slippageRatio
if self.CL.applyMinVariation: baseSlippage = max(baseSlippage, minPriceVariation)
if self.CL.roundSlippage:
slippage = round(baseSlippage, priceRoundingDigits)
else:
slippage = baseSlippage
return slippage
def ATRSlippage1 (self, asset, order):
slippageRatioEq = 0.1
slippageRatioFX = 0.1
slippage = 0
atr = self.algo.mySymbolDict[self.symbol].atr1.Current.Value
minPriceVariation = self.algo.Securities[self.symbol].SymbolProperties.MinimumPriceVariation
priceRoundingDigits = round(-1*log(minPriceVariation,10))
#slippage = asset.Price * 0.0001 * np.log10(2*float(order.AbsoluteQuantity))
if self.symbol.SecurityType == SecurityType.Equity:
slippageRatio = slippageRatioEq
else:
slippageRatio = slippageRatioFX
baseSlippage = atr * slippageRatio
if self.CL.applyMinVariation: baseSlippage = max(baseSlippage, minPriceVariation)
if self.CL.roundSlippage:
slippage = round(baseSlippage, priceRoundingDigits)
else:
slippage = baseSlippage
return slippage
class MyModelLoader:
session = 0
@classmethod
def LoadModelTorch(cls, caller, url, existingmodel=None):
algo = caller.algo
response = algo.Download(url)
decoded = codecs.decode(response.encode(), "base64")
stream = io.BytesIO(decoded)
if existingmodel==None:
model = torch.load(stream, map_location='cpu')
else:
model = existingmodel
model.load_state_dict(torch.load(stream, map_location='cpu'))
if False:
algo.Debug(str(model))
algo.Debug(str(model.state_dict()))
model.eval()
# algo.Debug(' MODEL LOADED: '+str(url1))
return model
@classmethod
def LoadModelPickled(cls, caller, url):
response = self.algo.Download(self.url1)
model = pickle.loads(codecs.decode(response.encode(), "base64"))
return model
def __init__(self, algo, loadtype, url1, url2=None, printSummary=False):
self.algo=algo
self.loadtype=loadtype
self.url1=url1
self.url2=url2
self.printSummary= printSummary
self.model=None
self.stream=None
if self.loadtype in [2,3,4,5]:
self.tfGraph = tensorflow.Graph() #tensorflow.Graph() #tensorflow.get_default_graph()
#self.tfSession = tensorflow.keras.backend.get_session() #tensorflow.Session(graph=self.tfGraph)
self.tfConfig = tensorflow.ConfigProto()
self.tfConfig.operation_timeout_in_ms = 10000
self.tfConfig.allow_soft_placement = True
self.LoadModel()
return
def LoadModel(self):
model = None
#Pickle the whole model. Works for sklearn
if self.loadtype==1:
response = self.algo.Download(self.url1)
self.model = pickle.loads(codecs.decode(response.encode(), "base64"))
#keras only: load model from json and pickle weights
#model.set_weights(weights) sets the values of the weights of the model, from a list of Numpy arrays. The arrays in the list should have the same shape as those returned by get_weights()
#https://keras.io/models/about-keras-models/
elif self.loadtype==2:
#get the model first
response = self.Download(self.url1)
model_json = json.loads(response)
self.model = tensorflow.keras.models.model_from_json(model_json)
#get the pickled weights
response = self.Download(self.url2)
weights = pickle.loads(codecs.decode(response.encode(), "base64"))
self.model.set_weights(weights)
self.model._make_predict_function()
#keras only: load model from json and h5 weights. Works if keras.get_file whitelisted on QC proxy
elif self.loadtype==3:
#get the model first
response = self.Download(self.url1)
self.model_json = json.loads(response)
self.model = tensorflow.keras.models.model_from_json(model_json)
#get the weights in h5 format
weights_path = tensorflow.keras.utils.get_file('model.h5',self.url2)
self.model.load_weights(weights_path)
self.model._make_predict_function()
#keras only: load model from h5 using tempfile
elif self.loadtype==4:
response = self.algo.Download(self.url1)
h5file_fromtxt = codecs.decode(response.encode(), "base64")
with tempfile.NamedTemporaryFile(suffix='.hdf5', delete=False) as fd:
fd.write(h5file_fromtxt)
fd.flush()
self.model = tensorflow.keras.models.load_model(fd.name)
self.model._make_predict_function()
try:
fd.close()
os.unlink(fd.name)
except:
pass
if self.printSummary:
self.algo.MyDebug("Summary of the loaded model: " + self.url1)
model.summary(print_fn=lambda x: self.algo.MyDebug(x))
#keras only: load model from h5txt using BytesIO
elif self.loadtype==5:
dummyImput = [np.random.rand(1,400), np.random.rand(1,100)]
response = self.algo.Download(self.url1)
decoded = codecs.decode(response.encode(), "base64")
stream = io.BytesIO(decoded)
self.stream = stream
#self.tfGraph = tensorflow.Graph()
with self.tfGraph.as_default():
self.tfSession = tensorflow.Session(config=self.tfConfig, graph=self.tfGraph)
tensorflow.keras.backend.set_session(self.tfSession)
with self.tfSession.as_default():
self.model = tensorflow.keras.models.load_model(stream)
#self.model.predict(dummyImput)
self.model._make_predict_function()
#self.tfSession.run(tensorflow.global_variables_initializer())
#self.tfSession.run(tensorflow.local_variables_initializer())
#self.tfGraph.finalize()
if self.printSummary:
self.algo.MyDebug("Summary of the loaded model: " + self.url1)
self.model.summary(print_fn=lambda x: self.algo.MyDebug(x))
self.algo.MyDebug(' MODEL LOADED: '+str(self.url1))
return
def tfPredict(self, features):
#with self.tfGraph.as_default(), self.tfSession.as_default():
# with self.tfGraph.as_default():
# with self.tfSession.as_default():
#Brute force solution to the multithread problem to load the actual model again
#Properly managing Graphs and Sessions it to be investigated
if self.algo.LiveMode or True:
#self.tfGraph = tensorflow.get_default_graph() #tensorflow.Graph()
with self.tfGraph.as_default():
#self.model = tensorflow.keras.models.load_model(self.stream)
#self.model._make_predict_function()
#tfSession = tensorflow.Session(graph=self.tfGraph, config=self.tfConfig)
tensorflow.keras.backend.set_session(self.tfSession)
with self.tfSession.as_default():
prediction = self.model.predict(features)
#tensorflow.keras.backend.clear_session()
#tensorflow.reset_default_graph()
else:
with self.tfGraph.as_default():
# self.tfSession = tensorflow.Session(graph=self.tfGraph)
# with self.tfSession.as_default():
prediction = self.model.predict(features)
return (np.argmax(prediction), prediction)
class MyStrategySettings():
debugChanges = True
debug = False
dataValidation = {
'enabled': (bool, True, False),
'debug': (bool, True, False),
'strategyAllocation': (float, 0.00, 1.00),
'enableLong': (bool, True, False),
'enableShort': (bool, True, False),
'liquidateLong': (bool, True, False),
'liquidateShort': (bool, True, False),
'riskperLongTrade': (float, 0.00, 0.02),
'riskperShortTrade': (float, 0.00, 0.02),
'maxAbsExposure' : (float, 0.00, 4.00),
'maxLongExposure' : (float, 0.00, 4.00),
'maxNetLongExposure' : (float, 0.00, 4.00),
'maxShortExposure' : (float, -4.00, 0.00),
'maxNetShortExposure' : (float, -4.00, 0.00),
'maxSymbolAbsExposure' : (float, 0.00, 2.00),
'maxLongVaR' : (float, 0.00, 0.20),
'maxShortVaR' : (float, 0.00, 0.20),
'maxTotalVaR' : (float, 0.00, 0.20)
}
def __init__(self, algo):
self.CL = self.__class__
self.algo = algo
def ReadSettings(self):
try:
file_str = self.algo.Download(self.algo.settingsURL)
csv_stream = io.StringIO(file_str)
df = pd.read_csv(csv_stream, sep=',', index_col=0, header=0)
df = self.ConvertDataType_pd(df)
return df
except:
self.algo.MyDebug('--- SETTING READ ERROR!')
return None
def UpdateSettings(self):
df = self.ReadSettings()
if df is None:
return
if self.CL.debug: self.algo.MyDebug('Settings Up')
#Update algo Settings
if 'algo' in df:
for row in range(df.shape[0]):
prop = df.index[row]
value = df.loc[df.index[row], 'algo']
if hasattr(self.algo, prop) and not pd.isna(value):
oldvalue = getattr(self.algo, prop)
if value!=oldvalue and ((isinstance(value, float) and isinstance(oldvalue, float)) or (isinstance(value, bool) and isinstance(oldvalue, bool))) and self.ValidateData(value, prop):
setattr(self.algo, prop, value)
if self.CL.debugChanges: self.algo.MyDebug(' ---- SETTING CHANGED! algo.{} = {}, oldvalue:{}, equal:{}'.format(prop, str(getattr(self.algo, prop)), str(oldvalue), getattr(self.algo, prop)==df.loc[df.index[row], 'algo']))
if self.CL.debug: self.algo.MyDebug('algo.{} value:{} csv_value:{} equal:{}'.format(prop, str(getattr(self.algo, prop)),df.loc[df.index[row], 'algo'], getattr(self.algo, prop)==df.loc[df.index[row], 'algo']))
#Update Strategies
for strategy in self.algo.myStrategyClassList:
if hasattr(strategy, "strategyCodeOriginal"):
strCode = strategy.strategyCodeOriginal
else:
strCode = strategy.strategyCode
if strCode in df:
for row in range(df.shape[0]):
prop = df.index[row]
value = df.loc[df.index[row], strCode]
if hasattr(strategy, prop) and not pd.isna(value):
oldvalue = getattr(strategy, prop)
if value!=oldvalue and ((isinstance(value, float) and isinstance(oldvalue, float)) or (isinstance(value, bool) and isinstance(oldvalue, bool))) and self.ValidateData(value, prop):
setattr(strategy, prop, value)
if self.CL.debugChanges: self.algo.MyDebug(' ---- SETTING CHANGED! {}.CL.{} = {}, oldvalue:{}, equal:{}'.format(strCode, prop, str(getattr(strategy, prop)), str(oldvalue), getattr(strategy, prop)==df.loc[df.index[row], strCode]))
if self.CL.debug: self.algo.MyDebug('{}.CL.{} value:{} csv_value:{}, equal:{}'.format(strCode, prop, str(getattr(strategy, prop)), df.loc[df.index[row], strCode], getattr(strategy, prop)==df.loc[df.index[row], strCode]))
return
def ValidateData(self, value, prop):
if prop in self.CL.dataValidation:
if self.CL.dataValidation[prop][0] == bool:
return value==self.CL.dataValidation[prop][1] or value==self.CL.dataValidation[prop][2]
if self.CL.dataValidation[prop][0] == float:
return value>=self.CL.dataValidation[prop][1] and value<=self.CL.dataValidation[prop][2]
else:
return False
else:
return False
def ConvertDataType_pd (self, df):
for row in range(df.shape[0]):
for col in range(df.shape[1]):
#Check if string is Boolean
cell = df.iloc[row, col]
cellStr = str(cell).lower()
if cellStr in ("yes", "true", "t"):
df.iloc[row, col] = True
elif cellStr in ("no", "false", "f"):
df.iloc[row, col] = False
#Check if sting is Float
cell = df.iloc[row, col]
if cell!=True and cell!=False and not pd.isna(cell):
try:
float(cell)
df.iloc[row, col] = float(cell)
except ValueError:
pass
return df | true | true |
1c3f02f8e5985d6b7670531f93bdfb106a93f89f | 2,070 | py | Python | dataloader.py | JayD1912/image_outpaint | 0b47d94c6cbd10f749ed717d7d5f76bba03c0d9d | [
"MIT"
] | null | null | null | dataloader.py | JayD1912/image_outpaint | 0b47d94c6cbd10f749ed717d7d5f76bba03c0d9d | [
"MIT"
] | null | null | null | dataloader.py | JayD1912/image_outpaint | 0b47d94c6cbd10f749ed717d7d5f76bba03c0d9d | [
"MIT"
] | null | null | null | import numpy as np
import os
from random import shuffle
DATA_PATH = "train"
TEST_PATH = "test"
class Data():
    """Streams shuffled training samples from the .npy shards in DATA_PATH.

    Shards are visited in a shuffled order; samples within each shard are
    shuffled too. ``get_data`` yields consecutive batches, moving to the next
    shard when the current one is exhausted.
    """

    def __init__(self):
        self.X_counter = 0     # index of the next unread sample in self.X
        self.file_counter = 0  # index of the next shard file to load
        # Avoid shadowing the builtin `file`; filter to .npy shards in one pass.
        self.files = [name for name in os.listdir(DATA_PATH) if '.npy' in name]
        shuffle(self.files)
        self._load_data()

    def _load_data(self):
        """Load the next shard into self.X (shuffled) and advance the file cursor."""
        datas = np.load(os.path.join(DATA_PATH, self.files[self.file_counter]))
        samples = list(datas)  # replaces the manual append loop
        shuffle(samples)
        self.X = np.asarray(samples)
        self.file_counter += 1

    def get_data(self, batch_size):
        """Return the next batch of up to *batch_size* samples.

        The final batch of a shard may be short. Returns None (after
        re-initializing) once every shard has been consumed.
        """
        if self.X_counter >= len(self.X):
            if self.file_counter > len(self.files) - 1:
                print("Data exhausted, Re Initialize")
                self.__init__()
                return None
            self._load_data()
            self.X_counter = 0
        # NumPy slicing clamps past the end, so one slice covers both the
        # full-batch and short-final-batch cases (removes the dead `remaining`).
        X = self.X[self.X_counter: self.X_counter + batch_size]
        self.X_counter += batch_size
        return X
class TestData():
    """Streams shuffled evaluation samples from the .npy shards in TEST_PATH.

    Mirrors Data but reads from the test directory: shards and the samples
    within them are shuffled, and ``get_data`` yields consecutive batches.
    """

    def __init__(self):
        self.X_counter = 0     # index of the next unread sample in self.X
        self.file_counter = 0  # index of the next shard file to load
        # Avoid shadowing the builtin `file`; filter to .npy shards in one pass.
        self.files = [name for name in os.listdir(TEST_PATH) if '.npy' in name]
        shuffle(self.files)
        self._load_data()

    def _load_data(self):
        """Load the next shard into self.X (shuffled) and advance the file cursor."""
        datas = np.load(os.path.join(TEST_PATH, self.files[self.file_counter]))
        samples = list(datas)  # replaces the manual append loop
        shuffle(samples)
        self.X = np.asarray(samples)
        self.file_counter += 1

    def get_data(self, batch_size):
        """Return the next batch of up to *batch_size* samples.

        The final batch of a shard may be short. Returns None (after
        re-initializing) once every shard has been consumed.
        """
        if self.X_counter >= len(self.X):
            if self.file_counter > len(self.files) - 1:
                print("Data exhausted, Re Initialize")
                self.__init__()
                return None
            self._load_data()
            self.X_counter = 0
        # NumPy slicing clamps past the end, so one slice covers both the
        # full-batch and short-final-batch cases (removes the dead `remaining`).
        X = self.X[self.X_counter: self.X_counter + batch_size]
        self.X_counter += batch_size
        return X
| 24.069767 | 74 | 0.644928 | import numpy as np
import os
from random import shuffle
DATA_PATH = "train"
TEST_PATH = "test"
class Data():
def __init__(self):
self.X_counter = 0
self.file_counter = 0
self.files = os.listdir(DATA_PATH)
self.files = [file for file in self.files if '.npy' in file]
shuffle(self.files)
self._load_data()
def _load_data(self):
datas = np.load(os.path.join(DATA_PATH, self.files[self.file_counter]))
self.X = []
for data in datas:
self.X.append(data)
shuffle(self.X)
self.X = np.asarray(self.X)
self.file_counter += 1
def get_data(self, batch_size):
if self.X_counter >= len(self.X):
if self.file_counter > len(self.files) - 1:
print("Data exhausted, Re Initialize")
self.__init__()
return None
else:
self._load_data()
self.X_counter = 0
if self.X_counter + batch_size <= len(self.X):
remaining = len(self.X) - (self.X_counter)
X = self.X[self.X_counter: self.X_counter + batch_size]
else:
X = self.X[self.X_counter: ]
self.X_counter += batch_size
return X
class TestData():
def __init__(self):
self.X_counter = 0
self.file_counter = 0
self.files = os.listdir(TEST_PATH)
self.files = [file for file in self.files if '.npy' in file]
shuffle(self.files)
self._load_data()
def _load_data(self):
datas = np.load(os.path.join(TEST_PATH, self.files[self.file_counter]))
self.X = []
for data in datas:
self.X.append(data)
shuffle(self.X)
self.X = np.asarray(self.X)
self.file_counter += 1
def get_data(self, batch_size):
if self.X_counter >= len(self.X):
if self.file_counter > len(self.files) - 1:
print("Data exhausted, Re Initialize")
self.__init__()
return None
else:
self._load_data()
self.X_counter = 0
if self.X_counter + batch_size <= len(self.X):
remaining = len(self.X) - (self.X_counter)
X = self.X[self.X_counter: self.X_counter + batch_size]
else:
X = self.X[self.X_counter: ]
self.X_counter += batch_size
return X
| true | true |
1c3f04dfed6c101a659c0d01e90716a188f78071 | 4,533 | py | Python | sourcecode/usb-example/python/face_detect.py | HeavenFish/Face-Recognition-Check-in-with-Line-API | 06f5fb635ce606f225ef24aac8270d689dd68cbc | [
"MIT"
] | null | null | null | sourcecode/usb-example/python/face_detect.py | HeavenFish/Face-Recognition-Check-in-with-Line-API | 06f5fb635ce606f225ef24aac8270d689dd68cbc | [
"MIT"
] | null | null | null | sourcecode/usb-example/python/face_detect.py | HeavenFish/Face-Recognition-Check-in-with-Line-API | 06f5fb635ce606f225ef24aac8270d689dd68cbc | [
"MIT"
] | null | null | null | import time
import cv2 as cv
import smtplib as sm
import os
from practicum import find_mcu_boards, McuBoard, PeriBoard
from requests import get, post
from line_notify import LineNotify
def notifymessage(message):
payload = {"message": message}
sendnotify(payload)
def notifypic(message, url):
payload = {"message": message,
"imageFile": open(url,'rb')}
sendnotify(payload)
def sendnotify(payload, file = None):
url = 'https://notify-api.line.me/api/notify'
token = '2dlsMzR3c0HjNMYtZVKyt1Wou1dX02RLzs6sJRyW6iD'
headers = {"content-type": "application/x-www-form-urlencoded",
"Authorization": f"Bearer {token}"}
#payload = {"message": message}
r = post(url, headers=headers, data=payload, files=file)
print(r.text)
def sendpic(txt, path, token):
notify = LineNotify(token)
notify.send(txt + ' checked in', path) # send picture
#notifymessage("bung")
haar_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
people = ['You_know_who', 'Taro', 'prayuth', 'M']
#features = np.load('features.npy', allow_pickle=True)
#labels = np.load('labels.npy')
img = 0
path = '/home/pi/practicum/project/usb-example/python/pic/'
face_recognizer = cv.face.LBPHFaceRecognizer_create()
face_recognizer.read('face_trained.yml')
capture = cv.VideoCapture(0)
switch = 0
lst = [0] * len(people)
unknown = 0
finish = 0
nump = 1
token = '2dlsMzR3c0HjNMYtZVKyt1Wou1dX02RLzs6sJRyW6iD'
devices = find_mcu_boards()
mcu = McuBoard(devices[0])
peri = PeriBoard(mcu)
peri.get_switch()
peri.set_led(0,0)
peri.set_led(1,0)
peri.set_led(2,0)
while True:
#capture = cv.VideoCapture("192.168.2.46:8080")
blank, img = capture.read()
img+=1
img = cv.resize(img, (300,200))
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
#cv.imshow('Person', gray)
# Detect the face in the image
faces_rect = haar_cascade.detectMultiScale(gray, 1.1, 4)
for (x,y,w,h) in faces_rect:
faces_roi = gray[y:y+h,x:x+h]
label, confidence = face_recognizer.predict(faces_roi)
print(f'label = {people[label]} with a confidence of {confidence} lst[label]={lst[label]}')
print(f'unknown={unknown}')
if (unknown >=50):
#red
if(finish == 0):
peri.set_led(0,1)
peri.set_led(1,0)
peri.set_led(2,0)
cv.imwrite(os.path.join(path,'photo' + str(nump) + '.jpeg'),img)
#notifymessage("Unknown")
#notifypic("Unknown",path+'photo' + str(nump) + '.jpeg')
sendpic("Unknown", path+"photo1.jpeg", token)
nump += 1
lst = [0] * len(people)
unknown = 0
finish = 1
if(lst[label]>=50):
if(finish == 0):
peri.set_led(0,0)
peri.set_led(1,0)
peri.set_led(2,1)
cv.imwrite(os.path.join(path,'photo' + str(nump) + '.jpeg'),img)
#notifymessage(people[label]+' checked in')
#notifypic(people[label] + ' checked in', path+'photo' + str(nump) + '.jpeg')
sendpic(people[label], path+"photo1.jpeg", token)
nump += 1
#f=open("int.txt","w")
#integer=1
#f.write(str(integer))
#f.truncate()
unknown = 0
lst = [0] * len(people)
finish = 1
if(lst[label]>=0 or unknown >= 0):
#yellow
if(finish == 0):
peri.set_led(0,0)
peri.set_led(1,1)
peri.set_led(2,0)
if (confidence >= 60 and confidence <= 100):
lst[label] += 1
cv.putText(img, str(people[label]), (x, y - 4), cv.FONT_HERSHEY_COMPLEX, 0.8, (0, 255, 0), thickness=2)
cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
cv.imshow('Detected Face', img)
elif(confidence < 60 or confidence > 100):
unknown+=1
cv.putText(img, "Unknown", (x,y-4), cv.FONT_HERSHEY_COMPLEX, 0.8, (0,0,255), thickness=2)
cv.rectangle(img, (x,y), (x+w,y+h), (000,0,255), thickness=2)
cv.imshow('Detected Face', img)
if(cv.waitKey(1) & 0xFF == ord('d')):
peri.set_led(0,0)
peri.set_led(1,0)
peri.set_led(2,0)
break
| 35.414063 | 116 | 0.554379 | import time
import cv2 as cv
import smtplib as sm
import os
from practicum import find_mcu_boards, McuBoard, PeriBoard
from requests import get, post
from line_notify import LineNotify
def notifymessage(message):
payload = {"message": message}
sendnotify(payload)
def notifypic(message, url):
payload = {"message": message,
"imageFile": open(url,'rb')}
sendnotify(payload)
def sendnotify(payload, file = None):
url = 'https://notify-api.line.me/api/notify'
token = '2dlsMzR3c0HjNMYtZVKyt1Wou1dX02RLzs6sJRyW6iD'
headers = {"content-type": "application/x-www-form-urlencoded",
"Authorization": f"Bearer {token}"}
r = post(url, headers=headers, data=payload, files=file)
print(r.text)
def sendpic(txt, path, token):
notify = LineNotify(token)
notify.send(txt + ' checked in', path)
haar_cascade = cv.CascadeClassifier('haarcascade_frontalface_default.xml')
people = ['You_know_who', 'Taro', 'prayuth', 'M']
img = 0
path = '/home/pi/practicum/project/usb-example/python/pic/'
face_recognizer = cv.face.LBPHFaceRecognizer_create()
face_recognizer.read('face_trained.yml')
capture = cv.VideoCapture(0)
switch = 0
lst = [0] * len(people)
unknown = 0
finish = 0
nump = 1
token = '2dlsMzR3c0HjNMYtZVKyt1Wou1dX02RLzs6sJRyW6iD'
devices = find_mcu_boards()
mcu = McuBoard(devices[0])
peri = PeriBoard(mcu)
peri.get_switch()
peri.set_led(0,0)
peri.set_led(1,0)
peri.set_led(2,0)
while True:
blank, img = capture.read()
img+=1
img = cv.resize(img, (300,200))
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
faces_rect = haar_cascade.detectMultiScale(gray, 1.1, 4)
for (x,y,w,h) in faces_rect:
faces_roi = gray[y:y+h,x:x+h]
label, confidence = face_recognizer.predict(faces_roi)
print(f'label = {people[label]} with a confidence of {confidence} lst[label]={lst[label]}')
print(f'unknown={unknown}')
if (unknown >=50):
if(finish == 0):
peri.set_led(0,1)
peri.set_led(1,0)
peri.set_led(2,0)
cv.imwrite(os.path.join(path,'photo' + str(nump) + '.jpeg'),img)
sendpic("Unknown", path+"photo1.jpeg", token)
nump += 1
lst = [0] * len(people)
unknown = 0
finish = 1
if(lst[label]>=50):
if(finish == 0):
peri.set_led(0,0)
peri.set_led(1,0)
peri.set_led(2,1)
cv.imwrite(os.path.join(path,'photo' + str(nump) + '.jpeg'),img)
sendpic(people[label], path+"photo1.jpeg", token)
nump += 1
unknown = 0
lst = [0] * len(people)
finish = 1
if(lst[label]>=0 or unknown >= 0):
if(finish == 0):
peri.set_led(0,0)
peri.set_led(1,1)
peri.set_led(2,0)
if (confidence >= 60 and confidence <= 100):
lst[label] += 1
cv.putText(img, str(people[label]), (x, y - 4), cv.FONT_HERSHEY_COMPLEX, 0.8, (0, 255, 0), thickness=2)
cv.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), thickness=2)
cv.imshow('Detected Face', img)
elif(confidence < 60 or confidence > 100):
unknown+=1
cv.putText(img, "Unknown", (x,y-4), cv.FONT_HERSHEY_COMPLEX, 0.8, (0,0,255), thickness=2)
cv.rectangle(img, (x,y), (x+w,y+h), (000,0,255), thickness=2)
cv.imshow('Detected Face', img)
if(cv.waitKey(1) & 0xFF == ord('d')):
peri.set_led(0,0)
peri.set_led(1,0)
peri.set_led(2,0)
break
| true | true |
1c3f056eef1798da8fcdf25ae23b87f8e864b8e9 | 2,616 | py | Python | src/scheduler/domain/operation/commands/SendSchedulerErrorMailCommand.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | null | null | null | src/scheduler/domain/operation/commands/SendSchedulerErrorMailCommand.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | null | null | null | src/scheduler/domain/operation/commands/SendSchedulerErrorMailCommand.py | jedicontributors/pythondataintegrator | 3e877b367ab9b20185476128ec053db41087879f | [
"MIT"
] | null | null | null | from injector import inject
from IocManager import IocManager
from infrastructor.configuration.ConfigService import ConfigService
from infrastructor.data.RepositoryProvider import RepositoryProvider
from infrastructor.delivery.EmailProvider import EmailProvider
from infrastructor.exceptions.OperationalException import OperationalException
from infrastructor.logging.SqlLogger import SqlLogger
from models.configs.ApplicationConfig import ApplicationConfig
from models.dao.operation import DataOperationJob
class SendSchedulerErrorMailCommand:
@inject
def __init__(self):
self.repository_provider = RepositoryProvider()
self.config_service = ConfigService(self.repository_provider)
self.sql_logger = IocManager.injector.get(SqlLogger)
self.application_config = IocManager.injector.get(ApplicationConfig)
self.email_provider = EmailProvider(config_service=self.config_service, sql_logger=self.sql_logger)
def send(self, job_id:int,exception:Exception,data_operation_job_execution_id=None):
try:
data_operation_job_repository = self.repository_provider.get(DataOperationJob)
data_operation_job = data_operation_job_repository.first(JobId=job_id)
if data_operation_job is None:
raise OperationalException("Job definition not found")
operation_contacts = []
default_contacts = self.config_service.get_config_by_name("DataOperationDefaultContact")
if default_contacts is not None and default_contacts != '':
default_contacts_emails = default_contacts.split(",")
for default_contact in default_contacts_emails:
if default_contact is not None and default_contact != '':
operation_contacts.append(default_contact)
data_operation_name = data_operation_job.DataOperation.Name
subject = f"Scheduler getting error on execution create"
subject = subject + f": {self.application_config.environment} » {data_operation_name}"
body = f'''
<p>Scheduler getting error on job</p>
<p>{exception}<p/>
'''
try:
self.email_provider.send(operation_contacts, subject, body)
except Exception as ex:
self.sql_logger.error(f"Scheduler mail sending. Error:{ex}",job_id=data_operation_job_execution_id)
except Exception as ex:
self.sql_logger.error(f"Scheduler getting error. Error:{ex}",job_id=data_operation_job_execution_id)
| 51.294118 | 116 | 0.713303 | from injector import inject
from IocManager import IocManager
from infrastructor.configuration.ConfigService import ConfigService
from infrastructor.data.RepositoryProvider import RepositoryProvider
from infrastructor.delivery.EmailProvider import EmailProvider
from infrastructor.exceptions.OperationalException import OperationalException
from infrastructor.logging.SqlLogger import SqlLogger
from models.configs.ApplicationConfig import ApplicationConfig
from models.dao.operation import DataOperationJob
class SendSchedulerErrorMailCommand:
@inject
def __init__(self):
self.repository_provider = RepositoryProvider()
self.config_service = ConfigService(self.repository_provider)
self.sql_logger = IocManager.injector.get(SqlLogger)
self.application_config = IocManager.injector.get(ApplicationConfig)
self.email_provider = EmailProvider(config_service=self.config_service, sql_logger=self.sql_logger)
def send(self, job_id:int,exception:Exception,data_operation_job_execution_id=None):
try:
data_operation_job_repository = self.repository_provider.get(DataOperationJob)
data_operation_job = data_operation_job_repository.first(JobId=job_id)
if data_operation_job is None:
raise OperationalException("Job definition not found")
operation_contacts = []
default_contacts = self.config_service.get_config_by_name("DataOperationDefaultContact")
if default_contacts is not None and default_contacts != '':
default_contacts_emails = default_contacts.split(",")
for default_contact in default_contacts_emails:
if default_contact is not None and default_contact != '':
operation_contacts.append(default_contact)
data_operation_name = data_operation_job.DataOperation.Name
subject = f"Scheduler getting error on execution create"
subject = subject + f": {self.application_config.environment} » {data_operation_name}"
body = f'''
<p>Scheduler getting error on job</p>
<p>{exception}<p/>
'''
try:
self.email_provider.send(operation_contacts, subject, body)
except Exception as ex:
self.sql_logger.error(f"Scheduler mail sending. Error:{ex}",job_id=data_operation_job_execution_id)
except Exception as ex:
self.sql_logger.error(f"Scheduler getting error. Error:{ex}",job_id=data_operation_job_execution_id)
| true | true |
1c3f06668e685207312debef13aeb5f3ad782b94 | 437 | py | Python | rubicon_ml/ui/__init__.py | capitalone/rubicon | b784cd2e28c68bc44d04317b7acc1eaadda7e4eb | [
"Apache-2.0"
] | 42 | 2021-02-23T23:30:49.000Z | 2021-05-01T02:54:03.000Z | rubicon_ml/ui/__init__.py | capitalone/rubicon-ml | b784cd2e28c68bc44d04317b7acc1eaadda7e4eb | [
"Apache-2.0"
] | 56 | 2021-05-13T13:47:50.000Z | 2022-03-24T13:46:49.000Z | rubicon_ml/ui/__init__.py | capitalone/rubicon | b784cd2e28c68bc44d04317b7acc1eaadda7e4eb | [
"Apache-2.0"
] | 9 | 2021-02-23T23:30:51.000Z | 2021-04-24T16:42:28.000Z | def _check_for_ui_extras():
try:
import dash # noqa F401
import dash_html_components as html # noqa F401
except ImportError:
install_command = "pip install rubicon[ui]"
message = f"Install the packages required for the UI with `{install_command}`."
raise ImportError(message)
_check_for_ui_extras()
from rubicon_ml.ui.dashboard import Dashboard # noqa F401
__all__ = ["Dashboard"]
| 25.705882 | 87 | 0.693364 | def _check_for_ui_extras():
try:
import dash
import dash_html_components as html
except ImportError:
install_command = "pip install rubicon[ui]"
message = f"Install the packages required for the UI with `{install_command}`."
raise ImportError(message)
_check_for_ui_extras()
from rubicon_ml.ui.dashboard import Dashboard
__all__ = ["Dashboard"]
| true | true |
1c3f08346d19b97e95a22306a1f67375c392273a | 227 | py | Python | sequential/audioset/__init__.py | mariacer/cl_in_rnns | 333b8e03391600a8e3df7d684a3f171b135d273a | [
"Apache-2.0"
] | 26 | 2020-06-17T08:44:15.000Z | 2022-03-20T04:21:13.000Z | sequential/audioset/__init__.py | mariacer/cl_in_rnns | 333b8e03391600a8e3df7d684a3f171b135d273a | [
"Apache-2.0"
] | null | null | null | sequential/audioset/__init__.py | mariacer/cl_in_rnns | 333b8e03391600a8e3df7d684a3f171b135d273a | [
"Apache-2.0"
] | 4 | 2020-10-26T02:19:38.000Z | 2021-12-26T02:26:05.000Z | import os
import sys
curr_dir = os.path.basename(os.path.abspath(os.curdir))
# See __init__.py in folder "toy_example" for an explanation.
if curr_dir == 'audioset' and '../..' not in sys.path:
sys.path.insert(0, '../..')
| 28.375 | 61 | 0.682819 | import os
import sys
curr_dir = os.path.basename(os.path.abspath(os.curdir))
if curr_dir == 'audioset' and '../..' not in sys.path:
sys.path.insert(0, '../..')
| true | true |
1c3f0914bb047b56ded0096975ff401e24cd96e5 | 7,279 | py | Python | lte/gateway/python/magma/pipelined/rule_mappers.py | saurabhsoni88/magma | 4236c9d8edb7bd203707ff7e861b1f7c12fb84c7 | [
"BSD-3-Clause"
] | null | null | null | lte/gateway/python/magma/pipelined/rule_mappers.py | saurabhsoni88/magma | 4236c9d8edb7bd203707ff7e861b1f7c12fb84c7 | [
"BSD-3-Clause"
] | 72 | 2021-03-08T09:37:52.000Z | 2022-03-29T23:20:10.000Z | lte/gateway/python/magma/pipelined/rule_mappers.py | kkahrs/magma | 73e666627dc28e0c492feab7321bb7d6dd433b09 | [
"BSD-3-Clause"
] | 1 | 2021-07-07T14:26:13.000Z | 2021-07-07T14:26:13.000Z | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import threading
from collections import namedtuple
from typing import Optional
from lte.protos.mobilityd_pb2 import IPAddress
from magma.pipelined.imsi import encode_imsi
from magma.common.redis.client import get_default_client
from magma.common.redis.containers import RedisHashDict
from magma.common.redis.serializers import get_json_deserializer, \
get_json_serializer
SubscriberRuleKey = namedtuple('SubscriberRuleKey', 'key_type imsi ip_addr rule_id')
class RuleIDToNumMapper:
"""
Rule ID to Number Mapper
This class assigns integers to rule ids so that they can be identified in
an openflow register. The methods can be called from multiple threads
"""
def __init__(self):
self.redis_cli = get_default_client()
self._curr_rule_num = 1
self._rule_nums_by_rule = RuleIDDict()
self._rules_by_rule_num = RuleNameDict()
self._lock = threading.Lock() # write lock
def _register_rule(self, rule_id):
""" NOT thread safe """
rule_num = self._rule_nums_by_rule.get(rule_id)
if rule_num is not None:
return rule_num
rule_num = self._curr_rule_num
self._rule_nums_by_rule[rule_id] = rule_num
self._rules_by_rule_num[rule_num] = rule_id
self._curr_rule_num += 1
return rule_num
def get_rule_num(self, rule_id):
with self._lock:
return self._rule_nums_by_rule[rule_id]
def get_or_create_rule_num(self, rule_id):
with self._lock:
rule_num = self._rule_nums_by_rule.get(rule_id)
if rule_num is None:
return self._register_rule(rule_id)
return rule_num
def get_rule_id(self, rule_num):
with self._lock:
return self._rules_by_rule_num[rule_num]
class SessionRuleToVersionMapper:
"""
Session & Rule to Version Mapper
This class assigns version numbers to rule id & subscriber id combinations
that can be used in an openflow register. The methods can be called from
multiple threads.
"""
VERSION_LIMIT = 0xFFFFFFFF # 32 bit unsigned int limit (inclusive)
def __init__(self):
self._version_by_imsi_and_rule = RuleVersionDict()
self._lock = threading.Lock() # write lock
def _update_version_unsafe(self, imsi: str, ip_addr: str, rule_id: str):
key = self._get_json_key(encode_imsi(imsi), ip_addr, rule_id)
version = self._version_by_imsi_and_rule.get(key)
if not version:
version = 0
self._version_by_imsi_and_rule[key] = \
(version % self.VERSION_LIMIT) + 1
def update_version(self, imsi: str, ip_addr: IPAddress,
rule_id: Optional[str] = None):
"""
Increment the version number for a given subscriber and rule. If the
rule id is not specified, then all rules for the subscriber will be
incremented.
"""
encoded_imsi = encode_imsi(imsi)
if ip_addr is None or ip_addr.address is None:
ip_addr_str = ""
else:
ip_addr_str = ip_addr.address.decode('utf-8')
with self._lock:
if rule_id is None:
for k, v in self._version_by_imsi_and_rule.items():
_, imsi, ip_addr_str, _ = SubscriberRuleKey(*json.loads(k))
if imsi == encoded_imsi and ip_addr_str == ip_addr_str:
self._version_by_imsi_and_rule[k] = v + 1
else:
self._update_version_unsafe(imsi, ip_addr_str, rule_id)
def get_version(self, imsi: str, ip_addr: IPAddress, rule_id: str) -> int:
"""
Returns the version number given a subscriber and a rule.
"""
if ip_addr is None or ip_addr.address is None:
ip_addr_str = ""
else:
ip_addr_str = ip_addr.address.decode('utf-8')
key = self._get_json_key(encode_imsi(imsi), ip_addr_str, rule_id)
with self._lock:
version = self._version_by_imsi_and_rule.get(key)
if version is None:
version = 0
return version
def _get_json_key(self, imsi: str, ip_addr: str, rule_id: str):
return json.dumps(SubscriberRuleKey('imsi_rule', imsi, ip_addr,
rule_id))
class RuleIDDict(RedisHashDict):
"""
RuleIDDict uses the RedisHashDict collection to store a mapping of
rule name to rule id.
Setting and deleting items in the dictionary syncs with Redis automatically
"""
_DICT_HASH = "pipelined:rule_ids"
def __init__(self):
client = get_default_client()
super().__init__(
client,
self._DICT_HASH,
get_json_serializer(), get_json_deserializer())
def __missing__(self, key):
"""Instead of throwing a key error, return None when key not found"""
return None
class RuleNameDict(RedisHashDict):
"""
RuleNameDict uses the RedisHashDict collection to store a mapping of
rule id to rule name.
Setting and deleting items in the dictionary syncs with Redis automatically
"""
_DICT_HASH = "pipelined:rule_names"
def __init__(self):
client = get_default_client()
super().__init__(
client,
self._DICT_HASH,
get_json_serializer(), get_json_deserializer())
def __missing__(self, key):
"""Instead of throwing a key error, return None when key not found"""
return None
class RuleVersionDict(RedisHashDict):
"""
RuleVersionDict uses the RedisHashDict collection to store a mapping of
subscriber+rule_id to rule version.
Setting and deleting items in the dictionary syncs with Redis automatically
"""
_DICT_HASH = "pipelined:rule_versions"
def __init__(self):
client = get_default_client()
super().__init__(
client,
self._DICT_HASH,
get_json_serializer(), get_json_deserializer())
def __missing__(self, key):
"""Instead of throwing a key error, return None when key not found"""
return None
class UsageDeltaDict(RedisHashDict):
"""
UsageDeltaDict uses the RedisHashDict collection to store a mapping of
subscriber+rule_id+ip to rule usage.
Setting and deleting items in the dictionary syncs with Redis automatically
"""
_DICT_HASH = "pipelined:last_usage_delta"
def __init__(self):
client = get_default_client()
super().__init__(
client,
self._DICT_HASH,
get_json_serializer(), get_json_deserializer())
def __missing__(self, key):
"""Instead of throwing a key error, return None when key not found"""
return None | 34.334906 | 84 | 0.660393 | import json
import threading
from collections import namedtuple
from typing import Optional
from lte.protos.mobilityd_pb2 import IPAddress
from magma.pipelined.imsi import encode_imsi
from magma.common.redis.client import get_default_client
from magma.common.redis.containers import RedisHashDict
from magma.common.redis.serializers import get_json_deserializer, \
get_json_serializer
SubscriberRuleKey = namedtuple('SubscriberRuleKey', 'key_type imsi ip_addr rule_id')
class RuleIDToNumMapper:
def __init__(self):
self.redis_cli = get_default_client()
self._curr_rule_num = 1
self._rule_nums_by_rule = RuleIDDict()
self._rules_by_rule_num = RuleNameDict()
self._lock = threading.Lock()
def _register_rule(self, rule_id):
rule_num = self._rule_nums_by_rule.get(rule_id)
if rule_num is not None:
return rule_num
rule_num = self._curr_rule_num
self._rule_nums_by_rule[rule_id] = rule_num
self._rules_by_rule_num[rule_num] = rule_id
self._curr_rule_num += 1
return rule_num
def get_rule_num(self, rule_id):
with self._lock:
return self._rule_nums_by_rule[rule_id]
def get_or_create_rule_num(self, rule_id):
with self._lock:
rule_num = self._rule_nums_by_rule.get(rule_id)
if rule_num is None:
return self._register_rule(rule_id)
return rule_num
def get_rule_id(self, rule_num):
with self._lock:
return self._rules_by_rule_num[rule_num]
class SessionRuleToVersionMapper:
VERSION_LIMIT = 0xFFFFFFFF
def __init__(self):
self._version_by_imsi_and_rule = RuleVersionDict()
self._lock = threading.Lock()
def _update_version_unsafe(self, imsi: str, ip_addr: str, rule_id: str):
key = self._get_json_key(encode_imsi(imsi), ip_addr, rule_id)
version = self._version_by_imsi_and_rule.get(key)
if not version:
version = 0
self._version_by_imsi_and_rule[key] = \
(version % self.VERSION_LIMIT) + 1
def update_version(self, imsi: str, ip_addr: IPAddress,
rule_id: Optional[str] = None):
encoded_imsi = encode_imsi(imsi)
if ip_addr is None or ip_addr.address is None:
ip_addr_str = ""
else:
ip_addr_str = ip_addr.address.decode('utf-8')
with self._lock:
if rule_id is None:
for k, v in self._version_by_imsi_and_rule.items():
_, imsi, ip_addr_str, _ = SubscriberRuleKey(*json.loads(k))
if imsi == encoded_imsi and ip_addr_str == ip_addr_str:
self._version_by_imsi_and_rule[k] = v + 1
else:
self._update_version_unsafe(imsi, ip_addr_str, rule_id)
def get_version(self, imsi: str, ip_addr: IPAddress, rule_id: str) -> int:
if ip_addr is None or ip_addr.address is None:
ip_addr_str = ""
else:
ip_addr_str = ip_addr.address.decode('utf-8')
key = self._get_json_key(encode_imsi(imsi), ip_addr_str, rule_id)
with self._lock:
version = self._version_by_imsi_and_rule.get(key)
if version is None:
version = 0
return version
def _get_json_key(self, imsi: str, ip_addr: str, rule_id: str):
return json.dumps(SubscriberRuleKey('imsi_rule', imsi, ip_addr,
rule_id))
class RuleIDDict(RedisHashDict):
_DICT_HASH = "pipelined:rule_ids"
def __init__(self):
client = get_default_client()
super().__init__(
client,
self._DICT_HASH,
get_json_serializer(), get_json_deserializer())
def __missing__(self, key):
return None
class RuleNameDict(RedisHashDict):
_DICT_HASH = "pipelined:rule_names"
def __init__(self):
client = get_default_client()
super().__init__(
client,
self._DICT_HASH,
get_json_serializer(), get_json_deserializer())
def __missing__(self, key):
return None
class RuleVersionDict(RedisHashDict):
_DICT_HASH = "pipelined:rule_versions"
def __init__(self):
client = get_default_client()
super().__init__(
client,
self._DICT_HASH,
get_json_serializer(), get_json_deserializer())
def __missing__(self, key):
return None
class UsageDeltaDict(RedisHashDict):
_DICT_HASH = "pipelined:last_usage_delta"
def __init__(self):
client = get_default_client()
super().__init__(
client,
self._DICT_HASH,
get_json_serializer(), get_json_deserializer())
def __missing__(self, key):
return None | true | true |
1c3f0a0c86d47e00c83a25b03533a6f7098b15d8 | 927 | py | Python | etc/v3/lib.py | timm/au | 14b6ae77cf12c746c1dbad3d35a3dca874fc8d41 | [
"MIT"
] | null | null | null | etc/v3/lib.py | timm/au | 14b6ae77cf12c746c1dbad3d35a3dca874fc8d41 | [
"MIT"
] | 14 | 2020-05-24T19:22:20.000Z | 2021-01-01T03:50:26.000Z | etc/v3/lib.py | timm/gold | 14b6ae77cf12c746c1dbad3d35a3dca874fc8d41 | [
"MIT"
] | null | null | null | import pprint
import re
import random
import sys
class Thing:
"Classes that can pretty print themselves."
def __repr__(i):
return re.sub(r"'", ' ',
pprint.pformat(dicts(i.__dict__), compact=True))
def dicts(i, seen=None):
" Converts `i` into a nested dictionary, then pretty-prints that."
if isinstance(i, (tuple, list)):
return [dicts(v, seen) for v in i]
elif isinstance(i, dict):
return {k: dicts(i[k], seen) for k in i if str(k)[0] != "_"}
elif isinstance(i, Thing):
seen = seen or {}
j = id(i) % 128021 # ids are LONG; show them shorter.
if i in seen:
return f"#:{j}"
seen[i] = i
d = dicts(i.__dict__, seen)
d["#"] = j
return d
else:
return i
class o(Thing):
"Fast way to initialize an instance that has no methods"
def __init__(i, **d): i.__dict__.update(**d)
if __name__ == "__main__":
if "--test" in sys.argv:
tests()
| 22.609756 | 68 | 0.606257 | import pprint
import re
import random
import sys
class Thing:
def __repr__(i):
return re.sub(r"'", ' ',
pprint.pformat(dicts(i.__dict__), compact=True))
def dicts(i, seen=None):
if isinstance(i, (tuple, list)):
return [dicts(v, seen) for v in i]
elif isinstance(i, dict):
return {k: dicts(i[k], seen) for k in i if str(k)[0] != "_"}
elif isinstance(i, Thing):
seen = seen or {}
j = id(i) % 128021 # ids are LONG; show them shorter.
if i in seen:
return f"#:{j}"
seen[i] = i
d = dicts(i.__dict__, seen)
d["#"] = j
return d
else:
return i
class o(Thing):
def __init__(i, **d): i.__dict__.update(**d)
if __name__ == "__main__":
if "--test" in sys.argv:
tests()
| true | true |
1c3f0a791361f1bf2f0d720fe7fdd421e3085d21 | 283 | py | Python | compiler_gym/util/flags/episode_length.py | thecoblack/CompilerGym | ade54e2f1829cf41722decb0942a4d6fd3102c2c | [
"MIT"
] | 562 | 2020-12-21T14:10:20.000Z | 2022-03-31T21:23:55.000Z | compiler_gym/util/flags/episode_length.py | thecoblack/CompilerGym | ade54e2f1829cf41722decb0942a4d6fd3102c2c | [
"MIT"
] | 433 | 2020-12-22T03:40:41.000Z | 2022-03-31T18:16:17.000Z | compiler_gym/util/flags/episode_length.py | thecoblack/CompilerGym | ade54e2f1829cf41722decb0942a4d6fd3102c2c | [
"MIT"
] | 88 | 2020-12-22T08:22:00.000Z | 2022-03-20T19:00:40.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from absl import flags
flags.DEFINE_integer("episode_length", 5, "The number of steps in each episode.")
| 35.375 | 81 | 0.763251 |
from absl import flags
flags.DEFINE_integer("episode_length", 5, "The number of steps in each episode.")
| true | true |
1c3f0a9ccd8484cdf510ff5f2a5708936bfbae13 | 139 | py | Python | tinychronicler/__init__.py | adzialocha/tinychronicler | 0a3fb213536dd7fc2af490c027a189fb2810903f | [
"MIT"
] | 4 | 2021-08-16T17:22:25.000Z | 2022-02-21T14:38:17.000Z | tinychronicler/__init__.py | adzialocha/tinychronicler | 0a3fb213536dd7fc2af490c027a189fb2810903f | [
"MIT"
] | null | null | null | tinychronicler/__init__.py | adzialocha/tinychronicler | 0a3fb213536dd7fc2af490c027a189fb2810903f | [
"MIT"
] | null | null | null | from .main import main
from .server import server
from .version import version as __version__
__all__ = ["__version__", "main", "server"]
| 23.166667 | 43 | 0.755396 | from .main import main
from .server import server
from .version import version as __version__
__all__ = ["__version__", "main", "server"]
| true | true |
1c3f0b2a01834fec9d729e03253215c8a0748774 | 500 | py | Python | migrations/versions/26e69577923_.py | Summerotter/furryyellowpages | 7e10786a39ddae9de5dad0236191a6258b527be8 | [
"MIT"
] | null | null | null | migrations/versions/26e69577923_.py | Summerotter/furryyellowpages | 7e10786a39ddae9de5dad0236191a6258b527be8 | [
"MIT"
] | 1 | 2021-02-02T21:46:02.000Z | 2021-02-02T21:46:02.000Z | migrations/versions/26e69577923_.py | Summerotter/furryyellowpages | 7e10786a39ddae9de5dad0236191a6258b527be8 | [
"MIT"
] | null | null | null | """empty message
Revision ID: 26e69577923
Revises: cd1d14e435
Create Date: 2017-03-08 18:18:30.706551
"""
# revision identifiers, used by Alembic.
revision = '26e69577923'
down_revision = 'cd1d14e435'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
| 18.518519 | 63 | 0.686 |
revision = '26e69577923'
down_revision = 'cd1d14e435'
from alembic import op
import sqlalchemy as sa
def upgrade():
| true | true |
1c3f0b739199b034e4405680127f28833ab4e5ff | 3,077 | py | Python | flask_admin/helpers.py | pashcovich/flask-admin | f5748f25b91392012be536a81dc23fd92e5e791d | [
"BSD-3-Clause"
] | 2 | 2015-01-04T15:56:55.000Z | 2015-06-23T19:55:07.000Z | flask_admin/helpers.py | pawl/flask-admin | 700b8f4313b12d46d79d55f434db44d794fffb7d | [
"BSD-3-Clause"
] | null | null | null | flask_admin/helpers.py | pawl/flask-admin | 700b8f4313b12d46d79d55f434db44d794fffb7d | [
"BSD-3-Clause"
] | null | null | null | from re import sub
from jinja2 import contextfunction
from flask import g, request, url_for
from wtforms.validators import DataRequired, InputRequired
from flask.ext.admin._compat import urljoin, urlparse
from ._compat import string_types
def set_current_view(view):
    """
    Remember `view` as the active administrative view for this request.

    Stored on the Flask request globals (`g`); read back by
    `get_current_view`.
    """
    g._admin_view = view
def get_current_view():
    """
    Return the administrative view stored for this request, or None.
    """
    current = getattr(g, '_admin_view', None)
    return current
def get_url(endpoint, **kwargs):
    """
    Build a URL for `endpoint`, preferring the active admin view's resolver.

    Falls back to Flask's generic `url_for` when no administrative view is
    currently active.

    :param endpoint:
        Endpoint name

    :param kwargs:
        View arguments
    """
    view = get_current_view()
    if view:
        return view.get_url(endpoint, **kwargs)
    return url_for(endpoint, **kwargs)
def is_required_form_field(field):
    """
    Check if form field has `DataRequired` or `InputRequired` validators.

    :param field:
        WTForms field to check
    """
    # any() short-circuits on the first required-style validator found,
    # replacing the manual loop-and-return idiom.
    return any(
        isinstance(validator, (DataRequired, InputRequired))
        for validator in field.validators
    )
def is_form_submitted():
    """
    Return truthy when the current request carries form data (PUT or POST).
    """
    submit_methods = ("PUT", "POST")
    return request and request.method in submit_methods
def validate_form_on_submit(form):
    """
    Validate `form` when the current request is a PUT/POST submission.
    """
    submitted = is_form_submitted()
    return submitted and form.validate()
def get_form_data():
    """
    Return submitted form data merged with uploaded files, or None.

    Only PUT/POST requests carry form data; for any other method this
    returns None.
    """
    if not is_form_submitted():
        return None
    formdata = request.form
    if request.files:
        # copy before merging so the original request.form stays untouched
        formdata = formdata.copy()
        formdata.update(request.files)
    return formdata
def is_field_error(errors):
    """
    Check if wtforms field has error without checking its children.

    :param errors:
        Errors list.
    """
    # Only plain string entries count as errors on the field itself;
    # any() short-circuits on the first one found.
    return any(isinstance(e, string_types) for e in errors)
@contextfunction
def resolve_ctx(context):
    """
    Capture the current Jinja2 rendering context on the request globals.

    Jinja2 supplies `context` automatically because of the
    `@contextfunction` decorator; `get_render_ctx` reads the stored value
    back.
    """
    g._admin_render_ctx = context
def get_render_ctx():
    """
    Return the Jinja2 template context captured by `resolve_ctx`, or None.
    """
    ctx = getattr(g, '_admin_render_ctx', None)
    return ctx
def prettify_class_name(name):
    """
    Insert a space before each interior capital of a PascalCase name.

    :param name:
        String to split
    """
    camel_boundary = r'(?<=.)([A-Z])'
    return sub(camel_boundary, r' \1', name)
def is_safe_url(target):
    """
    Return True when `target` resolves to an http(s) URL on the current host.
    """
    ref_url = urlparse(request.host_url)
    test_url = urlparse(urljoin(request.host_url, target))
    if test_url.scheme not in ('http', 'https'):
        return False
    return ref_url.netloc == test_url.netloc
def get_redirect_target(param_name='url'):
    """
    Return a safe redirect URL taken from the request, or None.

    :param param_name:
        Name of the request parameter holding the candidate URL.
    """
    candidate = request.values.get(param_name)
    if candidate and is_safe_url(candidate):
        return candidate
    return None
| 22.792593 | 95 | 0.637634 | from re import sub
from jinja2 import contextfunction
from flask import g, request, url_for
from wtforms.validators import DataRequired, InputRequired
from flask.ext.admin._compat import urljoin, urlparse
from ._compat import string_types
def set_current_view(view):
g._admin_view = view
def get_current_view():
return getattr(g, '_admin_view', None)
def get_url(endpoint, **kwargs):
view = get_current_view()
if not view:
return url_for(endpoint, **kwargs)
return view.get_url(endpoint, **kwargs)
def is_required_form_field(field):
for validator in field.validators:
if isinstance(validator, (DataRequired, InputRequired)):
return True
return False
def is_form_submitted():
return request and request.method in ("PUT", "POST")
def validate_form_on_submit(form):
return is_form_submitted() and form.validate()
def get_form_data():
if is_form_submitted():
formdata = request.form
if request.files:
formdata = formdata.copy()
formdata.update(request.files)
return formdata
return None
def is_field_error(errors):
for e in errors:
if isinstance(e, string_types):
return True
return False
@contextfunction
def resolve_ctx(context):
g._admin_render_ctx = context
def get_render_ctx():
return getattr(g, '_admin_render_ctx', None)
def prettify_class_name(name):
return sub(r'(?<=.)([A-Z])', r' \1', name)
def is_safe_url(target):
ref_url = urlparse(request.host_url)
test_url = urlparse(urljoin(request.host_url, target))
return (test_url.scheme in ('http', 'https') and
ref_url.netloc == test_url.netloc)
def get_redirect_target(param_name='url'):
target = request.values.get(param_name)
if target and is_safe_url(target):
return target
| true | true |
1c3f0b974b912a60861d9ae2ced14645b28dda03 | 100 | py | Python | tracker/app/models/__init__.py | skielred/FairyJokeAPI | 71228e477bc6dd259e6f5f7e09b30c1e23ab96a3 | [
"MIT"
] | 3 | 2021-12-18T11:09:08.000Z | 2022-03-31T22:42:19.000Z | tracker/app/models/__init__.py | skielred/FairyJokeAPI | 71228e477bc6dd259e6f5f7e09b30c1e23ab96a3 | [
"MIT"
] | null | null | null | tracker/app/models/__init__.py | skielred/FairyJokeAPI | 71228e477bc6dd259e6f5f7e09b30c1e23ab96a3 | [
"MIT"
] | null | null | null | from .user import User
from .game import Game
from .ddr import DDRLocalChart, DDRScore, DDRScoreMod
| 25 | 53 | 0.81 | from .user import User
from .game import Game
from .ddr import DDRLocalChart, DDRScore, DDRScoreMod
| true | true |
1c3f0bfc5a08d4f9a4674c31951e141174c4e72b | 7,890 | py | Python | examples/lstm_stateful.py | asanoboy/keras | e467ee5a1a00afdfa1cb7f5508fdbfd2c5eab1e5 | [
"MIT"
] | 1 | 2017-11-01T19:10:35.000Z | 2017-11-01T19:10:35.000Z | examples/lstm_stateful.py | dmaniry/keras | 32aa192548b6b59bf407e583fbd246ba9f5f5676 | [
"MIT"
] | null | null | null | examples/lstm_stateful.py | dmaniry/keras | 32aa192548b6b59bf407e583fbd246ba9f5f5676 | [
"MIT"
] | 1 | 2019-11-19T12:13:27.000Z | 2019-11-19T12:13:27.000Z | '''Example script showing how to use a stateful LSTM model
and how its stateless counterpart performs.
More documentation about the Keras LSTM model can be found at
https://keras.io/layers/recurrent/#lstm
The models are trained on an input/output pair, where
the input is a generated uniformly distributed
random sequence of length = "input_len",
and the output is a moving average of the input with window length = "tsteps".
Both "input_len" and "tsteps" are defined in the "editable parameters" section.
A larger "tsteps" value means that the LSTM will need more memory
to figure out the input-output relationship.
This memory length is controlled by the "lahead" variable (more details below).
The rest of the parameters are:
- input_len: the length of the generated input sequence
- lahead: the input sequence length that the LSTM
is trained on for each output point
- batch_size, epochs: same parameters as in the model.fit(...) function
When lahead > 1, the model input is preprocessed to a "rolling window view"
of the data, with the window length = "lahead".
This is similar to sklearn's "view_as_windows"
with "window_shape" being a single number
Ref: http://scikit-image.org/docs/0.10.x/api/skimage.util.html#view-as-windows
When lahead < tsteps, only the stateful LSTM converges because its
statefulness allows it to see beyond the capability that lahead
gave it to fit the n-point average. The stateless LSTM does not have
this capability, and hence is limited by its "lahead" parameter,
which is not sufficient to see the n-point average.
When lahead >= tsteps, both the stateful and stateless LSTM converge.
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM
# ----------------------------------------------------------
# EDITABLE PARAMETERS
# Read the documentation in the script head for more details
# ----------------------------------------------------------
# length of input
input_len = 1000
# The window length of the moving average used to generate
# the output from the input in the input/output pair used
# to train the LSTM
# e.g. if tsteps=2 and input=[1, 2, 3, 4, 5],
# then output=[1.5, 2.5, 3.5, 4.5]
tsteps = 2
# The input sequence length that the LSTM is trained on for each output point
lahead = 1
# training parameters passed to "model.fit(...)"
batch_size = 1
epochs = 10
# ------------
# MAIN PROGRAM
# ------------
print("*" * 33)
if lahead >= tsteps:
print("STATELESS LSTM WILL ALSO CONVERGE")
else:
print("STATELESS LSTM WILL NOT CONVERGE")
print("*" * 33)
np.random.seed(1986)
print('Generating Data...')
def gen_uniform_amp(amp=1, xn=10000):
    """Draw `xn` samples uniformly from the interval [-amp, +amp].

    Arguments:
        amp: half-width of the sampling interval
        xn: number of samples to draw

    Returns a single-column pandas DataFrame of the samples.
    """
    samples = np.random.uniform(-1 * amp, +1 * amp, xn)
    return pd.DataFrame(samples)
# Since the output is a moving average of the input,
# the first few points of output will be NaN
# and will be dropped from the generated data
# before training the LSTM.
# Also, when lahead > 1,
# the preprocessing step later of "rolling window view"
# will also cause some points to be lost.
# For aesthetic reasons,
# in order to maintain generated data length = input_len after pre-processing,
# add a few points to account for the values that will be lost.
to_drop = max(tsteps - 1, lahead - 1)
data_input = gen_uniform_amp(amp=0.1, xn=input_len + to_drop)
# set the target to be a N-point average of the input
expected_output = data_input.rolling(window=tsteps, center=False).mean()
# when lahead > 1, need to convert the input to "rolling window view"
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.repeat.html
if lahead > 1:
data_input = np.repeat(data_input.values, repeats=lahead, axis=1)
data_input = pd.DataFrame(data_input)
for i, c in enumerate(data_input.columns):
data_input[c] = data_input[c].shift(i)
# drop the nan
expected_output = expected_output[to_drop:]
data_input = data_input[to_drop:]
print('Input shape:', data_input.shape)
print('Output shape:', expected_output.shape)
print('Input head: ')
print(data_input.head())
print('Output head: ')
print(expected_output.head())
print('Input tail: ')
print(data_input.tail())
print('Output tail: ')
print(expected_output.tail())
print('Plotting input and expected output')
plt.plot(data_input[0][:10], '.')
plt.plot(expected_output[0][:10], '-')
plt.legend(['Input', 'Expected output'])
plt.title('Input')
plt.show()
def create_model(stateful: bool):
    """Build and compile the single-layer LSTM regressor used by this script.

    `stateful` controls whether LSTM state is carried across batches; the
    module-level `lahead` and `batch_size` settings fix the input geometry.
    """
    layers = [
        LSTM(20,
             input_shape=(lahead, 1),
             batch_size=batch_size,
             stateful=stateful),
        Dense(1),
    ]
    model = Sequential(layers)
    model.compile(loss='mse', optimizer='adam')
    return model
print('Creating Stateful Model...')
model_stateful = create_model(stateful=True)
# split train/test data
def split_data(x, y, ratio=0.8):
    """Split the input/output pair into train and test portions.

    The train length is `ratio` of the module-level `input_len`; both splits
    are trimmed so their lengths are multiples of `batch_size` (required by
    the stateful LSTM). Inputs come out shaped (samples, timesteps, 1) and
    targets (samples, 1), as numpy arrays.

    Returns:
        ((x_train, y_train), (x_test, y_test))
    """
    to_train = int(input_len * ratio)
    # tweak to match with batch_size
    to_train -= to_train % batch_size

    x_train = x[:to_train]
    y_train = y[:to_train]
    x_test = x[to_train:]
    y_test = y[to_train:]

    # tweak to match with batch_size
    to_drop = x.shape[0] % batch_size
    if to_drop > 0:
        x_test = x_test[:-1 * to_drop]
        y_test = y_test[:-1 * to_drop]

    # named helpers instead of assigned lambdas (PEP 8, E731)
    def reshape_3(frame):
        return frame.values.reshape((frame.shape[0], frame.shape[1], 1))

    def reshape_2(frame):
        return frame.values.reshape((frame.shape[0], 1))

    x_train = reshape_3(x_train)
    x_test = reshape_3(x_test)
    y_train = reshape_2(y_train)
    y_test = reshape_2(y_test)

    return (x_train, y_train), (x_test, y_test)
(x_train, y_train), (x_test, y_test) = split_data(data_input, expected_output)
print('x_train.shape: ', x_train.shape)
print('y_train.shape: ', y_train.shape)
print('x_test.shape: ', x_test.shape)
print('y_test.shape: ', y_test.shape)
print('Training')
for i in range(epochs):
print('Epoch', i + 1, '/', epochs)
# Note that the last state for sample i in a batch will
# be used as initial state for sample i in the next batch.
# Thus we are simultaneously training on batch_size series with
# lower resolution than the original series contained in data_input.
# Each of these series are offset by one step and can be
# extracted with data_input[i::batch_size].
model_stateful.fit(x_train,
y_train,
batch_size=batch_size,
epochs=1,
verbose=1,
validation_data=(x_test, y_test),
shuffle=False)
model_stateful.reset_states()
print('Predicting')
predicted_stateful = model_stateful.predict(x_test, batch_size=batch_size)
print('Creating Stateless Model...')
model_stateless = create_model(stateful=False)
print('Training')
model_stateless.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
shuffle=False)
print('Predicting')
predicted_stateless = model_stateless.predict(x_test, batch_size=batch_size)
# ----------------------------
print('Plotting Results')
plt.subplot(3, 1, 1)
plt.plot(y_test)
plt.title('Expected')
plt.subplot(3, 1, 2)
# drop the first "tsteps-1" because it is not possible to predict them
# since the "previous" timesteps to use do not exist
plt.plot((y_test - predicted_stateful).flatten()[tsteps - 1:])
plt.title('Stateful: Expected - Predicted')
plt.subplot(3, 1, 3)
plt.plot((y_test - predicted_stateless).flatten())
plt.title('Stateless: Expected - Predicted')
plt.show()
| 32.603306 | 79 | 0.685805 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense, LSTM
input_len = 1000
tsteps = 2
lahead = 1
batch_size = 1
epochs = 10
print("*" * 33)
if lahead >= tsteps:
print("STATELESS LSTM WILL ALSO CONVERGE")
else:
print("STATELESS LSTM WILL NOT CONVERGE")
print("*" * 33)
np.random.seed(1986)
print('Generating Data...')
def gen_uniform_amp(amp=1, xn=10000):
data_input = np.random.uniform(-1 * amp, +1 * amp, xn)
data_input = pd.DataFrame(data_input)
return data_input
to_drop = max(tsteps - 1, lahead - 1)
data_input = gen_uniform_amp(amp=0.1, xn=input_len + to_drop)
expected_output = data_input.rolling(window=tsteps, center=False).mean()
if lahead > 1:
data_input = np.repeat(data_input.values, repeats=lahead, axis=1)
data_input = pd.DataFrame(data_input)
for i, c in enumerate(data_input.columns):
data_input[c] = data_input[c].shift(i)
expected_output = expected_output[to_drop:]
data_input = data_input[to_drop:]
print('Input shape:', data_input.shape)
print('Output shape:', expected_output.shape)
print('Input head: ')
print(data_input.head())
print('Output head: ')
print(expected_output.head())
print('Input tail: ')
print(data_input.tail())
print('Output tail: ')
print(expected_output.tail())
print('Plotting input and expected output')
plt.plot(data_input[0][:10], '.')
plt.plot(expected_output[0][:10], '-')
plt.legend(['Input', 'Expected output'])
plt.title('Input')
plt.show()
def create_model(stateful: bool):
model = Sequential()
model.add(LSTM(20,
input_shape=(lahead, 1),
batch_size=batch_size,
stateful=stateful))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
return model
print('Creating Stateful Model...')
model_stateful = create_model(stateful=True)
def split_data(x, y, ratio=0.8):
to_train = int(input_len * ratio)
to_train -= to_train % batch_size
x_train = x[:to_train]
y_train = y[:to_train]
x_test = x[to_train:]
y_test = y[to_train:]
to_drop = x.shape[0] % batch_size
if to_drop > 0:
x_test = x_test[:-1 * to_drop]
y_test = y_test[:-1 * to_drop]
reshape_3 = lambda x: x.values.reshape((x.shape[0], x.shape[1], 1))
x_train = reshape_3(x_train)
x_test = reshape_3(x_test)
reshape_2 = lambda x: x.values.reshape((x.shape[0], 1))
y_train = reshape_2(y_train)
y_test = reshape_2(y_test)
return (x_train, y_train), (x_test, y_test)
(x_train, y_train), (x_test, y_test) = split_data(data_input, expected_output)
print('x_train.shape: ', x_train.shape)
print('y_train.shape: ', y_train.shape)
print('x_test.shape: ', x_test.shape)
print('y_test.shape: ', y_test.shape)
print('Training')
for i in range(epochs):
print('Epoch', i + 1, '/', epochs)
model_stateful.fit(x_train,
y_train,
batch_size=batch_size,
epochs=1,
verbose=1,
validation_data=(x_test, y_test),
shuffle=False)
model_stateful.reset_states()
print('Predicting')
predicted_stateful = model_stateful.predict(x_test, batch_size=batch_size)
print('Creating Stateless Model...')
model_stateless = create_model(stateful=False)
print('Training')
model_stateless.fit(x_train,
y_train,
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test, y_test),
shuffle=False)
print('Predicting')
predicted_stateless = model_stateless.predict(x_test, batch_size=batch_size)
print('Plotting Results')
plt.subplot(3, 1, 1)
plt.plot(y_test)
plt.title('Expected')
plt.subplot(3, 1, 2)
plt.plot((y_test - predicted_stateful).flatten()[tsteps - 1:])
plt.title('Stateful: Expected - Predicted')
plt.subplot(3, 1, 3)
plt.plot((y_test - predicted_stateless).flatten())
plt.title('Stateless: Expected - Predicted')
plt.show()
| true | true |
1c3f0d1dcf080b615a73e5f6e831b8581fc6ca4a | 597 | py | Python | tadataka/decorator.py | IshitaTakeshi/Tadataka | 852c7afb904503005e51884408e1492ef0be836f | [
"Apache-2.0"
] | 54 | 2019-11-15T16:30:34.000Z | 2022-01-13T15:18:54.000Z | tadataka/decorator.py | IshitaTakeshi/Tadataka | 852c7afb904503005e51884408e1492ef0be836f | [
"Apache-2.0"
] | 11 | 2019-02-28T08:28:24.000Z | 2020-04-07T04:47:12.000Z | tadataka/decorator.py | IshitaTakeshi/Tadataka | 852c7afb904503005e51884408e1492ef0be836f | [
"Apache-2.0"
] | 1 | 2020-02-26T13:59:40.000Z | 2020-02-26T13:59:40.000Z | import numpy as np
def allow_1d(which_argument):
    """Decorator factory: let a function written for 2d arrays accept 1d too.

    `which_argument` is the positional index of the array argument. When that
    argument is 1d it is promoted to a single-row 2d array before the call
    and the first row of the result is returned; 2d arguments pass through
    untouched.

    Raises:
        ValueError: if the argument is neither 1d nor 2d.
    """
    def allow_1d_(function):
        # preserve the wrapped function's name/docstring for introspection
        @functools.wraps(function)
        def decorated(*args, **kwargs):
            args = list(args)
            ndim = np.ndim(args[which_argument])
            if ndim == 1:
                # promote to 2d, call, then unwrap the single-row result
                args[which_argument] = np.atleast_2d(args[which_argument])
                return function(*args, **kwargs)[0]
            if ndim == 2:
                return function(*args, **kwargs)
            raise ValueError(
                f"Argument number {which_argument} has to be 1d or 2d array"
            )
        return decorated
    return allow_1d_
| 27.136364 | 76 | 0.544389 | import numpy as np
def allow_1d(which_argument):
def allow_1d_(function):
def decorated(*args, **kwargs):
args = list(args)
ndim = np.ndim(args[which_argument])
if ndim == 1:
args[which_argument] = np.atleast_2d(args[which_argument])
return function(*args, **kwargs)[0]
if ndim == 2:
return function(*args, **kwargs)
raise ValueError(
f"Argument number {which_argument} has to be 1d or 2d array"
)
return decorated
return allow_1d_
| true | true |
1c3f0d534ffb999c7a8ad354a71e615c40bb1fb1 | 6,166 | py | Python | test/countries/test_singapore.py | hugovk/python-holidays | e22c667a159c959d81b512cc354910fc5c6653a9 | [
"MIT"
] | 48 | 2016-11-22T09:18:50.000Z | 2018-01-14T14:06:49.000Z | test/countries/test_singapore.py | hugovk/python-holidays | e22c667a159c959d81b512cc354910fc5c6653a9 | [
"MIT"
] | 59 | 2016-12-03T15:52:36.000Z | 2018-01-16T09:37:15.000Z | test/countries/test_singapore.py | hugovk/python-holidays | e22c667a159c959d81b512cc354910fc5c6653a9 | [
"MIT"
] | 51 | 2016-11-25T14:53:55.000Z | 2018-01-16T09:58:56.000Z | # -*- coding: utf-8 -*-
# python-holidays
# ---------------
# A fast, efficient Python library for generating country, province and state
# specific sets of holidays on the fly. It aims to make determining whether a
# specific date is a holiday as fast and flexible as possible.
#
# Authors: dr-prodigy <maurizio.montel@gmail.com> (c) 2017-2022
# ryanss <ryanssdev@icloud.com> (c) 2014-2017
# Website: https://github.com/dr-prodigy/python-holidays
# License: MIT (see LICENSE file)
import sys
import unittest
from datetime import date
import holidays
class TestSingapore(unittest.TestCase):
    """Singapore public-holiday checks against the official MOM calendar."""
    def setUp(self):
        # Fresh holiday calendar per test; years populate lazily on lookup.
        self.holidays = holidays.Singapore()
    def test_Singapore(self):
        # <= 1968 holidays
        self.assertIn(date(1968, 4, 13), self.holidays)
        self.assertIn(date(1968, 4, 15), self.holidays)
        self.assertIn(date(1968, 12, 26), self.holidays)
        # latest polling day
        self.assertIn(date(2015, 9, 11), self.holidays)
        # SG50
        self.assertIn(date(2015, 8, 7), self.holidays)
        # Year with lunar leap month
        self.assertIn(date(2015, 8, 7), self.holidays)
        # Latest holidays
        # Source: https://www.mom.gov.sg/employment-practices/public-holidays
        # 2018
        self.assertIn(date(2018, 1, 1), self.holidays)
        self.assertIn(date(2018, 2, 16), self.holidays)
        self.assertIn(date(2018, 2, 17), self.holidays)
        self.assertIn(date(2018, 3, 30), self.holidays)
        self.assertIn(date(2018, 5, 1), self.holidays)
        self.assertIn(date(2018, 5, 29), self.holidays)
        self.assertIn(date(2018, 6, 15), self.holidays)
        self.assertIn(date(2018, 8, 9), self.holidays)
        self.assertIn(date(2018, 8, 22), self.holidays)
        self.assertIn(date(2018, 11, 6), self.holidays)
        self.assertIn(date(2018, 12, 25), self.holidays)
        # 2018: total holidays (11 + 0 falling on a Sunday)
        self.assertEqual(len(holidays.Singapore(years=[2018])), 11 + 0)
        # 2019
        self.assertIn(date(2019, 1, 1), self.holidays)
        self.assertIn(date(2019, 2, 5), self.holidays)
        self.assertIn(date(2019, 2, 6), self.holidays)
        self.assertIn(date(2019, 4, 19), self.holidays)
        self.assertIn(date(2019, 5, 1), self.holidays)
        self.assertIn(date(2019, 5, 19), self.holidays)
        self.assertIn(date(2019, 6, 5), self.holidays)
        self.assertIn(date(2019, 8, 9), self.holidays)
        self.assertIn(date(2019, 8, 11), self.holidays)
        self.assertIn(date(2019, 10, 27), self.holidays)
        self.assertIn(date(2019, 12, 25), self.holidays)
        # 2019: total holidays (11 + 3 falling on a Sunday)
        self.assertEqual(len(holidays.Singapore(years=[2019])), 11 + 3)
        # 2020
        self.assertIn(date(2020, 1, 1), self.holidays)
        self.assertIn(date(2020, 1, 25), self.holidays)
        self.assertIn(date(2020, 1, 26), self.holidays)
        self.assertIn(date(2020, 4, 10), self.holidays)
        self.assertIn(date(2020, 5, 1), self.holidays)
        self.assertIn(date(2020, 5, 7), self.holidays)
        self.assertIn(date(2020, 5, 24), self.holidays)
        self.assertIn(date(2020, 7, 31), self.holidays)
        self.assertIn(date(2020, 8, 9), self.holidays)
        self.assertIn(date(2020, 11, 14), self.holidays)
        self.assertIn(date(2020, 12, 25), self.holidays)
        # 2020: total holidays (11 + 4 extra days: 3 falling on a Sunday,
        # presumably plus the 2020 Polling Day -- matches the 11 + 4 below)
        self.assertEqual(len(holidays.Singapore(years=[2020])), 11 + 4)
        # 2021
        self.assertIn(date(2021, 1, 1), self.holidays)
        self.assertIn(date(2021, 2, 12), self.holidays)
        self.assertIn(date(2021, 2, 13), self.holidays)
        self.assertIn(date(2021, 4, 2), self.holidays)
        self.assertIn(date(2021, 5, 1), self.holidays)
        self.assertIn(date(2021, 5, 13), self.holidays)
        self.assertIn(date(2021, 5, 26), self.holidays)
        self.assertIn(date(2021, 7, 20), self.holidays)
        self.assertIn(date(2021, 8, 9), self.holidays)
        self.assertIn(date(2021, 11, 4), self.holidays)
        self.assertIn(date(2021, 12, 25), self.holidays)
        # 2021: total holidays (11)
        self.assertEqual(len(holidays.Singapore(years=[2021])), 11)
        # 2022
        self.assertIn(date(2022, 1, 1), self.holidays)
        self.assertIn(date(2022, 2, 1), self.holidays)
        self.assertIn(date(2022, 2, 2), self.holidays)
        self.assertIn(date(2022, 4, 15), self.holidays)
        self.assertIn(date(2022, 5, 1), self.holidays)
        self.assertIn(date(2022, 5, 2), self.holidays)
        self.assertIn(date(2022, 5, 3), self.holidays)
        self.assertIn(date(2022, 5, 15), self.holidays)
        self.assertIn(date(2022, 5, 16), self.holidays)
        self.assertIn(date(2022, 7, 9), self.holidays)
        self.assertIn(date(2022, 8, 9), self.holidays)
        self.assertIn(date(2022, 11, 24), self.holidays)
        self.assertIn(date(2022, 12, 25), self.holidays)
        self.assertIn(date(2022, 12, 26), self.holidays)
        # 2022: total holidays (11 + 3 falling on a Sunday)
        self.assertEqual(len(holidays.Singapore(years=[2022])), 11 + 3)
        # holidays estimated using lunar calendar
        self.assertIn(date(2023, 6, 2), self.holidays)  # Vesak Day
        self.assertIn(date(2023, 11, 11), self.holidays)  # Deepavali
        # holidays estimated using library hijri-converter
        if sys.version_info >= (3, 6):
            import importlib.util
            if importlib.util.find_spec("hijri_converter"):
                # <= 1968 holidays
                self.assertIn(date(1968, 1, 2), self.holidays)
                # 2023 Islamic holidays (estimated)
                self.assertIn(
                    date(2023, 4, 21), self.holidays
                )  # Hari Raya Puasa
                self.assertIn(
                    date(2023, 6, 28), self.holidays
                )  # Hari Raya Haji
    def test_aliases(self):
        """For coverage purposes"""
        h = holidays.SG()
        self.assertIsInstance(h, holidays.Singapore)
        h = holidays.SGP()
        self.assertIsInstance(h, holidays.Singapore)
| 45.007299 | 78 | 0.614175 |
import sys
import unittest
from datetime import date
import holidays
class TestSingapore(unittest.TestCase):
def setUp(self):
self.holidays = holidays.Singapore()
def test_Singapore(self):
self.assertIn(date(1968, 4, 13), self.holidays)
self.assertIn(date(1968, 4, 15), self.holidays)
self.assertIn(date(1968, 12, 26), self.holidays)
self.assertIn(date(2015, 9, 11), self.holidays)
self.assertIn(date(2015, 8, 7), self.holidays)
self.assertIn(date(2015, 8, 7), self.holidays)
self.assertIn(date(2018, 1, 1), self.holidays)
self.assertIn(date(2018, 2, 16), self.holidays)
self.assertIn(date(2018, 2, 17), self.holidays)
self.assertIn(date(2018, 3, 30), self.holidays)
self.assertIn(date(2018, 5, 1), self.holidays)
self.assertIn(date(2018, 5, 29), self.holidays)
self.assertIn(date(2018, 6, 15), self.holidays)
self.assertIn(date(2018, 8, 9), self.holidays)
self.assertIn(date(2018, 8, 22), self.holidays)
self.assertIn(date(2018, 11, 6), self.holidays)
self.assertIn(date(2018, 12, 25), self.holidays)
self.assertEqual(len(holidays.Singapore(years=[2018])), 11 + 0)
self.assertIn(date(2019, 1, 1), self.holidays)
self.assertIn(date(2019, 2, 5), self.holidays)
self.assertIn(date(2019, 2, 6), self.holidays)
self.assertIn(date(2019, 4, 19), self.holidays)
self.assertIn(date(2019, 5, 1), self.holidays)
self.assertIn(date(2019, 5, 19), self.holidays)
self.assertIn(date(2019, 6, 5), self.holidays)
self.assertIn(date(2019, 8, 9), self.holidays)
self.assertIn(date(2019, 8, 11), self.holidays)
self.assertIn(date(2019, 10, 27), self.holidays)
self.assertIn(date(2019, 12, 25), self.holidays)
self.assertEqual(len(holidays.Singapore(years=[2019])), 11 + 3)
self.assertIn(date(2020, 1, 1), self.holidays)
self.assertIn(date(2020, 1, 25), self.holidays)
self.assertIn(date(2020, 1, 26), self.holidays)
self.assertIn(date(2020, 4, 10), self.holidays)
self.assertIn(date(2020, 5, 1), self.holidays)
self.assertIn(date(2020, 5, 7), self.holidays)
self.assertIn(date(2020, 5, 24), self.holidays)
self.assertIn(date(2020, 7, 31), self.holidays)
self.assertIn(date(2020, 8, 9), self.holidays)
self.assertIn(date(2020, 11, 14), self.holidays)
self.assertIn(date(2020, 12, 25), self.holidays)
self.assertEqual(len(holidays.Singapore(years=[2020])), 11 + 4)
self.assertIn(date(2021, 1, 1), self.holidays)
self.assertIn(date(2021, 2, 12), self.holidays)
self.assertIn(date(2021, 2, 13), self.holidays)
self.assertIn(date(2021, 4, 2), self.holidays)
self.assertIn(date(2021, 5, 1), self.holidays)
self.assertIn(date(2021, 5, 13), self.holidays)
self.assertIn(date(2021, 5, 26), self.holidays)
self.assertIn(date(2021, 7, 20), self.holidays)
self.assertIn(date(2021, 8, 9), self.holidays)
self.assertIn(date(2021, 11, 4), self.holidays)
self.assertIn(date(2021, 12, 25), self.holidays)
self.assertEqual(len(holidays.Singapore(years=[2021])), 11)
self.assertIn(date(2022, 1, 1), self.holidays)
self.assertIn(date(2022, 2, 1), self.holidays)
self.assertIn(date(2022, 2, 2), self.holidays)
self.assertIn(date(2022, 4, 15), self.holidays)
self.assertIn(date(2022, 5, 1), self.holidays)
self.assertIn(date(2022, 5, 2), self.holidays)
self.assertIn(date(2022, 5, 3), self.holidays)
self.assertIn(date(2022, 5, 15), self.holidays)
self.assertIn(date(2022, 5, 16), self.holidays)
self.assertIn(date(2022, 7, 9), self.holidays)
self.assertIn(date(2022, 8, 9), self.holidays)
self.assertIn(date(2022, 11, 24), self.holidays)
self.assertIn(date(2022, 12, 25), self.holidays)
self.assertIn(date(2022, 12, 26), self.holidays)
self.assertEqual(len(holidays.Singapore(years=[2022])), 11 + 3)
self.assertIn(date(2023, 6, 2), self.holidays)
self.assertIn(date(2023, 11, 11), self.holidays)
if sys.version_info >= (3, 6):
import importlib.util
if importlib.util.find_spec("hijri_converter"):
self.assertIn(date(1968, 1, 2), self.holidays)
self.assertIn(
date(2023, 4, 21), self.holidays
)
self.assertIn(
date(2023, 6, 28), self.holidays
)
def test_aliases(self):
h = holidays.SG()
self.assertIsInstance(h, holidays.Singapore)
h = holidays.SGP()
self.assertIsInstance(h, holidays.Singapore)
| true | true |
1c3f0d91ed6ea18b6fa39386621d4fef7c35b322 | 10,747 | py | Python | graph_updater.py | xingdi-eric-yuan/gata | 059cd2e486adfdb5edc3e2df628d573ee9a3796b | [
"MIT"
] | 1 | 2021-04-28T03:31:07.000Z | 2021-04-28T03:31:07.000Z | graph_updater.py | xingdi-eric-yuan/gata | 059cd2e486adfdb5edc3e2df628d573ee9a3796b | [
"MIT"
] | null | null | null | graph_updater.py | xingdi-eric-yuan/gata | 059cd2e486adfdb5edc3e2df628d573ee9a3796b | [
"MIT"
] | 1 | 2021-04-28T03:32:57.000Z | 2021-04-28T03:32:57.000Z | import torch
import torch.nn as nn
from typing import Optional, Dict
from layers import GraphEncoder, TextEncoder, ReprAggregator, EncoderMixin
from utils import masked_mean
class GraphUpdater(EncoderMixin, nn.Module):
    def __init__(
        self,
        hidden_dim: int,
        word_emb_dim: int,
        num_nodes: int,
        node_emb_dim: int,
        num_relations: int,
        relation_emb_dim: int,
        text_encoder_num_blocks: int,
        text_encoder_num_conv_layers: int,
        text_encoder_kernel_size: int,
        text_encoder_num_heads: int,
        graph_encoder_num_cov_layers: int,
        graph_encoder_num_bases: int,
        pretrained_word_embeddings: nn.Embedding,
        node_name_word_ids: torch.Tensor,
        node_name_mask: torch.Tensor,
        rel_name_word_ids: torch.Tensor,
        rel_name_mask: torch.Tensor,
    ) -> None:
        """
        Build the graph updater: word/node/relation embeddings, a text
        encoder for observations and actions, an R-GCN graph encoder, a GRU
        cell that carries the belief state, and the graph-decoding head f_d.

        num_relations must be even: the second half of the relation set is
        the inverses of the first half. pretrained_word_embeddings must have
        embedding_dim == word_emb_dim. The four *_word_ids/*_mask tensors
        give the tokenized node/relation names (int64 ids, float masks) and
        are registered as non-trainable buffers.
        """
        super().__init__()

        # constants
        self.hidden_dim = hidden_dim
        # b/c we add inverse relations, num_relations has to be even
        assert num_relations % 2 == 0
        self.num_nodes = num_nodes
        self.num_relations = num_relations

        # word embeddings (pretrained lookup followed by a projection to
        # hidden_dim)
        assert word_emb_dim == pretrained_word_embeddings.embedding_dim
        self.word_embeddings = nn.Sequential(
            pretrained_word_embeddings, nn.Linear(word_emb_dim, hidden_dim, bias=False)
        )

        # node and relation embeddings
        self.node_embeddings = nn.Embedding(num_nodes, node_emb_dim)
        self.relation_embeddings = nn.Embedding(num_relations, relation_emb_dim)

        # save the node and relation name word ids and masks as buffers.
        # GATA used the mean word embeddings of the node and relation name words.
        # These are static as we have a fixed set of node and relation names.
        assert node_name_word_ids.dtype == torch.int64
        assert node_name_mask.dtype == torch.float
        assert node_name_word_ids.size() == node_name_mask.size()
        assert node_name_word_ids.size(0) == self.num_nodes
        assert node_name_mask.size(0) == self.num_nodes
        assert rel_name_word_ids.dtype == torch.int64
        assert rel_name_mask.dtype == torch.float
        assert rel_name_word_ids.size() == rel_name_mask.size()
        assert rel_name_word_ids.size(0) == self.num_relations
        assert rel_name_mask.size(0) == self.num_relations
        self.register_buffer("node_name_word_ids", node_name_word_ids)
        self.register_buffer("node_name_mask", node_name_mask)
        self.register_buffer("rel_name_word_ids", rel_name_word_ids)
        self.register_buffer("rel_name_mask", rel_name_mask)

        # encoders
        self.text_encoder = TextEncoder(
            text_encoder_num_blocks,
            text_encoder_num_conv_layers,
            text_encoder_kernel_size,
            hidden_dim,
            text_encoder_num_heads,
        )
        self.graph_encoder = GraphEncoder(
            hidden_dim + node_emb_dim,
            hidden_dim + relation_emb_dim,
            num_relations,
            [hidden_dim] * graph_encoder_num_cov_layers,
            graph_encoder_num_bases,
        )

        # other layers
        self.repr_aggr = ReprAggregator(hidden_dim)
        self.rnncell_input_prj = nn.Sequential(
            nn.Linear(4 * hidden_dim, hidden_dim), nn.Tanh()
        )
        self.rnncell = nn.GRUCell(hidden_dim, hidden_dim)
        # f_d decodes half of the relations; its output is mirrored (see f_d)
        self.f_d_layers = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, num_relations // 2 * num_nodes * num_nodes),
            nn.Tanh(),
        )

        # pretraining flag: toggles the extra outputs computed in forward()
        self.pretraining = False
def f_delta(
self,
prev_node_hidden: torch.Tensor,
obs_hidden: torch.Tensor,
prev_action_hidden: torch.Tensor,
obs_mask: torch.Tensor,
prev_action_mask: torch.Tensor,
) -> torch.Tensor:
"""
prev_node_hidden: (batch, num_node, hidden_dim)
obs_hidden: (batch, obs_len, hidden_dim)
prev_action_hidden: (batch, prev_action_len, hidden_dim)
obs_mask: (batch, obs_len)
prev_action_mask: (batch, prev_action_len)
output: (batch, 4 * hidden_dim)
"""
batch_size = prev_node_hidden.size(0)
# no masks necessary for prev_node_hidden, so just create a fake one
prev_node_mask = torch.ones(
batch_size, self.num_nodes, device=prev_node_hidden.device
)
# h_og: (batch, obs_len, hidden_dim)
# h_go: (batch, num_node, hidden_dim)
h_og, h_go = self.repr_aggr(
obs_hidden, prev_node_hidden, obs_mask, prev_node_mask
)
# h_ag: (batch, prev_action_len, hidden_dim)
# h_ga: (batch, num_node, hidden_dim)
h_ag, h_ga = self.repr_aggr(
prev_action_hidden, prev_node_hidden, prev_action_mask, prev_node_mask
)
mean_h_og = masked_mean(h_og, obs_mask)
mean_h_go = masked_mean(h_go, prev_node_mask)
mean_h_ag = masked_mean(h_ag, prev_action_mask)
mean_h_ga = masked_mean(h_go, prev_node_mask)
return torch.cat([mean_h_og, mean_h_go, mean_h_ag, mean_h_ga], dim=1)
def f_d(self, rnn_hidden: torch.Tensor) -> torch.Tensor:
"""
rnn_hidden: (batch, hidden_dim)
output: (batch, num_relation, num_node, num_node)
"""
h = self.f_d_layers(rnn_hidden).view(
-1, self.num_relations // 2, self.num_nodes, self.num_nodes
)
# (batch, num_relation // 2, num_node, num_node)
return torch.cat([h, h.transpose(2, 3)], dim=1)
# (batch, num_relation, num_node, num_node)
    def forward(
        self,
        obs_word_ids: torch.Tensor,
        prev_action_word_ids: torch.Tensor,
        obs_mask: torch.Tensor,
        prev_action_mask: torch.Tensor,
        rnn_prev_hidden: Optional[torch.Tensor] = None,
    ) -> Dict[str, torch.Tensor]:
        """
        One recurrent step: encode obs + previous action, combine with the
        previous graph, update the GRU hidden state and decode a new graph.
        obs_word_ids: (batch, obs_len)
        prev_action_word_ids: (batch, prev_action_len)
        obs_mask: (batch, obs_len)
        prev_action_mask: (batch, prev_action_len)
        rnn_prev_hidden: (batch, hidden_dim), or None for the initial step
        output:
        {
            'h_t': hidden state of the rnn cell at time t; (batch, hidden_dim)
            'g_t': decoded graph at time t; (batch, num_relation, num_node, num_node)
            'h_ag': aggregated representation of the previous action
                with the current graph. Used for pretraining.
                (batch, prev_action_len, hidden_dim)
            'h_ga': aggregated node representation of the current graph
                with the previous action. Used for pretraining.
                (batch, num_node, hidden_dim)
            'prj_obs': projected input obs word embeddings. Used for pretraining.
                (batch, obs_len, hidden_dim)
        }
        """
        batch_size = obs_word_ids.size(0)
        # encode previous actions
        encoded_prev_action = self.encode_text(prev_action_word_ids, prev_action_mask)
        # (batch, prev_action_len, hidden_dim)
        # decode the previous graph
        # if rnn_prev_hidden is None, pass in zeros, which is what GRUCell does.
        # Also this makes it easier to train the action selector as you can simply
        # put zeros for rnn_prev_hidden for initial transitions, instead of having to
        # worry about None.
        prev_graph = self.f_d(
            torch.zeros(batch_size, self.hidden_dim, device=obs_word_ids.device)
            if rnn_prev_hidden is None
            else rnn_prev_hidden
        )
        # (batch, num_relation, num_node, num_node)
        if self.pretraining:
            # encode text observations
            # we don't use encode_text here
            # b/c we want to return obs_word_embs for pretraining
            obs_word_embs = self.word_embeddings(obs_word_ids)
            # (batch, obs_len, hidden_dim)
            encoded_obs = self.text_encoder(obs_word_embs, obs_mask)
            # encoded_obs: (batch, obs_len, hidden_dim)
            # prj_obs: (batch, obs_len, hidden_dim)
            # encode the previous graph
            # we don't want to use encode_graph here
            # b/c we're going to use node_features and relation_features
            # for the current graph later
            node_features = (
                self.get_node_features().unsqueeze(0).expand(batch_size, -1, -1)
            )
            # (batch, num_node, hidden_dim + node_emb_dim)
            relation_features = (
                self.get_relation_features().unsqueeze(0).expand(batch_size, -1, -1)
            )
            # (batch, num_relations, hidden_dim + relation_emb_dim)
            encoded_prev_graph = self.graph_encoder(
                node_features, relation_features, prev_graph
            )
            # (batch, num_node, hidden_dim)
        else:
            # encode text observations
            encoded_obs = self.encode_text(obs_word_ids, obs_mask)
            # encoded_obs: (batch, obs_len, hidden_dim)
            # encode the previous graph
            encoded_prev_graph = self.encode_graph(prev_graph)
            # (batch, num_node, hidden_dim)
        # aggregate graph/obs/action into the GRU input
        delta_g = self.f_delta(
            encoded_prev_graph,
            encoded_obs,
            encoded_prev_action,
            obs_mask,
            prev_action_mask,
        )
        # (batch, 4 * hidden_dim)
        rnn_input = self.rnncell_input_prj(delta_g)
        # (batch, hidden_dim)
        h_t = self.rnncell(rnn_input, hx=rnn_prev_hidden)
        # (batch, hidden_dim)
        # (batch, num_node, hidden_dim)
        curr_graph = self.f_d(h_t)
        # (batch, num_relation, num_node, num_node)
        results: Dict[str, torch.Tensor] = {"h_t": h_t, "g_t": curr_graph}
        if not self.pretraining:
            return results
        # pretraining, so calculate the aggregated representations of
        # the current graph and previous action
        # no masks necessary for encoded_curr_graph, so just create a fake one
        encoded_curr_graph = self.graph_encoder(
            node_features, relation_features, curr_graph
        )
        # (batch, num_node, hidden_dim)
        h_ag, h_ga = self.repr_aggr(
            encoded_prev_action,
            encoded_curr_graph,
            prev_action_mask,
            torch.ones(batch_size, self.num_nodes, device=encoded_curr_graph.device),
        )
        # h_ag: (batch, prev_action_len, hidden_dim)
        # h_ga: (batch, num_node, hidden_dim)
        results["h_ag"] = h_ag
        results["h_ga"] = h_ga
        # finally include prj_obs
        # NOTE(review): 'prj_obs' is the word embeddings after the linear
        # projection but *before* the text encoder — confirm callers expect that
        results["prj_obs"] = obs_word_embs
        return results
| 38.658273 | 87 | 0.626966 | import torch
import torch.nn as nn
from typing import Optional, Dict
from layers import GraphEncoder, TextEncoder, ReprAggregator, EncoderMixin
from utils import masked_mean
class GraphUpdater(EncoderMixin, nn.Module):
    def __init__(
        self,
        hidden_dim: int,
        word_emb_dim: int,
        num_nodes: int,
        node_emb_dim: int,
        num_relations: int,
        relation_emb_dim: int,
        text_encoder_num_blocks: int,
        text_encoder_num_conv_layers: int,
        text_encoder_kernel_size: int,
        text_encoder_num_heads: int,
        graph_encoder_num_cov_layers: int,
        graph_encoder_num_bases: int,
        pretrained_word_embeddings: nn.Embedding,
        node_name_word_ids: torch.Tensor,
        node_name_mask: torch.Tensor,
        rel_name_word_ids: torch.Tensor,
        rel_name_mask: torch.Tensor,
    ) -> None:
        """Set up embeddings, text/graph encoders and the graph-decoding layers.
        Node/relation name word ids and masks are registered as buffers so they
        follow the module across devices without being trained.
        """
        super().__init__()
        self.hidden_dim = hidden_dim
        # relations come in forward/reverse pairs: f_d decodes num_relations // 2
        # and mirrors them, so the total must be even
        assert num_relations % 2 == 0
        self.num_nodes = num_nodes
        self.num_relations = num_relations
        assert word_emb_dim == pretrained_word_embeddings.embedding_dim
        # pretrained embeddings followed by a linear projection into hidden_dim
        self.word_embeddings = nn.Sequential(
            pretrained_word_embeddings, nn.Linear(word_emb_dim, hidden_dim, bias=False)
        )
        self.node_embeddings = nn.Embedding(num_nodes, node_emb_dim)
        self.relation_embeddings = nn.Embedding(num_relations, relation_emb_dim)
        # sanity-check the name id/mask tensors before registering them
        assert node_name_word_ids.dtype == torch.int64
        assert node_name_mask.dtype == torch.float
        assert node_name_word_ids.size() == node_name_mask.size()
        assert node_name_word_ids.size(0) == self.num_nodes
        assert node_name_mask.size(0) == self.num_nodes
        assert rel_name_word_ids.dtype == torch.int64
        assert rel_name_mask.dtype == torch.float
        assert rel_name_word_ids.size() == rel_name_mask.size()
        assert rel_name_word_ids.size(0) == self.num_relations
        assert rel_name_mask.size(0) == self.num_relations
        self.register_buffer("node_name_word_ids", node_name_word_ids)
        self.register_buffer("node_name_mask", node_name_mask)
        self.register_buffer("rel_name_word_ids", rel_name_word_ids)
        self.register_buffer("rel_name_mask", rel_name_mask)
        # encoders
        self.text_encoder = TextEncoder(
            text_encoder_num_blocks,
            text_encoder_num_conv_layers,
            text_encoder_kernel_size,
            hidden_dim,
            text_encoder_num_heads,
        )
        self.graph_encoder = GraphEncoder(
            hidden_dim + node_emb_dim,
            hidden_dim + relation_emb_dim,
            num_relations,
            [hidden_dim] * graph_encoder_num_cov_layers,
            graph_encoder_num_bases,
        )
        # other layers
        self.repr_aggr = ReprAggregator(hidden_dim)
        self.rnncell_input_prj = nn.Sequential(
            nn.Linear(4 * hidden_dim, hidden_dim), nn.Tanh()
        )
        self.rnncell = nn.GRUCell(hidden_dim, hidden_dim)
        # decodes an rnn hidden state into half of the adjacency tensor (see f_d)
        self.f_d_layers = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, num_relations // 2 * num_nodes * num_nodes),
            nn.Tanh(),
        )
        # pretraining flag: when True, forward() also returns h_ag/h_ga/prj_obs
        self.pretraining = False
def f_delta(
self,
prev_node_hidden: torch.Tensor,
obs_hidden: torch.Tensor,
prev_action_hidden: torch.Tensor,
obs_mask: torch.Tensor,
prev_action_mask: torch.Tensor,
) -> torch.Tensor:
batch_size = prev_node_hidden.size(0)
prev_node_mask = torch.ones(
batch_size, self.num_nodes, device=prev_node_hidden.device
)
h_og, h_go = self.repr_aggr(
obs_hidden, prev_node_hidden, obs_mask, prev_node_mask
)
h_ag, h_ga = self.repr_aggr(
prev_action_hidden, prev_node_hidden, prev_action_mask, prev_node_mask
)
mean_h_og = masked_mean(h_og, obs_mask)
mean_h_go = masked_mean(h_go, prev_node_mask)
mean_h_ag = masked_mean(h_ag, prev_action_mask)
mean_h_ga = masked_mean(h_go, prev_node_mask)
return torch.cat([mean_h_og, mean_h_go, mean_h_ag, mean_h_ga], dim=1)
def f_d(self, rnn_hidden: torch.Tensor) -> torch.Tensor:
h = self.f_d_layers(rnn_hidden).view(
-1, self.num_relations // 2, self.num_nodes, self.num_nodes
)
return torch.cat([h, h.transpose(2, 3)], dim=1)
    def forward(
        self,
        obs_word_ids: torch.Tensor,
        prev_action_word_ids: torch.Tensor,
        obs_mask: torch.Tensor,
        prev_action_mask: torch.Tensor,
        rnn_prev_hidden: Optional[torch.Tensor] = None,
    ) -> Dict[str, torch.Tensor]:
        """One recurrent step: encode obs + previous action, combine with the
        previous graph, update the GRU hidden state and decode a new graph.
        obs_word_ids: (batch, obs_len)
        prev_action_word_ids: (batch, prev_action_len)
        obs_mask: (batch, obs_len)
        prev_action_mask: (batch, prev_action_len)
        rnn_prev_hidden: (batch, hidden_dim), or None for the initial step
        returns {'h_t', 'g_t'} plus {'h_ag', 'h_ga', 'prj_obs'} when
        self.pretraining is True.
        """
        batch_size = obs_word_ids.size(0)
        # encode the previous action text
        encoded_prev_action = self.encode_text(prev_action_word_ids, prev_action_mask)
        # decode the previous graph; zeros stand in for a missing initial hidden
        # state (same as GRUCell's default)
        prev_graph = self.f_d(
            torch.zeros(batch_size, self.hidden_dim, device=obs_word_ids.device)
            if rnn_prev_hidden is None
            else rnn_prev_hidden
        )
        if self.pretraining:
            # encode text observations without encode_text
            # b/c we want to return obs_word_embs for pretraining
            obs_word_embs = self.word_embeddings(obs_word_ids)
            # (batch, obs_len, hidden_dim)
            encoded_obs = self.text_encoder(obs_word_embs, obs_mask)
            # encoded_obs: (batch, obs_len, hidden_dim)
            # prj_obs: (batch, obs_len, hidden_dim)
            # encode the previous graph
            # we don't want to use encode_graph here b/c node_features and
            # relation_features are reused for the current graph later
            node_features = (
                self.get_node_features().unsqueeze(0).expand(batch_size, -1, -1)
            )
            # (batch, num_node, hidden_dim + node_emb_dim)
            relation_features = (
                self.get_relation_features().unsqueeze(0).expand(batch_size, -1, -1)
            )
            # (batch, num_relations, hidden_dim + relation_emb_dim)
            encoded_prev_graph = self.graph_encoder(
                node_features, relation_features, prev_graph
            )
            # (batch, num_node, hidden_dim)
        else:
            # encode text observations
            encoded_obs = self.encode_text(obs_word_ids, obs_mask)
            # encoded_obs: (batch, obs_len, hidden_dim)
            # encode the previous graph
            encoded_prev_graph = self.encode_graph(prev_graph)
            # (batch, num_node, hidden_dim)
        # aggregate graph/obs/action into the GRU input
        delta_g = self.f_delta(
            encoded_prev_graph,
            encoded_obs,
            encoded_prev_action,
            obs_mask,
            prev_action_mask,
        )
        # (batch, 4 * hidden_dim)
        rnn_input = self.rnncell_input_prj(delta_g)
        # (batch, hidden_dim)
        h_t = self.rnncell(rnn_input, hx=rnn_prev_hidden)
        # (batch, hidden_dim)
        # (batch, num_node, hidden_dim)
        curr_graph = self.f_d(h_t)
        # (batch, num_relation, num_node, num_node)
        results: Dict[str, torch.Tensor] = {"h_t": h_t, "g_t": curr_graph}
        if not self.pretraining:
            return results
        # pretraining, so calculate the aggregated representations of
        # the current graph and previous action
        # no masks necessary for encoded_curr_graph, so just create a fake one
        encoded_curr_graph = self.graph_encoder(
            node_features, relation_features, curr_graph
        )
        # (batch, num_node, hidden_dim)
        h_ag, h_ga = self.repr_aggr(
            encoded_prev_action,
            encoded_curr_graph,
            prev_action_mask,
            torch.ones(batch_size, self.num_nodes, device=encoded_curr_graph.device),
        )
        # h_ag: (batch, prev_action_len, hidden_dim)
        # h_ga: (batch, num_node, hidden_dim)
        results["h_ag"] = h_ag
        results["h_ga"] = h_ga
        # finally include prj_obs (embeddings after projection, before encoder)
        results["prj_obs"] = obs_word_embs
        return results
| true | true |
1c3f0f516f199958c86634276a1edb4466568970 | 1,189 | py | Python | test.py | nathan-gilbert/graphworks-test | 46840288bf58f726cca1f0756fa7e86457dd6768 | [
"Unlicense"
] | null | null | null | test.py | nathan-gilbert/graphworks-test | 46840288bf58f726cca1f0756fa7e86457dd6768 | [
"Unlicense"
] | null | null | null | test.py | nathan-gilbert/graphworks-test | 46840288bf58f726cca1f0756fa7e86457dd6768 | [
"Unlicense"
] | null | null | null | import json
from graphworks.algorithms.basic import find_isolated_vertices
from graphworks.algorithms.basic import generate_edges
from graphworks.export.graphviz_utils import save_to_dot
from graphworks.export.json_utils import save_to_json
from graphworks.graph import Graph
if __name__ == "__main__":
    # Build a small demo graph from a JSON description: A -> B
    json_graph = {"label": "my graph", "edges": {"A": ["B"], "B": []}}
    graph = Graph("my graph", input_graph=json.dumps(json_graph))
    print(graph)
    # exercise the basic algorithms
    all_edges = generate_edges(graph)
    print(all_edges)
    isolated = find_isolated_vertices(graph)
    print(isolated)
    print("Vertices of graph:")
    print(graph.vertices())
    print("Edges of graph:")
    print(graph.edges())
    # add an isolated vertex D ...
    print("Add vertex:")
    graph.add_vertex("D")
    print("Vertices of graph:")
    print(graph.vertices())
    # ... then connect it to A
    print("Add an edge:")
    graph.add_edge("A", "D")
    print("Vertices of graph:")
    print(graph.vertices())
    print("Edges of graph:")
    print(graph.edges())
    # adding an edge between two previously unknown vertices
    graph.add_edge("X", "Y")
    print("Vertices of graph:")
    print(graph.vertices())
    print("Edges of graph:")
    print(graph.edges())
    # export the resulting graph to dot and json files in the cwd
    save_to_dot(graph, ".")
    save_to_json(graph, ".")
| 23.78 | 70 | 0.668629 | import json
from graphworks.algorithms.basic import find_isolated_vertices
from graphworks.algorithms.basic import generate_edges
from graphworks.export.graphviz_utils import save_to_dot
from graphworks.export.json_utils import save_to_json
from graphworks.graph import Graph
if __name__ == "__main__":
json_graph = {"label": "my graph", "edges": {"A": ["B"], "B": []}}
graph = Graph("my graph", input_graph=json.dumps(json_graph))
print(graph)
all_edges = generate_edges(graph)
print(all_edges)
isolated = find_isolated_vertices(graph)
print(isolated)
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
print("Add vertex:")
graph.add_vertex("D")
print("Vertices of graph:")
print(graph.vertices())
print("Add an edge:")
graph.add_edge("A", "D")
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
graph.add_edge("X", "Y")
print("Vertices of graph:")
print(graph.vertices())
print("Edges of graph:")
print(graph.edges())
save_to_dot(graph, ".")
save_to_json(graph, ".")
| true | true |
1c3f0f5c4ee5b5fd5a03fa9630986bf135acaa7c | 21,599 | py | Python | py/nightwatch/script.py | sbailey/nightwatch | 09c2218afd529384866e103b96aa6ed555aef85e | [
"BSD-3-Clause"
] | null | null | null | py/nightwatch/script.py | sbailey/nightwatch | 09c2218afd529384866e103b96aa6ed555aef85e | [
"BSD-3-Clause"
] | null | null | null | py/nightwatch/script.py | sbailey/nightwatch | 09c2218afd529384866e103b96aa6ed555aef85e | [
"BSD-3-Clause"
] | null | null | null | """
nightwatch command line script
"""
import os, sys, time, glob
import argparse
import traceback
import subprocess
from desimodel.io import load_tiles
import desispec.io
from . import run, plots, io
from .run import timestamp, get_ncpu
from .qa.runner import QARunner
from desiutil.log import get_logger
import tempfile
import shutil
import contextlib
import multiprocessing as mp
def print_help():
    """Print the top-level usage summary for the nightwatch command line script."""
    print("""USAGE: nightwatch <command> [options]
Supported commands are:
    monitor    Monitor input directory and run qproc, qa, and generate plots
    run        Run qproc, qa, and generate plots for a single exposure
    assemble_fibermap
               Run assemble_fibermap using data from input raw data file
    preproc    Run only preprocessing on an input raw data file
    qproc      Run qproc (includes preproc) on an input raw data file
    qa         Run QA analysis on qproc outputs
    plot       Generate webpages with plots of QA output
    tables     Generate webpages with tables of nights and exposures
    webapp     Run a nightwatch Flask webapp server
    surveyqa   Generate surveyqa webpages
Run "nightwatch <command> --help" for details options about each command
""")
def main():
    """Dispatch to the subcommand named in sys.argv[1].

    Returns 0 for help / success, 1 for an unrecognized command.
    """
    if len(sys.argv) == 1 or sys.argv[1] in ('-h', '--help', '-help', 'help'):
        print_help()
        return 0

    command = sys.argv[1]
    # One single if/elif chain.  Previously 'monitor' was dispatched from a
    # separate bare `if`, so after main_monitor() returned the command also
    # fell through to the final `else` "unrecognized command" branch.
    if command == 'monitor':
        main_monitor()
    elif command == 'run':
        main_run()
    elif command == 'assemble_fibermap':
        main_assemble_fibermap()
    elif command == 'preproc':
        main_preproc()
    elif command == 'qproc':
        main_qproc()
    elif command == 'qa':
        main_qa()
    elif command in ('plot', 'plots'):
        main_plot()
    elif command == 'tables':
        main_tables()
    elif command == 'webapp':
        # deferred import so the Flask dependency is only needed for the webapp
        from .webapp import main_webapp
        main_webapp()
    elif command == 'summary':
        main_summary()
    elif command == 'threshold':
        main_threshold()
    elif command == 'surveyqa':
        main_surveyqa()
    else:
        print('ERROR: unrecognized command "{}"'.format(command))
        print_help()
        return 1
def main_monitor(options=None):
    """Watch indir for new raw exposures and process each one (qproc + QA +
    plots), either inline or by spawning a slurm batch job.
    Loops forever; exits only when a 'stop.nightwatch' file appears in the cwd.
    """
    parser = argparse.ArgumentParser(usage = "{prog} monitor [options]")
    parser.add_argument("-i", "--indir", type=str, help="watch indir/YEARMMDD/EXPID/ for new raw data")
    parser.add_argument("-o", "--outdir", type=str, help="write output to outdir/YEARMMDD/EXPID/")
    # parser.add_argument("--qprocdir", type=str, help="qproc output directory")
    # parser.add_argument("--qadir", type=str, help="QA output directory")
    parser.add_argument("--plotdir", type=str, help="QA plot output directory")
    parser.add_argument("--cameras", type=str, help="comma separated list of cameras (for debugging)")
    parser.add_argument("--catchup", action="store_true", help="Catch up on processing all unprocessed data")
    parser.add_argument("--waittime", type=int, default=10, help="Seconds to wait between checks for new data")
    parser.add_argument("--startdate", type=int, default=None, help="Earliest startdate to check for unprocessed nights (YYYYMMDD)")
    parser.add_argument("--batch", "-b", action='store_true', help="spawn qproc data processing to batch job")
    parser.add_argument("--batch-queue", "-q", type=str, default="realtime", help="batch queue to use")
    parser.add_argument("--batch-time", "-t", type=int, default=15, help="batch job time limit [minutes]")
    parser.add_argument("--batch-opts", type=str, default="-N 1 -C haswell -A desi", help="Additional batch options")
    if options is None:
        options = sys.argv[2:]
    args = parser.parse_args(options)
    if args.cameras is not None:
        cameras = args.cameras.split(',')
    else:
        cameras = None
    if args.plotdir is None :
        args.plotdir = args.outdir
    log = get_logger()
    tmp = os.path.join(args.indir, 'YEARMMDD', 'EXPID')
    log.info('Monitoring {}/ for new raw data'.format(tmp))
    qarunner = QARunner()
    # exposure directories already handled in this session
    processed = set()
    #- TODO: figure out a way to print how many nights are being skipped before startdate
    while True:
        if os.path.exists('stop.nightwatch'):
            print("Found stop.nightwatch file; exiting now")
            sys.exit(0)
        # catchup scans for any unprocessed exposure; otherwise only the latest
        if args.catchup:
            expdir = run.find_unprocessed_expdir(args.indir, args.outdir, processed, startdate=args.startdate)
        else:
            expdir = run.find_latest_expdir(args.indir, processed, startdate=args.startdate)
        if expdir is None:
            print('{} No new exposures found; sleeping {} sec'.format(
                timestamp(), args.waittime))
            sys.stdout.flush()
            time.sleep(args.waittime)
            continue
        night, expid = expdir.split('/')[-2:]
        night = int(night)
        rawfile = os.path.join(expdir, 'desi-{}.fits.fz'.format(expid))
        if expdir not in processed and os.path.exists(rawfile):
            processed.add(expdir)
            outdir = '{}/{}/{}'.format(args.outdir, night, expid)
            # an existing qa-*.fits marks this exposure as already processed
            if os.path.exists(outdir) and len(glob.glob(outdir+'/qa-*.fits'))>0:
                print('Skipping previously processed {}/{}'.format(night, expid))
                processed.add(expdir)
                continue
            else:
                os.makedirs(outdir, exist_ok=True)
            time_start = time.time()
            print('\n{} Found new exposure {}/{}'.format(timestamp(), night, expid))
            sys.stdout.flush()
            try :
                if args.batch:
                    # hand the whole exposure off to a slurm job
                    print('{} Submitting batch job for {}'.format(time.strftime('%H:%M'), rawfile))
                    batch_run(rawfile, args.outdir, cameras, args.batch_queue, args.batch_time, args.batch_opts)
                else:
                    # process inline: qproc -> QA -> plots -> summary tables
                    print('{} Running qproc on {}'.format(time.strftime('%H:%M'), rawfile))
                    sys.stdout.flush()
                    header = run.run_qproc(rawfile, outdir, cameras=cameras)
                    print('{} Running QA on {}/{}'.format(timestamp(), night, expid))
                    sys.stdout.flush()
                    qafile = "{}/qa-{}.fits".format(outdir,expid)
                    caldir = os.path.join(args.plotdir, "static")
                    jsonfile = os.path.join(caldir, "timeseries_dropdown.json")
                    if not os.path.isdir(caldir):
                        os.makedirs(caldir)
                    qarunner.run(indir=outdir, outfile=qafile, jsonfile=jsonfile)
                    print('Generating plots for {}/{}'.format(night, expid))
                    tmpdir = '{}/{}/{}'.format(args.plotdir, night, expid)
                    if not os.path.isdir(tmpdir) :
                        os.makedirs(tmpdir)
                    run.make_plots(infile=qafile, basedir=args.plotdir, preprocdir=outdir, logdir=outdir,
                        cameras=cameras)
                    run.write_tables(args.outdir, args.plotdir, expnights=[night,])
                time_end = time.time()
                dt = (time_end - time_start) / 60
                print('{} Finished exposure {}/{} ({:.1f} min)'.format(
                    timestamp(), night, expid, dt))
            except Exception as e :
                # keep the monitor alive even if one exposure fails
                print("Failed to process or QA or plot exposure {}".format(expid))
                print("Error message: {}".format(str(e)))
                exc_info = sys.exc_info()
                traceback.print_exception(*exc_info)
                del exc_info
                print("Now moving on ...")
                sys.stdout.flush()
            processed.add(expdir)
        else:
            sys.stdout.flush()
            time.sleep(args.waittime)
class TempDirManager():
    '''Context manager providing a temporary working directory whose entire
    contents are moved into a target output directory on exit -- regardless of
    whether the code inside the context succeeded or raised.'''

    def __init__(self, outdir):
        '''outdir: directory that receives all files written under the tempdir.'''
        self.outdir = outdir
        self.tempdir = None

    def __enter__(self):
        #- mkdtemp, not TemporaryDirectory().name: the latter's object was
        #- discarded immediately, so garbage collection could delete the
        #- directory before (or while) it was being used.
        self.tempdir = tempfile.mkdtemp()
        return self.tempdir

    def __exit__(self, *exc):
        '''Move every file from the tempdir into outdir, preserving subdirs.'''
        outdir = self.outdir
        tempdir = self.tempdir
        print('{} Copying files from temporary directory to {}'.format(
            time.strftime('%H:%M'), outdir))
        src = []
        for dirpath, dirnames, files in os.walk(tempdir, topdown=True):
            for file_name in files:
                src.append(os.path.join(dirpath, file_name))
        dest = [file.replace(tempdir, outdir) for file in src]
        argslist = list(zip(src, dest))
        #- Pre-create destination directories, caching which ones we've already
        #- checked so each directory is only stat'ed/created once
        #- (the cache set was previously never populated)
        fullpath_outdirs = set()
        for (srcfile, destfile) in argslist:
            dirpath = os.path.dirname(destfile)
            if dirpath not in fullpath_outdirs:
                if not os.path.exists(dirpath):
                    os.makedirs(dirpath)
                fullpath_outdirs.add(dirpath)
        #- parallel moving performs better than moving serially
        ncpu = get_ncpu(None)
        if ncpu > 1:
            pool = mp.Pool(ncpu)
            pool.starmap(shutil.move, argslist)
            pool.close()
            pool.join()
        else:
            #- bug fix: was shutil.move(**args), a TypeError on a (src, dest) tuple
            for srcfile, destfile in argslist:
                shutil.move(srcfile, destfile)
        print('{} Done copying {} files'.format(
            time.strftime('%H:%M'), len(argslist)))
def batch_run(infile, outdir, cameras, queue, batchtime, batchopts):
    """Submits batch job to `nightwatch run infile outdir ...`
    Args:
        infile (str): input DESI raw data file
        outdir (str): base output directory
        cameras (list or None): None, or list of cameras to include
        queue (str): slurm queue name
        batchtime (int): batch job time limit [minutes]
        batchopts (str): additional batch options
    Returns error code from sbatch submission
    Note: this is a non-blocking call and will return before the batch
    processing is finished
    """
    night, expid = io.get_night_expid(infile)
    expdir = io.findfile('expdir', night=night, expid=expid, basedir=outdir)
    # absolute paths so the batch job doesn't depend on the submit-time cwd
    infile = os.path.abspath(infile)
    expdir = os.path.abspath(expdir)
    outdir = os.path.abspath(outdir)
    # translate cameras (None, list/tuple, or comma-separated string)
    # into a --cameras option for the spawned nightwatch run command
    if cameras is None:
        camera_options = ""
    elif isinstance(cameras, (list, tuple)):
        camera_options = "--cameras {}".format(','.join(cameras))
    elif isinstance(cameras, str):
        camera_options = f"--cameras {cameras}"
    else:
        raise ValueError('Unable to parse cameras {}'.format(cameras))
    # write the slurm script into the exposure directory, then submit it
    # NOTE(review): assumes 'nightwatch' is on PATH in the batch environment
    jobname = f'nightwatch-{expid:08d}'
    batchfile = f'{expdir}/{jobname}.slurm'
    with open(batchfile, 'w') as fx:
        fx.write(f"""#!/bin/bash -l
#SBATCH {batchopts}
#SBATCH --qos {queue}
#SBATCH --time {batchtime}
#SBATCH --job-name {jobname}
#SBATCH --output {expdir}/{jobname}-%j.joblog
#SBATCH --exclusive
nightwatch run --infile {infile} --outdir {outdir} {camera_options}
""")
    err = subprocess.call(["sbatch", batchfile])
    return err
def main_run(options=None):
    """Run the full pipeline (assemble_fibermap, qproc, QA, plots, summary
    tables) for a single exposure, identified either by --infile or by
    --night and --expid."""
    parser = argparse.ArgumentParser(usage = "{prog} run [options]")
    parser.add_argument("-i", "--infile", type=str, required=False,
        help="input raw data file")
    parser.add_argument("-o", "--outdir", type=str, required=True,
        help="output base directory")
    parser.add_argument("--cameras", type=str, help="comma separated list of cameras (for debugging)")
    parser.add_argument('-n', '--night', type=int,
        help="YEARMMDD night")
    parser.add_argument('-e', '--expid', type=int,
        help="Exposure ID")

    if options is None:
        options = sys.argv[2:]
    args = parser.parse_args(options)

    if args.cameras is not None:
        cameras = args.cameras.split(',')
    else:
        cameras = None

    #- derive the raw data file from night/expid if not given directly
    if args.infile is None:
        if args.night is None or args.expid is None:
            print('ERROR: must provide --infile or --night AND --expid')
            sys.exit(2)
        args.infile = desispec.io.findfile('raw', args.night, args.expid)

    night, expid = io.get_night_expid(args.infile)
    rawdir = os.path.dirname(os.path.dirname(os.path.dirname(args.infile)))

    #- Write outputs directly to outdir.  (A tempdir staging area via
    #- TempDirManager was tried previously but had inconsistent performance;
    #- the vestigial `if True:` scaffolding from that experiment is removed.)
    expdir = io.findfile('expdir', night=night, expid=expid, basedir=args.outdir)

    time_start = time.time()
    print('{} Running assemble_fibermap'.format(time.strftime('%H:%M')))
    fibermap = run.run_assemble_fibermap(args.infile, expdir)

    print('{} Running qproc'.format(time.strftime('%H:%M')))
    header = run.run_qproc(args.infile, expdir, cameras=cameras)

    print('{} Running QA analysis'.format(time.strftime('%H:%M')))
    qafile = io.findfile('qa', night=night, expid=expid, basedir=args.outdir)
    qaresults = run.run_qa(expdir, outfile=qafile)

    print('{} Making plots'.format(time.strftime('%H:%M')))
    run.make_plots(qafile, args.outdir, preprocdir=expdir, logdir=expdir, rawdir=rawdir, cameras=cameras)

    print('{} Updating night/exposure summary tables'.format(time.strftime('%H:%M')))
    run.write_tables(args.outdir, args.outdir, expnights=[night,])

    dt = (time.time() - time_start) / 60.0
    print('{} Done ({:.1f} min)'.format(time.strftime('%H:%M'), dt))
def main_assemble_fibermap(options=None):
    """Parse command line options and run assemble_fibermap on a raw data file."""
    parser = argparse.ArgumentParser(usage = "{prog} preproc [options]")
    parser.add_argument("-i", "--infile", type=str, required=True,
        help="input raw data file")
    parser.add_argument("-o", "--outdir", type=str, required=True,
        help="output directory")
    args = parser.parse_args(sys.argv[2:] if options is None else options)

    fibermap = run.run_assemble_fibermap(args.infile, args.outdir)
    if fibermap is None:
        print('Did not run assemble_fibermap for {}'.format(args.infile))
    else:
        print('Done running assemble_fibermap for {}; wrote outputs to {}'.format(args.infile, fibermap))
def main_preproc(options=None):
    """Parse command line options and run only the preprocessing step on a raw data file."""
    parser = argparse.ArgumentParser(usage = "{prog} preproc [options]")
    parser.add_argument("-i", "--infile", type=str, required=True,
        help="input raw data file")
    parser.add_argument("-o", "--outdir", type=str, required=True,
        help="output directory")
    parser.add_argument('--fibermap', type=str, default=None,
        help="fibermap file")
    parser.add_argument("--cameras", type=str, help="comma separated list of cameras (for debugging)")
    args = parser.parse_args(sys.argv[2:] if options is None else options)

    cameras = args.cameras.split(',') if args.cameras is not None else None
    run.run_preproc(args.infile, args.outdir, fibermap=args.fibermap, cameras=cameras)
    print("Done running preproc on {}; wrote outputs to {}".format(args.infile, args.outdir))
def main_qproc(options=None):
    """Parse command line options and run qproc (which includes preproc) on a raw data file."""
    parser = argparse.ArgumentParser(usage = "{prog} qproc [options]")
    parser.add_argument("-i", "--infile", type=str, required=True,
        help="input raw data file")
    parser.add_argument("-o", "--outdir", type=str, required=True,
        help="output directory")
    parser.add_argument("--cameras", type=str, help="comma separated list of cameras (for debugging)")
    args = parser.parse_args(sys.argv[2:] if options is None else options)

    cameras = args.cameras.split(',') if args.cameras is not None else None
    run.run_qproc(args.infile, args.outdir, cameras=cameras)
    print("Done running qproc on {}; wrote outputs to {}".format(args.infile, args.outdir))
def main_qa(options=None):
    """Parse command line options and run QA analysis on a directory of qproc outputs."""
    parser = argparse.ArgumentParser(usage = "{prog} qa [options]")
    parser.add_argument("-i", "--indir", type=str, required=True, help="input directory with qproc outputs")
    parser.add_argument("-o", "--outfile", type=str, required=True, help="output qa fits file name")
    args = parser.parse_args(sys.argv[2:] if options is None else options)

    run.run_qa(args.indir, outfile=args.outfile)
    print("Done running QA on {}; wrote outputs to {}".format(args.indir, args.outfile))
def main_plot(options=None):
    """Parse command line options and generate QA plots for one or more qa FITS files."""
    parser = argparse.ArgumentParser(usage = "{prog} plot [options]")
    parser.add_argument("-i", "--infile", type=str, nargs='*', required=True, help="input QA fits file")
    parser.add_argument("-o", "--outdir", type=str, help="output base directory (not including YEARMMDD/EXPID/)")
    parser.add_argument("-r", "--rawdir", type=str, help="directory containing raw data (not including YEARMMDD/EXPID/)")
    args = parser.parse_args(sys.argv[2:] if options is None else options)

    for qafile in args.infile:
        qadir = os.path.dirname(qafile)
        # default output location is alongside the input qa file
        plotdir = qadir if args.outdir is None else args.outdir
        run.make_plots(qafile, plotdir, preprocdir=qadir, logdir=qadir, rawdir=args.rawdir)
    print("Done making plots for {}; wrote outputs to {}".format(args.infile, args.outdir))
def main_tables(options=None):
    """Parse command line options and regenerate the night/exposure summary tables."""
    parser = argparse.ArgumentParser(usage = "{prog} plot [options]")
    parser.add_argument("-i", "--indir", type=str, required=True, help="QA in indir/YEARMMDD/EXPID")
    parser.add_argument("-o", "--outdir", type=str, help="write summary tables to outdir/nights.html and outdir/YEARMMDD/exposures.html")
    parser.add_argument("-n", "--nights", type=str, help="comma separated list of nights to process")
    args = parser.parse_args(sys.argv[2:] if options is None else options)

    if args.outdir is None:
        args.outdir = args.indir

    nights = None if args.nights is None else [int(n) for n in args.nights.split(',')]
    run.write_tables(args.indir, args.outdir, expnights=nights)
    print('Wrote summary tables to {}'.format(args.outdir))
def main_summary(options=None):
parser = argparse.ArgumentParser(usage = "{prog} [options]")
parser.add_argument("-i", "--indir", type=str, required=True, help="directory of night directories; write summary data to indir/night/summary.json")
parser.add_argument("-l", "--last", type=bool, help="True if last night shown is complete and ready to summarize")
if options is None:
options = sys.argv[2:]
args = parser.parse_args(options)
last = args.last
if last is None:
last = False
run.write_nights_summary(args.indir, last)
print('Wrote summary jsons for each night to {}'.format(args.indir))
def main_threshold(options=None):
    """Parse command line options and write QA threshold json/html files for a date range."""
    parser = argparse.ArgumentParser(usage = '{prog} [options]')
    parser.add_argument('-i', '--indir', type=str, required=True, help='directory of night directories; where summary.json files can be found')
    parser.add_argument('-o', '--outdir', type=str, required=True, help='directory threshold json/html files should be written to')
    parser.add_argument('-s', '--start', type=int, required=True, help='start date for calculation range')
    parser.add_argument('-e', '--end', type=int, required=True, help='end date for calculation range')
    if options is None:
        options = sys.argv[2:]
    args = parser.parse_args(options)

    run.write_thresholds(args.indir, args.outdir, args.start, args.end)
    #- bug fix: message previously printed a hard-coded repo path
    #- ('nightwatch/py/nightwatch/threshold_files') instead of the actual outdir
    print('Wrote threshold jsons for each night to {}'.format(args.outdir))
def main_surveyqa(options=None):
    """Parse command line options and generate surveyqa summary webpages."""
    parser = argparse.ArgumentParser(usage = '{prog} [options]')
    parser.add_argument('-i', '--infile', type=str, required=True, help='file containing data to feed into surveyqa')
    parser.add_argument('-o', '--outdir', type=str, required=True, help='directory threshold json/html files should be written to (will be written to outdir/surveyqa, outdir should be same location as other nightwatch files)')
    parser.add_argument('-t', '--tilefile', type=str, help='file containing data on tiles')
    parser.add_argument('-r', '--rawdir', type=str, help='directory containing raw data files (without YYYMMDD/EXPID/)')
    if options is None:
        options = sys.argv[2:]
    args = parser.parse_args(options)
    if args.tilefile is None:
        tiles = load_tiles()
    else:
        #- NOTE(review): `Table` (presumably astropy.table.Table) is never
        #- imported in this module, so this branch raises NameError whenever
        #- --tilefile is given — needs an import to fix
        tiles = Table.read(args.tilefile, hdu=1)
    if args.rawdir is None:
        args.rawdir = desispec.io.meta.rawdata_root()
    #- map the input file's column names onto the names surveyqa expects
    name_dict = {"EXPID": "EXPID", "MJD": "MJD",
                 "AIRMASS": "AIRMASS", "TRANSP": "TRANSPARENCY", "NIGHT": "NIGHT",
                 "MOONSEP": "MOON_SEP_DEG", "RA": "SKYRA", "DEC": "SKYDEC",
                 "SKY": "SKY_MAG_AB", "SEEING": "FWHM_ASEC"}
    run.write_summaryqa(args.infile, name_dict, tiles, args.rawdir, args.outdir)
| 40.829868 | 240 | 0.6304 |
import os, sys, time, glob
import argparse
import traceback
import subprocess
from desimodel.io import load_tiles
import desispec.io
from . import run, plots, io
from .run import timestamp, get_ncpu
from .qa.runner import QARunner
from desiutil.log import get_logger
import tempfile
import shutil
import contextlib
import multiprocessing as mp
def print_help():
    """Print top-level usage for all nightwatch subcommands to stdout."""
    print("""USAGE: nightwatch <command> [options]
Supported commands are:
 monitor Monitor input directory and run qproc, qa, and generate plots
 run Run qproc, qa, and generate plots for a single exposure
 assemble_fibermap
 Run assemble_fibermap using data from input raw data file
 preproc Run only preprocessing on an input raw data file
 qproc Run qproc (includes preproc) on an input raw data file
 qa Run QA analysis on qproc outputs
 plot Generate webpages with plots of QA output
 tables Generate webpages with tables of nights and exposures
 webapp Run a nightwatch Flask webapp server
 surveyqa Generate surveyqa webpages
Run "nightwatch <command> --help" for details options about each command
""")
def main():
    """Top-level CLI dispatcher: route sys.argv[1] to the matching subcommand.

    :return: 0 for help, 1 for an unrecognized command, otherwise None
             (the subcommand itself may exit or loop forever).
    """
    if len(sys.argv) == 1 or sys.argv[1] in ('-h', '--help', '-help', 'help'):
        print_help()
        return 0

    command = sys.argv[1]
    if command == 'monitor':
        main_monitor()
    #- BUGFIX: this was a second bare `if`, so after main_monitor() returned,
    #- 'monitor' matched no elif branch and fell into the unrecognized-command
    #- error below; `elif` keeps the chain mutually exclusive.
    elif command == 'run':
        main_run()
    elif command == 'assemble_fibermap':
        main_assemble_fibermap()
    elif command == 'preproc':
        main_preproc()
    elif command == 'qproc':
        main_qproc()
    elif command == 'qa':
        main_qa()
    elif command in ('plot', 'plots'):
        main_plot()
    elif command == 'tables':
        main_tables()
    elif command == 'webapp':
        #- deferred import: the Flask webapp is only needed for this command
        from .webapp import main_webapp
        main_webapp()
    elif command == 'summary':
        main_summary()
    elif command == 'threshold':
        main_threshold()
    elif command == 'surveyqa':
        main_surveyqa()
    else:
        print('ERROR: unrecognized command "{}"'.format(command))
        print_help()
        return 1
def main_monitor(options=None):
    """CLI entry: poll indir/YEARMMDD/EXPID/ forever and process new exposures.

    For each new raw file this either submits a batch job (--batch) or runs
    qproc + QA + plots inline, then updates the summary tables.  A file named
    'stop.nightwatch' in the working directory stops the loop.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    parser = argparse.ArgumentParser(usage = "{prog} monitor [options]")
    parser.add_argument("-i", "--indir", type=str, help="watch indir/YEARMMDD/EXPID/ for new raw data")
    parser.add_argument("-o", "--outdir", type=str, help="write output to outdir/YEARMMDD/EXPID/")
    parser.add_argument("--plotdir", type=str, help="QA plot output directory")
    parser.add_argument("--cameras", type=str, help="comma separated list of cameras (for debugging)")
    parser.add_argument("--catchup", action="store_true", help="Catch up on processing all unprocessed data")
    parser.add_argument("--waittime", type=int, default=10, help="Seconds to wait between checks for new data")
    parser.add_argument("--startdate", type=int, default=None, help="Earliest startdate to check for unprocessed nights (YYYYMMDD)")
    parser.add_argument("--batch", "-b", action='store_true', help="spawn qproc data processing to batch job")
    parser.add_argument("--batch-queue", "-q", type=str, default="realtime", help="batch queue to use")
    parser.add_argument("--batch-time", "-t", type=int, default=15, help="batch job time limit [minutes]")
    parser.add_argument("--batch-opts", type=str, default="-N 1 -C haswell -A desi", help="Additional batch options")
    if options is None:
        options = sys.argv[2:]
    args = parser.parse_args(options)
    if args.cameras is not None:
        cameras = args.cameras.split(',')
    else:
        cameras = None
    #- plots default to living next to the QA outputs
    if args.plotdir is None :
        args.plotdir = args.outdir
    log = get_logger()
    tmp = os.path.join(args.indir, 'YEARMMDD', 'EXPID')
    log.info('Monitoring {}/ for new raw data'.format(tmp))
    qarunner = QARunner()
    #- expdirs already handled this session (not persisted across restarts)
    processed = set()
    while True:
        #- external kill switch: touch stop.nightwatch to exit cleanly
        if os.path.exists('stop.nightwatch'):
            print("Found stop.nightwatch file; exiting now")
            sys.exit(0)
        if args.catchup:
            expdir = run.find_unprocessed_expdir(args.indir, args.outdir, processed, startdate=args.startdate)
        else:
            expdir = run.find_latest_expdir(args.indir, processed, startdate=args.startdate)
        if expdir is None:
            print('{} No new exposures found; sleeping {} sec'.format(
                timestamp(), args.waittime))
            sys.stdout.flush()
            time.sleep(args.waittime)
            continue
        night, expid = expdir.split('/')[-2:]
        night = int(night)
        rawfile = os.path.join(expdir, 'desi-{}.fits.fz'.format(expid))
        #- only process once the raw file has actually appeared on disk
        if expdir not in processed and os.path.exists(rawfile):
            processed.add(expdir)
            outdir = '{}/{}/{}'.format(args.outdir, night, expid)
            #- an existing qa-*.fits marks this exposure as already done
            if os.path.exists(outdir) and len(glob.glob(outdir+'/qa-*.fits'))>0:
                print('Skipping previously processed {}/{}'.format(night, expid))
                processed.add(expdir)
                continue
            else:
                os.makedirs(outdir, exist_ok=True)
            time_start = time.time()
            print('\n{} Found new exposure {}/{}'.format(timestamp(), night, expid))
            sys.stdout.flush()
            try :
                if args.batch:
                    #- hand the whole exposure off to a slurm job
                    print('{} Submitting batch job for {}'.format(time.strftime('%H:%M'), rawfile))
                    batch_run(rawfile, args.outdir, cameras, args.batch_queue, args.batch_time, args.batch_opts)
                else:
                    #- inline pipeline: qproc -> QA -> plots -> tables
                    print('{} Running qproc on {}'.format(time.strftime('%H:%M'), rawfile))
                    sys.stdout.flush()
                    header = run.run_qproc(rawfile, outdir, cameras=cameras)
                    print('{} Running QA on {}/{}'.format(timestamp(), night, expid))
                    sys.stdout.flush()
                    qafile = "{}/qa-{}.fits".format(outdir,expid)
                    caldir = os.path.join(args.plotdir, "static")
                    jsonfile = os.path.join(caldir, "timeseries_dropdown.json")
                    if not os.path.isdir(caldir):
                        os.makedirs(caldir)
                    qarunner.run(indir=outdir, outfile=qafile, jsonfile=jsonfile)
                    print('Generating plots for {}/{}'.format(night, expid))
                    tmpdir = '{}/{}/{}'.format(args.plotdir, night, expid)
                    if not os.path.isdir(tmpdir) :
                        os.makedirs(tmpdir)
                    run.make_plots(infile=qafile, basedir=args.plotdir, preprocdir=outdir, logdir=outdir,
                                   cameras=cameras)
                    run.write_tables(args.outdir, args.plotdir, expnights=[night,])
                time_end = time.time()
                dt = (time_end - time_start) / 60
                print('{} Finished exposure {}/{} ({:.1f} min)'.format(
                    timestamp(), night, expid, dt))
            except Exception as e :
                #- log and keep monitoring; one bad exposure must not kill the daemon
                print("Failed to process or QA or plot exposure {}".format(expid))
                print("Error message: {}".format(str(e)))
                exc_info = sys.exc_info()
                traceback.print_exception(*exc_info)
                del exc_info
                print("Now moving on ...")
                sys.stdout.flush()
            processed.add(expdir)
        else:
            sys.stdout.flush()
            time.sleep(args.waittime)
class TempDirManager():
    """Context manager: hand out a scratch directory, then on exit move its
    contents into `outdir`, preserving relative paths."""

    def __init__(self, outdir):
        self.outdir = outdir    #- final destination for all files
        self.tempdir = None     #- scratch dir, created lazily in __enter__

    def __enter__(self):
        #- BUGFIX: tempfile.TemporaryDirectory().name dropped the manager
        #- object immediately, so its finalizer deleted the directory out from
        #- under us; mkdtemp() creates a directory with no auto-cleanup.
        self.tempdir = tempfile.mkdtemp()
        return self.tempdir

    def __exit__(self, *exc):
        outdir = self.outdir
        tempdir = self.tempdir
        print('{} Copying files from temporary directory to {}'.format(
            time.strftime('%H:%M'), outdir))
        #- collect every file under tempdir and its destination path
        sources = []
        for dirpath, dirnames, files in os.walk(tempdir, topdown=True):
            for file_name in files:
                sources.append(os.path.join(dirpath, file_name))
        destinations = [filepath.replace(tempdir, outdir) for filepath in sources]
        argslist = list(zip(sources, destinations))
        #- pre-create each destination directory once
        fullpath_outdirs = set()
        for (srcfile, destfile) in argslist:
            dirpath = os.path.dirname(destfile)
            if dirpath not in fullpath_outdirs:
                fullpath_outdirs.add(dirpath)  #- BUGFIX: the dedup set was never filled
                if not os.path.exists(dirpath):
                    os.makedirs(dirpath)
        #- using shutil.move in place of shutil.copytree because copytree
        #- requires the destination not to exist (dirs_exist_ok is 3.8+);
        #- parallel moving performs better than moving serially
        ncpu = get_ncpu(None)
        if ncpu > 1:
            pool = mp.Pool(ncpu)
            pool.starmap(shutil.move, argslist)
            pool.close()
            pool.join()
        else:
            for srcdest in argslist:
                #- BUGFIX: was shutil.move(**args) on a tuple (TypeError);
                #- positional unpacking matches what starmap does above
                shutil.move(*srcdest)
        #- drop the now-empty scratch tree (mkdtemp does not auto-clean)
        shutil.rmtree(tempdir, ignore_errors=True)
        print('{} Done copying {} files'.format(
            time.strftime('%H:%M'), len(argslist)))
def batch_run(infile, outdir, cameras, queue, batchtime, batchopts):
    """Write a slurm script that runs `nightwatch run` on one exposure, then sbatch it.

    :param infile: raw data file for one exposure
    :param outdir: nightwatch output base directory
    :param cameras: None, a comma-separated string, or a list/tuple of cameras
    :param queue: slurm QOS name
    :param batchtime: job time limit in minutes
    :param batchopts: extra raw #SBATCH options
    :return: exit code of the sbatch invocation
    """
    night, expid = io.get_night_expid(infile)
    expdir = io.findfile('expdir', night=night, expid=expid, basedir=outdir)

    #- the batch job runs from its own cwd, so all paths must be absolute
    infile = os.path.abspath(infile)
    expdir = os.path.abspath(expdir)
    outdir = os.path.abspath(outdir)

    #- normalize the cameras argument into a command-line fragment
    if cameras is None:
        camera_options = ""
    elif isinstance(cameras, str):
        camera_options = f"--cameras {cameras}"
    elif isinstance(cameras, (list, tuple)):
        camera_options = "--cameras {}".format(','.join(cameras))
    else:
        raise ValueError('Unable to parse cameras {}'.format(cameras))

    jobname = f'nightwatch-{expid:08d}'
    batchfile = f'{expdir}/{jobname}.slurm'
    script = f"""#!/bin/bash -l
#SBATCH {batchopts}
#SBATCH --qos {queue}
#SBATCH --time {batchtime}
#SBATCH --job-name {jobname}
#SBATCH --output {expdir}/{jobname}-%j.joblog
#SBATCH --exclusive
nightwatch run --infile {infile} --outdir {outdir} {camera_options}
"""
    with open(batchfile, 'w') as fx:
        fx.write(script)
    return subprocess.call(["sbatch", batchfile])
def main_run(options=None):
    """CLI entry: run the full pipeline (fibermap, qproc, QA, plots, tables)
    for a single exposure, identified either by --infile or --night/--expid.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    parser = argparse.ArgumentParser(usage = "{prog} run [options]")
    parser.add_argument("-i", "--infile", type=str, required=False,
                        help="input raw data file")
    parser.add_argument("-o", "--outdir", type=str, required=True,
                        help="output base directory")
    parser.add_argument("--cameras", type=str, help="comma separated list of cameras (for debugging)")
    parser.add_argument('-n', '--night', type=int,
                        help="YEARMMDD night")
    parser.add_argument('-e', '--expid', type=int,
                        help="Exposure ID")
    if options is None:
        options = sys.argv[2:]
    args = parser.parse_args(options)
    if args.cameras is not None:
        cameras = args.cameras.split(',')
    else:
        cameras = None
    #- locate the raw file from night/expid when --infile was not given
    if args.infile is None:
        if args.night is None or args.expid is None:
            print('ERROR: must provide --infile or --night AND --expid')
            sys.exit(2)
        args.infile = desispec.io.findfile('raw', args.night, args.expid)
    night, expid = io.get_night_expid(args.infile)
    #- raw data root is three levels above infile (rawdir/NIGHT/EXPID/file)
    rawdir = os.path.dirname(os.path.dirname(os.path.dirname(args.infile)))
    #- Using a tempdir sometimes is better, and sometimes is way worse;
    #- turn off for now
    # with TempDirManager(args.outdir) as tempdir:
    tempdir = args.outdir
    #- `if True:` preserves the indentation of the disabled `with` block above
    if True:
        expdir = io.findfile('expdir', night=night, expid=expid, basedir=tempdir)
        time_start = time.time()
        print('{} Running assemble_fibermap'.format(time.strftime('%H:%M')))
        fibermap = run.run_assemble_fibermap(args.infile, expdir)
        print('{} Running qproc'.format(time.strftime('%H:%M')))
        header = run.run_qproc(args.infile, expdir, cameras=cameras)
        print('{} Running QA analysis'.format(time.strftime('%H:%M')))
        qafile = io.findfile('qa', night=night, expid=expid, basedir=tempdir)
        qaresults = run.run_qa(expdir, outfile=qafile)
        print('{} Making plots'.format(time.strftime('%H:%M')))
        run.make_plots(qafile, tempdir, preprocdir=expdir, logdir=expdir, rawdir=rawdir, cameras=cameras)
        print('{} Updating night/exposure summary tables'.format(time.strftime('%H:%M')))
        run.write_tables(args.outdir, args.outdir, expnights=[night,])
        dt = (time.time() - time_start) / 60.0
        print('{} Done ({:.1f} min)'.format(time.strftime('%H:%M'), dt))
def main_assemble_fibermap(options=None):
    """CLI entry: run assemble_fibermap for one raw exposure file.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    if options is None:
        options = sys.argv[2:]
    parser = argparse.ArgumentParser(usage = "{prog} preproc [options]")
    parser.add_argument("-i", "--infile", type=str, required=True, help="input raw data file")
    parser.add_argument("-o", "--outdir", type=str, required=True, help="output directory")
    args = parser.parse_args(options)
    fibermap = run.run_assemble_fibermap(args.infile, args.outdir)
    #- run_assemble_fibermap returns the output path, or None when skipped
    if fibermap is None:
        print('Did not run assemble_fibermap for {}'.format(args.infile))
    else:
        print('Done running assemble_fibermap for {}; wrote outputs to {}'.format(args.infile, fibermap))
def main_preproc(options=None):
    """CLI entry: run only the preprocessing step on one raw data file.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    if options is None:
        options = sys.argv[2:]
    parser = argparse.ArgumentParser(usage = "{prog} preproc [options]")
    parser.add_argument("-i", "--infile", type=str, required=True, help="input raw data file")
    parser.add_argument("-o", "--outdir", type=str, required=True, help="output directory")
    parser.add_argument('--fibermap', type=str, default=None, help="fibermap file")
    parser.add_argument("--cameras", type=str, help="comma separated list of cameras (for debugging)")
    args = parser.parse_args(options)
    cameras = None if args.cameras is None else args.cameras.split(',')
    header = run.run_preproc(args.infile, args.outdir, fibermap=args.fibermap, cameras=cameras)
    print("Done running preproc on {}; wrote outputs to {}".format(args.infile, args.outdir))
def main_qproc(options=None):
    """CLI entry: run qproc (which includes preproc) on one raw data file.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    if options is None:
        options = sys.argv[2:]
    parser = argparse.ArgumentParser(usage = "{prog} qproc [options]")
    parser.add_argument("-i", "--infile", type=str, required=True, help="input raw data file")
    parser.add_argument("-o", "--outdir", type=str, required=True, help="output directory")
    parser.add_argument("--cameras", type=str, help="comma separated list of cameras (for debugging)")
    args = parser.parse_args(options)
    cameras = None if args.cameras is None else args.cameras.split(',')
    header = run.run_qproc(args.infile, args.outdir, cameras=cameras)
    print("Done running qproc on {}; wrote outputs to {}".format(args.infile, args.outdir))
def main_qa(options=None):
    """CLI entry: run QA analysis over a directory of qproc outputs.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    if options is None:
        options = sys.argv[2:]
    parser = argparse.ArgumentParser(usage = "{prog} qa [options]")
    parser.add_argument("-i", "--indir", type=str, required=True, help="input directory with qproc outputs")
    parser.add_argument("-o", "--outfile", type=str, required=True, help="output qa fits file name")
    args = parser.parse_args(options)
    qaresults = run.run_qa(args.indir, outfile=args.outfile)
    print("Done running QA on {}; wrote outputs to {}".format(args.indir, args.outfile))
def main_plot(options=None):
    """CLI entry: generate QA plot webpages for one or more qa FITS files.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    if options is None:
        options = sys.argv[2:]
    parser = argparse.ArgumentParser(usage = "{prog} plot [options]")
    parser.add_argument("-i", "--infile", type=str, nargs='*', required=True, help="input QA fits file")
    parser.add_argument("-o", "--outdir", type=str, help="output base directory (not including YEARMMDD/EXPID/)")
    parser.add_argument("-r", "--rawdir", type=str, help="directory containing raw data (not including YEARMMDD/EXPID/)")
    args = parser.parse_args(options)
    for qafile in args.infile:
        qadir = os.path.dirname(qafile)
        #- without --outdir, plots land next to each input qa file
        destdir = qadir if args.outdir is None else args.outdir
        run.make_plots(qafile, destdir, preprocdir=qadir, logdir=qadir, rawdir=args.rawdir)
    print("Done making plots for {}; wrote outputs to {}".format(args.infile, args.outdir))
def main_tables(options=None):
    """CLI entry: rebuild the nights/exposures summary HTML tables.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    if options is None:
        options = sys.argv[2:]
    parser = argparse.ArgumentParser(usage = "{prog} plot [options]")
    parser.add_argument("-i", "--indir", type=str, required=True, help="QA in indir/YEARMMDD/EXPID")
    parser.add_argument("-o", "--outdir", type=str, help="write summary tables to outdir/nights.html and outdir/YEARMMDD/exposures.html")
    parser.add_argument("-n", "--nights", type=str, help="comma separated list of nights to process")
    args = parser.parse_args(options)
    if args.outdir is None:
        args.outdir = args.indir
    nights = None if args.nights is None else [int(n) for n in args.nights.split(',')]
    run.write_tables(args.indir, args.outdir, expnights=nights)
    print('Wrote summary tables to {}'.format(args.outdir))
def main_summary(options=None):
    """CLI entry: write per-night summary.json files under indir.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    parser = argparse.ArgumentParser(usage = "{prog} [options]")
    parser.add_argument("-i", "--indir", type=str, required=True, help="directory of night directories; write summary data to indir/night/summary.json")
    #- BUGFIX: argparse type=bool is broken (bool('False') is True; any given
    #- value parsed truthy).  A store_true flag expresses the intended on/off
    #- semantics and defaults to False, replacing the manual None check.
    parser.add_argument("-l", "--last", action="store_true", help="True if last night shown is complete and ready to summarize")
    if options is None:
        options = sys.argv[2:]
    args = parser.parse_args(options)
    run.write_nights_summary(args.indir, args.last)
    print('Wrote summary jsons for each night to {}'.format(args.indir))
def main_threshold(options=None):
    """CLI entry: compute QA threshold json/html files over a range of nights.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    parser = argparse.ArgumentParser(usage = '{prog} [options]')
    parser.add_argument('-i', '--indir', type=str, required=True, help='directory of night directories; where summary.json files can be found')
    parser.add_argument('-o', '--outdir', type=str, required=True, help='directory threshold json/html files should be written to')
    parser.add_argument('-s', '--start', type=int, required=True, help='start date for calculation range')
    parser.add_argument('-e', '--end', type=int, required=True, help='end date for calculation range')
    if options is None:
        options = sys.argv[2:]
    args = parser.parse_args(options)
    run.write_thresholds(args.indir, args.outdir, args.start, args.end)
    #- BUGFIX: the completion message previously hardcoded a source-tree path
    #- ('nightwatch/py/nightwatch/threshold_files'); report the actual outdir.
    print('Wrote threshold jsons for each night to {}'.format(args.outdir))
def main_surveyqa(options=None):
    """CLI entry: generate surveyqa summary webpages under outdir/surveyqa.

    :param options: list of CLI args to parse; defaults to sys.argv[2:]
    """
    parser = argparse.ArgumentParser(usage = '{prog} [options]')
    parser.add_argument('-i', '--infile', type=str, required=True, help='file containing data to feed into surveyqa')
    parser.add_argument('-o', '--outdir', type=str, required=True, help='directory threshold json/html files should be written to (will be written to outdir/surveyqa, outdir should be same location as other nightwatch files)')
    parser.add_argument('-t', '--tilefile', type=str, help='file containing data on tiles')
    parser.add_argument('-r', '--rawdir', type=str, help='directory containing raw data files (without YYYMMDD/EXPID/)')
    if options is None:
        options = sys.argv[2:]
    args = parser.parse_args(options)
    if args.tilefile is None:
        tiles = load_tiles()
    else:
        #- BUGFIX: Table was never imported at module level, so this branch
        #- raised NameError whenever --tilefile was given.
        from astropy.table import Table
        tiles = Table.read(args.tilefile, hdu=1)
    if args.rawdir is None:
        args.rawdir = desispec.io.meta.rawdata_root()
    #- map surveyqa's expected column names onto the input file's columns
    name_dict = {"EXPID": "EXPID", "MJD": "MJD",
                 "AIRMASS": "AIRMASS", "TRANSP": "TRANSPARENCY", "NIGHT": "NIGHT",
                 "MOONSEP": "MOON_SEP_DEG", "RA": "SKYRA", "DEC": "SKYDEC",
                 "SKY": "SKY_MAG_AB", "SEEING": "FWHM_ASEC"}
    run.write_summaryqa(args.infile, name_dict, tiles, args.rawdir, args.outdir)
| true | true |
1c3f0f72545da6d3e28a8e03cb7dc5cd6805e0ef | 16,612 | py | Python | monitor.py | Eternity-luo/JDC | 5356f2ea27490364342dc7a0118455bb6dfab485 | [
"MIT"
] | 8 | 2021-03-12T23:03:42.000Z | 2021-05-06T22:43:49.000Z | monitor.py | Eternity-luo/JDC | 5356f2ea27490364342dc7a0118455bb6dfab485 | [
"MIT"
] | 1 | 2021-07-03T19:07:47.000Z | 2021-07-17T05:27:33.000Z | monitor.py | Eternity-luo/JDC | 5356f2ea27490364342dc7a0118455bb6dfab485 | [
"MIT"
] | 6 | 2021-03-15T12:43:20.000Z | 2021-05-07T02:23:41.000Z | from telethon import TelegramClient, events
import time, os, sys, datetime, requests, random, string, re, json, httpx, asyncio
from jsonpath import jsonpath
import importlib
importlib.reload(sys)
requests.packages.urllib3.disable_warnings()
#- Module configuration.
#- SECURITY NOTE(review): account cookies and Telegram API credentials are
#- hardcoded below; these should be moved to environment variables / a config
#- file and the committed values revoked.
ckss = 'pt_key=AAJhVrPQADBghACD7FWhN04rKpDsLjchHHP8cUUCfaHBAtYDmzvaFzievcdaI3LXw7Jrc0tC78o;pt_pin=jd_GOiWYOBshJfE;&pt_key=AAJhVrRgADD_x8egP9EQ6SoG6-a-dz0CW5CQKVar1x6MIuCE2erqvgEpMd7iwGF6BUEaTOw1D1k;pt_pin=jd_dbnOAWlSnLRz;'
cks = ckss.split('&')  # one cookie string per account
timestamp = int(round(time.time() * 1000))  # NOTE: frozen at import, not per-request
today = datetime.datetime.now().strftime('%Y-%m-%d')
pwd = repr(os.getcwd()).replace("'", '')
record = 'yes'  # 'yes'/'no' (or True/False): whether to record matching shop ids
openCardBean = 1  # only join a shop card when the bean reward exceeds this value
onlyRecord = 'no'  # 'yes' or 'no'; 'yes': record only, do not actually join
timesleep = 2  # seconds to wait between requests
api_id = 3420052
api_hash = "ec130bf6eb5a4b0710e6e989cbb7dd28"
# Resolve a shop's venderId from its mall.jd.com landing page
async def getVenderId(shopId, headers):
    """Fetch https://mall.jd.com/index-<shopId>.html and scrape the venderId.

    :param shopId: JD shop id used to build the landing-page URL
    :param headers: request headers (desktop 'mall' profile)
    :return: venderId string; raises IndexError when the page has no match
    """
    landing_url = f'https://mall.jd.com/index-{shopId}.html'
    async with httpx.AsyncClient(verify=False, headers=headers, timeout=30) as session:
        page = await session.get(url=landing_url)
    matches = re.findall(r'shopId=\d+&id=(\d+)"', page.text)
    return matches[0]
def nowtime():
    """Current local time as a 'YYYY-MM-DD HH:MM:SS' string (used in log lines)."""
    moment = datetime.datetime.now()
    return moment.strftime('%Y-%m-%d %H:%M:%S')
async def getShopOpenCardInfo(venderId, headers, shopid, userName):
    """Query a shop's member-card gift info and decide whether to join.

    :param venderId: vendor id of the shop
    :param headers: 'mh5' request headers including the account cookie
    :param shopid: shop id (used only for log/record strings)
    :param userName: pt_pin of the account (used only in messages)
    :return: (activityId, beans) when joinable; (0, 0) no beans left;
             (1, 1) already a member; (2, 2) record-only mode;
             (0, openCardStatus) when the reward is below openCardBean;
             None if an exception was swallowed below.
    NOTE(review): this body looks machine-recovered — it contains unreachable
    statements after unconditional returns; kept byte-identical here.
    """
    num1 = string.digits
    #- random 5-digit suffix for the jsonp callback name
    v_num1 = ''.join(random.sample(['1', '2', '3', '4', '5', '6', '7', '8', '9'], 1)) + ''.join(
        random.sample(num1, 4))
    url = 'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=getShopOpenCardInfo&body=%7B%22venderId%22%3A%22{2}%22%2C%22channel%22%3A406%7D&client=H5&clientVersion=9.2.0&uuid=&jsonp=jsonp_{0}_{1}'.format(
        timestamp, v_num1, venderId)
    async with httpx.AsyncClient(verify=False, headers=headers, timeout=30) as client:
        resp = await client.get(url=url)
    # resp = requests.get(url=url, verify=False, headers=headers, timeout=30)
    #- NOTE(review): blocking sleep inside an async function stalls the event loop
    time.sleep(timesleep)
    resulttxt = resp.text
    #- strip the jsonp_...(...) wrapper to get the JSON payload
    r = re.compile('jsonp_.*?\\((.*?)\\)\\;', re.M | re.S | re.I)
    result = r.findall(resulttxt)
    cardInfo = json.loads(result[0])
    venderCardName = cardInfo['result']['shopMemberCardInfo']['venderCardName']
    # print(f"\t╰查询入会礼包【{venderCardName}】{shopid}")
    openCardStatus = cardInfo['result']['userInfo']['openCardStatus']
    interestsRuleList = cardInfo['result']['interestsRuleList']
    if interestsRuleList == None:
        # print('\t\t╰Oh,该店礼包已被领光了~')
        return (0, 0)
    try:
        if len(interestsRuleList) > 0:
            for i in interestsRuleList:
                if '京豆' in i['prizeName']:
                    getBean = int(i['discountString'])
                    activityId = i['interestsInfo']['activityId']
                    context = '{0}'.format(shopid)
                    url = 'https://shopmember.m.jd.com/member/memberCloseAccount?venderId={}'.format(venderId)
                    context = '[{0}]:入会{2}豆,【{1}】销卡:{3}'.format(nowtime(), venderCardName, getBean, url)
                    if getBean >= openCardBean:
                        # print(f"\t╰{venderCardName}:入会赠送【{getBean}豆】,可入会")
                        context = '{0}'.format(shopid)
                        #- NOTE(review): onlyRecord is the string 'yes'/'no', so
                        #- `onlyRecord == True` is never true; the intent was
                        #- likely `onlyRecord == 'yes'` — confirm before fixing
                        if onlyRecord == True:
                            # print('已开启仅记录,不入会。')
                            return (2, 2)
                        if openCardStatus == 1:
                            url = 'https://shopmember.m.jd.com/member/memberCloseAccount?venderId={}'.format(
                                venderId)
                            # print('\t\t╰[账号:{0}]:您已经是本店会员,请注销会员卡24小时后再来~\n注销链接:{1}'.format(userName, url))
                            context = '[{3}]:入会{1}豆,{0}销卡:{2}'.format(venderCardName, getBean, url, nowtime())
                            return (1, 1)
                        return (activityId, getBean)
                    # print(f"\t\t╰{venderCardName}:入会送【{getBean}】豆少于【{openCardBean}豆】,不入...")
                    if onlyRecord == True:
                        # print('已开启仅记录,不入会。')
                        return (2, 2)
                    return (
                     0, openCardStatus)
                    continue
                # print('\t\t╰Oh~ 该店入会京豆已被领光了')
                return (0, 0)
            return (0, 0)
    except Exception as e:
        try:
            print(e)
        finally:
            e = None
            del e
async def bindWithVender(venderId, shopId, activityId, channel, headers):
    """Join a shop's member card by calling the bindWithVender endpoint.

    :param venderId: vendor id of the shop
    :param shopId: shop id
    :param activityId: open-card activity id from getShopOpenCardInfo
    :param channel: numeric channel code (callers pass 208)
    :param headers: 'JDApp' request headers including the account cookie
    :return: raw jsonp response text (the open-card result), or None if the
             request failed (the exception is printed and swallowed below)
    """
    #- fabricate a random member profile (name, jsonp suffix, qq email)
    num = string.ascii_letters + string.digits
    v_name = ''.join(random.sample(num, 10))
    num1 = string.digits
    v_num1 = ''.join(random.sample(['1', '2', '3', '4', '5', '6', '7', '8', '9'], 1)) + ''.join(
        random.sample(num1, 4))
    qq_num = ''.join(random.sample(['1', '2', '3', '4', '5', '6', '7', '8', '9'], 1)) + ''.join(
        random.sample(num1, 8)) + '@qq.com'
    #- URL-encoded JSON body; positional fields: 0=v_name 1=timestamp 2=v_num1
    #- 3=channel 4=venderId 5=activityId 6=qq_num 7=shopId
    url = 'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=bindWithVender&body=%7B%22venderId%22%3A%22{4}%22%2C%22shopId%22%3A%22{7}%22%2C%22bindByVerifyCodeFlag%22%3A1%2C%22registerExtend%22%3A%7B%22v_sex%22%3A%22%E6%9C%AA%E7%9F%A5%22%2C%22v_name%22%3A%22{0}%22%2C%22v_birthday%22%3A%221990-03-18%22%2C%22v_email%22%3A%22{6}%22%7D%2C%22writeChildFlag%22%3A0%2C%22activityId%22%3A{5}%2C%22channel%22%3A{3}%7D&client=H5&clientVersion=9.2.0&uuid=&jsonp=jsonp_{1}_{2}'.format(
        v_name, timestamp, v_num1, channel, venderId, activityId, qq_num, shopId)
    try:
        async with httpx.AsyncClient(verify=False, headers=headers, timeout=30) as client:
            respon = await client.get(url=url)
        # respon = requests.get(url=url, verify=False, headers=headers, timeout=30)
        result = respon.text
        return result
    except Exception as e:
        try:
            print(e)
        finally:
            e = None
            del e
async def getResult(resulttxt, userName, user_num):
    """Parse the jsonp response from bindWithVender and report the outcome.

    :param resulttxt: raw jsonp text from the bind call
    :param userName: pt_pin of the account, for messages
    :param user_num: account index, for messages
    :return: '0' on success, a formatted failure message otherwise, or None
             when resulttxt contains no jsonp payload.
    NOTE(review): both branches return inside the first loop iteration, so
    only the first jsonp payload is ever examined.
    """
    r = re.compile('jsonp_.*?\\((.*?)\\)\\;', re.M | re.S | re.I)
    result = r.findall(resulttxt)
    for i in result:
        result_data = json.loads(i)
        busiCode = result_data['busiCode']
        if busiCode == '0':
            message = result_data['message']
            try:
                #- NOTE(review): `result` and `i` are rebound here, shadowing
                #- the loop variables above
                result = result_data['result']['giftInfo']['giftList']
                #print(f"\t\t╰用户{user_num}【{userName}】:{message}")
                for i in result:
                    print('\t\t\t╰{0}:{1} '.format(i['prizeTypeName'], i['discount']))
            #- bare except: any missing gift key falls back to the summary line
            except:
                print(f"\t\t╰用户{user_num}【{userName}】:{message}")
            return busiCode
        busiCode = '\t\t╰用户{0}【{1}】:{2}'.format(user_num, userName, result_data['message'])
        return busiCode
async def setHeaders(cookie, intype):
    """Build request headers for one of three client profiles.

    :param cookie: account cookie string (used by 'JDApp' and 'mh5' only)
    :param intype: 'mall' (desktop shop page), 'JDApp' (app UA) or 'mh5'
                   (mobile Safari UA); any other value yields None
    :return: a headers dict, or None for an unknown intype
    """
    if intype == 'mall':
        #- desktop profile for mall.jd.com; carries no cookie
        return {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Host": "mall.jd.com",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Safari/605.1.15",
            "Accept-Language": "zh-cn",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "close"
        }
    #- 'JDApp' and 'mh5' share everything except the User-Agent string
    app_ua = "jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1"
    h5_ua = "Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1"
    if intype in ('JDApp', 'mh5'):
        return {
            'Cookie': cookie,
            'Accept': "*/*",
            'Connection': "close",
            'Referer': "https://shopmember.m.jd.com/shopcard/?",
            'Accept-Encoding': "gzip, deflate, br",
            'Host': "api.m.jd.com",
            'User-Agent': app_ua if intype == 'JDApp' else h5_ua,
            'Accept-Language': "zh-cn"
        }
async def jd_main(activecode, Id):
    """Attempt the shop open-card flow for every configured account cookie.

    :param activecode: 1 when Id is a shopId (venderId must be resolved),
                       2 when Id is already a venderId; anything else aborts
    :param Id: shop or vendor id, depending on activecode
    :return: concatenated per-account result codes/messages, an early-exit
             message when no beans remain, or None if an exception escaped
             (it is printed and swallowed below).
    """
    s = ''
    try:
        for ck in cks:
            #- extract the account's pt_pin from its cookie string
            regex1 = re.compile(r"(?<=pt_pin=).+?(?=;)", re.M)
            userName = re.findall(regex1, ck)[0]
            #(userName)
            headers_a = await setHeaders(ck, 'mh5')
            headers_b = await setHeaders(ck, 'mall')
            shopId = Id
            user_num = 1
            if activecode == 1:
                venderId = await getVenderId(Id, headers=headers_b)
            elif activecode == 2:
                venderId = Id
            else:
                #print('id错误')
                break
            # print(shopId)
            # venderId =await getVenderId(shopId, headers=headers_b)
            activityId, getBean = await getShopOpenCardInfo(venderId, headers=headers_a, shopid=Id, userName=userName)
            # print(activityId, getBean)
            #- NOTE(review): activityId is a JSON field and may be a str, in
            #- which case `> 10` raises TypeError on py3; that lands in the
            #- outer except and this function returns None — TODO confirm
            if activityId > 10:
                activityIdLabel = 1
                headers = await setHeaders(ck, 'JDApp')
                result = await bindWithVender(venderId, shopId, activityId, 208, headers)
                busiCode = await getResult(result, userName, user_num)
                s += busiCode
            else:
                #- first account seeing no reward aborts for all accounts
                return '领光了,我的天!'
        return s
    except Exception as e:
        print(e)
#- Module-level Telegram session (session file 'test'); the commented variant
#- routes traffic through a local socks5 proxy.
# client = TelegramClient('test', api_id, api_hash, proxy=("socks5", '127.0.0.1', 7890))
client = TelegramClient('test', api_id, api_hash)
p1 = re.compile(r"[(](.*?)[)]", re.S)  # captures text inside ASCII parentheses (not used below)
async def guanzhu(url):
    """Hit a follow-shop endpoint once per configured account cookie.

    :param url: the api.m.jd.com follow URL extracted from a message
    """
    ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"
    for account_cookie in cks:
        request_headers = {"user-agent": ua, 'Cookie': account_cookie}
        async with httpx.AsyncClient(headers=request_headers, verify=False, timeout=30) as session:
            #- fire-and-forget: the response body is not inspected
            resp = await session.get(url=url)
async def get_id(url):
    """Resolve a u.jd.com short link to a (kind, id) pair.

    :param url: short link like 'https://u.jd.com/...'
    :return: (1, shopId) when the redirect carries shopId=, (2, venderId) for
             venderId=, (0, 0) for neither, or None when parsing/requests fail
             (the error is printed and swallowed).
    """
    # url='https://u.jd.com/qq3OS8s'
    #- NOTE(review): the resolved redirect URL is stashed in a module global
    global location1
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_5_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Mobile/15E148 Safari/604.1'
        }
        async with httpx.AsyncClient(headers=headers, verify=False, timeout=30) as client:
            pro_res = await client.get(url=url)
        # pro_res = requests.get(url, headers=headers, verify=False).text
        #- pull the intermediate hop URL out of the landing page's JS
        f = re.findall(r'(?<=hrl=\').+?(?=\';var)', pro_res.text)[0]
        # async with httpx.AsyncClient(headers=headers, verify=False, allow_redirects=False) as Client:
        #     res=await Client.get(url=f)
        #     location1=res.headers['location']
        #- NOTE(review): blocking requests call inside an async function —
        #- stalls the event loop; the commented httpx variant above avoids this
        location1 = requests.get(url=f, headers=headers, verify=False, allow_redirects=False).headers['location']
        # print(location1)
        Id = re.findall(r'(?<=Id=).+?(?=&)', location1)
        try:
            if 'shopId' in location1:
                #print('shopId=' + Id[0])
                return (1, Id[0])
            elif 'venderId' in location1:
                #print('verid=' + Id[0])
                return (2, Id[0])
            else:
                #print('url err-getid')
                return (0, 0)
        except Exception as e:
            print(e)
    except Exception as e:
        print('网址错误!-getid')
    # print(Id)
async def send_tg(chat_id, client, messages, m):
    """Deliver `messages` to a Telegram chat through `client`.

    :param chat_id: username/chat id/invite link resolvable by get_entity
    :param client: an active TelegramClient
    :param messages: text (m == 0) or a file path/handle (m == 1)
    :param m: delivery mode selector; any other value just logs an error
    """
    entity = await client.get_entity(chat_id)
    if m == 0:
        await client.send_message(entity=entity, message=messages)
    elif m == 1:
        await client.send_file(entity=entity, file=messages)
    else:
        print('发送错误')
async def optc(aus):
    """Decode a JD '集卡' share command via api.jds.codes and format the result.

    :param aus: the share token, e.g. '#abc123'
    :return: a formatted summary string, None on a non-200 response, or the
             fallback message when decoding raises.
    """
    try:
        url = 'https://api.jds.codes/jCommand'
        data = {"code": f"{aus}"}
        result = requests.post(url=url, json=data)
        if result.status_code == 200:
            payload = result.json()['data']  # parse the response body once
            jumpurl = payload['jumpUrl']
            title = payload['title']
            # host+path up to the first query parameter
            compile1 = re.compile('(?<=https:\/\/).+?(?=&)')
            url1 = re.findall(compile1, jumpurl)[0]
            # activity id embedded in the query string
            compile2 = re.compile('(?<=activityId=).+?(?=&)')
            id1 = re.findall(compile2, jumpurl)[0]
            # bare host name
            compile3 = re.compile('(?<=https:\/\/).+?(?=\/)')
            url2 = re.findall(compile3, jumpurl)[0]
            msg = f'原始url:{jumpurl}\n标题:{title}\n活动地址:{url1}\nid:{id1}\nurl:{url2}'
            return msg
    #- BUGFIX: was a bare `except:` which also swallowed SystemExit and
    #- KeyboardInterrupt; Exception keeps the fallback-string behavior
    except Exception:
        return '裂开了,看不懂你说的........'
@client.on(events.NewMessage(incoming=True, chats=[-1001175133767]))
@client.on(events.NewMessage(incoming=True, chats=[-1001461096991]))
#@client.on(events.NewMessage())
async def my_event_handler(event):
    """Telegram handler: scan incoming messages for JD links and commands.

    Three independent triggers per message:
    1. u.jd.com short links  -> attempt the shop open-card flow;
    2. api.m.jd.com links    -> follow-shop call per account;
    3. '集卡#...' tokens      -> decode via optc and forward to a fixed group.
    """
    sender = event.message.chat_id
    regex1 = re.compile(r"(https://u.jd.com/.*)", re.M)
    open_url1 = re.findall(regex1, event.message.text)
    if len(open_url1):
        for j_url in open_url1:
            activecode, Id = await get_id(j_url)
            res = await jd_main(activecode, Id)
            await send_tg(sender, client, res, 0)
    regex2 = re.compile(r"(https://api.m.jd.com/.*)", re.M)
    open_url2 = re.findall(regex2, event.message.text)
    if len(open_url2):
        for j_url in open_url2:
            j_url = j_url.replace(')', '')
            await guanzhu(j_url)
    regex3 = re.compile(r"(集卡#.*)", re.M)
    #- BUGFIX: the original indexed findall(...)[0] unconditionally, raising
    #- IndexError on every message that did not contain '集卡#'
    matches3 = re.findall(regex3, event.message.text)
    if matches3:
        open_url3 = matches3[0]
        mes = open_url3.split('#')
        if len(mes) == 2:
            mes = f'#{mes[-1]}'
            msg = await optc(mes)
            #- decoded commands are always forwarded to this fixed group
            sender = 'https://t.me/joinchat/2Gkyl0qS4vNiNjZl'
            await send_tg(sender, client, msg, 0)
        else:
            msg = '丢,别瞎搞'
            await send_tg(sender, client, msg, 0)
'''
try:
regex1 = re.compile(r"(https://u.jd.com/.*)", re.M)
open_url1 = re.findall(regex1, event.message.text)
if len(open_url1):
# if '入会' in event.raw_text:
for j_url in open_url1:
# print(j_url)
# print(event.message.text)
activecode, Id = await get_id(j_url)
res = await jd_main(activecode, Id)
await send_tg(sender, client, res, 0)
# else:
# print('等待关注程序开发!')
regex2 = re.compile(r"(https://api.m.jd.com/.*)", re.M)
open_url2 = re.findall(regex2, event.message.text)
if len(open_url2):
for j_url in open_url2:
j_url = j_url.replace(')', '')
# print(j_url)
await guanzhu(j_url)
regex3 = re.compile(r"(集卡#.*)", re.M)
open_url3 = re.findall(regex3, event.message.text)[0]
if len(open_url3):
mes = open_url3.split('#')
if len(mes) == 2:
mes=f'#{mes[-1]}'
msg = await optc(mes)
# print(msg)
#await send_tg(sender, client, msg, 0)
sender = 'https://t.me/joinchat/2Gkyl0qS4vNiNjZl'
await send_tg(sender, client, msg,0)
else:
msg = '丢,别瞎搞'
await send_tg(sender, client, msg, 0)
except Exception as e:
print(e)
#await send_tg(sender, client, str(e), 0)
'''
if __name__ == "__main__":
    #- connect the Telegram client and block forever, dispatching incoming
    #- messages to my_event_handler
    with client:
        # client.loop.run_until_complete(main())
        client.loop.run_forever()
| 40.517073 | 492 | 0.561642 | from telethon import TelegramClient, events
import time, os, sys, datetime, requests, random, string, re, json, httpx, asyncio
from jsonpath import jsonpath
import importlib
importlib.reload(sys)
requests.packages.urllib3.disable_warnings()
ckss = 'pt_key=AAJhVrPQADBghACD7FWhN04rKpDsLjchHHP8cUUCfaHBAtYDmzvaFzievcdaI3LXw7Jrc0tC78o;pt_pin=jd_GOiWYOBshJfE;&pt_key=AAJhVrRgADD_x8egP9EQ6SoG6-a-dz0CW5CQKVar1x6MIuCE2erqvgEpMd7iwGF6BUEaTOw1D1k;pt_pin=jd_dbnOAWlSnLRz;'
cks = ckss.split('&')
timestamp = int(round(time.time() * 1000))
today = datetime.datetime.now().strftime('%Y-%m-%d')
pwd = repr(os.getcwd()).replace("'", '')
record = 'yes' # False|True 或 yes |no 是否记录符合条件的shopid,
openCardBean = 1 # 只入送豆数量大于此值
onlyRecord = 'no' ##yes 或 no yes:仅记录,不入会。
timesleep = 2 # 请求间隔
api_id = 3420052
api_hash = "ec130bf6eb5a4b0710e6e989cbb7dd28"
# Fetch a shop's venderId (original comment: 获取VenderId)
async def getVenderId(shopId, headers):
    """Scrape the venderId from the shop's mall.jd.com landing page.

    :param shopId: JD shop id; builds https://mall.jd.com/index-<shopId>.html
    :param headers: request headers (desktop 'mall' profile)
    :return: venderId string; IndexError if the page yields no match
    """
    url = f'https://mall.jd.com/index-{shopId}.html'
    # print(url)
    # resp = requests.get(url=url,headers=headers)
    async with httpx.AsyncClient(verify=False, headers=headers, timeout=30) as client:
        resp = await client.get(url=url)
    resulttext = resp.text
    r = re.compile(r'shopId=\d+&id=(\d+)"')
    venderId = r.findall(resulttext)
    return venderId[0]
def nowtime():
    """Return the current local time as a 'YYYY-MM-DD HH:MM:SS' string."""
    stamp_format = '%Y-%m-%d %H:%M:%S'
    current = datetime.datetime.now()
    return current.strftime(stamp_format)
async def getShopOpenCardInfo(venderId, headers, shopid, userName):
    """Query the JD shop-membership API for a vendor's open-card (join) reward.

    Returns a 2-tuple:
      (0, 0)                    -- no reward list / reward exhausted
      (2, 2)                    -- "record only" mode (NOTE(review): dead code,
                                   see comment at the onlyRecord checks below)
      (1, 1)                    -- user is already a member (openCardStatus == 1)
      (activityId, getBean)     -- joinable: activity id and bean count
      None (implicit)           -- on exception, or when interestsRuleList is empty
    """
    num1 = string.digits
    # Random numeric suffix used to fake a unique jsonp callback name.
    v_num1 = ''.join(random.sample(['1', '2', '3', '4', '5', '6', '7', '8', '9'], 1)) + ''.join(
        random.sample(num1, 4))
    url = 'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=getShopOpenCardInfo&body=%7B%22venderId%22%3A%22{2}%22%2C%22channel%22%3A406%7D&client=H5&clientVersion=9.2.0&uuid=&jsonp=jsonp_{0}_{1}'.format(
        timestamp, v_num1, venderId)
    async with httpx.AsyncClient(verify=False, headers=headers, timeout=30) as client:
        resp = await client.get(url=url)
    # resp = requests.get(url=url, verify=False, headers=headers, timeout=30)
    # Module-level throttle between API calls (blocks the event loop;
    # NOTE(review): asyncio.sleep would be the non-blocking equivalent).
    time.sleep(timesleep)
    resulttxt = resp.text
    # Strip the jsonp wrapper: jsonp_<ts>_<rand>( ...json... );
    r = re.compile('jsonp_.*?\\((.*?)\\)\\;', re.M | re.S | re.I)
    result = r.findall(resulttxt)
    cardInfo = json.loads(result[0])
    venderCardName = cardInfo['result']['shopMemberCardInfo']['venderCardName']
    # print(f"\t╰查询入会礼包【{venderCardName}】{shopid}")
    openCardStatus = cardInfo['result']['userInfo']['openCardStatus']
    interestsRuleList = cardInfo['result']['interestsRuleList']
    if interestsRuleList == None:
        # No reward list at all -> nothing to claim.
        # print('\t\t╰Oh,该店礼包已被领光了~')
        return (0, 0)
    try:
        if len(interestsRuleList) > 0:
            for i in interestsRuleList:
                # Only rewards whose prize name mentions JD beans ("京豆").
                if '京豆' in i['prizeName']:
                    getBean = int(i['discountString'])
                    activityId = i['interestsInfo']['activityId']
                    # `context` is assigned but never used/returned here.
                    context = '{0}'.format(shopid)
                    url = 'https://shopmember.m.jd.com/member/memberCloseAccount?venderId={}'.format(venderId)
                    context = '[{0}]:入会{2}豆,【{1}】销卡:{3}'.format(nowtime(), venderCardName, getBean, url)
                    if getBean >= openCardBean:
                        # print(f"\t╰{venderCardName}:入会赠送【{getBean}豆】,可入会")
                        context = '{0}'.format(shopid)
                        # NOTE(review): onlyRecord is the module-level string
                        # 'no'; comparing it with == True is always False, so
                        # this branch (and the one below) can never fire.
                        if onlyRecord == True:
                            # print('已开启仅记录,不入会。')
                            return (2, 2)
                        if openCardStatus == 1:
                            # Already a member; card must be cancelled first.
                            url = 'https://shopmember.m.jd.com/member/memberCloseAccount?venderId={}'.format(
                                venderId)
                            # print('\t\t╰[账号:{0}]:您已经是本店会员,请注销会员卡24小时后再来~\n注销链接:{1}'.format(userName, url))
                            context = '[{3}]:入会{1}豆,{0}销卡:{2}'.format(venderCardName, getBean, url, nowtime())
                            return (1, 1)
                        return (activityId, getBean)
                    # print(f"\t\t╰{venderCardName}:入会送【{getBean}】豆少于【{openCardBean}豆】,不入...")
                    if onlyRecord == True:
                        # print('已开启仅记录,不入会。')
                        return (2, 2)
                    # Bean reward below threshold: report status only.
                    return (
                        0, openCardStatus)
                continue
                # NOTE(review): the two statements below are unreachable
                # (they follow a `continue` at the same indent level).
                # print('\t\t╰Oh~ 该店入会京豆已被领光了')
                return (0, 0)
            return (0, 0)
        # NOTE(review): empty interestsRuleList falls through here and the
        # function implicitly returns None, not (0, 0).
    except Exception as e:
        try:
            print(e)
        finally:
            e = None
            del e
async def bindWithVender(venderId, shopId, activityId, channel, headers):
    """Join (bind) a shop membership card with randomized fake profile data.

    Generates a random name / numeric suffix / @qq.com email, issues the
    bindWithVender jsonp request, and returns the raw response text.
    Returns None (implicitly) if the request raises.
    """
    num = string.ascii_letters + string.digits
    # Random 10-char display name for the fake member profile.
    v_name = ''.join(random.sample(num, 10))
    num1 = string.digits
    # Random 5-digit jsonp suffix (first digit non-zero).
    v_num1 = ''.join(random.sample(['1', '2', '3', '4', '5', '6', '7', '8', '9'], 1)) + ''.join(
        random.sample(num1, 4))
    # Random 9-digit fake QQ-mail address.
    qq_num = ''.join(random.sample(['1', '2', '3', '4', '5', '6', '7', '8', '9'], 1)) + ''.join(
        random.sample(num1, 8)) + '@qq.com'
    # URL-encoded JSON body; placeholders: 0=v_name 1=timestamp 2=v_num1
    # 3=channel 4=venderId 5=activityId 6=qq_num 7=shopId.
    url = 'https://api.m.jd.com/client.action?appid=jd_shop_member&functionId=bindWithVender&body=%7B%22venderId%22%3A%22{4}%22%2C%22shopId%22%3A%22{7}%22%2C%22bindByVerifyCodeFlag%22%3A1%2C%22registerExtend%22%3A%7B%22v_sex%22%3A%22%E6%9C%AA%E7%9F%A5%22%2C%22v_name%22%3A%22{0}%22%2C%22v_birthday%22%3A%221990-03-18%22%2C%22v_email%22%3A%22{6}%22%7D%2C%22writeChildFlag%22%3A0%2C%22activityId%22%3A{5}%2C%22channel%22%3A{3}%7D&client=H5&clientVersion=9.2.0&uuid=&jsonp=jsonp_{1}_{2}'.format(
        v_name, timestamp, v_num1, channel, venderId, activityId, qq_num, shopId)
    try:
        async with httpx.AsyncClient(verify=False, headers=headers, timeout=30) as client:
            respon = await client.get(url=url)
        # respon = requests.get(url=url, verify=False, headers=headers, timeout=30)
        result = respon.text
        return result
    except Exception as e:
        try:
            print(e)
        finally:
            e = None
            del e
async def getResult(resulttxt, userName, user_num):
    """Parse the jsonp bind response and report the outcome.

    Returns '0' on success (after printing the gift list), or a formatted
    failure message string. NOTE(review): both `return` statements sit inside
    the for loop, so only the FIRST jsonp payload is ever examined.
    """
    # Strip the jsonp wrapper to recover the JSON payload(s).
    r = re.compile('jsonp_.*?\\((.*?)\\)\\;', re.M | re.S | re.I)
    result = r.findall(resulttxt)
    for i in result:
        result_data = json.loads(i)
        busiCode = result_data['busiCode']
        if busiCode == '0':
            message = result_data['message']
            try:
                # `result` is rebound here from the regex matches to the
                # gift list; the loop variable `i` is rebound too.
                result = result_data['result']['giftInfo']['giftList']
                #print(f"\t\t╰用户{user_num}【{userName}】:{message}")
                for i in result:
                    print('\t\t\t╰{0}:{1} '.format(i['prizeTypeName'], i['discount']))
            except:
                # Gift list missing/odd shape: fall back to the raw message.
                print(f"\t\t╰用户{user_num}【{userName}】:{message}")
            return busiCode
        # Failure: return a human-readable description instead of the code.
        busiCode = '\t\t╰用户{0}【{1}】:{2}'.format(user_num, userName, result_data['message'])
        return busiCode
async def setHeaders(cookie, intype):
    """Build the HTTP header dict for one of three request contexts.

    intype 'mall'  -> desktop-Safari headers for mall.jd.com (cookie unused);
    intype 'JDApp' -> JD in-app UA headers for api.m.jd.com, carrying *cookie*;
    intype 'mh5'   -> mobile-Safari H5 headers for api.m.jd.com, carrying *cookie*.
    Any other intype returns None.
    """
    if intype == 'mall':
        return {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
            "Host": "mall.jd.com",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Safari/605.1.15",
            "Accept-Language": "zh-cn",
            "Accept-Encoding": "gzip, deflate, br",
            "Connection": "close"
        }
    if intype == 'JDApp':
        return {
            'Cookie': cookie,
            'Accept': "*/*",
            'Connection': "close",
            'Referer': "https://shopmember.m.jd.com/shopcard/?",
            'Accept-Encoding': "gzip, deflate, br",
            'Host': "api.m.jd.com",
            'User-Agent': "jdapp;iPhone;9.4.8;14.3;809409cbd5bb8a0fa8fff41378c1afe91b8075ad;network/wifi;ADID/201EDE7F-5111-49E8-9F0D-CCF9677CD6FE;supportApplePay/0;hasUPPay/0;hasOCPay/0;model/iPhone13,4;addressid/;supportBestPay/0;appBuild/167629;jdSupportDarkMode/0;Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148;supportJDSHWK/1",
            'Accept-Language': "zh-cn"
        }
    if intype == 'mh5':
        return {
            'Cookie': cookie,
            'Accept': "*/*",
            'Connection': "close",
            'Referer': "https://shopmember.m.jd.com/shopcard/?",
            'Accept-Encoding': "gzip, deflate, br",
            'Host': "api.m.jd.com",
            'User-Agent': "Mozilla/5.0 (iPhone; CPU iPhone OS 14_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Mobile/15E148 Safari/604.1",
            'Accept-Language': "zh-cn"
        }
async def jd_main(activecode, Id):
    """Attempt to join a shop membership for each configured account cookie.

    activecode 1 -> *Id* is a shopId (vendor id is looked up);
    activecode 2 -> *Id* is already a venderId; anything else aborts.
    Returns the accumulated per-account result string, the "all claimed"
    message, or None on unexpected errors.
    NOTE(review): `return s` is inside the for loop, so only the FIRST
    cookie in `cks` is ever processed.
    """
    s = ''
    try:
        for ck in cks:
            # pt_pin=<user>; -> account name used for log messages.
            regex1 = re.compile(r"(?<=pt_pin=).+?(?=;)", re.M)
            userName = re.findall(regex1, ck)[0]
            #(userName)
            headers_a = await setHeaders(ck, 'mh5')
            headers_b = await setHeaders(ck, 'mall')
            shopId = Id
            user_num = 1
            if activecode == 1:
                venderId = await getVenderId(Id, headers=headers_b)
            elif activecode == 2:
                venderId = Id
            else:
                #print('id错误')
                break
            # print(shopId)
            # venderId =await getVenderId(shopId, headers=headers_b)
            # NOTE(review): getShopOpenCardInfo may return None (empty reward
            # list / internal error); unpacking then raises and is swallowed
            # by the outer except.
            activityId, getBean = await getShopOpenCardInfo(venderId, headers=headers_a, shopid=Id, userName=userName)
            # print(activityId, getBean)
            # Sentinel returns use small codes (0/1/2); real activity ids
            # are large, hence the `> 10` test.
            if activityId > 10:
                activityIdLabel = 1
                headers = await setHeaders(ck, 'JDApp')
                result = await bindWithVender(venderId, shopId, activityId, 208, headers)
                busiCode = await getResult(result, userName, user_num)
                s += busiCode
            else:
                return '领光了,我的天!'
            return s
    except Exception as e:
        print(e)
# client = TelegramClient('test', api_id, api_hash, proxy=("socks5", '127.0.0.1', 7890))
client = TelegramClient('test', api_id, api_hash)
p1 = re.compile(r"[(](.*?)[)]", re.S)
async def guanzhu(url):
    """Issue a GET to *url* once per configured account cookie.

    Presumably a JD "follow shop" API endpoint (guanzhu = "follow") — the
    response body is ignored. TODO confirm against the caller's URLs.
    """
    for ck in cks:
        header = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36",
            'Cookie': ck}
        # NOTE(review): the local name `client` shadows the module-level
        # telethon `client` inside this block.
        async with httpx.AsyncClient(headers=header, verify=False, timeout=30) as client:
            r = await client.get(url=url)
        # r=requests.get(url=url,headers=header, verify=False)
        #print(r.json()['result']['followDesc'])
async def get_id(url):
    """Resolve a u.jd.com short link to a shop/vendor id.

    Fetches the short link, extracts the redirect target from the page's
    `hrl='...'` script variable, follows one redirect (its Location header
    is stored in the module-level `location1`), then pulls the id out of
    the final URL. Returns (1, shopId), (2, venderId), or (0, 0) when the
    URL carries neither; returns None (implicitly) on request/parse errors.
    """
    # url='https://u.jd.com/qq3OS8s'
    global location1
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 14_5_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Mobile/15E148 Safari/604.1'
        }
        async with httpx.AsyncClient(headers=headers, verify=False, timeout=30) as client:
            pro_res = await client.get(url=url)
        # pro_res = requests.get(url, headers=headers, verify=False).text
        # The landing page stashes the real target in: hrl='<url>';var ...
        f = re.findall(r'(?<=hrl=\').+?(?=\';var)', pro_res.text)[0]
        # async with httpx.AsyncClient(headers=headers, verify=False, allow_redirects=False) as Client:
        #     res=await Client.get(url=f)
        #     location1=res.headers['location']
        # NOTE(review): synchronous requests call inside an async function
        # blocks the event loop; the commented httpx variant above is the
        # non-blocking equivalent.
        location1 = requests.get(url=f, headers=headers, verify=False, allow_redirects=False).headers['location']
        # print(location1)
        # Matches the value of either shopId=... or venderId=... in the URL.
        Id = re.findall(r'(?<=Id=).+?(?=&)', location1)
        try:
            if 'shopId' in location1:
                #print('shopId=' + Id[0])
                return (1, Id[0])
            elif 'venderId' in location1:
                #print('verid=' + Id[0])
                return (2, Id[0])
            else:
                #print('url err-getid')
                return (0, 0)
        except Exception as e:
            print(e)
    except Exception as e:
        print('网址错误!-getid')
    # print(Id)
async def send_tg(chat_id, client, messages, m):
    """Send *messages* to *chat_id* through the telethon *client*.

    m == 0 sends text, m == 1 sends a file; any other value only prints
    an error.
    """
    target = await client.get_entity(chat_id)
    if m == 0:
        await client.send_message(entity=target, message=messages)
        return
    if m == 1:
        await client.send_file(entity=target, file=messages)
        return
    print('发送错误')
async def optc(aus):
    """Decode a JD share-token ("口令") via the api.jds.codes service.

    Posts the token, then extracts the jump URL, title, activity host/id
    from the response and returns a formatted summary string.
    Returns the fixed failure string on any exception; NOTE(review): a
    non-200 status falls through and implicitly returns None.
    """
    try:
        url = 'https://api.jds.codes/jCommand'
        data = {"code": f"{aus}"}
        result = requests.post(url=url, json=data)
        if result.status_code == 200:
            jumpurl = result.json()['data']['jumpUrl']
            title = result.json()['data']['title']
            # Host portion up to the first '&'.
            compile1 = re.compile('(?<=https:\/\/).+?(?=&)')
            url1 = re.findall(compile1, jumpurl)[0]
            # activityId=<id>& value.
            compile2 = re.compile('(?<=activityId=).+?(?=&)')
            id1 = re.findall(compile2, jumpurl)[0]
            # Bare hostname up to the first '/'.
            compile3 = re.compile('(?<=https:\/\/).+?(?=\/)')
            url2 = re.findall(compile3, jumpurl)[0]
            msg = f'原始url:{jumpurl}\n标题:{title}\n活动地址:{url1}\nid:{id1}\nurl:{url2}'
            #print(msg)
            return msg
    except:
        return '裂开了,看不懂你说的........'
@client.on(events.NewMessage(incoming=True, chats=[-1001175133767]))
@client.on(events.NewMessage(incoming=True, chats=[-1001461096991]))
#@client.on(events.NewMessage())
async def my_event_handler(event):
    """Dispatch on incoming Telegram messages from the two watched chats.

    Three patterns are handled:
      * u.jd.com short links   -> resolve id, try to join the membership,
                                  reply with the result;
      * api.m.jd.com links     -> "follow shop" request per account;
      * '集卡#...' card tokens -> decode via optc() and forward to a fixed
                                  group link.
    """
    # print('1')
    sender = event.message.chat_id
    regex1 = re.compile(r"(https://u.jd.com/.*)", re.M)
    open_url1 = re.findall(regex1, event.message.text)
    if len(open_url1):
        # if '入会' in event.raw_text:
        for j_url in open_url1:
            # print(j_url)
            # print(event.message.text)
            activecode, Id = await get_id(j_url)
            res = await jd_main(activecode, Id)
            await send_tg(sender, client, res, 0)
        # else:
        #     print('等待关注程序开发!')
    regex2 = re.compile(r"(https://api.m.jd.com/.*)", re.M)
    open_url2 = re.findall(regex2, event.message.text)
    if len(open_url2):
        for j_url in open_url2:
            # Strip a trailing ')' left over from markdown-style links.
            j_url = j_url.replace(')', '')
            # print(j_url)
            await guanzhu(j_url)
    regex3 = re.compile(r"(集卡#.*)", re.M)
    # NOTE(review): unguarded [0] raises IndexError for every message that
    # does not contain '集卡' — this handler has no try/except around it.
    open_url3 = re.findall(regex3, event.message.text)[0]
    if len(open_url3):
        mes = open_url3.split('#')
        if len(mes) == 2:
            mes = f'#{mes[-1]}'
            msg = await optc(mes)
            # print(msg)
            # await send_tg(sender, client, msg, 0)
            # Redirect the decoded token to a fixed group instead of the
            # originating chat.
            sender = 'https://t.me/joinchat/2Gkyl0qS4vNiNjZl'
            await send_tg(sender, client, msg, 0)
        else:
            msg = '丢,别瞎搞'
            await send_tg(sender, client, msg, 0)
if __name__ == "__main__":
with client:
# client.loop.run_until_complete(main())
client.loop.run_forever()
| true | true |
1c3f10bc81bda7a0b5af1626ea1a5161acb6d1d9 | 537 | py | Python | adv_train/model/mnist_net.py | busycalibrating/Adversarial-Training | e1fe4061f72e1379d9920b02c1cc281e1be2606f | [
"MIT"
] | null | null | null | adv_train/model/mnist_net.py | busycalibrating/Adversarial-Training | e1fe4061f72e1379d9920b02c1cc281e1be2606f | [
"MIT"
] | null | null | null | adv_train/model/mnist_net.py | busycalibrating/Adversarial-Training | e1fe4061f72e1379d9920b02c1cc281e1be2606f | [
"MIT"
] | 1 | 2022-01-31T06:14:41.000Z | 2022-01-31T06:14:41.000Z | import torch.nn as nn
from adv_train.utils import Flatten
def build_model_mnist(device=None):
model = nn.Sequential(
nn.Conv2d(1, 32, 3, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 3, padding=1, stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, 3, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, padding=1, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(7 * 7 * 64, 100),
nn.ReLU(),
nn.Linear(100, 10),
)
model = model.to(device)
return model
| 23.347826 | 50 | 0.532588 | import torch.nn as nn
from adv_train.utils import Flatten
def build_model_mnist(device=None):
model = nn.Sequential(
nn.Conv2d(1, 32, 3, padding=1),
nn.ReLU(),
nn.Conv2d(32, 32, 3, padding=1, stride=2),
nn.ReLU(),
nn.Conv2d(32, 64, 3, padding=1),
nn.ReLU(),
nn.Conv2d(64, 64, 3, padding=1, stride=2),
nn.ReLU(),
Flatten(),
nn.Linear(7 * 7 * 64, 100),
nn.ReLU(),
nn.Linear(100, 10),
)
model = model.to(device)
return model
| true | true |
1c3f112efb580d4f1117769784b589751471e3b0 | 16,989 | py | Python | mujoco/setup4/main_gailfo.py | EvieQ01/Learning-Feasibility-Different-Dynamics | 73786b11137b8ba9840d00ec4d258c1296b0a595 | [
"MIT"
] | null | null | null | mujoco/setup4/main_gailfo.py | EvieQ01/Learning-Feasibility-Different-Dynamics | 73786b11137b8ba9840d00ec4d258c1296b0a595 | [
"MIT"
] | null | null | null | mujoco/setup4/main_gailfo.py | EvieQ01/Learning-Feasibility-Different-Dynamics | 73786b11137b8ba9840d00ec4d258c1296b0a595 | [
"MIT"
] | null | null | null | import argparse
from itertools import count
import gym
import gym.spaces
import scipy.optimize
import numpy as np
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from models.old_models import *
from replay_memory import Memory
from running_state import ZFilter
from torch.autograd import Variable
from trpo import trpo_step
from utils import *
from loss import *
import time
import swimmer
import walker
import halfcheetah
import pickle
torch.utils.backcompat.broadcast_warning.enabled = True
torch.utils.backcompat.keepdim_warning.enabled = True
torch.set_default_tensor_type('torch.DoubleTensor')
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--gamma', type=float, default=0.995, metavar='G',
help='discount factor (default: 0.995)')
parser.add_argument('--env-name', type=str, default="Reacher-v1", metavar='G',
help='name of the environment to run')
parser.add_argument('--tau', type=float, default=0.97, metavar='G',
help='gae (default: 0.97)')
parser.add_argument('--l2-reg', type=float, default=1e-3, metavar='G',
help='l2 regularization regression (default: 1e-3)')
parser.add_argument('--max-kl', type=float, default=1e-2, metavar='G',
help='max kl value (default: 1e-2)')
parser.add_argument('--damping', type=float, default=1e-1, metavar='G',
help='damping (default: 1e-1)')
parser.add_argument('--seed', type=int, default=1111, metavar='N',
help='random seed (default: 1111')
parser.add_argument('--batch-size', type=int, default=5000, metavar='N',
help='size of a single batch')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
help='interval between training status logs (default: 10)')
parser.add_argument('--eval-interval', type=int, default=1, metavar='N',
help='interval between training status logs (default: 10)')
parser.add_argument('--num-epochs', type=int, default=500, metavar='N',
help='number of epochs to train an expert')
parser.add_argument('--hidden-dim', type=int, default=64, metavar='H',
help='the size of hidden layers')
parser.add_argument('--lr', type=float, default=1e-3, metavar='L',
help='learning rate')
parser.add_argument('--vf-iters', type=int, default=30, metavar='V',
help='number of iterations of value function optimization iterations per each policy optimization step')
parser.add_argument('--vf-lr', type=float, default=3e-4, metavar='V',
help='learning rate of value network')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--xml', default=None, help='the xml configuration file')
parser.add_argument('--demo_files', nargs='+', help='the environment used for test')
parser.add_argument('--ratios', nargs='+', type=float, help='the ratio of demos to load')
parser.add_argument('--eval_epochs', type=int, default=10, help='the epochs for evaluation')
parser.add_argument('--save_path', help='the path to save model')
parser.add_argument('--feasibility_model', default=None, help='the path to the feasibility model')
parser.add_argument('--mode', help='the mode of feasibility')
parser.add_argument('--discount', type=float, default=0.9, help='the discount factor')
parser.add_argument('--distance_normalizer', type=float, default=5., help='the normalization factor for the distance')
args = parser.parse_args()
if args.seed == 1111:
log_file = open('log/'+args.save_path.split('/')[-1].split('.pth')[0]+'.txt', 'w')
save_path = args.save_path
else:
log_file = open('log/'+args.save_path.split('/')[-1].split('.pth')[0]+'_seed_{}.txt'.format(args.seed), 'w')
save_path = args.save_path.replace('.pth', '_seed_{}.pth'.format(args.seed))
env = gym.make(args.env_name, xml_file=args.xml, exclude_current_positions_from_observation=False)
f_env = gym.make(args.env_name, xml_file=args.xml, exclude_current_positions_from_observation=False)
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
def load_demos(demo_files, ratios):
    """Load pickled demonstration files and build (state, next_state) pairs.

    Args:
        demo_files: list of pickle paths, each holding a dict with 'obs' /
            'next_obs' (and 'init_obs' for InvertedDoublePendulum).
        ratios: per-file fraction of trajectories to keep.
    Returns:
        state_files: per-file arrays of concatenated [state, next_state] pairs
            (first two state dims dropped for Ant environments);
        trajs: list of per-trajectory state arrays;
        pair_traj_id: flat array mapping every pair to its trajectory index;
        traj_traj_id: array mapping every trajectory to its source file index;
        init_obs: initial observations (only populated for InvertedDoublePendulum).
    """
    state_files = []
    trajs = []
    traj_traj_id = []
    traj_id = 0
    pair_traj_id = []
    init_obs = []
    for i in range(len(demo_files)):
        state_pairs = []
        demo_file = demo_files[i]
        # SECURITY NOTE: pickle.load executes arbitrary code from the file —
        # only load trusted demo files.
        # Fix: the original `pickle.load(open(path, 'rb'))` leaked the file
        # handle; the context manager closes it deterministically.
        with open(demo_file, 'rb') as fh:
            raw_demos = pickle.load(fh)
        use_num = int(len(raw_demos['obs'])*ratios[i])
        current_state = raw_demos['obs'][0:use_num]
        next_state = raw_demos['next_obs'][0:use_num]
        trajs += [np.array(traj) for traj in current_state]
        if 'InvertedDoublePendulum' in str(type(env.env)):
            init_obs += raw_demos['init_obs']
        traj_traj_id += [i]*len(current_state)
        for j in range(len(current_state)):
            if 'Ant' in args.env_name:
                # Ant: drop the two global-position dims from both halves.
                state_pairs.append(np.concatenate([np.array(current_state[j])[:,2:], np.array(next_state[j])[:,2:]], axis=1))
                pair_traj_id.append(np.array([traj_id]*np.array(current_state[j]).shape[0]))
            else:
                state_pairs.append(np.concatenate([np.array(current_state[j]), np.array(next_state[j])], axis=1))
                pair_traj_id.append(np.array([traj_id]*np.array(current_state[j]).shape[0]))
            traj_id += 1
        state_files.append(np.concatenate(state_pairs, axis=0))
    return state_files, trajs, np.concatenate(pair_traj_id, axis=0), np.array(traj_traj_id), init_obs
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
def compute_feasibility_pair(expert_trajs, models, f_env):
    """Score each expert (state, next_state) pair by how well the learned
    inverse-dynamics policy can reproduce the transition in f_env.

    For every pair, the per-demo policy proposes an action from the state
    half (columns 2:num_inputs); f_env is teleported to the state and
    stepped; the L2 gap between predicted and recorded next state becomes
    exp(-distance/3) feasibility in (0, 1].
    """
    all_distance = []
    for index in range(len(expert_trajs)):
        expert_traj = expert_trajs[index]
        model = models[index]
        batch_size = 64
        batch_num = (expert_traj.shape[0]-1)//batch_size + 1
        with torch.no_grad():
            for i in range(batch_num):
                f_env.reset()
                # Policy input: the state half minus the first two columns
                # (global position dims, cf. the Ant handling in load_demos).
                action_mean, _, action_std = model(torch.from_numpy(expert_traj[i*batch_size:(i+1)*batch_size, 2:num_inputs]))
                action = torch.normal(action_mean, action_std).cpu().numpy()
                next_states = []
                for j in range(action_mean.shape[0]):
                    # Teleport the sim to the recorded state, then roll one step.
                    f_env.set_observation(expert_traj[i*batch_size+j])
                    next_state, _, _, _ = f_env.step(action[j])
                    next_states.append(next_state)
                next_states = np.array(next_states)
                distance = np.linalg.norm(expert_traj[i*batch_size:(i+1)*batch_size, num_inputs:] - next_states, ord=2, axis=1)
                all_distance.append(distance)
    all_distance = np.concatenate(all_distance, axis=0)
    # NOTE(review): divisor 3. is hard-coded here, unlike the traj variant
    # which uses args.distance_normalizer — confirm this is intentional.
    feasibility = np.exp(-all_distance/3.)
    return feasibility
def compute_feasibility_traj(expert_trajs, traj_traj_id, models, f_env, init_obs):
    """Score whole expert trajectories by rolling the learned policy forward.

    Starting from each trajectory's first state, the per-source policy is
    unrolled (deterministically, using the action mean) and the discounted
    sum of per-step L2 deviations from the recorded states is accumulated.
    Distances are shifted/scaled, clipped at 50, and mapped through exp(-d)
    to a feasibility weight.
    """
    all_distance = []
    for index in range(len(expert_trajs)):
        # NOTE(review): hard-coded remap — trajectories 4+ are folded onto
        # indices 2/3 (index % 2 + 2), presumably tied to a fixed 4-demo
        # setup; verify before reusing with other demo counts.
        if index >= 4:
            index = index % 2 + 2
        all_distance.append([])
        expert_traj = expert_trajs[index]
        model = models[traj_traj_id[index]]
        with torch.no_grad():
            f_env.reset()
            f_env.set_observation(expert_traj[0])
            state0 = expert_traj[0]
            state = expert_traj[0]
            for j in range(expert_traj.shape[0]-1):
                # Policy is conditioned on [current state, initial state].
                action_mean, _, action_std = model(torch.from_numpy(np.concatenate([state, state0], axis=0)).unsqueeze(0))
                action = action_mean.cpu().numpy()
                next_state, _, _, _ = f_env.step(action)
                state = next_state
                # Discounted per-step deviation from the recorded trajectory.
                all_distance[-1].append(np.linalg.norm(expert_traj[j+1] - next_state, ord=2, axis=0)*(args.discount**j))
            all_distance[-1] = np.sum(all_distance[-1])
    all_distance = np.array(all_distance)
    # Shift so the smallest distance maps to 0, then normalize and clip to
    # keep exp(-d) from underflowing.
    all_distance = (all_distance + np.max(-all_distance))/args.distance_normalizer
    all_distance[all_distance>50] = 50.
    feasibility = np.exp(-all_distance)
    return feasibility
if args.feasibility_model is not None:
if args.mode == 'pair':
expert_pairs, _, _, _ = load_demos(args.demo_files, args.ratios)
elif args.mode == 'traj':
expert_pairs, expert_trajs, pair_traj_id, traj_traj_id, init_obs = load_demos(args.demo_files, args.ratios)
feasibility_models = [Policy(num_inputs*2, num_actions, args.hidden_dim) for i in range(len(expert_pairs))]
load_dict = torch.load(args.feasibility_model)
for i in range(min(len(expert_pairs), 4)):
feasibility_models[i].load_state_dict(load_dict['policy_'+str(i)])
if args.mode == 'pair':
feasibility = compute_feasibility_pair(expert_pairs, feasibility_models, f_env)
elif args.mode == 'traj':
feasibility_traj = compute_feasibility_traj(expert_trajs, traj_traj_id, feasibility_models, f_env, init_obs)
feasibility = feasibility_traj[pair_traj_id]
else:
expert_pairs, _, _, _, _ = load_demos(args.demo_files, args.ratios)
feasibility = np.ones(sum([expert_traj.shape[0] for expert_traj in expert_pairs]))
expert_traj = np.concatenate(expert_pairs, axis=0)
policy_net = Policy(num_inputs, num_actions, args.hidden_dim)
value_net = Value(num_inputs, args.hidden_dim).to(device)
discriminator = Discriminator(num_inputs + num_inputs, args.hidden_dim).to(device)
disc_criterion = nn.BCEWithLogitsLoss()
value_criterion = nn.MSELoss()
disc_optimizer = optim.Adam(discriminator.parameters(), args.lr)
value_optimizer = optim.Adam(value_net.parameters(), args.vf_lr)
def select_action(state):
    """Sample a stochastic action from the current policy for one state.

    *state* is a numpy observation; a batch dimension is added before the
    forward pass and a sample is drawn from the policy's Gaussian.
    """
    obs = torch.from_numpy(state).unsqueeze(0)  # (obs_dim,) -> (1, obs_dim)
    mean, _, std = policy_net(Variable(obs))
    return torch.normal(mean, std)
def update_params(batch):
    """One GAIL policy/value update from a sampled transition batch.

    Computes discounted returns and GAE advantages backwards over the batch
    (masks zero out bootstrap terms at episode ends), fits the value network
    for args.vf_iters minibatch steps, then takes one TRPO step on the
    policy using the normalized advantages.
    """
    rewards = torch.Tensor(batch.reward).to(device)
    masks = torch.Tensor(batch.mask).to(device)
    actions = torch.Tensor(np.concatenate(batch.action, 0)).to(device)
    states = torch.Tensor(batch.state).to(device)
    values = value_net(Variable(states))
    returns = torch.Tensor(actions.size(0),1).to(device)
    deltas = torch.Tensor(actions.size(0),1).to(device)
    advantages = torch.Tensor(actions.size(0),1).to(device)
    prev_return = 0
    prev_value = 0
    prev_advantage = 0
    # Backward pass: discounted return, TD residual, and GAE accumulation.
    for i in reversed(range(rewards.size(0))):
        returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
        deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]
        advantages[i] = deltas[i] + args.gamma * args.tau * prev_advantage * masks[i]
        prev_return = returns[i, 0]
        prev_value = values.data[i, 0]
        prev_advantage = advantages[i, 0]
    targets = Variable(returns)
    # Value-function regression on shuffled minibatches of the batch.
    batch_size = math.ceil(states.shape[0] / args.vf_iters)
    idx = np.random.permutation(states.shape[0])
    for i in range(args.vf_iters):
        smp_idx = idx[i * batch_size: (i + 1) * batch_size]
        smp_states = states[smp_idx, :]
        smp_targets = targets[smp_idx, :]
        value_optimizer.zero_grad()
        value_loss = value_criterion(value_net(Variable(smp_states)), smp_targets)
        value_loss.backward()
        value_optimizer.step()
    # Standardize advantages before the policy step.
    advantages = (advantages - advantages.mean()) / advantages.std()
    # Log-prob of the taken actions under the pre-update policy (policy_net
    # lives on CPU, hence the .cpu() conversions).
    action_means, action_log_stds, action_stds = policy_net(Variable(states.cpu()))
    fixed_log_prob = normal_log_density(Variable(actions.cpu()), action_means, action_log_stds, action_stds).data.clone()
    # Surrogate loss for TRPO; `volatile` is a legacy, unused parameter.
    def get_loss(volatile=None):
        action_means, action_log_stds, action_stds = policy_net(Variable(states.cpu()))
        log_prob = normal_log_density(Variable(actions.cpu()), action_means, action_log_stds, action_stds)
        action_loss = -Variable(advantages.cpu()) * torch.exp(log_prob - Variable(fixed_log_prob))
        return action_loss.mean()
    # Mean KL between the current policy and a frozen copy of itself
    # (gradients flow only through the "1" side).
    def get_kl():
        mean1, log_std1, std1 = policy_net(Variable(states.cpu()))
        mean0 = Variable(mean1.data)
        log_std0 = Variable(log_std1.data)
        std0 = Variable(std1.data)
        kl = log_std1 - log_std0 + (std0.pow(2) + (mean0 - mean1).pow(2)) / (2.0 * std1.pow(2)) - 0.5
        return kl.sum(1, keepdim=True)
    trpo_step(policy_net, get_loss, get_kl, args.max_kl, args.damping)
def expert_reward(states, actions):
    """GAIL reward: -log sigmoid of the discriminator on (state, next) pairs.

    Note: despite the parameter name, the caller passes next-states as
    *actions* — the discriminator scores state-transition pairs.
    Returns a numpy array of per-sample rewards.
    """
    obs = np.concatenate(states)
    nxt = np.concatenate(actions)
    pairs = np.concatenate([obs, nxt], 1)
    with torch.no_grad():
        logits = discriminator(torch.Tensor(pairs).to(device))
        return -F.logsigmoid(logits).cpu().detach().numpy()
def evaluate(episode, best_reward, log_file):
    """Run args.eval_epochs greedy-ish rollouts with a fixed seed, log the
    average reward, and checkpoint when it beats *best_reward*.

    NOTE(review): `best_reward` is only rebound locally — the caller's
    variable never changes (the function returns None), so the caller keeps
    passing the initial -1000000 and the checkpoint is overwritten on
    nearly every call, possibly by worse models.
    """
    # Fixed seed so evaluations are comparable across episodes.
    env.seed(1234)
    with torch.no_grad():
        avg_reward = 0.0
        for _ in range(args.eval_epochs):
            state = env.reset()
            for _ in range(10000): # Don't infinite loop while learning
                state = torch.from_numpy(state).unsqueeze(0)
                # Use the policy mean (no sampling) for evaluation.
                action, _, _ = policy_net(Variable(state))
                action = action.data[0].numpy()
                next_state, reward, done, _ = env.step(action)
                avg_reward += reward
                if done:
                    break
                state = next_state
        print('Evaluation: Episode ', episode, ' Reward ', avg_reward / args.eval_epochs)
        log_file.write('Evaluation: Episode '+str(episode)+' Reward '+str(avg_reward / args.eval_epochs)+'\n')
        log_file.flush()
        if best_reward < avg_reward / args.eval_epochs:
            best_reward = avg_reward / args.eval_epochs
            # Checkpoint policy, value net, discriminator and its optimizer.
            torch.save({'policy':policy_net.state_dict(), 'value':value_net.state_dict(), 'discriminator':discriminator.state_dict(), 'disc_optimizer':disc_optimizer.state_dict(), 'rew':best_reward}, save_path)
all_idx = np.arange(0, expert_traj.shape[0])
p_idx = np.random.permutation(expert_traj.shape[0])
expert_traj = expert_traj[p_idx, :]
feasibility = feasibility[p_idx]
feasibility = feasibility / (np.sum(feasibility)+0.0000001)
feasibility[feasibility<(1./feasibility.shape[0])/10000000.] = 0
feasibility[0] = 1-np.sum(feasibility[1:])
print(feasibility[0:10])
best_reward = -1000000
for i_episode in range(args.num_epochs):
env.seed(int(time.time()))
memory = Memory()
num_steps = 0
num_episodes = 0
reward_batch = []
states = []
actions = []
next_states = []
mem_actions = []
mem_mask = []
mem_next = []
while num_steps < args.batch_size:
state = env.reset()
reward_sum = 0
for t in range(10000): # Don't infinite loop while learning
action = select_action(state)
action = action.data[0].numpy()
states.append(np.array([state]))
actions.append(np.array([action]))
next_state, true_reward, done, _ = env.step(action)
next_states.append(np.array([next_state]))
reward_sum += true_reward
mask = 1
if done:
mask = 0
mem_mask.append(mask)
mem_next.append(next_state)
if done:
break
state = next_state
num_steps += (t-1)
num_episodes += 1
reward_batch.append(reward_sum)
if i_episode % args.eval_interval == 0:
evaluate(i_episode, best_reward, log_file)
rewards = expert_reward(states, next_states)
for idx in range(len(states)):
memory.push(states[idx][0], actions[idx], mem_mask[idx], mem_next[idx], \
rewards[idx][0])
batch = memory.sample()
update_params(batch)
### update discriminator ###
next_states = torch.from_numpy(np.concatenate(next_states))
states = torch.from_numpy(np.concatenate(states))
labeled_num = min(expert_traj.shape[0], num_steps)
idx = np.random.choice(all_idx, labeled_num, p=feasibility.reshape(-1))
expert_state_action = expert_traj[idx, :]
expert_state_action = torch.Tensor(expert_state_action).to(device)
real = discriminator(expert_state_action)
state_action = torch.cat((states, next_states), 1).to(device)
fake = discriminator(state_action)
disc_optimizer.zero_grad()
disc_loss = disc_criterion(fake, torch.ones(fake.size(0), 1).to(device)) + \
disc_criterion(real, torch.zeros(real.size(0), 1).to(device))
disc_loss.backward()
disc_optimizer.step()
############################
if i_episode % args.log_interval == 0:
print('Episode {}\tAverage reward: {:.2f}\tMax reward: {:.2f}\tLoss (disc): {:.2f}'.format(i_episode, np.mean(reward_batch), max(reward_batch), disc_loss.item()))
log_file.write('Episode {}\tAverage reward: {:.2f}\tMax reward: {:.2f}\tLoss (disc): {:.2f}\n'.format(i_episode, np.mean(reward_batch), max(reward_batch), disc_loss.item()))
log_file.flush()
| 42.901515 | 206 | 0.660663 | import argparse
from itertools import count
import gym
import gym.spaces
import scipy.optimize
import numpy as np
import math
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from models.old_models import *
from replay_memory import Memory
from running_state import ZFilter
from torch.autograd import Variable
from trpo import trpo_step
from utils import *
from loss import *
import time
import swimmer
import walker
import halfcheetah
import pickle
torch.utils.backcompat.broadcast_warning.enabled = True
torch.utils.backcompat.keepdim_warning.enabled = True
torch.set_default_tensor_type('torch.DoubleTensor')
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
parser = argparse.ArgumentParser(description='PyTorch actor-critic example')
parser.add_argument('--gamma', type=float, default=0.995, metavar='G',
help='discount factor (default: 0.995)')
parser.add_argument('--env-name', type=str, default="Reacher-v1", metavar='G',
help='name of the environment to run')
parser.add_argument('--tau', type=float, default=0.97, metavar='G',
help='gae (default: 0.97)')
parser.add_argument('--l2-reg', type=float, default=1e-3, metavar='G',
help='l2 regularization regression (default: 1e-3)')
parser.add_argument('--max-kl', type=float, default=1e-2, metavar='G',
help='max kl value (default: 1e-2)')
parser.add_argument('--damping', type=float, default=1e-1, metavar='G',
help='damping (default: 1e-1)')
parser.add_argument('--seed', type=int, default=1111, metavar='N',
help='random seed (default: 1111')
parser.add_argument('--batch-size', type=int, default=5000, metavar='N',
help='size of a single batch')
parser.add_argument('--log-interval', type=int, default=1, metavar='N',
help='interval between training status logs (default: 10)')
parser.add_argument('--eval-interval', type=int, default=1, metavar='N',
help='interval between training status logs (default: 10)')
parser.add_argument('--num-epochs', type=int, default=500, metavar='N',
help='number of epochs to train an expert')
parser.add_argument('--hidden-dim', type=int, default=64, metavar='H',
help='the size of hidden layers')
parser.add_argument('--lr', type=float, default=1e-3, metavar='L',
help='learning rate')
parser.add_argument('--vf-iters', type=int, default=30, metavar='V',
help='number of iterations of value function optimization iterations per each policy optimization step')
parser.add_argument('--vf-lr', type=float, default=3e-4, metavar='V',
help='learning rate of value network')
parser.add_argument('--render', action='store_true',
help='render the environment')
parser.add_argument('--xml', default=None, help='the xml configuration file')
parser.add_argument('--demo_files', nargs='+', help='the environment used for test')
parser.add_argument('--ratios', nargs='+', type=float, help='the ratio of demos to load')
parser.add_argument('--eval_epochs', type=int, default=10, help='the epochs for evaluation')
parser.add_argument('--save_path', help='the path to save model')
parser.add_argument('--feasibility_model', default=None, help='the path to the feasibility model')
parser.add_argument('--mode', help='the mode of feasibility')
parser.add_argument('--discount', type=float, default=0.9, help='the discount factor')
parser.add_argument('--distance_normalizer', type=float, default=5., help='the normalization factor for the distance')
args = parser.parse_args()
if args.seed == 1111:
log_file = open('log/'+args.save_path.split('/')[-1].split('.pth')[0]+'.txt', 'w')
save_path = args.save_path
else:
log_file = open('log/'+args.save_path.split('/')[-1].split('.pth')[0]+'_seed_{}.txt'.format(args.seed), 'w')
save_path = args.save_path.replace('.pth', '_seed_{}.pth'.format(args.seed))
env = gym.make(args.env_name, xml_file=args.xml, exclude_current_positions_from_observation=False)
f_env = gym.make(args.env_name, xml_file=args.xml, exclude_current_positions_from_observation=False)
num_inputs = env.observation_space.shape[0]
num_actions = env.action_space.shape[0]
def load_demos(demo_files, ratios):
    """Load demonstration pickles and build (state, next_state) pair arrays.

    For each demo file only the first ``len(obs) * ratios[i]`` trajectories
    are used.  Returns a 5-tuple:
      state_files  -- one (N_i, 2*D) array per demo file of concatenated
                      state/next-state pairs (Ant drops the first 2 dims),
      trajs        -- every used trajectory as an individual array,
      pair_traj_id -- per-pair id of the trajectory it came from,
      traj_traj_id -- per-trajectory index of its source demo file,
      init_obs     -- initial observations (only collected for
                      InvertedDoublePendulum environments).
    """
    state_files = []
    trajs = []
    traj_traj_id = []
    # Global trajectory counter running across all demo files.
    traj_id = 0
    pair_traj_id = []
    init_obs = []
    for i in range(len(demo_files)):
        state_pairs = []
        demo_file = demo_files[i]
        raw_demos = pickle.load(open(demo_file, 'rb'))
        # Keep only the requested fraction of trajectories from this file.
        use_num = int(len(raw_demos['obs'])*ratios[i])
        current_state = raw_demos['obs'][0:use_num]
        next_state = raw_demos['next_obs'][0:use_num]
        trajs += [np.array(traj) for traj in current_state]
        if 'InvertedDoublePendulum' in str(type(env.env)):
            init_obs += raw_demos['init_obs']
        traj_traj_id += [i]*len(current_state)
        for j in range(len(current_state)):
            if 'Ant' in args.env_name:
                # Ant: drop the first two observation dims -- presumably the
                # x/y position columns; TODO confirm against the env config.
                state_pairs.append(np.concatenate([np.array(current_state[j])[:,2:], np.array(next_state[j])[:,2:]], axis=1))
                pair_traj_id.append(np.array([traj_id]*np.array(current_state[j]).shape[0]))
            else:
                state_pairs.append(np.concatenate([np.array(current_state[j]), np.array(next_state[j])], axis=1))
                pair_traj_id.append(np.array([traj_id]*np.array(current_state[j]).shape[0]))
            traj_id += 1
        state_files.append(np.concatenate(state_pairs, axis=0))
    return state_files, trajs, np.concatenate(pair_traj_id, axis=0), np.array(traj_traj_id), init_obs
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
def compute_feasibility_pair(expert_trajs, models, f_env):
    """Weight each expert (state, next_state) pair by its feasibility.

    For every pair the matching model proposes a (sampled) action from the
    current state; ``f_env`` is forced into that state, stepped once, and
    the L2 distance between the simulated and demonstrated next states is
    mapped to a weight exp(-d/3).  Returns one weight per pair,
    concatenated over all trajectories.
    """
    all_distance = []
    for index in range(len(expert_trajs)):
        expert_traj = expert_trajs[index]
        model = models[index]
        batch_size = 64
        # Ceiling division so a final partial batch is still processed.
        batch_num = (expert_traj.shape[0]-1)//batch_size + 1
        with torch.no_grad():
            for i in range(batch_num):
                f_env.reset()
                # Pair layout: columns 2:num_inputs hold the current state
                # fed to the policy (first two dims are skipped).
                action_mean, _, action_std = model(torch.from_numpy(expert_traj[i*batch_size:(i+1)*batch_size, 2:num_inputs]))
                action = torch.normal(action_mean, action_std).cpu().numpy()
                next_states = []
                for j in range(action_mean.shape[0]):
                    # Teleport the simulator to the demonstrated state, then
                    # apply the proposed action.
                    f_env.set_observation(expert_traj[i*batch_size+j])
                    next_state, _, _, _ = f_env.step(action[j])
                    next_states.append(next_state)
                next_states = np.array(next_states)
                # Columns num_inputs: hold the demonstrated next state.
                distance = np.linalg.norm(expert_traj[i*batch_size:(i+1)*batch_size, num_inputs:] - next_states, ord=2, axis=1)
                all_distance.append(distance)
    all_distance = np.concatenate(all_distance, axis=0)
    feasibility = np.exp(-all_distance/3.)
    return feasibility
def compute_feasibility_traj(expert_trajs, traj_traj_id, models, f_env, init_obs):
    """Weight whole expert trajectories by replaying the feasibility policy.

    Starting from each trajectory's first state, the policy's mean action is
    rolled out step by step in ``f_env`` and the discounted L2 deviation
    from the demonstrated states is accumulated, normalized, clipped and
    turned into a weight exp(-d).
    """
    all_distance = []
    for index in range(len(expert_trajs)):
        if index >= 4:
            # NOTE(review): trajectories past the first four are folded onto
            # indices 2/3 (index % 2 + 2) -- looks like an experiment-specific
            # remapping of which expert trajectory gets replayed; confirm.
            index = index % 2 + 2
        all_distance.append([])
        expert_traj = expert_trajs[index]
        model = models[traj_traj_id[index]]
        with torch.no_grad():
            f_env.reset()
            f_env.set_observation(expert_traj[0])
            state0 = expert_traj[0]
            state = expert_traj[0]
            for j in range(expert_traj.shape[0]-1):
                # The policy conditions on the current state concatenated
                # with the trajectory's initial state.
                action_mean, _, action_std = model(torch.from_numpy(np.concatenate([state, state0], axis=0)).unsqueeze(0))
                action = action_mean.cpu().numpy()
                next_state, _, _, _ = f_env.step(action)
                state = next_state
                # Discounted per-step deviation from the demonstration.
                all_distance[-1].append(np.linalg.norm(expert_traj[j+1] - next_state, ord=2, axis=0)*(args.discount**j))
        all_distance[-1] = np.sum(all_distance[-1])
    all_distance = np.array(all_distance)
    # Shift so the smallest distance maps to the largest weight, then scale.
    all_distance = (all_distance + np.max(-all_distance))/args.distance_normalizer
    # Cap the exponent so exp(-d) does not underflow to exactly zero.
    all_distance[all_distance>50] = 50.
    feasibility = np.exp(-all_distance)
    return feasibility
if args.feasibility_model is not None:
if args.mode == 'pair':
expert_pairs, _, _, _ = load_demos(args.demo_files, args.ratios)
elif args.mode == 'traj':
expert_pairs, expert_trajs, pair_traj_id, traj_traj_id, init_obs = load_demos(args.demo_files, args.ratios)
feasibility_models = [Policy(num_inputs*2, num_actions, args.hidden_dim) for i in range(len(expert_pairs))]
load_dict = torch.load(args.feasibility_model)
for i in range(min(len(expert_pairs), 4)):
feasibility_models[i].load_state_dict(load_dict['policy_'+str(i)])
if args.mode == 'pair':
feasibility = compute_feasibility_pair(expert_pairs, feasibility_models, f_env)
elif args.mode == 'traj':
feasibility_traj = compute_feasibility_traj(expert_trajs, traj_traj_id, feasibility_models, f_env, init_obs)
feasibility = feasibility_traj[pair_traj_id]
else:
expert_pairs, _, _, _, _ = load_demos(args.demo_files, args.ratios)
feasibility = np.ones(sum([expert_traj.shape[0] for expert_traj in expert_pairs]))
expert_traj = np.concatenate(expert_pairs, axis=0)
policy_net = Policy(num_inputs, num_actions, args.hidden_dim)
value_net = Value(num_inputs, args.hidden_dim).to(device)
discriminator = Discriminator(num_inputs + num_inputs, args.hidden_dim).to(device)
disc_criterion = nn.BCEWithLogitsLoss()
value_criterion = nn.MSELoss()
disc_optimizer = optim.Adam(discriminator.parameters(), args.lr)
value_optimizer = optim.Adam(value_net.parameters(), args.vf_lr)
def select_action(state):
    """Sample a stochastic action from the current policy for *state*."""
    obs = Variable(torch.from_numpy(state).unsqueeze(0))
    mean, _, std = policy_net(obs)
    return torch.normal(mean, std)
def update_params(batch):
    """Run one TRPO update from a batch of transitions.

    Computes discounted returns and GAE(gamma, tau) advantages, fits the
    value network for ``args.vf_iters`` minibatch steps, then performs a
    trust-region policy step via ``trpo_step``.
    """
    rewards = torch.Tensor(batch.reward).to(device)
    # masks[i] == 0 marks the end of an episode (cuts the recursion below).
    masks = torch.Tensor(batch.mask).to(device)
    actions = torch.Tensor(np.concatenate(batch.action, 0)).to(device)
    states = torch.Tensor(batch.state).to(device)
    values = value_net(Variable(states))
    returns = torch.Tensor(actions.size(0),1).to(device)
    deltas = torch.Tensor(actions.size(0),1).to(device)
    advantages = torch.Tensor(actions.size(0),1).to(device)
    prev_return = 0
    prev_value = 0
    prev_advantage = 0
    # Backward sweep: discounted returns, TD residuals and GAE advantages.
    for i in reversed(range(rewards.size(0))):
        returns[i] = rewards[i] + args.gamma * prev_return * masks[i]
        deltas[i] = rewards[i] + args.gamma * prev_value * masks[i] - values.data[i]
        advantages[i] = deltas[i] + args.gamma * args.tau * prev_advantage * masks[i]
        prev_return = returns[i, 0]
        prev_value = values.data[i, 0]
        prev_advantage = advantages[i, 0]
    targets = Variable(returns)
    # Fit the value function on shuffled minibatches of the batch.
    batch_size = math.ceil(states.shape[0] / args.vf_iters)
    idx = np.random.permutation(states.shape[0])
    for i in range(args.vf_iters):
        smp_idx = idx[i * batch_size: (i + 1) * batch_size]
        smp_states = states[smp_idx, :]
        smp_targets = targets[smp_idx, :]
        value_optimizer.zero_grad()
        value_loss = value_criterion(value_net(Variable(smp_states)), smp_targets)
        value_loss.backward()
        value_optimizer.step()
    # Standardize advantages before the policy step.
    advantages = (advantages - advantages.mean()) / advantages.std()
    action_means, action_log_stds, action_stds = policy_net(Variable(states.cpu()))
    # Log-probs under the pre-update policy, detached as the baseline.
    fixed_log_prob = normal_log_density(Variable(actions.cpu()), action_means, action_log_stds, action_stds).data.clone()
    def get_loss(volatile=None):
        # Surrogate loss: importance-weighted advantage under the new policy.
        action_means, action_log_stds, action_stds = policy_net(Variable(states.cpu()))
        log_prob = normal_log_density(Variable(actions.cpu()), action_means, action_log_stds, action_stds)
        action_loss = -Variable(advantages.cpu()) * torch.exp(log_prob - Variable(fixed_log_prob))
        return action_loss.mean()
    def get_kl():
        # Per-sample KL(old || new) for a diagonal Gaussian policy.
        mean1, log_std1, std1 = policy_net(Variable(states.cpu()))
        mean0 = Variable(mean1.data)
        log_std0 = Variable(log_std1.data)
        std0 = Variable(std1.data)
        kl = log_std1 - log_std0 + (std0.pow(2) + (mean0 - mean1).pow(2)) / (2.0 * std1.pow(2)) - 0.5
        return kl.sum(1, keepdim=True)
    trpo_step(policy_net, get_loss, get_kl, args.max_kl, args.damping)
def expert_reward(states, actions):
    """Discriminator-based surrogate reward -log(sigmoid(D(.))) per sample.

    NOTE: despite the parameter name, the training loop passes next states
    as ``actions``, matching the state/next-state discriminator input.
    """
    pairs = np.concatenate([np.concatenate(states), np.concatenate(actions)], 1)
    with torch.no_grad():
        scores = discriminator(torch.Tensor(pairs).to(device))
        return -F.logsigmoid(scores).cpu().detach().numpy()
def evaluate(episode, best_reward, log_file):
    """Run ``args.eval_epochs`` rollouts on a fixed seed and checkpoint.

    The environment is re-seeded with a constant so successive evaluations
    are comparable.  When the mean episode reward beats *best_reward*, the
    policy/value/discriminator weights are saved to ``save_path``.

    Returns the (possibly updated) best mean reward.  NOTE(review): the
    original version discarded this value, so the caller's ``best_reward``
    never advanced and a later, worse model could overwrite the checkpoint;
    callers should assign the return value back.
    """
    env.seed(1234)
    with torch.no_grad():
        avg_reward = 0.0
        for _ in range(args.eval_epochs):
            state = env.reset()
            for _ in range(10000):
                state = torch.from_numpy(state).unsqueeze(0)
                # Use the policy mean as a deterministic evaluation action.
                action, _, _ = policy_net(Variable(state))
                action = action.data[0].numpy()
                next_state, reward, done, _ = env.step(action)
                avg_reward += reward
                if done:
                    break
                state = next_state
        print('Evaluation: Episode ', episode, ' Reward ', avg_reward / args.eval_epochs)
        log_file.write('Evaluation: Episode '+str(episode)+' Reward '+str(avg_reward / args.eval_epochs)+'\n')
        log_file.flush()
        if best_reward < avg_reward / args.eval_epochs:
            best_reward = avg_reward / args.eval_epochs
            torch.save({'policy':policy_net.state_dict(), 'value':value_net.state_dict(), 'discriminator':discriminator.state_dict(), 'disc_optimizer':disc_optimizer.state_dict(), 'rew':best_reward}, save_path)
    return best_reward
all_idx = np.arange(0, expert_traj.shape[0])
p_idx = np.random.permutation(expert_traj.shape[0])
expert_traj = expert_traj[p_idx, :]
feasibility = feasibility[p_idx]
feasibility = feasibility / (np.sum(feasibility)+0.0000001)
feasibility[feasibility<(1./feasibility.shape[0])/10000000.] = 0
feasibility[0] = 1-np.sum(feasibility[1:])
print(feasibility[0:10])
best_reward = -1000000
for i_episode in range(args.num_epochs):
env.seed(int(time.time()))
memory = Memory()
num_steps = 0
num_episodes = 0
reward_batch = []
states = []
actions = []
next_states = []
mem_actions = []
mem_mask = []
mem_next = []
while num_steps < args.batch_size:
state = env.reset()
reward_sum = 0
for t in range(10000): # Don't infinite loop while learning
action = select_action(state)
action = action.data[0].numpy()
states.append(np.array([state]))
actions.append(np.array([action]))
next_state, true_reward, done, _ = env.step(action)
next_states.append(np.array([next_state]))
reward_sum += true_reward
mask = 1
if done:
mask = 0
mem_mask.append(mask)
mem_next.append(next_state)
if done:
break
state = next_state
num_steps += (t-1)
num_episodes += 1
reward_batch.append(reward_sum)
if i_episode % args.eval_interval == 0:
evaluate(i_episode, best_reward, log_file)
rewards = expert_reward(states, next_states)
for idx in range(len(states)):
memory.push(states[idx][0], actions[idx], mem_mask[idx], mem_next[idx], \
rewards[idx][0])
batch = memory.sample()
update_params(batch)
tes))
states = torch.from_numpy(np.concatenate(states))
labeled_num = min(expert_traj.shape[0], num_steps)
idx = np.random.choice(all_idx, labeled_num, p=feasibility.reshape(-1))
expert_state_action = expert_traj[idx, :]
expert_state_action = torch.Tensor(expert_state_action).to(device)
real = discriminator(expert_state_action)
state_action = torch.cat((states, next_states), 1).to(device)
fake = discriminator(state_action)
disc_optimizer.zero_grad()
disc_loss = disc_criterion(fake, torch.ones(fake.size(0), 1).to(device)) + \
disc_criterion(real, torch.zeros(real.size(0), 1).to(device))
disc_loss.backward()
disc_optimizer.step()
disc_loss.item()))
log_file.flush()
| true | true |
1c3f115aa78e8cbc7c892e6d2132d4943dd4af80 | 1,527 | py | Python | projekt/backend/biljnevrste/biljnevrsteapp/views.py | toni4848/biljnevrste_repo | 8d48a75c67a0208ddad1be78284d653fb2303c94 | [
"MIT"
] | null | null | null | projekt/backend/biljnevrste/biljnevrsteapp/views.py | toni4848/biljnevrste_repo | 8d48a75c67a0208ddad1be78284d653fb2303c94 | [
"MIT"
] | 51 | 2019-04-01T14:56:31.000Z | 2022-03-21T00:35:42.000Z | projekt/backend/biljnevrste/biljnevrsteapp/views.py | toni4848/biljnevrste_repo | 8d48a75c67a0208ddad1be78284d653fb2303c94 | [
"MIT"
] | 14 | 2019-04-02T15:22:06.000Z | 2019-06-09T13:09:40.000Z | from rest_framework import viewsets
from .serializers import *
class UporabniDioViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for usable plant parts (UporabniDio)."""
    queryset = UporabniDio.objects.all()
    serializer_class = UporabniDioSerializer


class SlikaViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for images (Slika)."""
    queryset = Slika.objects.all()
    serializer_class = SlikaSerializer


class RodViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for genera (Rod)."""
    queryset = Rod.objects.all()
    serializer_class = RodSerializer


class SistematicarViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for systematists (Sistematicar)."""
    queryset = Sistematicar.objects.all()
    serializer_class = SistematicarSerializer


class BiljnaVrstaViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for plant species (BiljnaVrsta)."""
    queryset = BiljnaVrsta.objects.all()
    serializer_class = BiljnaVrstaSerializer


class PorodicaViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for plant families (Porodica)."""
    queryset = Porodica.objects.all()
    serializer_class = PorodicaSerializer


class PodvrstaViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for subspecies (Podvrsta)."""
    queryset = Podvrsta.objects.all()
    serializer_class = PodvrstaSerializer


class VarijetViewSet(viewsets.ModelViewSet):
    """CRUD API endpoint for varieties (Varijet)."""
    queryset = Varijet.objects.all()
    serializer_class = VarijetSerializer
| 18.39759 | 52 | 0.756385 | from rest_framework import viewsets
from .serializers import *
class UporabniDioViewSet(viewsets.ModelViewSet):
queryset = UporabniDio.objects.all()
serializer_class = UporabniDioSerializer
class SlikaViewSet(viewsets.ModelViewSet):
queryset = Slika.objects.all()
serializer_class = SlikaSerializer
class RodViewSet(viewsets.ModelViewSet):
queryset = Rod.objects.all()
serializer_class = RodSerializer
class SistematicarViewSet(viewsets.ModelViewSet):
queryset = Sistematicar.objects.all()
serializer_class = SistematicarSerializer
class BiljnaVrstaViewSet(viewsets.ModelViewSet):
queryset = BiljnaVrsta.objects.all()
serializer_class = BiljnaVrstaSerializer
class PorodicaViewSet(viewsets.ModelViewSet):
queryset = Porodica.objects.all()
serializer_class = PorodicaSerializer
class PodvrstaViewSet(viewsets.ModelViewSet):
queryset = Podvrsta.objects.all()
serializer_class = PodvrstaSerializer
class VarijetViewSet(viewsets.ModelViewSet):
queryset = Varijet.objects.all()
serializer_class = VarijetSerializer
| true | true |
1c3f115fb666122c3ba070c75129db276760345b | 4,123 | py | Python | app/update_scheduler/views.py | AndrewLester/schedule-updates | 37ea9df14f01f7b8e7850a883760d4a692724c83 | [
"MIT"
] | 6 | 2021-02-17T03:23:18.000Z | 2021-04-09T14:35:42.000Z | app/update_scheduler/views.py | AndrewLester/schedule-updates | 37ea9df14f01f7b8e7850a883760d4a692724c83 | [
"MIT"
] | 6 | 2021-03-10T04:04:40.000Z | 2021-12-17T08:13:45.000Z | app/update_scheduler/views.py | AndrewLester/update-scheduler | 37ea9df14f01f7b8e7850a883760d4a692724c83 | [
"MIT"
] | null | null | null | from datetime import date, datetime, timedelta
from flask.globals import current_app
from flask_login.utils import login_required
from isodate.duration import Duration
import pytz
from rq.job import Job
from rq.exceptions import NoSuchJobError
from app.update_scheduler.scheduler import schedule_update
from typing import NoReturn, Optional, Union
from app.update_scheduler.forms import UpdateForm
from app.utils import rest_endpoint
from app.exts import db
from flask.templating import render_template
from app.update_scheduler.models import Attachment, ScheduledJob, Update
from app.schoology.api import get_user_realms
from flask import jsonify, abort
from flask_login import current_user
from flask.blueprints import Blueprint
# Flask blueprint for the update scheduler; every route below is served
# under the /scheduler URL prefix.
blueprint = Blueprint(
    'update_scheduler',
    __name__,
    url_prefix='/scheduler',
    template_folder='../templates',
    static_folder='../bundle',
)
@blueprint.route('')
@login_required
def scheduler():
    """Render the scheduler single-page UI."""
    return render_template('scheduler.html')
@blueprint.route('/realms')
@login_required
def realms():
    """Return the logged-in user's Schoology realms as JSON."""
    realms = get_user_realms(current_user)  # type: ignore
    return jsonify(realms)
@rest_endpoint(
    blueprint=blueprint,
    route='/updates',
    model=Update,
    form=UpdateForm,
    methods={'GET', 'POST', 'PUT', 'DELETE'}
)
@login_required
def updates(form: UpdateForm) -> Union[Update, NoReturn]:
    """Create or edit an Update from validated form data.

    When ``form.id`` matches no existing Update a new one is created for
    the current user; otherwise the existing Update is modified in place
    (its pending scheduled job, if any, is cancelled first).  In both
    branches a new delivery job is scheduled when the form carries either
    an absolute ("scheduled for") or relative ("scheduled in") time.
    May abort with 400 via ``scheduled_formdata_to_time``.
    """
    update = Update.query.get(form.id.data)
    # Build Attachment rows from the submitted attachment dicts, if any.
    attachments = []
    if len(form.attachments.data) > 0:
        for attachment in form.attachments.data:
            attachments.append(Attachment(
                type=attachment['type'],
                title=attachment['title'],
                url=attachment['url'],
                image=attachment['image'],
                icon=attachment['icon'],
                summary=attachment['summary']
            ))
    if update is None:
        # Create path: brand-new Update owned by the current user.
        update = Update(
            realm_type=form.realm_type.data,
            realm_id=form.realm_id.data,
            body=form.body.data,
            user_id=current_user.id
        )
        if attachments:
            update.attachments = attachments
        if form.job.scheduled_for.data or form.job.scheduled_in.data:
            schedule_update(
                current_app.redis_queue,
                scheduled_formdata_to_time(
                    form.job.scheduled_for.data,
                    form.job.scheduled_in.data
                ),
                update
            )
    else:
        # Edit path: overwrite the stored fields with the form values.
        update.realm_type = form.realm_type.data
        update.realm_id = form.realm_id.data
        update.body = form.body.data
        if attachments:
            update.attachments = attachments
        if update.job is not None:
            # Cancel the previously scheduled RQ job; if it vanished from
            # Redis, drop our stale bookkeeping row instead.
            try:
                job = Job.fetch(update.job.id, connection=current_app.redis)
            except NoSuchJobError:
                db.session.delete(update.job)
            else:
                job.cancel()
        if form.job.scheduled_for.data or form.job.scheduled_in.data:
            schedule_update(
                current_app.redis_queue,
                scheduled_formdata_to_time(
                    form.job.scheduled_for.data,
                    form.job.scheduled_in.data
                ),
                update
            )
        else:
            # No schedule submitted: the update becomes unscheduled.
            update.job = None
    return update
def scheduled_formdata_to_time(
    scheduled_for: Optional[datetime],
    scheduled_in: Optional[Union[timedelta, Duration]]
) -> Union[datetime, timedelta]:
    """
    Convert the schedule form fields into an absolute datetime (localized
    to the current user's timezone) or a relative timedelta.

    Aborts with 400 when neither field has a value or when the requested
    time lies in the past.  Only put this in view functions, since it may
    abort instead of returning.
    """
    if scheduled_for is not None:
        user_tz = pytz.timezone(current_user.timezone)
        dt = user_tz.localize(scheduled_for)
        # Reject wall-clock times already in the past for this user.
        if dt < pytz.utc.localize(datetime.utcnow()).astimezone(user_tz):
            abort(400)
        return dt
    elif scheduled_in is not None:
        # isodate Durations wrap a timedelta -- unwrap for uniform handling.
        tdelta = scheduled_in.tdelta if isinstance(scheduled_in, Duration) else scheduled_in
        # If the timedelta refers to the past
        if tdelta < timedelta():
            abort(400)
        return tdelta
abort(400) | 30.094891 | 92 | 0.634732 | from datetime import date, datetime, timedelta
from flask.globals import current_app
from flask_login.utils import login_required
from isodate.duration import Duration
import pytz
from rq.job import Job
from rq.exceptions import NoSuchJobError
from app.update_scheduler.scheduler import schedule_update
from typing import NoReturn, Optional, Union
from app.update_scheduler.forms import UpdateForm
from app.utils import rest_endpoint
from app.exts import db
from flask.templating import render_template
from app.update_scheduler.models import Attachment, ScheduledJob, Update
from app.schoology.api import get_user_realms
from flask import jsonify, abort
from flask_login import current_user
from flask.blueprints import Blueprint
blueprint = Blueprint(
'update_scheduler',
__name__,
url_prefix='/scheduler',
template_folder='../templates',
static_folder='../bundle',
)
@blueprint.route('')
@login_required
def scheduler():
return render_template('scheduler.html')
@blueprint.route('/realms')
@login_required
def realms():
realms = get_user_realms(current_user)
return jsonify(realms)
@rest_endpoint(
blueprint=blueprint,
route='/updates',
model=Update,
form=UpdateForm,
methods={'GET', 'POST', 'PUT', 'DELETE'}
)
@login_required
def updates(form: UpdateForm) -> Union[Update, NoReturn]:
update = Update.query.get(form.id.data)
attachments = []
if len(form.attachments.data) > 0:
for attachment in form.attachments.data:
attachments.append(Attachment(
type=attachment['type'],
title=attachment['title'],
url=attachment['url'],
image=attachment['image'],
icon=attachment['icon'],
summary=attachment['summary']
))
if update is None:
update = Update(
realm_type=form.realm_type.data,
realm_id=form.realm_id.data,
body=form.body.data,
user_id=current_user.id
)
if attachments:
update.attachments = attachments
if form.job.scheduled_for.data or form.job.scheduled_in.data:
schedule_update(
current_app.redis_queue,
scheduled_formdata_to_time(
form.job.scheduled_for.data,
form.job.scheduled_in.data
),
update
)
else:
update.realm_type = form.realm_type.data
update.realm_id = form.realm_id.data
update.body = form.body.data
if attachments:
update.attachments = attachments
if update.job is not None:
try:
job = Job.fetch(update.job.id, connection=current_app.redis)
except NoSuchJobError:
db.session.delete(update.job)
else:
job.cancel()
if form.job.scheduled_for.data or form.job.scheduled_in.data:
schedule_update(
current_app.redis_queue,
scheduled_formdata_to_time(
form.job.scheduled_for.data,
form.job.scheduled_in.data
),
update
)
else:
update.job = None
return update
def scheduled_formdata_to_time(
scheduled_for: Optional[datetime],
scheduled_in: Optional[Union[timedelta, Duration]]
) -> Union[datetime, timedelta]:
if scheduled_for is not None:
user_tz = pytz.timezone(current_user.timezone)
dt = user_tz.localize(scheduled_for)
if dt < pytz.utc.localize(datetime.utcnow()).astimezone(user_tz):
abort(400)
return dt
elif scheduled_in is not None:
tdelta = scheduled_in.tdelta if isinstance(scheduled_in, Duration) else scheduled_in
if tdelta < timedelta():
abort(400)
return tdelta
abort(400) | true | true |
1c3f117b7554b5c24faba0e6bbfc5cd2f0e1466d | 720 | py | Python | mysite/polls/models.py | yangyi-d/django_base | b59543143156c1a011d31026af6de05e79aa0ce3 | [
"MIT"
] | null | null | null | mysite/polls/models.py | yangyi-d/django_base | b59543143156c1a011d31026af6de05e79aa0ce3 | [
"MIT"
] | null | null | null | mysite/polls/models.py | yangyi-d/django_base | b59543143156c1a011d31026af6de05e79aa0ce3 | [
"MIT"
] | null | null | null | from django.db import models
import datetime
# Create your models here.
from django.utils import timezone
class Question(models.Model):
    """Poll question: its text and publication timestamp."""
    question_text = models.CharField(max_length=200)
    # NOTE(review): verbose name 'datepublished' is missing a space --
    # presumably meant 'date published'; left unchanged to avoid altering
    # the user-visible label / triggering a migration.
    pub_date = models.DateTimeField('datepublished')
    def was_published_recently(self):
        """True if pub_date is within the last day and not in the future."""
        now = timezone.now()
        return now-datetime.timedelta(days=1)<=self.pub_date<=now
    def __str__(self):
        return self.question_text
class Choice(models.Model):
    """A selectable answer for a Question, with its vote tally."""
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    # Vote count, starts at zero for a fresh choice.
    votes = models.IntegerField(default=0)
    def __str__(self):
        return self.choice_text
| 23.225806 | 68 | 0.706944 | from django.db import models
import datetime
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('datepublished')
def was_published_recently(self):
now = timezone.now()
return now-datetime.timedelta(days=1)<=self.pub_date<=now
def __str__(self):
return self.question_text
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| true | true |
1c3f11e69fe04bfb2afc4ce32bfeb9113d316cc8 | 3,109 | py | Python | classes_base/Peca.py | lffloyd/TrabalhoIA1_Domino | b78a9cbc3ff043cedda8118741bc5fbc42ee7010 | [
"MIT"
] | null | null | null | classes_base/Peca.py | lffloyd/TrabalhoIA1_Domino | b78a9cbc3ff043cedda8118741bc5fbc42ee7010 | [
"MIT"
] | null | null | null | classes_base/Peca.py | lffloyd/TrabalhoIA1_Domino | b78a9cbc3ff043cedda8118741bc5fbc42ee7010 | [
"MIT"
] | null | null | null | # MIT License
#
# Copyright (c) 2018 Luiz Felipe de Melo (lffloyd), Vítor Costa (vitorhardoim), Renato Bastos (RenatoBastos33)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##############################################################################################################
#Define uma Peça de dominó e operações básicas associadas.
#Escrito por: Luiz Felipe, Vítor Costa
class Peca():
    """A domino piece: two face values plus a placement-order tag."""

    def __init__(self, nEsq=None, nDir=None):
        # Left/right face values and placement order (-1 = not placed).
        self._esquerda = nEsq
        self._direita = nDir
        self._ordem = -1

    def __str__(self):
        return "({0}|{1})".format(self._esquerda, self._direita)

    def __cmp__(self, other):
        # Python 2 relic kept for compatibility; delegates to __eq__.
        return self.__eq__(other)

    def __eq__(self, other):
        """Two pieces are equal when they carry the same pair of values,
        regardless of orientation.  Non-Peca objects compare unequal."""
        if not isinstance(other, self.__class__):
            return False
        mine = (self._esquerda, self._direita)
        return mine == (other.esq(), other.dir()) or mine == (other.dir(), other.esq())

    def esq(self):
        """Left face value."""
        return self._esquerda

    def dir(self):
        """Right face value."""
        return self._direita

    def pegaOrdem(self):
        """Placement order on the board (-1 when unplaced)."""
        return self._ordem

    def ordem(self, ordem):
        """Set the placement order."""
        self._ordem = ordem

    def somatorio(self):
        """Sum of both face values."""
        return self._esquerda + self._direita

    def viraPeca(self):
        """Flip the piece in place (swap its faces); returns self."""
        self._esquerda, self._direita = self._direita, self._esquerda
        return self

    def ehJogavel(self, pos):
        """True when either face matches the open position value *pos*."""
        return pos in (self._esquerda, self._direita)
| 43.788732 | 120 | 0.685751 | true | true | |
1c3f12aef0a7e4cc86449e49fc4fb21fe710fa91 | 1,579 | py | Python | get_largest_cc.py | uniooo/graph_tools | 5cbd5f69d2a7304225b1126bbf25431cdd5bf5bf | [
"MIT"
] | null | null | null | get_largest_cc.py | uniooo/graph_tools | 5cbd5f69d2a7304225b1126bbf25431cdd5bf5bf | [
"MIT"
] | null | null | null | get_largest_cc.py | uniooo/graph_tools | 5cbd5f69d2a7304225b1126bbf25431cdd5bf5bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
'''
Author: uniooo
Date: 2021-06-01 11:28:56
LastEditors: uniooo
LastEditTime: 2021-06-03 10:15:06
FilePath: /graph_tools/get_largest_cc.py
Description:
'''
import sys
from count_ccs import get_ccs
from collections import Counter
from check_edge_consecutive import GraphChecker
def get_largest_cc(filename):
    """Extract the largest connected component of the edge-list graph in
    *filename* and write it to ``<filename>.largestCC``.

    Does nothing (beyond printing a message) when the graph is already
    connected.  Vertex ids of the surviving subgraph are remapped via
    ``GraphChecker.remapping_graph`` (scheme defined in
    check_edge_consecutive).
    """
    cnt, n, cc_id = get_ccs(filename)
    if cnt == 1:
        print("Only 1 connected components\n")
        return
    # Most frequent component id among vertices 1..n marks the largest CC.
    result = Counter(cc_id[1:])
    largest_id = max(result, key=result.get)
    included_vertex = set()
    # NOTE(review): the component id itself is added as a vertex; this
    # assumes get_ccs uses a representative vertex id as the component id
    # -- confirm against count_ccs.
    included_vertex.add(largest_id)
    for i in range(1,n+1):
        if cc_id[i] == largest_id:
            included_vertex.add(i)
    ck = GraphChecker()
    with open(filename, "r") as fin:
        # Lazily parse "u v" integer pairs, one edge per line, keeping only
        # edges whose endpoints both lie in the largest component.
        read_edges = lambda fin: (map(int, line.strip().split()) for line in fin)
        edge_list = read_edges(fin)
        included_edge_list = [(u,v) for u,v in edge_list if (u in included_vertex and v in included_vertex)]
    ck.set_graph_by_edges(included_edge_list)
    edge_list = ck.remapping_graph()
    # Output format: "<num_vertices> <num_edges>" header, then one edge per line.
    with open(filename+".largestCC", "w") as fout:
        fout.write(str(len(included_vertex)) + " " + str(len(edge_list)) + "\n")
        for a, b in edge_list:
            fout.write(str(a) + " " + str(b) + "\n")
    print("New graph file with largest CC is written to disk as " + filename + ".largestCC\n")
if __name__ == "__main__":
    # Usage: ./get_largest_cc.py <graph_file>
    if len(sys.argv) != 2:
        print("./" + sys.argv[0] + " graph_file\n")
        exit(0)
get_largest_cc(sys.argv[1]) | 31.58 | 108 | 0.632046 |
import sys
from count_ccs import get_ccs
from collections import Counter
from check_edge_consecutive import GraphChecker
def get_largest_cc(filename):
cnt, n, cc_id = get_ccs(filename)
if cnt == 1:
print("Only 1 connected components\n")
return
result = Counter(cc_id[1:])
largest_id = max(result, key=result.get)
included_vertex = set()
included_vertex.add(largest_id)
for i in range(1,n+1):
if cc_id[i] == largest_id:
included_vertex.add(i)
ck = GraphChecker()
with open(filename, "r") as fin:
read_edges = lambda fin: (map(int, line.strip().split()) for line in fin)
edge_list = read_edges(fin)
included_edge_list = [(u,v) for u,v in edge_list if (u in included_vertex and v in included_vertex)]
ck.set_graph_by_edges(included_edge_list)
edge_list = ck.remapping_graph()
with open(filename+".largestCC", "w") as fout:
fout.write(str(len(included_vertex)) + " " + str(len(edge_list)) + "\n")
for a, b in edge_list:
fout.write(str(a) + " " + str(b) + "\n")
print("New graph file with largest CC is written to disk as " + filename + ".largestCC\n")
if __name__ == "__main__":
if len(sys.argv) != 2:
print("./" + sys.argv[0] + " graph_file\n")
exit(0)
get_largest_cc(sys.argv[1]) | true | true |
1c3f139daa73f91c5326ea382d5f0a2c6f80ede0 | 11,332 | py | Python | src/relstorage/adapters/mysql/locker.py | mamico/relstorage | 2df5fb721d75efad3395f34f4d6c7c34826bc56c | [
"ZPL-2.1"
] | null | null | null | src/relstorage/adapters/mysql/locker.py | mamico/relstorage | 2df5fb721d75efad3395f34f4d6c7c34826bc56c | [
"ZPL-2.1"
] | null | null | null | src/relstorage/adapters/mysql/locker.py | mamico/relstorage | 2df5fb721d75efad3395f34f4d6c7c34826bc56c | [
"ZPL-2.1"
] | null | null | null | ##############################################################################
#
# Copyright (c) 2009 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""
Locker implementations.
"""
from __future__ import absolute_import
from __future__ import print_function
from contextlib import contextmanager
from zope.interface import implementer
from ..interfaces import ILocker
from ..interfaces import UnableToAcquireCommitLockError
from ..interfaces import UnableToAcquirePackUndoLockError
from ..locker import AbstractLocker
class CommitLockQueryFailedError(UnableToAcquireCommitLockError):
    """The commit lock could not be taken because the locking query
    itself failed (as opposed to timing out waiting for the lock)."""
    pass
# Statement used to change the session-level InnoDB lock wait timeout.
_SET_TIMEOUT_STMT = 'SET SESSION innodb_lock_wait_timeout = %s'
# DEFAULT is a SQL keyword here, not a parameter value, so it must be
# interpolated into the statement text rather than bound as a query param.
_SET_TIMEOUT_DEFAULT_STMT = _SET_TIMEOUT_STMT % ('DEFAULT',)

@contextmanager
def lock_timeout(cursor, timeout, restore_to=None):
    """
    Temporarily set ``innodb_lock_wait_timeout`` on this session.

    While the block runs the session timeout is *timeout* (clamped up to
    the server minimum of 1; smaller values would only be truncated with
    a warning anyway).  On exit the timeout is restored to *restore_to*
    when given, otherwise to the server DEFAULT.

    If *timeout* is ``None``, the connection is left untouched.
    """
    if timeout is None:
        yield
        return
    # 0 is a valid request but below the server minimum; clamp to 1.
    effective = timeout if timeout >= 1 else 1
    cursor.execute(_SET_TIMEOUT_STMT, (effective,))
    try:
        yield
    finally:
        if restore_to is None:
            cursor.execute(_SET_TIMEOUT_DEFAULT_STMT)
        else:
            cursor.execute(_SET_TIMEOUT_STMT, (restore_to,))
@implementer(ILocker)
class MySQLLocker(AbstractLocker):
    """
    MySQL locks.
    .. rubric:: Commit and Object Locks
    Two types of locks are used. The ordinary commit lock and the
    object locks are standard InnoDB row-level locks; this brings the
    benefits of being lightweight and automatically being released if
    the transaction aborts or commits, plus instant deadlock
    detection. Prior to MySQL 8.0, these don't support ``NOWAIT``
    syntax, so we synthesize that by setting the session variable
    `innodb_lock_wait_timeout
    <https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_lock_wait_timeout>`_.
    Note that this lock cannot be against the ``object_state`` or
    ``current_object`` tables: arbitrary rows in those tables may have
    been locked by other transactions, and we risk deadlock.
    Also note that by default, a lock timeout will only rollback the
    current *statement*, not the whole transaction, as in most
    databases (this doesn't apply to ``NOWAIT`` in MySQL 8); to
    release any locks taken earlier, we must explicitly rollback the
    transaction. Fortunately, a lock timeout only rolling back the
    single statement is exactly what we want to implement ``NOWAIT``
    on earlier databases. In contrast, a detected deadlock will
    actually rollback the entire transaction.
    The ``ensure_current`` argument is essentially ignored; the locks
    taken out by ``lock_current_objects`` take care of that.
    .. rubric:: Shared and Exclusive Locks Can Block Each Other On Unrelated Rows
    We use two lock classes for object locks: shared locks for
    readCurrent, and exclusive locks for modified objects.
    MySQL 5.7 and 8 handle this weird, though. If two transactions are
    at any level besides ``SERIALIZABLE``, and one locks the *odd*
    rows ``FOR UPDATE`` the other one blocks trying to lock the *even*
    rows ``FOR UPDATE`` *or* in shared mode, if they happened to use
    queries like ``WHERE (zoid % 2) = 1``. This is surprising. (It's
    not surprising in ``SERIALIZABLE``; MySQL's ``SERIALIZABLE`` is
    quite pessimistic.)
    This is because (quoting
    https://dev.mysql.com/doc/refman/5.7/en/innodb-locks-set.html)
    "``SELECT ... LOCK IN SHARE MODE`` sets shared next-key locks on
    all index records the search encounters." While "``SELECT ... FOR
    UPDATE`` sets an exclusive next-key lock on every record the
    search encounters. However, only an index record lock is required
    for statements that lock rows using a unique index to search for a
    unique row. For index records the search encounters, ``SELECT ...
    FOR UPDATE`` blocks other sessions from doing ``SELECT ... LOCK IN
    SHARE MODE`` or from reading in certain transaction isolation
    levels." The complex ``WHERE`` clause does range queries and
    traversal of the index such that it winds up locking many
    unexpected rows.
    The good news is that the query we actually use for locking,
    ``SELECT zoid FROM ... WHERE zoid in (SELECT zoid from
    temp_store)``, doesn't do a range scan. It first accessess the
    ``temp_store`` table and does a sort into a temporary table using
    the index; then it accesses ``object_state`` or ``current_object``
    using the ``eq_ref`` method and the PRIMARY key index in a nested
    loop (sadly all MySQL joins are nested loops). This locks only the
    actually required rows.
    We should probably add some optimizer hints to make absolutely
    sure of that.
    .. rubric:: Pack Locks
    The second type of lock, an advisory lock, is used for pack locks.
    This lock uses the `GET_LOCK
    <https://dev.mysql.com/doc/refman/5.7/en/locking-functions.html#function_get-lock>`_
    and ``RELEASE_LOCK`` functions. These locks persist for the
    duration of a session, and *must* be explicitly released. They do
    *not* participate in deadlock detection.
    Prior to MySQL 5.7.5, it is not possible to hold more than one
    advisory lock in a single session. In the past we used advisory
    locks for the commit lock, and that meant we had to use multiple
    sessions (connections) to be able to hold both the commit lock and
    the pack lock. Fortunately, that limitation has been lifted: we no
    longer support older versions of MySQL, and we don't need multiple
    advisory locks anyway.
    """
    # The old MySQL 5.7 syntax is the default; on_store_opened() upgrades
    # these to the MySQL 8 'FOR SHARE' forms once NOWAIT support is detected.
    _lock_share_clause = 'LOCK IN SHARE MODE'
    _lock_share_clause_nowait = 'LOCK IN SHARE MODE'
    def __init__(self, options, driver, batcher_factory, version_detector):
        """
        *version_detector* is consulted lazily, on the first store
        connection (see ``on_store_opened``), to decide whether the
        server supports ``NOWAIT`` row locks.
        """
        super(MySQLLocker, self).__init__(options, driver, batcher_factory)
        assert self.supports_row_lock_nowait # Set by default in the class.
        # Reset to None so on_store_opened() knows detection hasn't run yet.
        self.supports_row_lock_nowait = None
        self.version_detector = version_detector
        # No good preparing this, mysql can't take parameters in EXECUTE,
        # they have to be user variables, which defeats most of the point
        # (Although in this case, because it's a static value, maybe not;
        # it could be set once and re-used.)
        self.set_timeout_stmt = _SET_TIMEOUT_STMT
    def on_store_opened(self, cursor, restart=False):
        """
        On the first (non-restart) open, detect ``NOWAIT`` support and
        pick the matching share-lock SQL; servers without it fall back to
        the timeout-based emulation in ``__lock_readCurrent_nowait``.
        """
        super(MySQLLocker, self).on_store_opened(cursor, restart=restart)
        if restart:
            return
        if self.supports_row_lock_nowait is None:
            self.supports_row_lock_nowait = self.version_detector.supports_nowait(cursor)
            if self.supports_row_lock_nowait:
                self._lock_share_clause = 'FOR SHARE'
                self._lock_share_clause_nowait = 'FOR SHARE NOWAIT'
            else:
                assert self._lock_readCurrent_oids_for_share
                self._lock_readCurrent_oids_for_share = self.__lock_readCurrent_nowait
    def _on_store_opened_set_row_lock_timeout(self, cursor, restart=False):
        # Every store connection starts with the configured commit lock timeout.
        self._set_row_lock_timeout(cursor, self.commit_lock_timeout)
    def _set_row_lock_timeout(self, cursor, timeout):
        # Min value of timeout is 1; a value less than that produces
        # a warning.
        timeout = timeout if timeout >= 1 else 1
        cursor.execute(self.set_timeout_stmt, (timeout,))
        # It's INCREDIBLY important to fetch a row after we execute the SET statement;
        # otherwise, the binary drivers that use libmysqlclient tend to crash,
        # usually with a 'malloc: freeing not allocated data' or 'malloc:
        # corrupted data, written after free?' or something like that.
        cursor.fetchone()
    def __lock_readCurrent_nowait(self, cursor, current_oids, shared_locks_block):
        # For MySQL 5.7, we emulate NOWAIT by setting the lock timeout
        # to (effectively) zero around the shared-lock query; the timeout
        # is restored to commit_lock_timeout afterwards.
        if shared_locks_block:
            return AbstractLocker._lock_readCurrent_oids_for_share(self, cursor, current_oids, True)
        with lock_timeout(cursor, 0, self.commit_lock_timeout):
            return AbstractLocker._lock_readCurrent_oids_for_share(self, cursor, current_oids,
                                                                   False)
    def release_commit_lock(self, cursor):
        "Auto-released by transaction end."
    def _get_commit_lock_debug_info(self, cursor, was_failure=False):
        """
        Return a human-readable description of the current lock state,
        trying the MySQL 8 performance_schema tables first and falling
        back to the MySQL 5 information_schema tables; degrades to just
        the connection id when permissions are missing.
        """
        cursor.execute('SELECT connection_id()')
        conn_id = str(cursor.fetchone()[0])
        try:
            # MySQL 8
            cursor.execute("""
            SELECT *
            FROM performance_schema.events_transactions_current AS parent
            INNER JOIN performance_schema.data_locks AS child
            INNER JOIN performance_schema.data_lock_waits dlw on (child.engine_lock_id
            = dlw.blocking_engine_lock_id)
            WHERE
            parent.THREAD_ID = child.THREAD_ID
            AND parent.EVENT_ID < child.EVENT_ID
            AND (
            child.EVENT_ID <= parent.END_EVENT_ID
            OR parent.END_EVENT_ID IS NULL
            )""")
            return 'Connection: ' + conn_id + '\n' + self._rows_as_pretty_string(cursor)
        except self.driver.driver_module.Error:
            # MySQL 5, or no permissions
            try:
                cursor.execute("""
                SELECT * from information_schema.innodb_locks l
                INNER JOIN information_schema.INNODB_TRX x ON l.lock_trx_id = x.trx_id
                """)
                rows = self._rows_as_pretty_string(cursor)
            except self.driver.driver_module.Error:
                # MySQL 8, and we had no permissions.
                return 'Connection: ' + conn_id
            return 'Connection: ' + conn_id + '\n' + rows
    def hold_pack_lock(self, cursor):
        """Try to acquire the pack lock.
        Raise an exception if packing or undo is already in progress.
        """
        stmt = "SELECT GET_LOCK(CONCAT(DATABASE(), '.pack'), 0)"
        cursor.execute(stmt)
        res = cursor.fetchone()[0]
        if not res:
            raise UnableToAcquirePackUndoLockError('A pack or undo operation is in progress')
    def release_pack_lock(self, cursor):
        """Release the pack lock."""
        stmt = "SELECT RELEASE_LOCK(CONCAT(DATABASE(), '.pack'))"
        cursor.execute(stmt)
        rows = cursor.fetchall() # stay in sync
        assert rows
| 43.752896 | 103 | 0.677639 | true | true | |
1c3f150a285c70c43f3e81dc32a10b36249db1d5 | 1,442 | py | Python | ftrace/parsers/binder_transaction_buffer_release.py | bagobor/ftrace | a41bfff97447ff6503b80ffc60111cd7e53fed86 | [
"Apache-2.0"
] | 62 | 2016-05-29T15:20:15.000Z | 2022-03-11T11:40:48.000Z | ftrace/parsers/binder_transaction_buffer_release.py | bagobor/ftrace | a41bfff97447ff6503b80ffc60111cd7e53fed86 | [
"Apache-2.0"
] | 2 | 2017-12-12T09:37:40.000Z | 2018-05-09T10:29:05.000Z | ftrace/parsers/binder_transaction_buffer_release.py | bagobor/ftrace | a41bfff97447ff6503b80ffc60111cd7e53fed86 | [
"Apache-2.0"
] | 32 | 2016-08-01T08:33:22.000Z | 2021-11-03T02:18:38.000Z | import re
from ftrace.common import ParserError
from .register import register_parser
from .binder import parse_binder_cmd
from collections import namedtuple
TRACEPOINT = 'binder_transaction_buffer_release'
__all__ = [TRACEPOINT]
#binder_transaction_buffer_release: transaction=135918 data_size=28 offsets_size=0
BinderTransactionBufferReleaseBase = namedtuple(TRACEPOINT,
[
'transaction',
'data_size',
'offsets_size'
]
)
class BinderTransactionBufferRelease(BinderTransactionBufferReleaseBase):
__slots__ = ()
def __new__(cls, transaction, data_size, offsets_size):
return super(cls, BinderTransactionBufferRelease).__new__(
cls,
transaction=transaction,
data_size=data_size,
offsets_size=offsets_size
)
binder_transaction_buffer_release_pattern = re.compile(
    r"""
    transaction=(\d+)\s+
    data_size=(\d+)\s+
    offsets_size=(\d+)
    """,
    re.X|re.M
)
@register_parser
def binder_transaction_buffer_release(payload):
    """Parser for `binder_transaction_buffer_release`.

    Returns a BinderTransactionBufferRelease tuple on success, or
    (implicitly) None when *payload* does not match the expected format.
    Raises ParserError if anything goes wrong while parsing.
    """
    try:
        match = binder_transaction_buffer_release_pattern.match(payload)
        if match:
            transaction, data_size, offsets_size = match.groups()
            return BinderTransactionBufferRelease(
                int(transaction), int(data_size), int(offsets_size)
            )
    except Exception as e:
        # Bug fix: `e.message` was removed in Python 3 and raised an
        # AttributeError here, masking the original parse error.
        raise ParserError(str(e))
| 28.27451 | 112 | 0.701803 | import re
from ftrace.common import ParserError
from .register import register_parser
from .binder import parse_binder_cmd
from collections import namedtuple
TRACEPOINT = 'binder_transaction_buffer_release'
__all__ = [TRACEPOINT]
BinderTransactionBufferReleaseBase = namedtuple(TRACEPOINT,
[
'transaction',
'data_size',
'offsets_size'
]
)
class BinderTransactionBufferRelease(BinderTransactionBufferReleaseBase):
__slots__ = ()
def __new__(cls, transaction, data_size, offsets_size):
return super(cls, BinderTransactionBufferRelease).__new__(
cls,
transaction=transaction,
data_size=data_size,
offsets_size=offsets_size
)
binder_transaction_buffer_release_pattern = re.compile(
r"""
transaction=(\d+)\s+
data_size=(\d+)\s+
offsets_size=(\d+)
""",
re.X|re.M
)
@register_parser
def binder_transaction_buffer_release(payload):
try:
match = re.match(binder_transaction_buffer_release_pattern, payload)
if match:
match_group_dict = match.groupdict()
return BinderTransactionBufferRelease(int(match.group(1)), int(match.group(2)), int(match.group(3)))
except Exception as e:
raise ParserError(e.message)
| true | true |
1c3f1a2fe6124f3558400ab87736acd636988b9e | 7,649 | py | Python | simtools/simtel/simtel_runner_array.py | gammasim/gammasim-tools | 0b746254916f4c2e2a3fbd1854c565c3bc90d493 | [
"BSD-3-Clause"
] | 5 | 2020-06-02T09:46:38.000Z | 2022-03-26T16:42:26.000Z | simtools/simtel/simtel_runner_array.py | gammasim/gammasim-tools | 0b746254916f4c2e2a3fbd1854c565c3bc90d493 | [
"BSD-3-Clause"
] | 166 | 2020-04-24T10:22:16.000Z | 2022-03-31T12:51:02.000Z | simtools/simtel/simtel_runner_array.py | gammasim/gammasim-tools | 0b746254916f4c2e2a3fbd1854c565c3bc90d493 | [
"BSD-3-Clause"
] | null | null | null | import logging
import os
from pathlib import Path
import simtools.io_handler as io
import simtools.util.general as gen
from simtools.util import names
from simtools.simtel.simtel_runner import SimtelRunner, InvalidOutputFile
__all__ = ['SimtelRunnerArray']
class SimtelRunnerArray(SimtelRunner):
    '''
    SimtelRunnerArray is the interface with sim_telarray to perform array simulations.

    Configurable parameters:
        simtelDataDirectory:
            len: 1
            default: null
            unit: null
        primary:
            len: 1
            unit: null
        zenithAngle:
            len: 1
            unit: deg
            default: 20 deg
        azimuthAngle:
            len: 1
            unit: deg
            default: 0 deg

    Attributes
    ----------
    label: str, optional
        Instance label.
    arrayModel: ArrayModel
        Instance of the ArrayModel class.
    config: namedtuple
        Contains the configurable parameters (zenithAngle).

    Methods
    -------
    getRunScript(self, test=False, inputFile=None, run=None)
        Builds and returns the full path of the bash run script containing
        the sim_telarray command.
    run(test=False, force=False, inputFile=None, run=None)
        Run sim_telarray. test=True will make it faster and force=True will remove existing files
        and run again.
    '''

    def __init__(
        self,
        arrayModel,
        label=None,
        simtelSourcePath=None,
        filesLocation=None,
        configData=None,
        configFile=None
    ):
        '''
        SimtelRunnerArray.

        Parameters
        ----------
        arrayModel: ArrayModel
            Instance of the ArrayModel class.
        label: str, optional
            Instance label. Important for output file naming.
        simtelSourcePath: str (or Path), optional
            Location of sim_telarray installation. If not given, it will be taken from the
            config.yml file.
        filesLocation: str (or Path), optional
            Parent location of the output files created by this class. If not given, it will be
            taken from the config.yml file.
        configData: dict, optional
            Dict containing the configurable parameters.
        configFile: str or Path, optional
            Path of the yaml file containing the configurable parameters.
        '''
        self._logger = logging.getLogger(__name__)
        self._logger.debug('Init SimtelRunnerArray')

        super().__init__(
            label=label,
            simtelSourcePath=simtelSourcePath,
            filesLocation=filesLocation
        )

        self.arrayModel = self._validateArrayModel(arrayModel)
        self.label = label if label is not None else self.arrayModel.label

        # File location
        self._baseDirectory = io.getOutputDirectory(
            self._filesLocation,
            self.label,
            'array'
        )
        self._baseDirectory.mkdir(parents=True, exist_ok=True)

        # Loading configData
        _configDataIn = gen.collectDataFromYamlOrDict(configFile, configData)
        _parameterFile = io.getDataFile('parameters', 'simtel-runner-array_parameters.yml')
        _parameters = gen.collectDataFromYamlOrDict(_parameterFile, None)
        self.config = gen.validateConfigData(_configDataIn, _parameters)

        self._loadSimtelDataDirectories()

    def _loadSimtelDataDirectories(self):
        '''
        Create sim_telarray output directories for data, log and input.

        If simtelDataDirectory is not given as a configurable parameter,
        the standard directory of simtools output (simtools-output) will
        be used. A sub directory simtel-data will be created and subdirectories for
        log and data will be created inside it.
        '''
        if self.config.simtelDataDirectory is None:
            # Default config value
            simtelBaseDir = self._baseDirectory
        else:
            simtelBaseDir = Path(self.config.simtelDataDirectory)
        simtelBaseDir = simtelBaseDir.joinpath('simtel-data')
        simtelBaseDir = simtelBaseDir.joinpath(self.arrayModel.site)
        simtelBaseDir = simtelBaseDir.joinpath(self.config.primary)
        simtelBaseDir = simtelBaseDir.absolute()

        self._simtelDataDir = simtelBaseDir.joinpath('data')
        self._simtelDataDir.mkdir(parents=True, exist_ok=True)
        self._simtelLogDir = simtelBaseDir.joinpath('log')
        self._simtelLogDir.mkdir(parents=True, exist_ok=True)

    def _fileNameKwargs(self, run):
        '''
        Collect the keyword arguments shared by all sim_telarray file
        name builders (log, histogram and output files).
        '''
        return dict(
            run=run,
            primary=self.config.primary,
            arrayName=self.arrayModel.layoutName,
            site=self.arrayModel.site,
            zenith=self.config.zenithAngle,
            azimuth=self.config.azimuthAngle,
            label=self.label
        )

    def getLogFile(self, run):
        ''' Get full path of the simtel log file for a given run. '''
        return self._simtelLogDir.joinpath(
            names.simtelLogFileName(**self._fileNameKwargs(run))
        )

    def getHistogramFile(self, run):
        ''' Get full path of the simtel histogram file for a given run. '''
        return self._simtelDataDir.joinpath(
            names.simtelHistogramFileName(**self._fileNameKwargs(run))
        )

    def getOutputFile(self, run):
        ''' Get full path of the simtel output file for a given run. '''
        return self._simtelDataDir.joinpath(
            names.simtelOutputFileName(**self._fileNameKwargs(run))
        )

    def _shallRun(self, run=None):
        ''' Tells if simulations should be run again based on the existence of output files. '''
        return not self.getOutputFile(run).exists()

    def _makeRunCommand(self, inputFile, run=1):
        ''' Builds and returns the command to run simtel_array. '''
        self._logFile = self.getLogFile(run)
        histogramFile = self.getHistogramFile(run)
        outputFile = self.getOutputFile(run)

        # Array
        command = str(self._simtelSourcePath.joinpath('sim_telarray/bin/sim_telarray'))
        command += ' -c {}'.format(self.arrayModel.getConfigFile())
        command += ' -I{}'.format(self.arrayModel.getConfigDirectory())
        command += super()._configOption('telescope_theta', self.config.zenithAngle)
        command += super()._configOption('telescope_phi', self.config.azimuthAngle)
        command += super()._configOption('power_law', '2.5')
        command += super()._configOption('histogram_file', histogramFile)
        command += super()._configOption('output_file', outputFile)
        command += super()._configOption('random_state', 'auto')
        command += super()._configOption('show', 'all')
        command += ' ' + str(inputFile)
        # Both stdout and stderr go to the log file.
        command += ' > ' + str(self._logFile) + ' 2>&1'

        return command
    # END of makeRunCommand

    def _checkRunResult(self, run):
        ''' Raise InvalidOutputFile if the sim_telarray output file is missing. '''
        if not self.getOutputFile(run).exists():
            msg = 'sim_telarray output file does not exist.'
            self._logger.error(msg)
            raise InvalidOutputFile(msg)
        else:
            self._logger.debug('Everything looks fine with the sim_telarray output file.')
| 36.251185 | 97 | 0.6359 | import logging
import os
from pathlib import Path
import simtools.io_handler as io
import simtools.util.general as gen
from simtools.util import names
from simtools.simtel.simtel_runner import SimtelRunner, InvalidOutputFile
__all__ = ['SimtelRunnerArray']
class SimtelRunnerArray(SimtelRunner):
def __init__(
self,
arrayModel,
label=None,
simtelSourcePath=None,
filesLocation=None,
configData=None,
configFile=None
):
self._logger = logging.getLogger(__name__)
self._logger.debug('Init SimtelRunnerArray')
super().__init__(
label=label,
simtelSourcePath=simtelSourcePath,
filesLocation=filesLocation
)
self.arrayModel = self._validateArrayModel(arrayModel)
self.label = label if label is not None else self.arrayModel.label
self._baseDirectory = io.getOutputDirectory(
self._filesLocation,
self.label,
'array'
)
self._baseDirectory.mkdir(parents=True, exist_ok=True)
_configDataIn = gen.collectDataFromYamlOrDict(configFile, configData)
_parameterFile = io.getDataFile('parameters', 'simtel-runner-array_parameters.yml')
_parameters = gen.collectDataFromYamlOrDict(_parameterFile, None)
self.config = gen.validateConfigData(_configDataIn, _parameters)
self._loadSimtelDataDirectories()
def _loadSimtelDataDirectories(self):
if self.config.simtelDataDirectory is None:
simtelBaseDir = self._baseDirectory
else:
simtelBaseDir = Path(self.config.simtelDataDirectory)
simtelBaseDir = simtelBaseDir.joinpath('simtel-data')
simtelBaseDir = simtelBaseDir.joinpath(self.arrayModel.site)
simtelBaseDir = simtelBaseDir.joinpath(self.config.primary)
simtelBaseDir = simtelBaseDir.absolute()
self._simtelDataDir = simtelBaseDir.joinpath('data')
self._simtelDataDir.mkdir(parents=True, exist_ok=True)
self._simtelLogDir = simtelBaseDir.joinpath('log')
self._simtelLogDir.mkdir(parents=True, exist_ok=True)
def getLogFile(self, run):
fileName = names.simtelLogFileName(
run=run,
primary=self.config.primary,
arrayName=self.arrayModel.layoutName,
site=self.arrayModel.site,
zenith=self.config.zenithAngle,
azimuth=self.config.azimuthAngle,
label=self.label
)
return self._simtelLogDir.joinpath(fileName)
def getHistogramFile(self, run):
fileName = names.simtelHistogramFileName(
run=run,
primary=self.config.primary,
arrayName=self.arrayModel.layoutName,
site=self.arrayModel.site,
zenith=self.config.zenithAngle,
azimuth=self.config.azimuthAngle,
label=self.label
)
return self._simtelDataDir.joinpath(fileName)
def getOutputFile(self, run):
fileName = names.simtelOutputFileName(
run=run,
primary=self.config.primary,
arrayName=self.arrayModel.layoutName,
site=self.arrayModel.site,
zenith=self.config.zenithAngle,
azimuth=self.config.azimuthAngle,
label=self.label
)
return self._simtelDataDir.joinpath(fileName)
def _shallRun(self, run=None):
return not self.getOutputFile(run).exists()
def _makeRunCommand(self, inputFile, run=1):
self._logFile = self.getLogFile(run)
histogramFile = self.getHistogramFile(run)
outputFile = self.getOutputFile(run)
command = str(self._simtelSourcePath.joinpath('sim_telarray/bin/sim_telarray'))
command += ' -c {}'.format(self.arrayModel.getConfigFile())
command += ' -I{}'.format(self.arrayModel.getConfigDirectory())
command += super()._configOption('telescope_theta', self.config.zenithAngle)
command += super()._configOption('telescope_phi', self.config.azimuthAngle)
command += super()._configOption('power_law', '2.5')
command += super()._configOption('histogram_file', histogramFile)
command += super()._configOption('output_file', outputFile)
command += super()._configOption('random_state', 'auto')
command += super()._configOption('show', 'all')
command += ' ' + str(inputFile)
command += ' > ' + str(self._logFile) + ' 2>&1'
return command
def _checkRunResult(self, run):
if not self.getOutputFile(run).exists():
msg = 'sim_telarray output file does not exist.'
self._logger.error(msg)
raise InvalidOutputFile(msg)
else:
self._logger.debug('Everything looks fine with the sim_telarray output file.')
| true | true |
1c3f1a31f04942811db0866efc38f49a7509460b | 13,577 | py | Python | optibot/casadi.py | AunSiro/optibot | 186c9556473071b583f1ed677e2e1a647aeb0513 | [
"MIT"
] | 1 | 2021-06-01T15:58:45.000Z | 2021-06-01T15:58:45.000Z | optibot/casadi.py | AunSiro/optibot | 186c9556473071b583f1ed677e2e1a647aeb0513 | [
"MIT"
] | null | null | null | optibot/casadi.py | AunSiro/optibot | 186c9556473071b583f1ed677e2e1a647aeb0513 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon May 31 12:52:24 2021
@author: Siro Moreno
sympy2casadi function original author: Joris Gillis
https://gist.github.com/jgillis/80bb594a6c8fcf55891d1d88b12b68b8
"""
import casadi as cas
from casadi import sin, cos
def get_str(x):
    """Return the string representation of *x*."""
    return str(x)
def list2casadi(vallist):
    """Stack the entries of *vallist* into a casadi column vector."""
    row = cas.horzcat(*vallist)
    return row.T
def sympy2casadi(sympy_expr, sympy_var, casadi_var):
    """
    Evaluate a sympy expression with casadi symbols substituted in.

    Parameters
    ----------
    sympy_expr : sympy expression
    sympy_var : list of sympy symbols
    casadi_var : list of casadi symbols

    Returns
    -------
    Casadi expression equivalent to *sympy_expr* in the casadi symbols.
    """
    from sympy.utilities.lambdify import lambdify

    # Map sympy constructs without a direct casadi name onto their
    # casadi counterparts; everything else resolves through the cas module.
    mapping = {
        "ImmutableDenseMatrix": cas.blockcat,
        "MutableDenseMatrix": cas.blockcat,
        "Abs": cas.fabs,
    }
    func = lambdify(sympy_var, sympy_expr, modules=[mapping, cas])
    return func(*casadi_var)
def symlist2cas(symlist):
    """Create one casadi MX symbol per sympy symbol, keeping the names."""
    return [cas.MX.sym(str(symbol)) for symbol in symlist]
def unpack(arr):
    """Split *arr* into a list of its columns, transposing a single
    column into a row first so scalars come out one by one."""
    mat = cas.horzcat(arr)
    if mat.shape[-1] == 1:
        mat = mat.T
    return [mat[:, col] for col in range(mat.shape[-1])]
def rhs_to_casadi_function(RHS, q_vars, u_vars=None, verbose=False):
    """
    Converts an array of symbolic expressions RHS(x, u, params) to a casadi
    function.
    Designed to work with systems so that
        x' = RHS(x, u, params)
    Parameters
    ----------
    RHS : Sympy matrix
        Vertical symbolic matrix RHS(x, u, params)
    q_vars : list of Sympy dynamic symbols
        Coordinate symbols to look for in the expressions
        (forwarded to find_arguments).
    u_vars : list of Sympy dynamic symbols, optional
        Control symbols to look for. The default is None.
    verbose : bool, optional
        Forwarded to find_arguments. The default is False.
    Returns
    -------
    Casadi Function
        F(x, u, params) -> x_dot, where x = [q, v] and
        x_dot = [v, RHS].
    """
    from .symbolic import find_arguments, standard_notation, diff_to_symb_expr
    RHS = list(RHS)
    RHS = [standard_notation(diff_to_symb_expr(expr)) for expr in RHS]
    arguments = find_arguments(RHS, q_vars, u_vars, verbose=verbose)
    q_args, v_args, _, u_args_found, params, lambda_args = arguments
    # State is [q, v]; its derivative is [v, RHS].
    x_args = q_args + v_args
    funcs = v_args + RHS
    all_vars = x_args + u_args_found + params
    msg = "Function Arguments:\n"
    msg += f"\tx: {x_args}\n"
    msg += f"\tu: {u_args_found}\n"
    msg += f"\tparams: {params}\n"
    print(msg)
    cas_x_args = cas.MX.sym("x", len(x_args))
    cas_u_args = cas.MX.sym("u", len(u_args_found))
    cas_params = cas.MX.sym("p", len(params))
    # Flatten the casadi vectors into one scalar-per-symbol list matching all_vars.
    cas_all_vars = [cas_x_args[ii] for ii in range(len(x_args))]
    cas_all_vars += [cas_u_args[ii] for ii in range(len(u_args_found))]
    cas_all_vars += [cas_params[ii] for ii in range(len(params))]
    cas_funcs = []
    for function in funcs:
        cas_funcs.append(sympy2casadi(function, all_vars, cas_all_vars))
    cas_funcs = cas.horzcat(*cas_funcs)
    return cas.Function(
        "F",
        [cas_x_args, cas_u_args, cas_params],
        [cas_funcs,],
        ["x", "u", "params"],
        ["x_dot"],
    )
def implicit_dynamic_x_to_casadi_function(D, x_vars, u_vars=None, verbose=False):
    """
    Converts an array D(x, x', u, lambdas, params) of symbolic expressions to a
    Casadi function.
    Symbols in the expressions not found in x_vars, x_dot_vars or u_vars
    will be considered parameters.
    Parameters
    ----------
    D : Sympy matrix
        Vertical symbolic matrix D(x, x', u, lambdas, params)
    x_vars : int or list of Sympy dynamic symbols
        list of x symbols to look for in the expressions.
        If int, they will be generated as 'x_i' for i in [0, x_vars]
    u_vars : list of Sympy dynamic symbols
        List of u symbols to look for. The default is None.
    Returns
    -------
    Casadi Function
        Casadi Function of x, x', u, lambdas, params.
    """
    from .symbolic import find_arguments, standard_notation, diff_to_symb_expr
    from sympy.physics.mechanics import dynamicsymbols
    D = list(D)
    D = [standard_notation(diff_to_symb_expr(expr)) for expr in D]
    if type(x_vars) == int:
        x_vars = list(dynamicsymbols("x_0:" + str(x_vars)))
    elif type(x_vars) != list:
        raise TypeError("x_vars must be int or list of symbols")
    arguments = find_arguments(
        D, x_vars, u_vars, separate_lambdas=True, verbose=verbose
    )
    x_args, x_dot_args, _, u_args, params, lambda_args = arguments
    all_vars = x_args + x_dot_args + u_args + lambda_args + params
    msg = "Function Arguments:\n"
    msg += f"\tx: {x_args}\n"
    msg += f"\tx_dot: {x_dot_args}\n"
    msg += f"\tu: {u_args}\n"
    msg += f"\tlambdas: {lambda_args}\n"
    msg += f"\tparams: {params}\n"
    print(msg)
    cas_x_args = cas.MX.sym("x", len(x_args))
    # NOTE(review): the MX labels "x" and "u" are reused below for the
    # x_dot and lambda vectors — looks like copy-paste; the labels are
    # cosmetic for casadi, but worth confirming and renaming.
    cas_x_dot_args = cas.MX.sym("x", len(x_dot_args))
    cas_u_args = cas.MX.sym("u", len(u_args))
    cas_lambda_args = cas.MX.sym("u", len(lambda_args))
    cas_params = cas.MX.sym("p", len(params))
    # Flatten the casadi vectors into one scalar-per-symbol list matching all_vars.
    cas_all_vars = [cas_x_args[ii] for ii in range(len(x_args))]
    cas_all_vars += [cas_x_dot_args[ii] for ii in range(len(x_dot_args))]
    cas_all_vars += [cas_u_args[ii] for ii in range(len(u_args))]
    cas_all_vars += [cas_lambda_args[ii] for ii in range(len(lambda_args))]
    cas_all_vars += [cas_params[ii] for ii in range(len(params))]
    cas_funcs = []
    for function in D:
        cas_funcs.append(sympy2casadi(function, all_vars, cas_all_vars))
    cas_funcs = cas.horzcat(*cas_funcs)
    return cas.Function(
        "M",
        [cas_x_args, cas_x_dot_args, cas_u_args, cas_lambda_args, cas_params],
        [cas_funcs,],
        ["x", "x_dot", "u", "lambdas", "params"],
        ["residue"],
    )
def implicit_dynamic_q_to_casadi_function(D, q_vars, u_vars=None, verbose=False):
    """
    Converts an array D(q, q', q'', u, lambdas, params) of symbolic expressions to a
    Casadi function.
    Symbols in the expressions not found in q_vars, their derivatives or u_vars
    will be considered parameters.
    Parameters
    ----------
    D : Sympy matrix
        Vertical symbolic matrix D(q, q', q'', u, lambdas, params)
    q_vars : int or list of Sympy dynamic symbols
        list of q symbols to look for in the expressions.
        If int, they will be generated as 'q_i' for i in [0, q_vars]
    u_vars : list of Sympy dynamic symbols
        List of u symbols to look for. The default is None.
    Returns
    -------
    Casadi Function
        Casadi Function of q, q', q'', u, lambdas, params.
    """
    from .symbolic import find_arguments, standard_notation, diff_to_symb_expr
    from sympy.physics.mechanics import dynamicsymbols
    D = list(D)
    D = [standard_notation(diff_to_symb_expr(expr)) for expr in D]
    if type(q_vars) == int:
        q_vars = list(dynamicsymbols("q_0:" + str(q_vars)))
    elif type(q_vars) != list:
        raise TypeError("q_vars must be int or list of symbols")
    arguments = find_arguments(
        D, q_vars, u_vars, separate_as=True, separate_lambdas=True, verbose=verbose
    )
    q_args, v_args, a_args, u_args, params, lambda_args = arguments
    all_vars = q_args + v_args + a_args + u_args + lambda_args + params
    msg = "Function Arguments:\n"
    msg += f"\tq: {q_args}\n"
    msg += f"\tv: {v_args}\n"
    msg += f"\ta: {a_args}\n"
    msg += f"\tu: {u_args}\n"
    msg += f"\tlambda: {lambda_args}\n"
    msg += f"\tparams: {params}\n"
    print(msg)
    cas_q_args = cas.MX.sym("q", len(q_args))
    cas_v_args = cas.MX.sym("v", len(v_args))
    cas_a_args = cas.MX.sym("a", len(a_args))
    cas_u_args = cas.MX.sym("u", len(u_args))
    cas_lambda_args = cas.MX.sym("lambda", len(lambda_args))
    cas_params = cas.MX.sym("p", len(params))
    # Flatten the casadi vectors into one scalar-per-symbol list matching all_vars.
    cas_all_vars = [cas_q_args[ii] for ii in range(len(q_args))]
    cas_all_vars += [cas_v_args[ii] for ii in range(len(v_args))]
    cas_all_vars += [cas_a_args[ii] for ii in range(len(a_args))]
    cas_all_vars += [cas_u_args[ii] for ii in range(len(u_args))]
    cas_all_vars += [cas_lambda_args[ii] for ii in range(len(lambda_args))]
    cas_all_vars += [cas_params[ii] for ii in range(len(params))]
    cas_funcs = []
    for function in D:
        cas_funcs.append(sympy2casadi(function, all_vars, cas_all_vars))
    cas_funcs = cas.horzcat(*cas_funcs)
    return cas.Function(
        "F",
        [cas_q_args, cas_v_args, cas_a_args, cas_u_args, cas_lambda_args, cas_params],
        [cas_funcs,],
        ["q", "v", "a", "u", "lambda", "params"],
        ["Residue"],
    )
def restriction2casadi(F_scheme, F, n_vars, n_u, n_params, n_scheme_params=0):
    """
    Wrap a scheme restriction function as a casadi Function.

    Parameters
    ----------
    F_scheme : Function of the form F(x, x_n, u, u_n, F, dt, p, [sch_p])
        Restriction function that each step has to be equal to zero;
        the sch_p argument is only mandatory if n_scheme_params != 0.
    F : Function of the form F(x, u, p)
        Physics function that describes the system.
    n_vars : int
        Number of q coordinates; the x vector is twice as long since it
        also carries the velocities.
    n_u : int
        Number of u variables or actions in the problem.
    n_params : int
        Number of parameters in the problem.
    n_scheme_params : int, default 0
        Number of scheme parameters, not passed to F(x, u, p).

    Returns
    -------
    Casadi Function
        F(x, x_n, u, u_n, dt, p[, sch_p]) whose output must equal zero
        at every step.
    """
    from inspect import signature

    if n_scheme_params != 0 and len(signature(F_scheme).parameters) == 7:
        raise ValueError(
            "Detected a value of n_scheme_params larger than zero in a function F_scheme that does not contain sch_p argument"
        )
    x = cas.SX.sym("x", 2 * n_vars).T
    x_n = cas.SX.sym("x_n", 2 * n_vars).T
    u = cas.SX.sym("u", n_u).T
    u_n = cas.SX.sym("u_n", n_u).T
    p = cas.SX.sym("p", n_params)
    dt = cas.SX.sym("dt")
    if n_scheme_params == 0:
        residue = F_scheme(x, x_n, u, u_n, F, dt, p)
        inputs = [x, x_n, u, u_n, dt, p]
        input_names = ["x", "x_n", "u", "u_n", "dt", "params"]
    else:
        sch_p = cas.SX.sym("sch_p", n_scheme_params)
        residue = F_scheme(x, x_n, u, u_n, F, dt, p, sch_p)
        inputs = [x, x_n, u, u_n, dt, p, sch_p]
        input_names = ["x", "x_n", "u", "u_n", "dt", "params", "scheme_params"]
    return cas.Function(
        "Restriction",
        inputs,
        [residue],
        input_names,
        ["residue"],
    )
def accelrestriction2casadi(F_scheme, n_vars, n_scheme_params=0):
    """Wrap an acceleration-based scheme restriction as a casadi Function.

    Parameters
    ----------
    F_scheme : callable F_scheme(x, x_n, a, a_n, dt, scheme_params)
        Per-step restriction that must evaluate to zero.
    n_vars : int
        Number of q coordinates; the state x carries twice as many entries
        because it also includes the velocities.
    n_scheme_params : int, default 0
        Number of scheme-only parameters, not involved in the dynamics.

    Returns
    -------
    casadi.Function
        Function F(x, x_n, a, a_n, dt, scheme_params) -> residue.
    """
    state_sym = cas.SX.sym("x", 2 * n_vars).T
    state_next_sym = cas.SX.sym("x_n", 2 * n_vars).T
    accel_sym = cas.SX.sym("a", n_vars).T
    accel_next_sym = cas.SX.sym("a_n", n_vars).T
    dt_sym = cas.SX.sym("dt")
    scheme_sym = cas.SX.sym("sch_p", n_scheme_params)
    residue = F_scheme(state_sym, state_next_sym, accel_sym, accel_next_sym, dt_sym, scheme_sym)
    return cas.Function(
        "Restriction",
        [state_sym, state_next_sym, accel_sym, accel_next_sym, dt_sym, scheme_sym],
        [residue],
        ["x", "x_n", "a", "a_n", "dt", "scheme_params"],
        ["residue"],
    )
# --- Double Pendulum ---
def doub_pend_F(x, u, params=(1, 1, 1, 1, 1)):
    """Dynamics x_dot = F(x, u, params) of an actuated double pendulum.

    Parameters
    ----------
    x : casadi/array-like
        State [q_0, q_1, v_0, v_1]: joint angles and angular velocities.
    u : casadi/array-like
        Actuation [u_0, u_1]: torque applied at each joint.
    params : sequence of 5 values, default (1, 1, 1, 1, 1)
        Physical parameters (m_1, l_1, l_0, m_0, g).

    Returns
    -------
    Horizontal concatenation [v_0, v_1, a_0, a_1].
    """
    q_0, q_1, v_0, v_1 = unpack(x)
    u_0, u_1 = unpack(u)
    # Bug fix: the original listed every parameter name twice (10 targets),
    # so unpacking the 5-element default raised ValueError. A tuple default
    # also avoids the shared-mutable-default pitfall.
    m_1, l_1, l_0, m_0, g = params
    result = [
        v_0,
        v_1,
    ]
    # Angular acceleration of the first joint.
    result.append(
        (
            l_0
            * (l_1 * m_1 * (g * sin(q_1) - l_0 * v_0 ** 2 * sin(q_0 - q_1)) - u_1)
            * cos(q_0 - q_1)
            + l_1
            * (
                -l_0
                * (
                    g * m_0 * sin(q_0)
                    + g * m_1 * sin(q_0)
                    + l_1 * m_1 * v_1 ** 2 * sin(q_0 - q_1)
                )
                + u_0
            )
        )
        / (l_0 ** 2 * l_1 * (m_0 - m_1 * cos(q_0 - q_1) ** 2 + m_1))
    )
    # Angular acceleration of the second joint.
    result.append(
        (
            -l_0
            * (m_0 + m_1)
            * (l_1 * m_1 * (g * sin(q_1) - l_0 * v_0 ** 2 * sin(q_0 - q_1)) - u_1)
            + l_1
            * m_1
            * (
                l_0
                * (
                    g * m_0 * sin(q_0)
                    + g * m_1 * sin(q_0)
                    + l_1 * m_1 * v_1 ** 2 * sin(q_0 - q_1)
                )
                - u_0
            )
            * cos(q_0 - q_1)
        )
        / (l_0 * l_1 ** 2 * m_1 * (m_0 - m_1 * cos(q_0 - q_1) ** 2 + m_1))
    )
    return cas.horzcat(*result)
| 31.945882 | 126 | 0.592399 |
import casadi as cas
from casadi import sin, cos
def get_str(x):
    """Return the string representation of *x*."""
    text = x.__str__()
    return text
def list2casadi(vallist):
    """Convert a python list of values into a casadi column vector."""
    return cas.horzcat(*vallist).T
def sympy2casadi(sympy_expr, sympy_var, casadi_var):
    """Translate a sympy expression into its casadi equivalent.

    *sympy_var* and *casadi_var* are parallel sequences: each sympy symbol
    is substituted by the casadi symbol at the same position.
    """
    from sympy.utilities.lambdify import lambdify
    # Map the matrix/Abs constructs emitted by lambdify onto casadi builders.
    mapping = {
        "ImmutableDenseMatrix": cas.blockcat,
        "MutableDenseMatrix": cas.blockcat,
        "Abs": cas.fabs,
    }
    f = lambdify(sympy_var, sympy_expr, modules=[mapping, cas])
    return f(*casadi_var)
def symlist2cas(symlist):
    """Create one scalar casadi MX symbol per sympy symbol in *symlist*."""
    return [cas.MX.sym(symbol.__str__()) for symbol in symlist]
def unpack(arr):
    """Split *arr* into a list with one element per column.

    A column vector is transposed first so that its entries are
    returned one by one.
    """
    arr = cas.horzcat(arr)
    if arr.shape[-1] == 1:
        # Column vector: transpose so we iterate over its scalar entries.
        arr = arr.T
    dim = arr.shape[-1]
    res = [arr[:, ii] for ii in range(dim)]
    return res
def rhs_to_casadi_function(RHS, q_vars, u_vars=None, verbose=False):
    """Convert a sympy right-hand side into a casadi Function F(x, u, params) -> x_dot.

    The state x stacks the q coordinates and their velocities; the returned
    x_dot stacks the velocities and the accelerations given by *RHS*.
    """
    from .symbolic import find_arguments, standard_notation, diff_to_symb_expr
    RHS = list(RHS)
    # Normalise derivative notation before classifying the free symbols.
    RHS = [standard_notation(diff_to_symb_expr(expr)) for expr in RHS]
    arguments = find_arguments(RHS, q_vars, u_vars, verbose=verbose)
    q_args, v_args, _, u_args_found, params, lambda_args = arguments
    x_args = q_args + v_args
    funcs = v_args + RHS
    all_vars = x_args + u_args_found + params
    msg = "Function Arguments:\n"
    msg += f"\tx: {x_args}\n"
    msg += f"\tu: {u_args_found}\n"
    msg += f"\tparams: {params}\n"
    print(msg)
    cas_x_args = cas.MX.sym("x", len(x_args))
    cas_u_args = cas.MX.sym("u", len(u_args_found))
    cas_params = cas.MX.sym("p", len(params))
    # Flatten the casadi vectors so they align element-wise with all_vars.
    cas_all_vars = [cas_x_args[ii] for ii in range(len(x_args))]
    cas_all_vars += [cas_u_args[ii] for ii in range(len(u_args_found))]
    cas_all_vars += [cas_params[ii] for ii in range(len(params))]
    cas_funcs = []
    for function in funcs:
        cas_funcs.append(sympy2casadi(function, all_vars, cas_all_vars))
    cas_funcs = cas.horzcat(*cas_funcs)
    return cas.Function(
        "F",
        [cas_x_args, cas_u_args, cas_params],
        [cas_funcs,],
        ["x", "u", "params"],
        ["x_dot"],
    )
def implicit_dynamic_x_to_casadi_function(D, x_vars, u_vars=None, verbose=False):
    """Convert implicit sympy dynamics D(x, x_dot, u, lambdas, params) = 0
    into a casadi Function returning the residue.
    """
    from .symbolic import find_arguments, standard_notation, diff_to_symb_expr
    from sympy.physics.mechanics import dynamicsymbols
    D = list(D)
    D = [standard_notation(diff_to_symb_expr(expr)) for expr in D]
    if type(x_vars) == int:
        # An int means "generate x_0 ... x_{n-1} dynamic symbols".
        x_vars = list(dynamicsymbols("x_0:" + str(x_vars)))
    elif type(x_vars) != list:
        raise TypeError("x_vars must be int or list of symbols")
    arguments = find_arguments(
        D, x_vars, u_vars, separate_lambdas=True, verbose=verbose
    )
    x_args, x_dot_args, _, u_args, params, lambda_args = arguments
    all_vars = x_args + x_dot_args + u_args + lambda_args + params
    msg = "Function Arguments:\n"
    msg += f"\tx: {x_args}\n"
    msg += f"\tx_dot: {x_dot_args}\n"
    msg += f"\tu: {u_args}\n"
    msg += f"\tlambdas: {lambda_args}\n"
    msg += f"\tparams: {params}\n"
    print(msg)
    cas_x_args = cas.MX.sym("x", len(x_args))
    # NOTE(review): the display names reuse "x" for x_dot and "u" for the
    # lambdas; casadi only uses these for printing, but confirm intent.
    cas_x_dot_args = cas.MX.sym("x", len(x_dot_args))
    cas_u_args = cas.MX.sym("u", len(u_args))
    cas_lambda_args = cas.MX.sym("u", len(lambda_args))
    cas_params = cas.MX.sym("p", len(params))
    # Flatten every symbol vector so it aligns element-wise with all_vars.
    cas_all_vars = [cas_x_args[ii] for ii in range(len(x_args))]
    cas_all_vars += [cas_x_dot_args[ii] for ii in range(len(x_dot_args))]
    cas_all_vars += [cas_u_args[ii] for ii in range(len(u_args))]
    cas_all_vars += [cas_lambda_args[ii] for ii in range(len(lambda_args))]
    cas_all_vars += [cas_params[ii] for ii in range(len(params))]
    cas_funcs = []
    for function in D:
        cas_funcs.append(sympy2casadi(function, all_vars, cas_all_vars))
    cas_funcs = cas.horzcat(*cas_funcs)
    return cas.Function(
        "M",
        [cas_x_args, cas_x_dot_args, cas_u_args, cas_lambda_args, cas_params],
        [cas_funcs,],
        ["x", "x_dot", "u", "lambdas", "params"],
        ["residue"],
    )
def implicit_dynamic_q_to_casadi_function(D, q_vars, u_vars=None, verbose=False):
    """Convert implicit sympy dynamics D(q, v, a, u, lambdas, params) = 0
    into a casadi Function returning the residue.
    """
    from .symbolic import find_arguments, standard_notation, diff_to_symb_expr
    from sympy.physics.mechanics import dynamicsymbols
    D = list(D)
    D = [standard_notation(diff_to_symb_expr(expr)) for expr in D]
    if type(q_vars) == int:
        # An int means "generate q_0 ... q_{n-1} dynamic symbols".
        q_vars = list(dynamicsymbols("q_0:" + str(q_vars)))
    elif type(q_vars) != list:
        raise TypeError("q_vars must be int or list of symbols")
    arguments = find_arguments(
        D, q_vars, u_vars, separate_as=True, separate_lambdas=True, verbose=verbose
    )
    q_args, v_args, a_args, u_args, params, lambda_args = arguments
    all_vars = q_args + v_args + a_args + u_args + lambda_args + params
    msg = "Function Arguments:\n"
    msg += f"\tq: {q_args}\n"
    msg += f"\tv: {v_args}\n"
    msg += f"\ta: {a_args}\n"
    msg += f"\tu: {u_args}\n"
    msg += f"\tlambda: {lambda_args}\n"
    msg += f"\tparams: {params}\n"
    print(msg)
    cas_q_args = cas.MX.sym("q", len(q_args))
    cas_v_args = cas.MX.sym("v", len(v_args))
    cas_a_args = cas.MX.sym("a", len(a_args))
    cas_u_args = cas.MX.sym("u", len(u_args))
    cas_lambda_args = cas.MX.sym("lambda", len(lambda_args))
    cas_params = cas.MX.sym("p", len(params))
    # Flatten every symbol vector so it aligns element-wise with all_vars.
    cas_all_vars = [cas_q_args[ii] for ii in range(len(q_args))]
    cas_all_vars += [cas_v_args[ii] for ii in range(len(v_args))]
    cas_all_vars += [cas_a_args[ii] for ii in range(len(a_args))]
    cas_all_vars += [cas_u_args[ii] for ii in range(len(u_args))]
    cas_all_vars += [cas_lambda_args[ii] for ii in range(len(lambda_args))]
    cas_all_vars += [cas_params[ii] for ii in range(len(params))]
    cas_funcs = []
    for function in D:
        cas_funcs.append(sympy2casadi(function, all_vars, cas_all_vars))
    cas_funcs = cas.horzcat(*cas_funcs)
    return cas.Function(
        "F",
        [cas_q_args, cas_v_args, cas_a_args, cas_u_args, cas_lambda_args, cas_params],
        [cas_funcs,],
        ["q", "v", "a", "u", "lambda", "params"],
        ["Residue"],
    )
def restriction2casadi(F_scheme, F, n_vars, n_u, n_params, n_scheme_params=0):
    """Wrap the scheme restriction *F_scheme* as a casadi Function.

    Returns a casadi function F(x, x_n, u, u_n, dt, params[, scheme_params])
    whose output must equal zero at every step; *F* is the physics function
    forwarded to F_scheme.
    """
    from inspect import signature
    # Without sch_p the restriction signature has exactly 7 parameters, so
    # asking for scheme parameters is a contradiction.
    if n_scheme_params != 0 and len(signature(F_scheme).parameters) == 7:
        raise ValueError(
            "Detected a value of n_scheme_params larger than zero in a function F_scheme that does not contain sch_p argument"
        )
    x = cas.SX.sym("x", 2 * n_vars).T
    x_n = cas.SX.sym("x_n", 2 * n_vars).T
    u = cas.SX.sym("u", n_u).T
    u_n = cas.SX.sym("u_n", n_u).T
    p = cas.SX.sym("p", n_params)
    dt = cas.SX.sym("dt")
    if n_scheme_params == 0:
        result = F_scheme(x, x_n, u, u_n, F, dt, p)
        return cas.Function(
            "Restriction",
            [x, x_n, u, u_n, dt, p],
            [result,],
            ["x", "x_n", "u", "u_n", "dt", "params"],
            ["residue"],
        )
    else:
        sch_p = cas.SX.sym("sch_p", n_scheme_params)
        result = F_scheme(x, x_n, u, u_n, F, dt, p, sch_p)
        return cas.Function(
            "Restriction",
            [x, x_n, u, u_n, dt, p, sch_p],
            [result,],
            ["x", "x_n", "u", "u_n", "dt", "params", "scheme_params"],
            ["residue"],
        )
def accelrestriction2casadi(F_scheme, n_vars, n_scheme_params=0):
    """Wrap an acceleration-based scheme restriction
    F_scheme(x, x_n, a, a_n, dt, scheme_params) as a casadi Function
    whose output must equal zero at every step.
    """
    x = cas.SX.sym("x", 2 * n_vars).T
    x_n = cas.SX.sym("x_n", 2 * n_vars).T
    a = cas.SX.sym("a", n_vars).T
    a_n = cas.SX.sym("a_n", n_vars).T
    dt = cas.SX.sym("dt")
    sch_p = cas.SX.sym("sch_p", n_scheme_params)
    result = F_scheme(x, x_n, a, a_n, dt, sch_p)
    return cas.Function(
        "Restriction",
        [x, x_n, a, a_n, dt, sch_p],
        [result,],
        ["x", "x_n", "a", "a_n", "dt", "scheme_params"],
        ["residue"],
    )
def doub_pend_F(x, u, params=(1, 1, 1, 1, 1)):
    """Dynamics x_dot = F(x, u, params) of an actuated double pendulum.

    Parameters
    ----------
    x : casadi/array-like
        State [q_0, q_1, v_0, v_1]: joint angles and angular velocities.
    u : casadi/array-like
        Actuation [u_0, u_1]: torque applied at each joint.
    params : sequence of 5 values, default (1, 1, 1, 1, 1)
        Physical parameters (m_1, l_1, l_0, m_0, g).

    Returns
    -------
    Horizontal concatenation [v_0, v_1, a_0, a_1].
    """
    q_0, q_1, v_0, v_1 = unpack(x)
    u_0, u_1 = unpack(u)
    # Bug fix: the original listed every parameter name twice (10 targets),
    # so unpacking the 5-element default raised ValueError. A tuple default
    # also avoids the shared-mutable-default pitfall.
    m_1, l_1, l_0, m_0, g = params
    result = [
        v_0,
        v_1,
    ]
    # Angular acceleration of the first joint.
    result.append(
        (
            l_0
            * (l_1 * m_1 * (g * sin(q_1) - l_0 * v_0 ** 2 * sin(q_0 - q_1)) - u_1)
            * cos(q_0 - q_1)
            + l_1
            * (
                -l_0
                * (
                    g * m_0 * sin(q_0)
                    + g * m_1 * sin(q_0)
                    + l_1 * m_1 * v_1 ** 2 * sin(q_0 - q_1)
                )
                + u_0
            )
        )
        / (l_0 ** 2 * l_1 * (m_0 - m_1 * cos(q_0 - q_1) ** 2 + m_1))
    )
    # Angular acceleration of the second joint.
    result.append(
        (
            -l_0
            * (m_0 + m_1)
            * (l_1 * m_1 * (g * sin(q_1) - l_0 * v_0 ** 2 * sin(q_0 - q_1)) - u_1)
            + l_1
            * m_1
            * (
                l_0
                * (
                    g * m_0 * sin(q_0)
                    + g * m_1 * sin(q_0)
                    + l_1 * m_1 * v_1 ** 2 * sin(q_0 - q_1)
                )
                - u_0
            )
            * cos(q_0 - q_1)
        )
        / (l_0 * l_1 ** 2 * m_1 * (m_0 - m_1 * cos(q_0 - q_1) ** 2 + m_1))
    )
    return cas.horzcat(*result)
| true | true |
1c3f1ac5e6b0cb7fbf8d419b4d3a085f34c38922 | 6,867 | py | Python | pokecord/dev.py | qenu/pokecord-red | 35007e83297e1bf7430aa318a7d58745e2c1943c | [
"MIT"
] | 9 | 2020-06-06T20:17:01.000Z | 2021-10-10T18:28:54.000Z | pokecord/dev.py | flaree/pokecord-red | 6810b45f3a2608c2726664b5d3d96b90c401e7b1 | [
"MIT"
] | 12 | 2020-07-09T00:32:49.000Z | 2021-11-09T20:21:02.000Z | pokecord/dev.py | qenu/pokecord-red | 35007e83297e1bf7430aa318a7d58745e2c1943c | [
"MIT"
] | 12 | 2020-07-24T15:44:15.000Z | 2022-03-14T10:14:19.000Z | import json
import pprint
from typing import Optional
import discord
import tabulate
from redbot.core import commands
from redbot.core.i18n import Translator
from redbot.core.utils.chat_formatting import *
from .abc import MixinMeta
from .pokemixin import poke
from .statements import *
_ = Translator("Pokecord", __file__)
class Dev(MixinMeta):
    """Pokecord Development Commands"""

    @poke.group(hidden=True)
    @commands.is_owner()
    async def dev(self, ctx):
        """Pokecord Development Commands"""

    @dev.command(name="spawn")
    async def dev_spawn(self, ctx, *pokemon):
        """Spawn a pokemon by name, or a random one when no name is given."""
        pokemon = " ".join(pokemon).strip().lower()
        # Bug fix: the original used `pokemon is ""` (identity comparison);
        # it only worked because CPython interns the empty string.
        if not pokemon:
            await self.spawn_pokemon(ctx.channel)
            return
        for i, pokemondata in enumerate(self.pokemondata):
            # Prefer the alias, when present, over the English display name.
            name = (
                pokemondata.get("alias").lower()
                if pokemondata.get("alias")
                else pokemondata["name"]["english"].lower()
            )
            if name == pokemon:
                await self.spawn_pokemon(ctx.channel, pokemon=self.pokemondata[i])
                return
        await ctx.send("No pokemon found.")

    async def get_pokemon(self, ctx, user: discord.Member, pokeid: int) -> list:
        """Return the [data, message_id] pair at slot *pokeid* of *user*'s list.

        On any failure a message is sent to the channel and its result (not a
        list) is returned, which callers use to detect the error case.
        """
        if pokeid <= 0:
            return await ctx.send("The ID must be greater than 0!")
        async with ctx.typing():
            result = await self.cursor.fetch_all(query=SELECT_POKEMON, values={"user_id": user.id})
            # Slot 0 is a placeholder so that user-facing IDs start at 1.
            pokemons = [None]
            for data in result:
                pokemons.append([json.loads(data[0]), data[1]])
        # Bug fix: `if not pokemons` could never trigger because of the
        # placeholder entry; an empty collection has exactly one element.
        if len(pokemons) == 1:
            return await ctx.send("You don't have any pokémon, trainer!")
        if pokeid >= len(pokemons):
            return await ctx.send("There's no pokemon at that slot.")
        return pokemons[pokeid]

    @dev.command(name="ivs")
    async def dev_ivs(
        self,
        ctx,
        user: Optional[discord.Member],
        pokeid: int,
        hp: int,
        attack: int,
        defence: int,
        spatk: int,
        spdef: int,
        speed: int,
    ):
        """Manually set a pokemon's IVs."""
        if user is None:
            user = ctx.author
        pokemon = await self.get_pokemon(ctx, user, pokeid)
        # get_pokemon returns a message object (not a list) on failure.
        if not isinstance(pokemon, list):
            return
        pokemon[0]["ivs"] = {
            "HP": hp,
            "Attack": attack,
            "Defence": defence,
            "Sp. Atk": spatk,
            "Sp. Def": spdef,
            "Speed": speed,
        }
        await self.cursor.execute(
            query=UPDATE_POKEMON,
            values={
                "user_id": user.id,
                "message_id": pokemon[1],
                "pokemon": json.dumps(pokemon[0]),
            },
        )
        await ctx.tick()

    @dev.command(name="stats")
    async def dev_stats(
        self,
        ctx,
        user: Optional[discord.Member],
        pokeid: int,
        hp: int,
        attack: int,
        defence: int,
        spatk: int,
        spdef: int,
        speed: int,
    ):
        """Manually set a pokemon's stats."""
        if user is None:
            user = ctx.author
        pokemon = await self.get_pokemon(ctx, user, pokeid)
        if not isinstance(pokemon, list):
            return
        pokemon[0]["stats"] = {
            "HP": hp,
            "Attack": attack,
            "Defence": defence,
            "Sp. Atk": spatk,
            "Sp. Def": spdef,
            "Speed": speed,
        }
        await self.cursor.execute(
            query=UPDATE_POKEMON,
            values={
                "user_id": user.id,
                "message_id": pokemon[1],
                "pokemon": json.dumps(pokemon[0]),
            },
        )
        await ctx.tick()

    @dev.command(name="level")
    async def dev_lvl(self, ctx, user: Optional[discord.Member], pokeid: int, lvl: int):
        """Manually set a pokemon's level."""
        if user is None:
            user = ctx.author
        pokemon = await self.get_pokemon(ctx, user, pokeid)
        if not isinstance(pokemon, list):
            return
        pokemon[0]["level"] = lvl
        await self.cursor.execute(
            query=UPDATE_POKEMON,
            values={
                "user_id": user.id,
                "message_id": pokemon[1],
                "pokemon": json.dumps(pokemon[0]),
            },
        )
        await ctx.tick()

    @dev.command(name="reveal")
    async def dev_reveal(self, ctx, user: Optional[discord.Member], pokeid: int):
        """Show the raw stored data for an owned pokemon."""
        if user is None:
            user = ctx.author
        pokemon = await self.get_pokemon(ctx, user, pokeid)
        if not isinstance(pokemon, list):
            return
        await ctx.send(content=pprint.pformat(pokemon[0]))

    @dev.command(name="strip")
    async def dev_strip(self, ctx, user: discord.Member, id: int):
        """Forcibly remove a pokemon from a user."""
        if id <= 0:
            return await ctx.send("The ID must be greater than 0!")
        async with ctx.typing():
            result = await self.cursor.fetch_all(query=SELECT_POKEMON, values={"user_id": user.id})
            pokemons = [None]
            for data in result:
                pokemons.append([json.loads(data[0]), data[1]])
        # Bug fix: the placeholder entry made `if not pokemons` unreachable.
        if len(pokemons) == 1:
            return await ctx.send(f"{user.display_name} don't have any pokémon!")
        if id >= len(pokemons):
            return await ctx.send("There's no pokemon at that slot.")
        pokemon = pokemons[id]
        msg = ""
        userconf = await self.user_is_global(user)
        pokeid = await userconf.pokeid()
        if id < pokeid:
            msg += _(
                "\nTheir default pokemon may have changed. I have tried to account for this change."
            )
            await userconf.pokeid.set(pokeid - 1)
        elif id == pokeid:
            msg += _(
                "\nYou have released their selected pokemon. I have reset their selected pokemon to their first pokemon."
            )
            await userconf.pokeid.set(1)
        if len(pokemons) == 2:  # it was their last pokemon, resets starter
            await userconf.has_starter.set(False)
            # Bug fix: `msg =` discarded the notes accumulated above.
            msg += _(
                f"\n{user.display_name} has no pokemon left. I have granted them another chance to pick a starter."
            )
        await self.cursor.execute(
            query="DELETE FROM users where message_id = :message_id",
            values={"message_id": pokemon[1]},
        )
        name = self.get_name(pokemon[0]["name"], user)
        await ctx.send(
            _(f"{user.display_name}'s {name} has been freed.{msg}").format(name=name, msg=msg)
        )
| 33.661765 | 121 | 0.541139 | import json
import pprint
from typing import Optional
import discord
import tabulate
from redbot.core import commands
from redbot.core.i18n import Translator
from redbot.core.utils.chat_formatting import *
from .abc import MixinMeta
from .pokemixin import poke
from .statements import *
_ = Translator("Pokecord", __file__)
class Dev(MixinMeta):
    """Pokecord Development Commands"""

    @poke.group(hidden=True)
    @commands.is_owner()
    async def dev(self, ctx):
        """Pokecord Development Commands"""
        # NOTE: this docstring also restores the function body that the
        # comment-stripping step removed, which had made this a syntax error.

    @dev.command(name="spawn")
    async def dev_spawn(self, ctx, *pokemon):
        """Spawn a pokemon by name, or a random one when no name is given."""
        pokemon = " ".join(pokemon).strip().lower()
        # Bug fix: the original used `pokemon is ""` (identity comparison);
        # it only worked because CPython interns the empty string.
        if not pokemon:
            await self.spawn_pokemon(ctx.channel)
            return
        for i, pokemondata in enumerate(self.pokemondata):
            # Prefer the alias, when present, over the English display name.
            name = (
                pokemondata.get("alias").lower()
                if pokemondata.get("alias")
                else pokemondata["name"]["english"].lower()
            )
            if name == pokemon:
                await self.spawn_pokemon(ctx.channel, pokemon=self.pokemondata[i])
                return
        await ctx.send("No pokemon found.")

    async def get_pokemon(self, ctx, user: discord.Member, pokeid: int) -> list:
        """Return the [data, message_id] pair at slot *pokeid* of *user*'s list.

        On any failure a message is sent to the channel and its result (not a
        list) is returned, which callers use to detect the error case.
        """
        if pokeid <= 0:
            return await ctx.send("The ID must be greater than 0!")
        async with ctx.typing():
            result = await self.cursor.fetch_all(query=SELECT_POKEMON, values={"user_id": user.id})
            # Slot 0 is a placeholder so that user-facing IDs start at 1.
            pokemons = [None]
            for data in result:
                pokemons.append([json.loads(data[0]), data[1]])
        # Bug fix: `if not pokemons` could never trigger because of the
        # placeholder entry; an empty collection has exactly one element.
        if len(pokemons) == 1:
            return await ctx.send("You don't have any pokémon, trainer!")
        if pokeid >= len(pokemons):
            return await ctx.send("There's no pokemon at that slot.")
        return pokemons[pokeid]

    @dev.command(name="ivs")
    async def dev_ivs(
        self,
        ctx,
        user: Optional[discord.Member],
        pokeid: int,
        hp: int,
        attack: int,
        defence: int,
        spatk: int,
        spdef: int,
        speed: int,
    ):
        """Manually set a pokemon's IVs."""
        if user is None:
            user = ctx.author
        pokemon = await self.get_pokemon(ctx, user, pokeid)
        # get_pokemon returns a message object (not a list) on failure.
        if not isinstance(pokemon, list):
            return
        pokemon[0]["ivs"] = {
            "HP": hp,
            "Attack": attack,
            "Defence": defence,
            "Sp. Atk": spatk,
            "Sp. Def": spdef,
            "Speed": speed,
        }
        await self.cursor.execute(
            query=UPDATE_POKEMON,
            values={
                "user_id": user.id,
                "message_id": pokemon[1],
                "pokemon": json.dumps(pokemon[0]),
            },
        )
        await ctx.tick()

    @dev.command(name="stats")
    async def dev_stats(
        self,
        ctx,
        user: Optional[discord.Member],
        pokeid: int,
        hp: int,
        attack: int,
        defence: int,
        spatk: int,
        spdef: int,
        speed: int,
    ):
        """Manually set a pokemon's stats."""
        if user is None:
            user = ctx.author
        pokemon = await self.get_pokemon(ctx, user, pokeid)
        if not isinstance(pokemon, list):
            return
        pokemon[0]["stats"] = {
            "HP": hp,
            "Attack": attack,
            "Defence": defence,
            "Sp. Atk": spatk,
            "Sp. Def": spdef,
            "Speed": speed,
        }
        await self.cursor.execute(
            query=UPDATE_POKEMON,
            values={
                "user_id": user.id,
                "message_id": pokemon[1],
                "pokemon": json.dumps(pokemon[0]),
            },
        )
        await ctx.tick()

    @dev.command(name="level")
    async def dev_lvl(self, ctx, user: Optional[discord.Member], pokeid: int, lvl: int):
        """Manually set a pokemon's level."""
        if user is None:
            user = ctx.author
        pokemon = await self.get_pokemon(ctx, user, pokeid)
        if not isinstance(pokemon, list):
            return
        pokemon[0]["level"] = lvl
        await self.cursor.execute(
            query=UPDATE_POKEMON,
            values={
                "user_id": user.id,
                "message_id": pokemon[1],
                "pokemon": json.dumps(pokemon[0]),
            },
        )
        await ctx.tick()

    @dev.command(name="reveal")
    async def dev_reveal(self, ctx, user: Optional[discord.Member], pokeid: int):
        """Show the raw stored data for an owned pokemon."""
        if user is None:
            user = ctx.author
        pokemon = await self.get_pokemon(ctx, user, pokeid)
        if not isinstance(pokemon, list):
            return
        await ctx.send(content=pprint.pformat(pokemon[0]))

    @dev.command(name="strip")
    async def dev_strip(self, ctx, user: discord.Member, id: int):
        """Forcibly remove a pokemon from a user."""
        if id <= 0:
            return await ctx.send("The ID must be greater than 0!")
        async with ctx.typing():
            result = await self.cursor.fetch_all(query=SELECT_POKEMON, values={"user_id": user.id})
            pokemons = [None]
            for data in result:
                pokemons.append([json.loads(data[0]), data[1]])
        # Bug fix: the placeholder entry made `if not pokemons` unreachable.
        if len(pokemons) == 1:
            return await ctx.send(f"{user.display_name} don't have any pokémon!")
        if id >= len(pokemons):
            return await ctx.send("There's no pokemon at that slot.")
        pokemon = pokemons[id]
        msg = ""
        userconf = await self.user_is_global(user)
        pokeid = await userconf.pokeid()
        if id < pokeid:
            msg += _(
                "\nTheir default pokemon may have changed. I have tried to account for this change."
            )
            await userconf.pokeid.set(pokeid - 1)
        elif id == pokeid:
            msg += _(
                "\nYou have released their selected pokemon. I have reset their selected pokemon to their first pokemon."
            )
            await userconf.pokeid.set(1)
        if len(pokemons) == 2:
            await userconf.has_starter.set(False)
            # Bug fix: `msg =` discarded the notes accumulated above.
            msg += _(
                f"\n{user.display_name} has no pokemon left. I have granted them another chance to pick a starter."
            )
        await self.cursor.execute(
            query="DELETE FROM users where message_id = :message_id",
            values={"message_id": pokemon[1]},
        )
        name = self.get_name(pokemon[0]["name"], user)
        await ctx.send(
            _(f"{user.display_name}'s {name} has been freed.{msg}").format(name=name, msg=msg)
        )
| true | true |
1c3f1c778320fa7c201b58bb02189d6858253a62 | 609 | py | Python | spacy_ann/types.py | StanciuMarius/spacy-ann-linker | d889a15b877c153269bc3068c8c4ed32773b182a | [
"MIT"
] | null | null | null | spacy_ann/types.py | StanciuMarius/spacy-ann-linker | d889a15b877c153269bc3068c8c4ed32773b182a | [
"MIT"
] | null | null | null | spacy_ann/types.py | StanciuMarius/spacy-ann-linker | d889a15b877c153269bc3068c8c4ed32773b182a | [
"MIT"
] | null | null | null | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from pydantic import BaseModel
class AliasCandidate(BaseModel):
    """A data class representing a candidate alias
    that a NER mention may be linked to.
    """
    # Surface form of the candidate alias.
    alias: str
    # Similarity score between the mention text and this alias.
    similarity: float
class KnowledgeBaseCandidate(BaseModel):
    """A candidate knowledge-base entity for an entity-linking decision."""
    # Identifier of the candidate entity in the knowledge base.
    entity: str
    # Similarity between the mention context and the entity.
    context_similarity: float
    # Prior probability of the entity given the alias.
    prior_probability: float
    # NER type label associated with the entity (see index_vs_kb_type below).
    type_label: str
# Maps the integer type index stored in the KB to its NER type label.
# Note: both 0 and 100 map to 'UNK', so the reverse mapping resolves
# 'UNK' to 100 (the last entry wins when the dict is inverted).
index_vs_kb_type = {
    0: 'UNK',
    1: 'ORG',
    2: 'GPE',
    3: 'PERSON',
    100: 'UNK'
}
kb_type_vs_index = {value: key for key, value in index_vs_kb_type.items()} | 21 | 74 | 0.686371 |
from pydantic import BaseModel
class AliasCandidate(BaseModel):
    """A data class representing a candidate alias
    that a NER mention may be linked to.
    """
    # Surface form of the candidate alias.
    alias: str
    # Similarity score between the mention text and this alias.
    similarity: float
class KnowledgeBaseCandidate(BaseModel):
    """A candidate knowledge-base entity for an entity-linking decision."""
    # Identifier of the candidate entity in the knowledge base.
    entity: str
    # Similarity between the mention context and the entity.
    context_similarity: float
    # Prior probability of the entity given the alias.
    prior_probability: float
    # NER type label associated with the entity (see index_vs_kb_type below).
    type_label: str
# Maps the integer type index stored in the KB to its NER type label.
# Note: both 0 and 100 map to 'UNK', so inverting this dict resolves
# 'UNK' to 100 (the last entry wins).
index_vs_kb_type = {
    0: 'UNK',
    1: 'ORG',
    2: 'GPE',
    3: 'PERSON',
    100: 'UNK'
}
kb_type_vs_index = {value: key for key, value in index_vs_kb_type.items()} | true | true |
1c3f1c80099f18839d7e33b327db7ad92b8d4137 | 3,908 | py | Python | tests/bugs/core_1512_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_1512_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | tests/bugs/core_1512_test.py | reevespaul/firebird-qa | 98f16f425aa9ab8ee63b86172f959d63a2d76f21 | [
"MIT"
] | null | null | null | #coding:utf-8
#
# id: bugs.core_1512
# title: Connection lost running script
# description:
# tracker_id: CORE-1512
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
# No output substitutions are needed when comparing expected vs. actual output.
substitutions_1 = []
# The database starts out empty: every DDL statement lives in test_script_1.
init_script_1 = """"""
# NOTE(review): charset ISO8859_1 is presumably required by the COLLATE PT_PT
# clauses in the DDL script -- confirm before changing it.
db_1 = db_factory(page_size=4096, charset='ISO8859_1', sql_dialect=3, init=init_script_1)
test_script_1 = """
-- Confirmed crash on WI-V2.1.7.18553 for: CREATE TABLE FHO_OS(...)
CREATE DOMAIN DM_COD AS
NUMERIC(4,0);
CREATE DOMAIN DM_COD2 AS
NUMERIC(8,0);
CREATE DOMAIN DM_DES AS
VARCHAR(80)
COLLATE PT_PT;
CREATE DOMAIN DM_FONE AS
VARCHAR(20)
COLLATE PT_PT;
CREATE DOMAIN DM_ID AS
NUMERIC(4,0);
CREATE DOMAIN DM_ID2 AS
NUMERIC(8,0);
CREATE DOMAIN DM_IMG AS
BLOB SUB_TYPE 0 SEGMENT SIZE 4096;
CREATE DOMAIN DM_IND AS
CHAR(1)
COLLATE PT_PT;
CREATE DOMAIN DM_IND2 AS
CHAR(2)
COLLATE PT_PT;
CREATE DOMAIN DM_NM AS
VARCHAR(80)
COLLATE PT_PT;
CREATE DOMAIN DM_PWS AS
VARCHAR(10)
COLLATE PT_PT;
CREATE DOMAIN DM_TP AS
CHAR(1)
COLLATE PT_PT;
CREATE DOMAIN DM_TXT AS
BLOB SUB_TYPE 1 SEGMENT SIZE 4096;
CREATE TABLE FHO_ATIV_TEC (
COD_USUARIO DM_COD NOT NULL,
DT_INICIO TIMESTAMP NOT NULL,
DT_TERMINO TIMESTAMP,
COD_ATIVIDADE DM_COD2 NOT NULL,
ID_OS DM_ID2
);
CREATE TABLE FHO_OS (
ID_OS DM_ID2 NOT NULL,
DT_INICIO TIMESTAMP NOT NULL,
DT_TERMINO TIMESTAMP,
COD_FICHA DM_COD2,
COD_USUARIO DM_COD NOT NULL,
COD_ATIVIDADE DM_COD2 NOT NULL,
COD_PROJETO DM_COD2,
TXT_DESCRICAO DM_TXT,
IND_PENDENTE DM_IND NOT NULL,
IND_CANCELADO DM_IND NOT NULL,
ID_OS_ORIGEM DM_ID2,
COD_USUARIO_ENVIOU DM_COD,
NRO_PRIORIDADE INTEGER,
PERC_FATURAR NUMERIC(5,2),
TXT_DESCRICAO_TECNICA DM_TXT,
DT_PREVISAO_TERMINO DATE,
NM_CONTATO DM_NM,
IND_EXIBE_RELEASE DM_IND,
HRS_ORCAMENTO NUMERIC(5,2),
IND_STATUS COMPUTED BY (CASE WHEN (FHO_OS.IND_CANCELADO <> 'N') THEN
'CANCELADA'
WHEN (SELECT FIRST 1 (CASE WHEN T.DT_TERMINO IS NULL THEN 1 END) FROM FHO_ATIV_TEC T
WHERE (T.COD_USUARIO = FHO_OS.COD_USUARIO) AND (T.ID_OS = FHO_OS.ID_OS) ORDER BY T.DT_INICIO DESC) IS NOT NULL THEN
'EM USO'
WHEN (FHO_OS.DT_TERMINO IS NULL) THEN
'ABERTA'
WHEN (FHO_OS.IND_PENDENTE = 'S') THEN
'PENDENTE'
ELSE
'FECHADA'
END),
COD_TP_OS DM_COD,
HRS_CONTRATO NUMERIC(5,2),
HRS_PREVISAO_TERMINO NUMERIC(5,2),
PERC_HORA_MAQUINA NUMERIC(5,2)
);
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
    """Run the DDL script; the test passes if the server connection survives it."""
    act_1.execute()
| 30.771654 | 185 | 0.487462 |
import pytest
from firebird.qa import db_factory, isql_act, Action
# No output substitutions are needed when comparing expected vs. actual output.
substitutions_1 = []
# The database starts out empty: every DDL statement lives in test_script_1.
init_script_1 = """"""
# NOTE(review): charset ISO8859_1 is presumably required by the COLLATE PT_PT
# clauses in the DDL script -- confirm before changing it.
db_1 = db_factory(page_size=4096, charset='ISO8859_1', sql_dialect=3, init=init_script_1)
test_script_1 = """
-- Confirmed crash on WI-V2.1.7.18553 for: CREATE TABLE FHO_OS(...)
CREATE DOMAIN DM_COD AS
NUMERIC(4,0);
CREATE DOMAIN DM_COD2 AS
NUMERIC(8,0);
CREATE DOMAIN DM_DES AS
VARCHAR(80)
COLLATE PT_PT;
CREATE DOMAIN DM_FONE AS
VARCHAR(20)
COLLATE PT_PT;
CREATE DOMAIN DM_ID AS
NUMERIC(4,0);
CREATE DOMAIN DM_ID2 AS
NUMERIC(8,0);
CREATE DOMAIN DM_IMG AS
BLOB SUB_TYPE 0 SEGMENT SIZE 4096;
CREATE DOMAIN DM_IND AS
CHAR(1)
COLLATE PT_PT;
CREATE DOMAIN DM_IND2 AS
CHAR(2)
COLLATE PT_PT;
CREATE DOMAIN DM_NM AS
VARCHAR(80)
COLLATE PT_PT;
CREATE DOMAIN DM_PWS AS
VARCHAR(10)
COLLATE PT_PT;
CREATE DOMAIN DM_TP AS
CHAR(1)
COLLATE PT_PT;
CREATE DOMAIN DM_TXT AS
BLOB SUB_TYPE 1 SEGMENT SIZE 4096;
CREATE TABLE FHO_ATIV_TEC (
COD_USUARIO DM_COD NOT NULL,
DT_INICIO TIMESTAMP NOT NULL,
DT_TERMINO TIMESTAMP,
COD_ATIVIDADE DM_COD2 NOT NULL,
ID_OS DM_ID2
);
CREATE TABLE FHO_OS (
ID_OS DM_ID2 NOT NULL,
DT_INICIO TIMESTAMP NOT NULL,
DT_TERMINO TIMESTAMP,
COD_FICHA DM_COD2,
COD_USUARIO DM_COD NOT NULL,
COD_ATIVIDADE DM_COD2 NOT NULL,
COD_PROJETO DM_COD2,
TXT_DESCRICAO DM_TXT,
IND_PENDENTE DM_IND NOT NULL,
IND_CANCELADO DM_IND NOT NULL,
ID_OS_ORIGEM DM_ID2,
COD_USUARIO_ENVIOU DM_COD,
NRO_PRIORIDADE INTEGER,
PERC_FATURAR NUMERIC(5,2),
TXT_DESCRICAO_TECNICA DM_TXT,
DT_PREVISAO_TERMINO DATE,
NM_CONTATO DM_NM,
IND_EXIBE_RELEASE DM_IND,
HRS_ORCAMENTO NUMERIC(5,2),
IND_STATUS COMPUTED BY (CASE WHEN (FHO_OS.IND_CANCELADO <> 'N') THEN
'CANCELADA'
WHEN (SELECT FIRST 1 (CASE WHEN T.DT_TERMINO IS NULL THEN 1 END) FROM FHO_ATIV_TEC T
WHERE (T.COD_USUARIO = FHO_OS.COD_USUARIO) AND (T.ID_OS = FHO_OS.ID_OS) ORDER BY T.DT_INICIO DESC) IS NOT NULL THEN
'EM USO'
WHEN (FHO_OS.DT_TERMINO IS NULL) THEN
'ABERTA'
WHEN (FHO_OS.IND_PENDENTE = 'S') THEN
'PENDENTE'
ELSE
'FECHADA'
END),
COD_TP_OS DM_COD,
HRS_CONTRATO NUMERIC(5,2),
HRS_PREVISAO_TERMINO NUMERIC(5,2),
PERC_HORA_MAQUINA NUMERIC(5,2)
);
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
    """Run the DDL script; the test passes if the server connection survives it."""
    act_1.execute()
| true | true |
1c3f1fed977056deaa2fd347e047dfdb1294474b | 901 | py | Python | language/mentionmemory/utils/custom_types.py | greck2908/language | 61fa7260ac7d690d11ef72ca863e45a37c0bdc80 | [
"Apache-2.0"
] | 1,199 | 2018-10-16T01:30:18.000Z | 2022-03-31T21:05:24.000Z | language/mentionmemory/utils/custom_types.py | greck2908/language | 61fa7260ac7d690d11ef72ca863e45a37c0bdc80 | [
"Apache-2.0"
] | 116 | 2018-10-18T03:31:46.000Z | 2022-03-24T13:40:50.000Z | language/mentionmemory/utils/custom_types.py | greck2908/language | 61fa7260ac7d690d11ef72ca863e45a37c0bdc80 | [
"Apache-2.0"
] | 303 | 2018-10-22T12:35:12.000Z | 2022-03-27T17:38:17.000Z | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains custom type definitions."""
from typing import Any, Callable, Dict, Iterable
import jax.numpy as jnp
# Generic JAX array.
Array = jnp.ndarray
# JAX pseudo-random number generator key (same underlying array type).
PRNGKey = jnp.ndarray
# Array dtype (left loose as Any).
Dtype = Any
# Shape of an array as an iterable of ints.
Shape = Iterable[int]
# Parameter initializer: (rng, shape, dtype) -> Array.
InitType = Callable[[PRNGKey, Shape, Dtype], Array]
# Mapping: metric group name -> {metric name -> value array}.
MetricGroups = Dict[str, Dict[str, Array]]
| 34.653846 | 74 | 0.755827 |
from typing import Any, Callable, Dict, Iterable
import jax.numpy as jnp
# Generic JAX array.
Array = jnp.ndarray
# JAX pseudo-random number generator key (same underlying array type).
PRNGKey = jnp.ndarray
# Array dtype (left loose as Any).
Dtype = Any
# Shape of an array as an iterable of ints.
Shape = Iterable[int]
# Parameter initializer: (rng, shape, dtype) -> Array.
InitType = Callable[[PRNGKey, Shape, Dtype], Array]
# Mapping: metric group name -> {metric name -> value array}.
MetricGroups = Dict[str, Dict[str, Array]]
| true | true |
1c3f2005445b5e5e44f4e6b24656f8c6abadab1b | 2,913 | py | Python | elo_system/elo_recolection_scripts/getTemporalelo.py | rafaOrtega14/tennisStats | 4f4f92532f6437a24e6c51b8aa5ac106b5d25102 | [
"MIT"
] | null | null | null | elo_system/elo_recolection_scripts/getTemporalelo.py | rafaOrtega14/tennisStats | 4f4f92532f6437a24e6c51b8aa5ac106b5d25102 | [
"MIT"
] | null | null | null | elo_system/elo_recolection_scripts/getTemporalelo.py | rafaOrtega14/tennisStats | 4f4f92532f6437a24e6c51b8aa5ac106b5d25102 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import multiprocessing
# Silence pandas' SettingWithCopy warnings for the in-place updates below.
pd.options.mode.chained_assignment = None
# Match history to annotate, and the per-player per-surface Elo table.
games=pd.read_csv("TrainsetGrass.csv",low_memory=False)
players=pd.read_csv("eloCourt.csv",low_memory=False)
def find_eloplayer(ID):
    """Return a player's per-surface Elo ratings and row position.

    Looks the player up in the global ``players`` table; an unknown player
    is registered via ``addPlayer`` with the default 1500 rating on every
    surface.

    Returns a dict with keys 'hard', 'clay', 'grass' and 'pos'.
    """
    for j in range(len(players['ID_player'])):
        if ID == players['ID_player'][j]:
            # Known player: report the stored ratings and the matching row.
            return {
                'hard': players['hard'][j],
                'clay': players['clay'][j],
                'grass': players['grass'][j],
                'pos': j,
            }
    # Unknown player: register them and start from the standard 1500 rating.
    # (Replaces the original magic sentinel 934959345 and dead list inits.)
    return {'hard': 1500, 'clay': 1500, 'grass': 1500, 'pos': addPlayer(ID)}
def addPlayer(ID):
    """Append a new player row with default 1500 ratings and return a position.

    NOTE(review): after the index shift the new row is labelled 0 while this
    returns len(players), a label that does not exist yet -- later writes
    using this value will enlarge the frame. Confirm intended behaviour.
    """
    players.loc[-1]=[ID+5,ID,1500,1500,1500,1500]
    # Shift all labels up by one so the row just added as -1 becomes label 0.
    players.index = players.index + 1
    return len(players['ID_player'])
def expected(A, B):
    """Expected score of a player rated *A* against a player rated *B*."""
    rating_gap = (B - A) / 400
    return 1 / (1 + 10 ** rating_gap)
def elo(old, exp, score, k=32):
    """New rating after one game: actual *score* versus expected *exp*."""
    delta = score - exp
    return old + k * delta
if __name__ == "__main__":
    # Replay every game, updating each player's surface-specific Elo rating
    # and storing the post-game ratings back into the games table.
    # NOTE(review): DataFrame.ix was removed in pandas 1.0 -- this script
    # needs .loc/.iloc to run on modern pandas.
    for z in range(len(games['ID1'])):
        print(str(z)+" de : "+str(len(games['ID1'])))
        elo_actualwin=find_eloplayer(games['ID1'][z])
        elo_actuallose=find_eloplayer(games['ID2'][z])
        posicionwin=elo_actualwin['pos']
        posicionloser=elo_actuallose['pos']
        if games['COURT'][z]=='Hard' or games['COURT'][z]=='I.hard':
            hardwin=elo(elo_actualwin['hard'],expected(elo_actualwin['hard'],elo_actuallose['hard']), 1, k=32)
            hardlose=elo(elo_actuallose['hard'],expected(elo_actuallose['hard'],elo_actualwin['hard']),0, k=32)
            players.ix[posicionwin,'hard']=hardwin
            players.ix[posicionloser,'hard']=hardlose
            games.ix[z,'eloWinner']=hardwin
            games.ix[z,'eloLoser']=hardlose
        if games['COURT'][z]=='Clay':
            claywin=elo(elo_actualwin['clay'],expected(elo_actualwin['clay'],elo_actuallose['clay']), 1, k=32)
            claylose=elo(elo_actuallose['clay'],expected(elo_actuallose['clay'],elo_actualwin['clay']),0, k=32)
            players.ix[posicionwin,'clay']=claywin
            players.ix[posicionloser,'clay']=claylose
            games.ix[z,'eloWinner']=claywin
            games.ix[z,'eloLoser']=claylose
        if games['COURT'][z]=='Grass':
            # Grass uses a higher K-factor (64), making ratings more volatile.
            grasswin=elo(float(elo_actualwin['grass']),expected(float(elo_actualwin['grass']),float(elo_actuallose['grass'])), 1, k=64)
            grasslose=elo(float(elo_actuallose['grass']),expected(float(elo_actuallose['grass']),float(elo_actualwin['grass'])),0, k=64)
            players.ix[posicionwin,'grass']=grasswin
            players.ix[posicionloser,'grass']=grasslose
            games.ix[z,'eloWinner']=grasswin
            games.ix[z,'eloLoser']=grasslose
games.to_csv('TrainsetGrassV2.csv',index=False) | 37.831169 | 136 | 0.604188 | import pandas as pd
import numpy as np
import multiprocessing
pd.options.mode.chained_assignment = None
games=pd.read_csv("TrainsetGrass.csv",low_memory=False)
players=pd.read_csv("eloCourt.csv",low_memory=False)
def find_eloplayer(ID):
    """Return a player's per-surface Elo ratings and row position.

    Looks the player up in the global ``players`` table; an unknown player
    is registered via ``addPlayer`` with the default 1500 rating on every
    surface.

    Returns a dict with keys 'hard', 'clay', 'grass' and 'pos'.
    """
    for j in range(len(players['ID_player'])):
        if ID == players['ID_player'][j]:
            # Known player: report the stored ratings and the matching row.
            return {
                'hard': players['hard'][j],
                'clay': players['clay'][j],
                'grass': players['grass'][j],
                'pos': j,
            }
    # Unknown player: register them and start from the standard 1500 rating.
    # (Replaces the original magic sentinel 934959345 and dead list inits.)
    return {'hard': 1500, 'clay': 1500, 'grass': 1500, 'pos': addPlayer(ID)}
def addPlayer(ID):
    # Registers an unseen player with default 1500 ratings on every surface.
    # The new row is inserted at label -1, then every index label is shifted
    # up by one, leaving the new row at label 0.
    # NOTE(review): the first column is written as ID+5 — purpose unclear from
    # here; confirm against the eloCourt.csv schema.
    # NOTE(review): the return value len(players['ID_player']) is one past the
    # last positional index and is NOT the label (0) of the row just added;
    # callers treat it as the row position for later updates — verify intent.
    players.loc[-1]=[ID+5,ID,1500,1500,1500,1500]
    players.index = players.index + 1
    return len(players['ID_player'])
def expected(A, B):
    """Expected score of a player rated *A* against an opponent rated *B*.

    Standard logistic Elo expectation on the 400-point scale; the result
    lies in (0, 1) and expected(A, B) + expected(B, A) == 1.
    """
    exponent = (B - A) / 400
    return 1 / (1 + 10 ** exponent)
def elo(old, exp, score, k=32):
    """Return the rating after one game.

    Args:
        old: Rating before the game.
        exp: Expected score (from ``expected``).
        score: Actual score (1 for a win, 0 for a loss).
        k: K-factor controlling update magnitude.
    """
    surprise = score - exp
    return old + k * surprise
if __name__ == "__main__":
    # Replay every match in chronological row order, updating both players'
    # surface-specific Elo ratings and recording the pre-update ratings on
    # the match row.
    total = len(games['ID1'])
    for z in range(total):
        print(str(z) + " de : " + str(total))
        elo_win = find_eloplayer(games['ID1'][z])
        elo_lose = find_eloplayer(games['ID2'][z])
        pos_win = elo_win['pos']
        pos_lose = elo_lose['pos']

        # Map the court to the rating column and K-factor.  Grass uses a
        # larger K (64 vs 32), matching the original per-surface branches.
        court = games['COURT'][z]
        if court == 'Hard' or court == 'I.hard':
            surface, k = 'hard', 32
        elif court == 'Clay':
            surface, k = 'clay', 32
        elif court == 'Grass':
            surface, k = 'grass', 64
        else:
            # Unknown surface: leave the row untouched (original behavior).
            continue

        old_win = float(elo_win[surface])
        old_lose = float(elo_lose[surface])
        new_win = elo(old_win, expected(old_win, old_lose), 1, k=k)
        new_lose = elo(old_lose, expected(old_lose, old_win), 0, k=k)

        # FIX: DataFrame.ix was deprecated and removed in pandas 1.0; .loc is
        # equivalent here because both frames use the default RangeIndex
        # (labels == positions).
        players.loc[pos_win, surface] = new_win
        players.loc[pos_lose, surface] = new_lose
        games.loc[z, 'eloWinner'] = new_win
        games.loc[z, 'eloLoser'] = new_lose

    games.to_csv('TrainsetGrassV2.csv', index=False)
1c3f21c6980082d2b5b98180066cf9ba8b94eb50 | 156 | py | Python | utils/runtime_mode.py | omiderfanmanesh/dengue-infections-prediction | 6b4e4aa4af6f6e2cc581fd7828634bbfdc446340 | [
"Apache-2.0"
] | null | null | null | utils/runtime_mode.py | omiderfanmanesh/dengue-infections-prediction | 6b4e4aa4af6f6e2cc581fd7828634bbfdc446340 | [
"Apache-2.0"
] | null | null | null | utils/runtime_mode.py | omiderfanmanesh/dengue-infections-prediction | 6b4e4aa4af6f6e2cc581fd7828634bbfdc446340 | [
"Apache-2.0"
] | 1 | 2021-06-05T10:05:44.000Z | 2021-06-05T10:05:44.000Z | # Copyright (c) 2021, Omid Erfanmanesh, All rights reserved.
class RuntimeMode:
    """Integer constants selecting how the pipeline is run.

    Plain class attributes rather than an enum; compare with ``==`` against
    these values.  Semantics are implied by the names only — confirm against
    the call sites that dispatch on RuntimeMode.
    """
    TRAIN = 0
    TUNING = 1
    CROSS_VAL = 2
    FEATURE_IMPORTANCE = 3
| 19.5 | 61 | 0.666667 |
class RuntimeMode:
TRAIN = 0
TUNING = 1
CROSS_VAL = 2
FEATURE_IMPORTANCE = 3
| true | true |
1c3f21e25d49d260b961e83632b06c4e38d57eec | 42,191 | py | Python | pykotor/common/stream.py | NickHugi/PyKotor | cab1089f8a8a135861bef45340203718d39f5e1f | [
"MIT"
] | 1 | 2022-02-21T15:17:28.000Z | 2022-02-21T15:17:28.000Z | pykotor/common/stream.py | NickHugi/PyKotor | cab1089f8a8a135861bef45340203718d39f5e1f | [
"MIT"
] | 1 | 2022-03-12T16:06:23.000Z | 2022-03-12T16:06:23.000Z | pykotor/common/stream.py | NickHugi/PyKotor | cab1089f8a8a135861bef45340203718d39f5e1f | [
"MIT"
] | null | null | null | """
This module holds classes relating to read and write operations.
"""
from __future__ import annotations
import io
import struct
from abc import ABC, abstractmethod
from typing import BinaryIO, Union, TextIO, List, overload, Optional
from pykotor.common.geometry import Vector3, Vector4, Vector2
from pykotor.common.language import LocalizedString
def _endian_char(big) -> str:
"""
Returns the character that represents either big endian or small endian in struct unpack.
Args:
big: True if big endian.
Returns:
Character representing either big or small endian.
"""
return '>' if big else '<'
class ArrayHead:
    """Offset/length pair describing where an array's data lives in a stream.

    Note the constructor takes (offset, length) while the attributes are
    assigned length-first; attribute names are the authoritative interface.
    """

    def __init__(self, array_offset: int = 0, array_length: int = 0):
        self.length: int = array_length
        self.offset: int = array_offset

    def __repr__(self):
        # Added for debuggability; mirrors the constructor argument order.
        return f"ArrayHead(array_offset={self.offset}, array_length={self.length})"
class BinaryReader:
    """
    Used for easy reading of binary files.

    All positions are relative to ``offset``: the byte at ``offset`` in the
    underlying stream is treated as position 0.
    """

    def __init__(self, stream: BinaryIO, offset: int = 0):
        self._stream: BinaryIO = stream
        self._offset: int = offset
        # When True, the underlying stream is closed on context-manager exit.
        self.auto_close: bool = True
        self._stream.seek(offset)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.auto_close:
            self.close()

    @classmethod
    def from_file(cls, path: str, offset: int = 0) -> BinaryReader:
        """
        Returns a new BinaryReader with a stream established to the specified path.

        Args:
            path: Path of the file to open.
            offset: Number of bytes into the stream to consider as position 0.

        Returns:
            A new BinaryReader instance.
        """
        stream = open(path, 'rb')
        return BinaryReader(stream, offset)

    @classmethod
    def from_bytes(cls, data: bytes, offset: int = 0) -> BinaryReader:
        """
        Returns a new BinaryReader with a stream established to the bytes stored in memory.

        Args:
            data: The bytes of data.
            offset: Number of bytes into the stream to consider as position 0.

        Returns:
            A new BinaryReader instance.
        """
        stream = io.BytesIO(data)
        return BinaryReader(stream, offset)

    @classmethod
    def from_auto(cls, source: Union[str, bytes, bytearray, BinaryReader], offset: int = 0) -> BinaryReader:
        """
        Returns a reader for the given source, dispatching on its type.

        FIX: the annotation was ``Optional[str, bytes, bytearray, BinaryReader]``,
        which is not valid typing syntax; Union is intended here.

        Args:
            source: A file path, in-memory binary data, or an existing reader
                (whose offset is updated in place).
            offset: Number of bytes into the stream to consider as position 0.

        Returns:
            A new (or the given) BinaryReader instance.
        """
        if isinstance(source, str):  # is path
            reader = BinaryReader.from_file(source, offset)
        elif isinstance(source, (bytes, bytearray)):  # is binary data
            reader = BinaryReader.from_bytes(source, offset)
        elif isinstance(source, BinaryReader):
            reader = source
            reader._offset = offset
        else:
            raise NotImplementedError("Must specify a path, bytes object or an existing BinaryReader instance.")
        return reader

    @staticmethod
    def load_file(path: str) -> bytes:
        """
        Returns the bytes of the file at the specified path.

        Args:
            path: The path of the file.

        Returns:
            The bytes of the file.
        """
        with open(path, 'rb') as file:
            return file.read()

    def offset(self) -> int:
        """Returns the base offset (the stream byte treated as position 0)."""
        return self._offset

    def set_offset(self, offset: int) -> None:
        """Replaces the base offset used for relative positioning."""
        # NOTE(review): this advances the underlying stream pointer by the new
        # offset before rebasing (seek uses the OLD base, then the base is
        # replaced) — confirm that is the intended repositioning.
        # (Dead local ``original`` removed; it was assigned and never used.)
        self.seek(self.position() + offset)
        self._offset = offset

    def size(self) -> int:
        """
        Returns the total number of bytes in the stream.
        """
        pos = self._stream.tell()
        self._stream.seek(0, 2)
        size = self._stream.tell()
        self._stream.seek(pos)
        return size

    def remaining(self) -> int:
        """
        Returns the number of bytes between the current position and the end of the stream.
        """
        pos = self._stream.tell()
        self._stream.seek(0, 2)
        size = self._stream.tell()
        self._stream.seek(pos)
        return size - pos

    def close(self) -> None:
        """
        Closes the stream.
        """
        self._stream.close()

    def skip(self, length) -> None:
        """
        Skips ahead in the stream the specified number of bytes.

        Args:
            length: How many bytes to skip.
        """
        # Implemented as a read (not a seek) so it also works on streams that
        # do not support relative seeking.
        self._stream.read(length)

    def position(self) -> int:
        """
        Returns the byte offset into the stream, relative to the base offset.
        """
        return self._stream.tell() - self._offset

    def seek(self, position) -> None:
        """
        Moves the stream pointer to the byte offset, relative to the base offset.

        Args:
            position: The byte index into the stream.
        """
        self._stream.seek(position + self._offset)

    def read_all(self) -> bytes:
        """Returns every byte from the base offset to the end of the stream."""
        length = self.size() - self._offset
        self._stream.seek(self._offset)
        return self._stream.read(length)

    def read_uint8(self, *, big: bool = False) -> int:
        """Reads an unsigned 8-bit integer from the stream."""
        return struct.unpack(_endian_char(big) + 'B', self._stream.read(1))[0]

    def read_int8(self, *, big: bool = False) -> int:
        """Reads a signed 8-bit integer from the stream."""
        return struct.unpack(_endian_char(big) + 'b', self._stream.read(1))[0]

    def read_uint16(self, *, big: bool = False) -> int:
        """Reads an unsigned 16-bit integer from the stream."""
        return struct.unpack(_endian_char(big) + 'H', self._stream.read(2))[0]

    def read_int16(self, *, big: bool = False) -> int:
        """Reads a signed 16-bit integer from the stream."""
        return struct.unpack(_endian_char(big) + 'h', self._stream.read(2))[0]

    def read_uint32(self, *, max_neg1: bool = False, big: bool = False) -> int:
        """
        Reads an unsigned 32-bit integer from the stream.

        Args:
            max_neg1: Return -1 when the stream holds 0xFFFFFFFF (used for
                "no value" sentinels in several file formats).
            big: Read int bytes as big endian.

        Returns:
            An integer from the stream.
        """
        unpacked = struct.unpack(_endian_char(big) + "I", self._stream.read(4))[0]
        if unpacked == 4294967295 and max_neg1:
            unpacked = -1
        return unpacked

    def read_int32(self, *, big: bool = False) -> int:
        """Reads a signed 32-bit integer from the stream."""
        return struct.unpack(_endian_char(big) + 'i', self._stream.read(4))[0]

    def read_uint64(self, *, big: bool = False) -> int:
        """Reads an unsigned 64-bit integer from the stream."""
        return struct.unpack(_endian_char(big) + 'Q', self._stream.read(8))[0]

    def read_int64(self, *, big: bool = False) -> int:
        """Reads a signed 64-bit integer from the stream."""
        return struct.unpack(_endian_char(big) + 'q', self._stream.read(8))[0]

    def read_single(self, *, big: bool = False) -> float:
        """Reads a 32-bit floating point number from the stream.

        FIX: return annotation corrected from int to float.
        """
        return struct.unpack(_endian_char(big) + 'f', self._stream.read(4))[0]

    def read_double(self, *, big: bool = False) -> float:
        """Reads a 64-bit floating point number from the stream.

        FIX: return annotation corrected from int to float.
        """
        return struct.unpack(_endian_char(big) + 'd', self._stream.read(8))[0]

    def read_vector2(self, *, big: bool = False) -> Vector2:
        """Reads two 32-bit floats from the stream into a Vector2."""
        x, y = self.read_single(big=big), self.read_single(big=big)
        return Vector2(x, y)

    def read_vector3(self, *, big: bool = False) -> Vector3:
        """Reads three 32-bit floats from the stream into a Vector3."""
        x, y, z = self.read_single(big=big), self.read_single(big=big), self.read_single(big=big)
        return Vector3(x, y, z)

    def read_vector4(self, *, big: bool = False) -> Vector4:
        """Reads four 32-bit floats from the stream into a Vector4."""
        x, y, z, w = self.read_single(big=big), self.read_single(big=big), self.read_single(big=big), \
                     self.read_single(big=big)
        return Vector4(x, y, z, w)

    def read_bytes(self, length: int) -> bytes:
        """Reads the specified number of bytes from the stream."""
        return self._stream.read(length)

    def read_string(self, length: int) -> str:
        """
        Reads a fixed-length ASCII string from the stream.

        Anything from the first null byte onward is discarded, and bytes that
        do not decode as ASCII are ignored.

        Args:
            length: Number of characters to read.

        Returns:
            A string read from the stream.
        """
        string = self._stream.read(length).decode('ascii', errors='ignore')
        if '\0' in string:
            # Truncate at the first null terminator (this also covers the
            # original's redundant rstrip/replace cleanup).
            string = string[:string.index('\0')]
        return string

    def read_terminated_string(self, terminator: str) -> str:
        """
        Reads characters until the terminator is hit; the terminator is
        consumed but not included in the result.  Assumes a single-character
        terminator; bytes that do not decode as ASCII are ignored.

        Args:
            terminator: The terminator string.

        Returns:
            A string read from the stream.
        """
        string = ""
        char = ""
        while char != terminator:
            string += char
            char = self.read_bytes(1).decode('ascii', errors='ignore')
        return string

    def read_localized_string(self) -> LocalizedString:
        """
        Reads the localized string data structure from the stream.

        The binary layout follows the GFF format specification.

        Returns:
            A LocalizedString read from the stream.
        """
        locstring = LocalizedString.from_invalid()
        self.skip(4)  # total number of bytes of the localized string
        locstring.stringref = self.read_uint32(max_neg1=True)
        string_count = self.read_uint32()
        for i in range(string_count):
            string_id = self.read_uint32()
            length = self.read_uint32()
            string = self.read_string(length)
            language, gender = LocalizedString.substring_pair(string_id)
            locstring.set(language, gender, string)
        return locstring

    def read_array_head(self) -> ArrayHead:
        """Reads an (offset, length) pair describing an array."""
        return ArrayHead(self.read_uint32(), self.read_uint32())

    def peek(self, length: int = 1) -> bytes:
        """Returns the next bytes without advancing the stream position."""
        data = self._stream.read(length)
        self._stream.seek(-length, 1)
        return data
class BinaryWriter(ABC):
    """
    Abstract writer of binary data; concrete backends write either to a
    file-like stream (BinaryWriterFile) or an in-memory bytearray
    (BinaryWriterBytearray).
    """

    @classmethod
    def to_file(cls, path: str) -> BinaryWriter:
        """
        Returns a new BinaryWriter with a stream established to the specified path.

        Args:
            path: Path of the file to open (truncated, 'wb' mode).

        Returns:
            A new BinaryWriter instance.
        """
        stream = open(path, 'wb')
        return BinaryWriterFile(stream)

    @classmethod
    def to_bytearray(cls, data: Optional[bytearray] = None) -> BinaryWriter:
        """
        Returns a new BinaryWriter writing into the given bytearray.

        FIX: the default was annotated ``data: bytearray = None``; Optional
        expresses the actual contract.

        Args:
            data: The bytearray to write to; a fresh one is created when omitted.

        Returns:
            A new BinaryWriter instance.
        """
        if data is None:
            data = bytearray()
        return BinaryWriterBytearray(data)

    @classmethod
    def to_auto(cls, source: Union[str, bytearray, BinaryWriter]) -> BinaryWriter:
        """
        Returns a writer for the given destination, dispatching on its type.

        Raises:
            NotImplementedError: If the source type is unsupported.
        """
        if isinstance(source, str):  # is path
            return BinaryWriter.to_file(source)
        elif isinstance(source, bytearray):  # is binary data
            return BinaryWriter.to_bytearray(source)
        elif isinstance(source, BinaryWriter):
            return source
        else:
            raise NotImplementedError("Must specify a path, bytes object or an existing BinaryWriter instance.")

    @staticmethod
    def dump(path: str, data: bytes) -> None:
        """
        Convenience method that writes the specified data to the specified file.

        Args:
            path: The filepath of the file.
            data: The data to write to the file.
        """
        with open(path, 'wb') as file:
            file.write(data)

    @abstractmethod
    def close(self) -> None:
        """Closes the stream."""

    @abstractmethod
    def size(self) -> int:
        """Returns the total file size."""

    @abstractmethod
    def data(self) -> bytes:
        """Returns the full file data."""

    @abstractmethod
    def clear(self) -> None:
        """Clears all the data in the file."""

    @abstractmethod
    def seek(self, position) -> None:
        """Moves the stream pointer to the byte offset."""

    @abstractmethod
    def end(self) -> None:
        """Moves the pointer for the stream to the end."""

    @abstractmethod
    def position(self) -> int:
        """Returns the byte offset into the stream."""

    @abstractmethod
    def write_uint8(self, value: int, *, big: bool = False) -> None:
        """Writes an unsigned 8-bit integer to the stream."""

    @abstractmethod
    def write_int8(self, value: int, *, big: bool = False) -> None:
        """Writes a signed 8-bit integer to the stream."""

    @abstractmethod
    def write_uint16(self, value: int, *, big: bool = False) -> None:
        """Writes an unsigned 16-bit integer to the stream."""

    @abstractmethod
    def write_int16(self, value: int, *, big: bool = False) -> None:
        """Writes a signed 16-bit integer to the stream."""

    @abstractmethod
    def write_uint32(self, value: int, *, max_neg1: bool = False, big: bool = False) -> None:
        """
        Writes an unsigned 32-bit integer to the stream.

        Args:
            value: The value to be written.
            big: Write int bytes as big endian.
            max_neg1: When True a value of -1 is accepted and written as 0xFFFFFFFF.
        """

    @abstractmethod
    def write_int32(self, value: int, *, big: bool = False) -> None:
        """Writes a signed 32-bit integer to the stream."""

    @abstractmethod
    def write_uint64(self, value: int, *, big: bool = False) -> None:
        """Writes an unsigned 64-bit integer to the stream."""

    @abstractmethod
    def write_int64(self, value: int, *, big: bool = False) -> None:
        """Writes a signed 64-bit integer to the stream."""

    @abstractmethod
    def write_single(self, value: float, *, big: bool = False) -> None:
        """Writes a 32-bit floating point number to the stream."""

    @abstractmethod
    def write_double(self, value: float, *, big: bool = False) -> None:
        """Writes a 64-bit floating point number to the stream.

        FIX: parameter annotation corrected from int to float.
        """

    @abstractmethod
    def write_vector2(self, value: Vector2, *, big: bool = False) -> None:
        """Writes two 32-bit floating point numbers to the stream."""

    @abstractmethod
    def write_vector3(self, value: Vector3, *, big: bool = False) -> None:
        """Writes three 32-bit floating point numbers to the stream."""

    @abstractmethod
    def write_vector4(self, value: Vector4, *, big: bool = False) -> None:
        """Writes four 32-bit floating point numbers to the stream."""

    @abstractmethod
    def write_bytes(self, value: bytes) -> None:
        """Writes the specified bytes to the stream."""

    @abstractmethod
    def write_string(self, value: str, *, big: bool = False, prefix_length: int = 0, string_length: int = -1,
                     padding: str = '\0') -> None:
        """
        Writes the specified string to the stream, optionally prefixed by its length.

        Args:
            value: The string to be written.
            prefix_length: Number of bytes for the length prefix: 0, 1, 2 or 4.
            big: Write the prefix length integer as big endian.
            string_length: Fixes the string length to this size, truncating or
                padding where necessary. Ignored if -1.
            padding: Character used as padding where applicable.
        """

    @abstractmethod
    def write_line(self, indent: int, *args) -> None:
        """Writes an indented line with the values separated by whitespace."""

    @abstractmethod
    def write_localized_string(self, value: LocalizedString, *, big: bool = False):
        """Writes the localized string (GFF binary layout) to the stream."""
class BinaryWriterFile(BinaryWriter):
    """BinaryWriter implementation backed by a file-like binary stream.

    All positions are relative to ``offset``: the byte at ``offset`` in the
    underlying stream is treated as position 0.
    """

    def __init__(self, stream: BinaryIO, offset: int = 0):
        self._stream: BinaryIO = stream
        self.offset: int = offset
        # When True, the underlying stream is closed on context-manager exit.
        self.auto_close: bool = True
        self._stream.seek(offset)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.auto_close:
            self.close()

    def close(self) -> None:
        """
        Closes the stream.
        """
        self._stream.close()

    def size(self) -> int:
        """
        Returns the total file size.
        """
        pos = self._stream.tell()
        self._stream.seek(0, 2)
        size = self._stream.tell()
        self._stream.seek(pos)
        return size

    def data(self) -> bytes:
        """
        Returns the full file data.
        """
        pos = self._stream.tell()
        self._stream.seek(0)
        data = self._stream.read()
        self._stream.seek(pos)
        return data

    def clear(self) -> None:
        """
        Clears all the data in the file.
        """
        self._stream.seek(0)
        self._stream.truncate()

    def seek(self, position) -> None:
        """
        Moves the stream pointer to the byte offset, relative to the base offset.

        Args:
            position: The byte index into the stream.
        """
        self._stream.seek(position + self.offset)

    def end(self) -> None:
        """
        Moves the pointer for the stream to the end.
        """
        self._stream.seek(0, 2)

    def position(self) -> int:
        """
        Returns the byte offset into the stream, relative to the base offset.
        """
        return self._stream.tell() - self.offset

    def write_uint8(self, value: int, *, big: bool = False) -> None:
        """Writes an unsigned 8-bit integer to the stream."""
        self._stream.write(struct.pack(_endian_char(big) + 'B', value))

    def write_int8(self, value: int, *, big: bool = False) -> None:
        """Writes a signed 8-bit integer to the stream."""
        self._stream.write(struct.pack(_endian_char(big) + 'b', value))

    def write_uint16(self, value: int, *, big: bool = False) -> None:
        """Writes an unsigned 16-bit integer to the stream."""
        self._stream.write(struct.pack(_endian_char(big) + 'H', value))

    def write_int16(self, value: int, *, big: bool = False) -> None:
        """Writes a signed 16-bit integer to the stream."""
        self._stream.write(struct.pack(_endian_char(big) + 'h', value))

    def write_uint32(self, value: int, *, max_neg1: bool = False, big: bool = False) -> None:
        """
        Writes an unsigned 32-bit integer to the stream.

        Args:
            value: The value to be written.
            big: Write int bytes as big endian.
            max_neg1: When True a value of -1 is accepted and written as 0xFFFFFFFF.
        """
        if max_neg1 and value == -1:
            value = 4294967295
        self._stream.write(struct.pack(_endian_char(big) + 'I', value))

    def write_int32(self, value: int, *, big: bool = False) -> None:
        """Writes a signed 32-bit integer to the stream."""
        self._stream.write(struct.pack(_endian_char(big) + 'i', value))

    def write_uint64(self, value: int, *, big: bool = False) -> None:
        """Writes an unsigned 64-bit integer to the stream."""
        self._stream.write(struct.pack(_endian_char(big) + 'Q', value))

    def write_int64(self, value: int, *, big: bool = False) -> None:
        """Writes a signed 64-bit integer to the stream."""
        self._stream.write(struct.pack(_endian_char(big) + 'q', value))

    def write_single(self, value: float, *, big: bool = False) -> None:
        """Writes a 32-bit floating point number to the stream."""
        self._stream.write(struct.pack(_endian_char(big) + 'f', value))

    def write_double(self, value: float, *, big: bool = False) -> None:
        """Writes a 64-bit floating point number to the stream.

        FIX: parameter annotation corrected from int to float.
        """
        self._stream.write(struct.pack(_endian_char(big) + 'd', value))

    def write_vector2(self, value: Vector2, *, big: bool = False) -> None:
        """Writes the x and y components as two 32-bit floats."""
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.x))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.y))

    def write_vector3(self, value: Vector3, *, big: bool = False) -> None:
        """Writes the x, y and z components as three 32-bit floats."""
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.x))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.y))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.z))

    def write_vector4(self, value: Vector4, *, big: bool = False) -> None:
        """Writes the x, y, z and w components as four 32-bit floats."""
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.x))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.y))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.z))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.w))

    def write_bytes(self, value: bytes) -> None:
        """Writes the specified bytes to the stream."""
        self._stream.write(value)

    def write_string(self, value: str, *, big: bool = False, prefix_length: int = 0, string_length: int = -1,
                     padding: str = '\0') -> None:
        """
        Writes the specified string to the stream, optionally prefixed by its length.

        Args:
            value: The string to be written (encoded as ASCII).
            prefix_length: Number of bytes for the length prefix: 0, 1, 2 or 4.
            big: Write the prefix length integer as big endian.
            string_length: Fixes the string length to this size, truncating or
                padding where necessary. Ignored if -1.
            padding: Character used as padding where applicable.

        Raises:
            ValueError: If the string is too long for the prefix size, or an
                invalid prefix size is given.
        """
        if prefix_length == 1:
            if len(value) > 255:
                raise ValueError("The string length is too large for a prefix length of 1.")
            self.write_uint8(len(value), big=big)
        elif prefix_length == 2:
            if len(value) > 65535:
                raise ValueError("The string length is too large for a prefix length of 2.")
            self.write_uint16(len(value), big=big)
        elif prefix_length == 4:
            if len(value) > 4294967295:
                raise ValueError("The string length is too large for a prefix length of 4.")
            self.write_uint32(len(value), big=big)
        elif prefix_length != 0:
            raise ValueError("An invalid prefix length was provided.")

        if string_length != -1:
            # Pad up to the fixed size, then truncate down to it.
            while len(value) < string_length:
                value += padding
            value = value[:string_length]

        self._stream.write(value.encode('ascii'))

    def write_line(self, indent: int, *args) -> None:
        """
        Writes a line with the given indentation and the values separated by
        whitespace.  Floats are rounded to 7 decimal places.

        Args:
            indent: Level of indentation.
            *args: Values to write.
        """
        line = "  " * indent
        for arg in args:
            if isinstance(arg, float):
                line += str(round(arg, 7))
            else:
                line += str(arg)
            line += " "
        line += "\n"
        self._stream.write(line.encode())

    def write_localized_string(self, value: LocalizedString, *, big: bool = False):
        """
        Writes the specified localized string to the stream.

        The binary layout follows the GFF format specification: the
        substructure is built in memory first so its total byte size can be
        written ahead of it.

        Args:
            value: The localized string to be written.
            big: Write any integers as big endian.
        """
        # FIX: was ``BinaryWriter.to_bytes(b'')`` — no such method exists on
        # BinaryWriter; the in-memory backend is created via to_bytearray().
        bw = BinaryWriter.to_bytearray()
        bw.write_uint32(value.stringref, big=big, max_neg1=True)
        bw.write_uint32(len(value), big=big)
        for language, gender, substring in value:
            string_id = LocalizedString.substring_id(language, gender)
            bw.write_uint32(string_id, big=big)
            bw.write_string(substring, prefix_length=4)
        locstring_data = bw.data()
        self.write_uint32(len(locstring_data))
        self.write_bytes(locstring_data)
class BinaryWriterBytearray(BinaryWriter):
def __init__(self, ba: bytearray, offset: int = 0):
self._ba = ba
self._offset: int = offset
self._position = 0
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
...
def close(self) -> None:
"""
Closes the stream.
"""
def size(self) -> int:
"""
Returns the total file size.
Returns:
The total file size.
"""
return len(self._ba)
def data(self) -> bytes:
"""
Returns the full file data.
Returns:
The full file data.
"""
return bytes(self._ba)
def clear(self) -> None:
"""
Clears all the data in the file.
"""
self._ba.clear()
def seek(self, position) -> None:
"""
Moves the stream pointer to the byte offset.
Args:
position: The byte index into stream.
"""
self._position = position
def end(self) -> None:
"""
Moves the pointer for the stream to the end.
"""
self._position = len(self._ba)
def position(self) -> int:
"""
Returns the byte offset into the stream.
Returns:
The byte offset.
"""
return self._position - self._offset
def write_uint8(self, value: int, *, big: bool = False) -> None:
"""
Writes an unsigned 8-bit integer to the stream.
Args:
value: The value to be written.
big: Write int bytes as big endian.
"""
self._ba[self._position:self._position + 1] = struct.pack(_endian_char(big) + 'B', value)
self._position += 1
def write_int8(self, value: int, *, big: bool = False) -> None:
"""
Writes a signed 8-bit integer to the stream.
Args:
value: The value to be written.
big: Write int bytes as big endian.
"""
self._ba[self._position:self._position + 1] = struct.pack(_endian_char(big) + 'b', value)
self._position += 1
def write_uint16(self, value: int, *, big: bool = False) -> None:
"""
Writes an unsigned 16-bit integer to the stream.
Args:
value: The value to be written.
big: Write int bytes as big endian.
"""
self._ba[self._position:self._position + 2] = struct.pack(_endian_char(big) + 'H', value)
self._position += 2
def write_int16(self, value: int, *, big: bool = False) -> None:
"""
Writes a signed 16-bit integer to the stream.
Args:
value: The value to be written.
big: Write int bytes as big endian.
"""
self._ba[self._position:self._position + 2] = struct.pack(_endian_char(big) + 'h', value)
self._position += 2
def write_uint32(self, value: int, *, max_neg1: bool = False, big: bool = False) -> None:
"""
Writes an unsigned 32-bit integer to the stream.
If the max_neg1 flag is set to true and the specified value is equal to -1 then the stream will accept the value
and write 0xFFFFFFFF to the stream.
Args:
value: The value to be written.
big: Write int bytes as big endian.
max_neg1: When the value is -1 it is to be converted to the max uint32 value.
"""
if max_neg1 and value == -1:
value = 4294967295
self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'I', value)
self._position += 4
def write_int32(self, value: int, *, big: bool = False) -> None:
"""
Writes a signed 32-bit integer to the stream.
Args:
value: The value to be written.
big: Write int bytes as big endian.
"""
self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'i', value)
self._position += 4
def write_uint64(self, value: int, *, big: bool = False) -> None:
"""
Writes an unsigned 64-bit integer to the stream.
Args:
value: The value to be written.
big: Write int bytes as big endian.
"""
self._ba[self._position:self._position + 8] = struct.pack(_endian_char(big) + 'Q', value)
self._position += 8
def write_int64(self, value: int, *, big: bool = False) -> None:
"""
Writes a signed 64-bit integer to the stream.
Args:
value: The value to be written.
big: Write int bytes as big endian.
"""
self._ba[self._position:self._position + 8] = struct.pack(_endian_char(big) + 'q', value)
self._position += 8
def write_single(self, value: float, *, big: bool = False) -> None:
"""
Writes an 32-bit floating point number to the stream.
Args:
value: The value to be written.
big: Write int bytes as big endian.
"""
self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'f', value)
self._position += 4
def write_double(self, value: int, *, big: bool = False) -> None:
"""
Writes a 64-bit floating point number to the stream.
Args:
value: The value to be written.
big: Write bytes as big endian.
"""
self._ba[self._position:self._position + 8] = struct.pack(_endian_char(big) + 'd', value)
self._position += 8
def write_vector2(self, value: Vector2, *, big: bool = False) -> None:
"""
Writes two 32-bit floating point numbers to the stream.
Args:
value: The value to be written.
big: Write bytes as big endian.
"""
self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'f', value.x)
self._ba[self._position + 4:self._position + 8] = struct.pack(_endian_char(big) + 'f', value.y)
self._position += 8
def write_vector3(self, value: Vector3, *, big: bool = False) -> None:
"""
Writes three 32-bit floating point numbers to the stream.
Args:
value: The value to be written.
big: Write bytes as big endian.
"""
self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'f', value.x)
self._ba[self._position + 4:self._position + 8] = struct.pack(_endian_char(big) + 'f', value.y)
self._ba[self._position + 8:self._position + 12] = struct.pack(_endian_char(big) + 'f', value.z)
self._position += 12
def write_vector4(self, value: Vector4, *, big: bool = False) -> None:
"""
Writes four 32-bit floating point numbers to the stream.
Args:
value: The value to be written.
big: Write bytes as big endian.
"""
self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'f', value.x)
self._ba[self._position + 4:self._position + 8] = struct.pack(_endian_char(big) + 'f', value.y)
self._ba[self._position + 8:self._position + 12] = struct.pack(_endian_char(big) + 'f', value.z)
self._ba[self._position + 12:self._position + 16] = struct.pack(_endian_char(big) + 'f', value.w)
self._position += 16
def write_bytes(self, value: bytes) -> None:
"""
Writes the specified bytes to the stream.
Args:
value: The bytes to be written.
"""
self._ba[self._position:self._position + len(value)] = value
self._position += len(value)
def write_string(self, value: str, *, big: bool = False, prefix_length: int = 0, string_length: int = -1,
padding: str = '\0') -> None:
"""
Writes the specified string to the stream. The string can also be prefixed by an integer specifying the
strings length.
Args:
value: The string to be written.
prefix_length: The number of bytes for the string length prefix. Valid options are 0, 1, 2 and 4.
big: Write the prefix length integer as big endian.
string_length: Fixes the string length to this size, truncating or padding where necessary. Ignores if -1.
padding: What character is used as padding where applicable.
"""
if prefix_length == 1:
if len(value) > 255:
raise ValueError("The string length is too large for a prefix length of 1.")
self.write_uint8(len(value), big=big)
elif prefix_length == 2:
if len(value) > 65535:
raise ValueError("The string length is too large for a prefix length of 2.")
self.write_uint16(len(value), big=big)
elif prefix_length == 4:
if len(value) > 4294967295:
raise ValueError("The string length is too large for a prefix length of 4.")
self.write_uint32(len(value), big=big)
elif prefix_length != 0:
raise ValueError("An invalid prefix length was provided.")
if string_length != -1:
while len(value) < string_length:
value += padding
value = value[:string_length]
encoded = value.encode('ascii')
self._ba[self._position:self._position + len(encoded)] = encoded
self._position += len(encoded)
def write_line(self, indent: int, *args) -> None:
"""
Writes a line with specified indentation and array of values that are separated by whitespace.
Args:
indent: Level of indentation.
*args: Values to write.
"""
line = " " * indent
for arg in args:
if isinstance(arg, float):
line += str(round(arg, 7))
else:
line += str(arg)
line += " "
line += "\n"
encoded = line.encode('ascii')
self._ba[self._position:self._position + len(encoded)] = encoded
self._position += len(encoded)
def write_localized_string(self, value: LocalizedString, *, big: bool = False):
"""
Writes the specified localized string to the stream.
The binary data structure that is read follows the structure found in the GFF format specification.
Args:
value: The localized string to be written.
big: Write any integers as big endian.
"""
bw = BinaryWriter.to_bytearray()
bw.write_uint32(value.stringref, big=big, max_neg1=True)
bw.write_uint32(len(value), big=big)
for language, gender, substring in value:
string_id = LocalizedString.substring_id(language, gender)
bw.write_uint32(string_id, big=big)
bw.write_string(substring, prefix_length=4)
locstring_data = bw.data()
self.write_uint32(len(locstring_data))
self.write_bytes(locstring_data)
| 31.770331 | 120 | 0.572492 | from __future__ import annotations
import io
import struct
from abc import ABC, abstractmethod
from typing import BinaryIO, Union, TextIO, List, overload, Optional
from pykotor.common.geometry import Vector3, Vector4, Vector2
from pykotor.common.language import LocalizedString
def _endian_char(big) -> str:
return '>' if big else '<'
class ArrayHead:
    """Offset/length pair describing where an array lives inside a stream."""

    def __init__(self, array_offset: int = 0, array_length: int = 0):
        # Stored in (offset, length) order regardless of parameter order.
        self.offset: int = array_offset
        self.length: int = array_length
class BinaryReader:
    """Reads primitive values from a binary stream relative to a base offset."""

    def __init__(self, stream: BinaryIO, offset: int = 0):
        self._stream: BinaryIO = stream
        self._offset: int = offset
        # Close the stream automatically when used as a context manager.
        self.auto_close: bool = True
        self._stream.seek(offset)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.auto_close: self.close()

    @classmethod
    def from_file(cls, path: str, offset: int = 0) -> BinaryReader:
        """Open *path* for binary reading."""
        stream = open(path, 'rb')
        return BinaryReader(stream, offset)

    @classmethod
    def from_bytes(cls, data: bytes, offset: int = 0) -> BinaryReader:
        """Wrap an in-memory bytes object."""
        stream = io.BytesIO(data)
        return BinaryReader(stream, offset)

    @classmethod
    def from_auto(cls, source: Union[str, bytes, bytearray, BinaryReader], offset: int = 0):
        """Build a reader from a path, raw bytes, or an existing reader.

        BUG FIX: the annotation previously misused ``Optional[...]`` with four
        arguments, which is invalid typing; a plain ``Union`` is what is meant.
        """
        if isinstance(source, str):
            reader = BinaryReader.from_file(source, offset)
        elif isinstance(source, (bytes, bytearray)):
            reader = BinaryReader.from_bytes(source, offset)
        elif isinstance(source, BinaryReader):
            reader = source
            reader._offset = offset
        else:
            raise NotImplementedError("Must specify a path, bytes object or an existing BinaryReader instance.")
        return reader

    @staticmethod
    def load_file(path: str) -> bytes:
        """Return the entire contents of the file at *path*."""
        with open(path, 'rb') as file:
            return file.read()

    def offset(self) -> int:
        """Return the current base offset."""
        return self._offset

    def set_offset(self, offset: int) -> None:
        """Re-anchor the reader on a new base offset.

        BUG FIX: removed the unused local ``original`` that was assigned and
        never read.  NOTE(review): ``seek`` here still uses the OLD offset, so
        the absolute stream position moves forward by ``offset`` — behavior is
        preserved exactly; confirm this is the intended semantics.
        """
        self.seek(self.position() + offset)
        self._offset = offset

    def size(self) -> int:
        """Return the total stream size in bytes, preserving the read position."""
        pos = self._stream.tell()
        self._stream.seek(0, 2)
        size = self._stream.tell()
        self._stream.seek(pos)
        return size

    def remaining(self) -> int:
        """Return the number of bytes left between here and the end of the stream."""
        pos = self._stream.tell()
        self._stream.seek(0, 2)
        size = self._stream.tell()
        self._stream.seek(pos)
        return size - pos

    def close(self) -> None:
        """Close the underlying stream."""
        self._stream.close()

    def skip(self, length) -> None:
        """Advance past *length* bytes (implemented as a throwaway read)."""
        self._stream.read(length)

    def position(self) -> int:
        """Return the read position relative to the base offset."""
        return self._stream.tell() - self._offset

    def seek(self, position) -> None:
        """Move to *position* relative to the base offset."""
        self._stream.seek(position + self._offset)

    def read_all(self) -> bytes:
        """Return everything from the base offset to the end of the stream."""
        length = self.size() - self._offset
        self._stream.seek(self._offset)
        return self._stream.read(length)

    def read_uint8(self, *, big: bool = False) -> int:
        """Read an unsigned 8-bit integer."""
        return struct.unpack(_endian_char(big) + 'B', self._stream.read(1))[0]

    def read_int8(self, *, big: bool = False) -> int:
        """Read a signed 8-bit integer."""
        return struct.unpack(_endian_char(big) + 'b', self._stream.read(1))[0]

    def read_uint16(self, *, big: bool = False) -> int:
        """Read an unsigned 16-bit integer."""
        return struct.unpack(_endian_char(big) + 'H', self._stream.read(2))[0]

    def read_int16(self, *, big: bool = False) -> int:
        """Read a signed 16-bit integer."""
        return struct.unpack(_endian_char(big) + 'h', self._stream.read(2))[0]

    def read_uint32(self, *, max_neg1: bool = False, big: bool = False) -> int:
        """Read an unsigned 32-bit integer; 0xFFFFFFFF maps to -1 when *max_neg1*."""
        unpacked = struct.unpack(_endian_char(big) + "I", self._stream.read(4))[0]
        if unpacked == 4294967295 and max_neg1:
            unpacked = -1
        return unpacked

    def read_int32(self, *, big: bool = False) -> int:
        """Read a signed 32-bit integer."""
        return struct.unpack(_endian_char(big) + 'i', self._stream.read(4))[0]

    def read_uint64(self, *, big: bool = False) -> int:
        """Read an unsigned 64-bit integer."""
        return struct.unpack(_endian_char(big) + 'Q', self._stream.read(8))[0]

    def read_int64(self, *, big: bool = False) -> int:
        """Read a signed 64-bit integer."""
        return struct.unpack(_endian_char(big) + 'q', self._stream.read(8))[0]

    def read_single(self, *, big: bool = False) -> float:
        """Read a 32-bit float.  (Annotation fixed: was ``-> int``.)"""
        return struct.unpack(_endian_char(big) + 'f', self._stream.read(4))[0]

    def read_double(self, *, big: bool = False) -> float:
        """Read a 64-bit float.  (Annotation fixed: was ``-> int``.)"""
        return struct.unpack(_endian_char(big) + 'd', self._stream.read(8))[0]

    def read_vector2(self, *, big: bool = False) -> Vector2:
        """Read two 32-bit floats as a Vector2."""
        x, y = self.read_single(big=big), self.read_single(big=big)
        return Vector2(x, y)

    def read_vector3(self, *, big: bool = False) -> Vector3:
        """Read three 32-bit floats as a Vector3."""
        x, y, z = self.read_single(big=big), self.read_single(big=big), self.read_single(big=big)
        return Vector3(x, y, z)

    def read_vector4(self, *, big: bool = False) -> Vector4:
        """Read four 32-bit floats as a Vector4."""
        x, y, z, w = self.read_single(big=big), self.read_single(big=big), self.read_single(big=big), \
                     self.read_single(big=big)
        return Vector4(x, y, z, w)

    def read_bytes(self, length: int) -> bytes:
        """Read *length* raw bytes."""
        return self._stream.read(length)

    def read_string(self, length: int) -> str:
        """Read a fixed-width ASCII string, truncating at the first NUL."""
        string = self._stream.read(length).decode('ascii', errors='ignore')
        if '\0' in string:
            string = string[:string.index('\0')].rstrip('\0')
            string = string.replace('\0', '')
        return string

    def read_terminated_string(self, terminator: str) -> str:
        """Read characters until *terminator*; the terminator is consumed, not returned."""
        string = ""
        char = ""
        while char != terminator:
            string += char
            char = self.read_bytes(1).decode('ascii', errors='ignore')
        return string

    def read_localized_string(self) -> LocalizedString:
        """Read a GFF-style localized string block (total size, stringref, substrings)."""
        locstring = LocalizedString.from_invalid()
        self.skip(4)  # skip the total-size field; substrings are read individually
        locstring.stringref = self.read_uint32(max_neg1=True)
        string_count = self.read_uint32()
        for i in range(string_count):
            string_id = self.read_uint32()
            length = self.read_uint32()
            string = self.read_string(length)
            language, gender = LocalizedString.substring_pair(string_id)
            locstring.set(language, gender, string)
        return locstring

    def read_array_head(self) -> ArrayHead:
        """Read an (offset, length) pair as an ArrayHead."""
        return ArrayHead(self.read_uint32(), self.read_uint32())

    def peek(self, length: int = 1) -> bytes:
        """Read *length* bytes without advancing the position."""
        data = self._stream.read(length)
        self._stream.seek(-length, 1)
        return data
class BinaryWriter(ABC):
    """Abstract base for binary writers targeting files or bytearrays.

    BUG FIX: in this (comment-stripped) copy every abstract method had lost its
    body entirely, which is a syntax error in Python; docstring bodies are
    restored below.
    """

    @classmethod
    def to_file(cls, path: str) -> BinaryWriter:
        """Open *path* for binary writing and wrap it in a file-backed writer."""
        stream = open(path, 'wb')
        return BinaryWriterFile(stream)

    @classmethod
    def to_bytearray(cls, data: Optional[bytearray] = None) -> BinaryWriter:
        """Wrap *data* (a fresh bytearray when omitted) in an in-memory writer."""
        if data is None:
            data = bytearray()
        return BinaryWriterBytearray(data)

    @classmethod
    def to_auto(cls, source: Union[str, bytearray, BinaryWriter]) -> BinaryWriter:
        """Build a writer from a path, a bytearray, or pass through an existing writer."""
        if isinstance(source, str):
            return BinaryWriter.to_file(source)
        elif isinstance(source, bytearray):
            return BinaryWriter.to_bytearray(source)
        elif isinstance(source, BinaryWriter):
            return source
        else:
            raise NotImplementedError("Must specify a path, bytes object or an existing BinaryWriter instance.")

    @staticmethod
    def dump(path: str, data: bytes) -> None:
        """Write *data* to the file at *path*."""
        with open(path, 'wb') as file:
            file.write(data)

    @abstractmethod
    def close(self) -> None:
        """Close the underlying target."""

    @abstractmethod
    def size(self) -> int:
        """Return the total size written so far."""

    @abstractmethod
    def data(self) -> bytes:
        """Return everything written as bytes."""

    @abstractmethod
    def clear(self) -> None:
        """Discard all written data."""

    @abstractmethod
    def seek(self, position) -> None:
        """Move the write cursor to *position*."""

    @abstractmethod
    def end(self) -> None:
        """Move the write cursor to the end of the data."""

    @abstractmethod
    def position(self) -> int:
        """Return the current write cursor position."""

    @abstractmethod
    def write_uint8(self, value: int, *, big: bool = False) -> None:
        """Write an unsigned 8-bit integer."""

    @abstractmethod
    def write_int8(self, value: int, *, big: bool = False) -> None:
        """Write a signed 8-bit integer."""

    @abstractmethod
    def write_uint16(self, value: int, *, big: bool = False) -> None:
        """Write an unsigned 16-bit integer."""

    @abstractmethod
    def write_int16(self, value: int, *, big: bool = False) -> None:
        """Write a signed 16-bit integer."""

    @abstractmethod
    def write_uint32(self, value: int, *, max_neg1: bool = False, big: bool = False) -> None:
        """Write an unsigned 32-bit integer; -1 maps to 0xFFFFFFFF when *max_neg1*."""

    @abstractmethod
    def write_int32(self, value: int, *, big: bool = False) -> None:
        """Write a signed 32-bit integer."""

    @abstractmethod
    def write_uint64(self, value: int, *, big: bool = False) -> None:
        """Write an unsigned 64-bit integer."""

    @abstractmethod
    def write_int64(self, value: int, *, big: bool = False) -> None:
        """Write a signed 64-bit integer."""

    @abstractmethod
    def write_single(self, value: float, *, big: bool = False) -> None:
        """Write a 32-bit float."""

    @abstractmethod
    def write_double(self, value: float, *, big: bool = False) -> None:
        """Write a 64-bit float.  (Annotation fixed: was ``int``.)"""

    @abstractmethod
    def write_vector2(self, value: Vector2, *, big: bool = False) -> None:
        """Write two 32-bit floats."""

    @abstractmethod
    def write_vector3(self, value: Vector3, *, big: bool = False) -> None:
        """Write three 32-bit floats."""

    @abstractmethod
    def write_vector4(self, value: Vector4, *, big: bool = False) -> None:
        """Write four 32-bit floats."""

    @abstractmethod
    def write_bytes(self, value: bytes) -> None:
        """Write raw bytes."""

    @abstractmethod
    def write_string(self, value: str, *, big: bool = False, prefix_length: int = 0, string_length: int = -1,
                     padding: str = '\0') -> None:
        """Write an ASCII string, optionally length-prefixed and/or fixed-width."""

    @abstractmethod
    def write_line(self, indent: int, *args) -> None:
        """Write one indented, space-separated, newline-terminated line of values."""

    @abstractmethod
    def write_localized_string(self, value: LocalizedString, *, big: bool = False):
        """Write a GFF-style localized string block."""
class BinaryWriterFile(BinaryWriter):
    """BinaryWriter backed by a binary stream (file, BytesIO, ...)."""

    def __init__(self, stream: BinaryIO, offset: int = 0):
        self._stream: BinaryIO = stream
        self.offset: int = offset
        # Close the stream automatically when used as a context manager.
        self.auto_close: bool = True
        self._stream.seek(offset)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.auto_close: self.close()

    def close(self) -> None:
        """Close the underlying stream."""
        self._stream.close()

    def size(self) -> int:
        """Return the stream size, preserving the write position."""
        pos = self._stream.tell()
        self._stream.seek(0, 2)
        size = self._stream.tell()
        self._stream.seek(pos)
        return size

    def data(self) -> bytes:
        """Return the full stream contents, preserving the write position."""
        pos = self._stream.tell()
        self._stream.seek(0)
        data = self._stream.read()
        self._stream.seek(pos)
        return data

    def clear(self) -> None:
        """Truncate the stream to zero length."""
        self._stream.seek(0)
        self._stream.truncate()

    def seek(self, position) -> None:
        """Move the write cursor to *position* relative to the base offset."""
        self._stream.seek(position + self.offset)

    def end(self) -> None:
        """Move the write cursor to the end of the stream."""
        self._stream.seek(0, 2)

    def position(self) -> int:
        """Return the write cursor position relative to the base offset."""
        return self._stream.tell() - self.offset

    def write_uint8(self, value: int, *, big: bool = False) -> None:
        """Write an unsigned 8-bit integer."""
        self._stream.write(struct.pack(_endian_char(big) + 'B', value))

    def write_int8(self, value: int, *, big: bool = False) -> None:
        """Write a signed 8-bit integer."""
        self._stream.write(struct.pack(_endian_char(big) + 'b', value))

    def write_uint16(self, value: int, *, big: bool = False) -> None:
        """Write an unsigned 16-bit integer."""
        self._stream.write(struct.pack(_endian_char(big) + 'H', value))

    def write_int16(self, value: int, *, big: bool = False) -> None:
        """Write a signed 16-bit integer."""
        self._stream.write(struct.pack(_endian_char(big) + 'h', value))

    def write_uint32(self, value: int, *, max_neg1: bool = False, big: bool = False) -> None:
        """Write an unsigned 32-bit integer; -1 maps to 0xFFFFFFFF when *max_neg1*."""
        if max_neg1 and value == -1:
            value = 4294967295
        self._stream.write(struct.pack(_endian_char(big) + 'I', value))

    def write_int32(self, value: int, *, big: bool = False) -> None:
        """Write a signed 32-bit integer."""
        self._stream.write(struct.pack(_endian_char(big) + 'i', value))

    def write_uint64(self, value: int, *, big: bool = False) -> None:
        """Write an unsigned 64-bit integer."""
        self._stream.write(struct.pack(_endian_char(big) + 'Q', value))

    def write_int64(self, value: int, *, big: bool = False) -> None:
        """Write a signed 64-bit integer."""
        self._stream.write(struct.pack(_endian_char(big) + 'q', value))

    def write_single(self, value: float, *, big: bool = False) -> None:
        """Write a 32-bit float."""
        self._stream.write(struct.pack(_endian_char(big) + 'f', value))

    def write_double(self, value: float, *, big: bool = False) -> None:
        """Write a 64-bit float.  (Annotation fixed: was ``int``.)"""
        self._stream.write(struct.pack(_endian_char(big) + 'd', value))

    def write_vector2(self, value: Vector2, *, big: bool = False) -> None:
        """Write the x and y components as two 32-bit floats."""
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.x))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.y))

    def write_vector3(self, value: Vector3, *, big: bool = False) -> None:
        """Write the x, y and z components as three 32-bit floats."""
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.x))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.y))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.z))

    def write_vector4(self, value: Vector4, *, big: bool = False) -> None:
        """Write the x, y, z and w components as four 32-bit floats."""
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.x))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.y))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.z))
        self._stream.write(struct.pack(_endian_char(big) + 'f', value.w))

    def write_bytes(self, value: bytes) -> None:
        """Write raw bytes."""
        self._stream.write(value)

    def write_string(self, value: str, *, big: bool = False, prefix_length: int = 0, string_length: int = -1,
                     padding: str = '\0') -> None:
        """Write *value* as ASCII, optionally length-prefixed and/or fixed-width.

        Raises:
            ValueError: If ``prefix_length`` is invalid or the string is too long
                for the chosen prefix width.
        """
        if prefix_length == 1:
            if len(value) > 255:
                raise ValueError("The string length is too large for a prefix length of 1.")
            self.write_uint8(len(value), big=big)
        elif prefix_length == 2:
            if len(value) > 65535:
                raise ValueError("The string length is too large for a prefix length of 2.")
            self.write_uint16(len(value), big=big)
        elif prefix_length == 4:
            if len(value) > 4294967295:
                raise ValueError("The string length is too large for a prefix length of 4.")
            self.write_uint32(len(value), big=big)
        elif prefix_length != 0:
            raise ValueError("An invalid prefix length was provided.")
        if string_length != -1:
            while len(value) < string_length:
                value += padding
            value = value[:string_length]
        self._stream.write(value.encode('ascii'))

    def write_line(self, indent: int, *args) -> None:
        """Write one indented, space-separated, newline-terminated line of values.

        Floats are rounded to 7 decimal places before formatting.
        """
        line = "  " * indent
        for arg in args:
            if isinstance(arg, float):
                line += str(round(arg, 7))
            else:
                line += str(arg)
            line += " "
        line += "\n"
        self._stream.write(line.encode())

    def write_localized_string(self, value: LocalizedString, *, big: bool = False):
        """Write *value* as a length-prefixed GFF-style localized string block.

        BUG FIX: this previously called ``BinaryWriter.to_bytes(b'')``, a method
        that does not exist on BinaryWriter (raising AttributeError at runtime);
        the ABC and the sibling bytearray implementation use ``to_bytearray``.
        """
        bw = BinaryWriter.to_bytearray()
        bw.write_uint32(value.stringref, big=big, max_neg1=True)
        bw.write_uint32(len(value), big=big)
        for language, gender, substring in value:
            string_id = LocalizedString.substring_id(language, gender)
            bw.write_uint32(string_id, big=big)
            bw.write_string(substring, prefix_length=4)
        locstring_data = bw.data()
        self.write_uint32(len(locstring_data))
        self.write_bytes(locstring_data)
class BinaryWriterBytearray(BinaryWriter):
    """BinaryWriter backed by an in-memory bytearray."""

    def __init__(self, ba: bytearray, offset: int = 0):
        self._ba = ba
        self._offset: int = offset
        self._position = 0

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        ...

    def close(self) -> None:
        """No-op: a bytearray needs no closing (kept for interface parity).

        BUG FIX: this method had no body at all in this stripped copy, which is
        a syntax error; the docstring body restores validity.
        """

    def size(self) -> int:
        """Return the buffer length."""
        return len(self._ba)

    def data(self) -> bytes:
        """Return a bytes copy of the buffer."""
        return bytes(self._ba)

    def clear(self) -> None:
        """Empty the buffer in place."""
        self._ba.clear()

    def seek(self, position) -> None:
        # NOTE(review): unlike position(), this does not add self._offset —
        # confirm whether seek should be offset-relative like BinaryWriterFile.
        self._position = position

    def end(self) -> None:
        """Move the write cursor to the end of the buffer."""
        self._position = len(self._ba)

    def position(self) -> int:
        """Return the write cursor position relative to the base offset."""
        return self._position - self._offset

    def write_uint8(self, value: int, *, big: bool = False) -> None:
        """Write an unsigned 8-bit integer."""
        self._ba[self._position:self._position + 1] = struct.pack(_endian_char(big) + 'B', value)
        self._position += 1

    def write_int8(self, value: int, *, big: bool = False) -> None:
        """Write a signed 8-bit integer."""
        self._ba[self._position:self._position + 1] = struct.pack(_endian_char(big) + 'b', value)
        self._position += 1

    def write_uint16(self, value: int, *, big: bool = False) -> None:
        """Write an unsigned 16-bit integer."""
        self._ba[self._position:self._position + 2] = struct.pack(_endian_char(big) + 'H', value)
        self._position += 2

    def write_int16(self, value: int, *, big: bool = False) -> None:
        """Write a signed 16-bit integer."""
        self._ba[self._position:self._position + 2] = struct.pack(_endian_char(big) + 'h', value)
        self._position += 2

    def write_uint32(self, value: int, *, max_neg1: bool = False, big: bool = False) -> None:
        """Write an unsigned 32-bit integer; -1 maps to 0xFFFFFFFF when *max_neg1*."""
        if max_neg1 and value == -1:
            value = 4294967295
        self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'I', value)
        self._position += 4

    def write_int32(self, value: int, *, big: bool = False) -> None:
        """Write a signed 32-bit integer."""
        self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'i', value)
        self._position += 4

    def write_uint64(self, value: int, *, big: bool = False) -> None:
        """Write an unsigned 64-bit integer."""
        self._ba[self._position:self._position + 8] = struct.pack(_endian_char(big) + 'Q', value)
        self._position += 8

    def write_int64(self, value: int, *, big: bool = False) -> None:
        """Write a signed 64-bit integer."""
        self._ba[self._position:self._position + 8] = struct.pack(_endian_char(big) + 'q', value)
        self._position += 8

    def write_single(self, value: float, *, big: bool = False) -> None:
        """Write a 32-bit float."""
        self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'f', value)
        self._position += 4

    def write_double(self, value: float, *, big: bool = False) -> None:
        """Write a 64-bit float.  (Annotation fixed: was ``int``.)"""
        self._ba[self._position:self._position + 8] = struct.pack(_endian_char(big) + 'd', value)
        self._position += 8

    def write_vector2(self, value: Vector2, *, big: bool = False) -> None:
        """Write the x and y components as two 32-bit floats."""
        self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'f', value.x)
        self._ba[self._position + 4:self._position + 8] = struct.pack(_endian_char(big) + 'f', value.y)
        self._position += 8

    def write_vector3(self, value: Vector3, *, big: bool = False) -> None:
        """Write the x, y and z components as three 32-bit floats."""
        self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'f', value.x)
        self._ba[self._position + 4:self._position + 8] = struct.pack(_endian_char(big) + 'f', value.y)
        self._ba[self._position + 8:self._position + 12] = struct.pack(_endian_char(big) + 'f', value.z)
        self._position += 12

    def write_vector4(self, value: Vector4, *, big: bool = False) -> None:
        """Write the x, y, z and w components as four 32-bit floats."""
        self._ba[self._position:self._position + 4] = struct.pack(_endian_char(big) + 'f', value.x)
        self._ba[self._position + 4:self._position + 8] = struct.pack(_endian_char(big) + 'f', value.y)
        self._ba[self._position + 8:self._position + 12] = struct.pack(_endian_char(big) + 'f', value.z)
        self._ba[self._position + 12:self._position + 16] = struct.pack(_endian_char(big) + 'f', value.w)
        self._position += 16

    def write_bytes(self, value: bytes) -> None:
        """Write raw bytes."""
        self._ba[self._position:self._position + len(value)] = value
        self._position += len(value)

    def write_string(self, value: str, *, big: bool = False, prefix_length: int = 0, string_length: int = -1,
                     padding: str = '\0') -> None:
        """Write *value* as ASCII, optionally length-prefixed and/or fixed-width.

        Raises:
            ValueError: If ``prefix_length`` is invalid or the string is too long
                for the chosen prefix width.
        """
        if prefix_length == 1:
            if len(value) > 255:
                raise ValueError("The string length is too large for a prefix length of 1.")
            self.write_uint8(len(value), big=big)
        elif prefix_length == 2:
            if len(value) > 65535:
                raise ValueError("The string length is too large for a prefix length of 2.")
            self.write_uint16(len(value), big=big)
        elif prefix_length == 4:
            if len(value) > 4294967295:
                raise ValueError("The string length is too large for a prefix length of 4.")
            self.write_uint32(len(value), big=big)
        elif prefix_length != 0:
            raise ValueError("An invalid prefix length was provided.")
        if string_length != -1:
            while len(value) < string_length:
                value += padding
            value = value[:string_length]
        encoded = value.encode('ascii')
        self._ba[self._position:self._position + len(encoded)] = encoded
        self._position += len(encoded)

    def write_line(self, indent: int, *args) -> None:
        """Write one indented, space-separated, newline-terminated line of values.

        Floats are rounded to 7 decimal places before formatting.
        """
        line = "  " * indent
        for arg in args:
            if isinstance(arg, float):
                line += str(round(arg, 7))
            else:
                line += str(arg)
            line += " "
        line += "\n"
        encoded = line.encode('ascii')
        self._ba[self._position:self._position + len(encoded)] = encoded
        self._position += len(encoded)

    def write_localized_string(self, value: LocalizedString, *, big: bool = False):
        """Write *value* as a length-prefixed GFF-style localized string block."""
        bw = BinaryWriter.to_bytearray()
        bw.write_uint32(value.stringref, big=big, max_neg1=True)
        bw.write_uint32(len(value), big=big)
        for language, gender, substring in value:
            string_id = LocalizedString.substring_id(language, gender)
            bw.write_uint32(string_id, big=big)
            bw.write_string(substring, prefix_length=4)
        locstring_data = bw.data()
        self.write_uint32(len(locstring_data))
        self.write_bytes(locstring_data)
| true | true |
1c3f2211b675a7a05e8ac50df4265131fc2f31c0 | 127 | py | Python | install.py | dmdhrumilmistry/Termux-SSH | 65ba7868a0e8961f9a262a85e79b56f8b8a65b9e | [
"MIT"
] | 5 | 2021-07-17T20:40:42.000Z | 2022-02-27T09:41:19.000Z | install.py | dmdhrumilmistry/Termux-SSH | 65ba7868a0e8961f9a262a85e79b56f8b8a65b9e | [
"MIT"
] | null | null | null | install.py | dmdhrumilmistry/Termux-SSH | 65ba7868a0e8961f9a262a85e79b56f8b8a65b9e | [
"MIT"
] | 1 | 2021-07-17T22:36:39.000Z | 2021-07-17T22:36:39.000Z | #!usr/bin/env python3
from termux import get_user, generate_passwd, install_termux_req
# Install the Termux prerequisites for SSH, then set the SSH login password.
install_termux_req()
generate_passwd()
| 21.166667 | 64 | 0.834646 |
from termux import get_user, generate_passwd, install_termux_req
# Install the Termux prerequisites for SSH, then set the SSH login password.
install_termux_req()
generate_passwd()
| true | true |
1c3f23d96ace4872083b8af2bc17ca07cccb8d00 | 3,082 | py | Python | socialite/jython/Lib/test/test_pkg_jy.py | Wangqge/PowerLog_ae | 8546afbcb9a77d516e8c3f0dfbaf2041a4b888f9 | [
"Apache-2.0"
] | 49 | 2015-03-10T17:34:19.000Z | 2021-11-10T22:23:18.000Z | socialite/jython/Lib/test/test_pkg_jy.py | Wangqge/PowerLog_ae | 8546afbcb9a77d516e8c3f0dfbaf2041a4b888f9 | [
"Apache-2.0"
] | null | null | null | socialite/jython/Lib/test/test_pkg_jy.py | Wangqge/PowerLog_ae | 8546afbcb9a77d516e8c3f0dfbaf2041a4b888f9 | [
"Apache-2.0"
] | 32 | 2015-02-06T12:10:32.000Z | 2019-06-18T03:21:36.000Z | # Test packages (dotted-name import)
# XXX: This test is borrowed from CPython 2.7 as it tickles
# http://bugs.jython.org/issue1871 so it should be removed in Jython 2.7
import sys
import os
import tempfile
import textwrap
import unittest
from test import test_support
# Helpers to create and destroy hierarchies.
def cleanout(root):
    """Recursively delete every entry under *root*, then remove *root* itself.

    Symlinked directories are unlinked rather than descended into.
    """
    for entry in os.listdir(root):
        path = os.path.join(root, entry)
        if os.path.isdir(path) and not os.path.islink(path):
            cleanout(path)
        else:
            os.remove(path)
    os.rmdir(root)
def fixdir(lst):
    """Remove the auto-injected '__builtins__' entry from *lst* (in place) and return it."""
    try:
        lst.remove("__builtins__")
    except ValueError:
        pass  # not present; nothing to strip
    return lst
class Test(unittest.TestCase):
    """Exercises dotted-name package imports against a throwaway package tree.

    mkhier() builds the tree on disk and prepends it to sys.path; tearDown()
    restores sys.path and purges the imported modules so tests stay isolated.
    """
    def setUp(self):
        # Nothing created yet; snapshot sys.path so tearDown can restore it.
        self.root = None
        self.pkgname = None
        self.syspath = list(sys.path)
    def tearDown(self):
        sys.path[:] = self.syspath
        if self.root: # Only clean if the test was actually run
            cleanout(self.root)
        # delete all modules concerning the tested hierarchy
        if self.pkgname:
            modules = [name for name in sys.modules
                       if self.pkgname in name.split('.')]
            for name in modules:
                del sys.modules[name]
    def run_code(self, code):
        # Executes the snippet with this test instance bound as "self".
        exec(textwrap.dedent(code), globals(), {"self": self})
    def mkhier(self, descr):
        # descr: list of (space-separated path, contents) pairs; contents None
        # means "create a directory" instead of a file.
        root = tempfile.mkdtemp()
        sys.path.insert(0, root)
        if not os.path.isdir(root):
            os.mkdir(root)
        for name, contents in descr:
            comps = name.split()
            fullname = root
            for c in comps:
                fullname = os.path.join(fullname, c)
            if contents is None:
                os.mkdir(fullname)
            else:
                f = open(fullname, "w")
                f.write(contents)
                if contents and contents[-1] != '\n':
                    f.write('\n')
                f.close()
        self.root = root
        # package name is the name of the first item
        self.pkgname = descr[0][0]
    def test_5(self):
        # Relative "from . import string" inside a package must see the
        # package-local module, not the stdlib one.
        hier = [
        ("t5", None),
        ("t5 __init__"+os.extsep+"py", "import t5.foo"),
        ("t5 string"+os.extsep+"py", "spam = 1"),
        ("t5 foo"+os.extsep+"py",
         "from . import string; assert string.spam == 1"),
        ]
        self.mkhier(hier)
        import t5
        s = """
            from t5 import *
            self.assertEqual(dir(), ['foo', 'self', 'string', 't5'])
            """
        self.run_code(s)
        import t5
        self.assertEqual(fixdir(dir(t5)),
                         ['__doc__', '__file__', '__name__',
                          '__path__', 'foo', 'string', 't5'])
        self.assertEqual(fixdir(dir(t5.foo)),
                         ['__doc__', '__file__', '__name__',
                          'string'])
        self.assertEqual(fixdir(dir(t5.string)),
                         ['__doc__', '__file__', '__name__',
                          'spam'])
# Run the package-import tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 29.352381 | 72 | 0.524984 |
import sys
import os
import tempfile
import textwrap
import unittest
from test import test_support
def cleanout(root):
    """Recursively delete every entry under *root*, then remove *root* itself.

    Symlinked directories are unlinked rather than descended into.
    """
    for entry in os.listdir(root):
        path = os.path.join(root, entry)
        if os.path.isdir(path) and not os.path.islink(path):
            cleanout(path)
        else:
            os.remove(path)
    os.rmdir(root)
def fixdir(lst):
    """Remove the auto-injected '__builtins__' entry from *lst* (in place) and return it."""
    try:
        lst.remove("__builtins__")
    except ValueError:
        pass  # not present; nothing to strip
    return lst
class Test(unittest.TestCase):
    """Exercises dotted-name package imports against a throwaway package tree.

    mkhier() builds the tree on disk and prepends it to sys.path; tearDown()
    restores sys.path and purges the imported modules so tests stay isolated.
    """
    def setUp(self):
        # Nothing created yet; snapshot sys.path so tearDown can restore it.
        self.root = None
        self.pkgname = None
        self.syspath = list(sys.path)
    def tearDown(self):
        sys.path[:] = self.syspath
        if self.root:
            # Only clean if the test actually built a hierarchy.
            cleanout(self.root)
        if self.pkgname:
            # Purge every module belonging to the tested hierarchy.
            modules = [name for name in sys.modules
                       if self.pkgname in name.split('.')]
            for name in modules:
                del sys.modules[name]
    def run_code(self, code):
        # Executes the snippet with this test instance bound as "self".
        exec(textwrap.dedent(code), globals(), {"self": self})
    def mkhier(self, descr):
        # descr: list of (space-separated path, contents) pairs; contents None
        # means "create a directory" instead of a file.
        root = tempfile.mkdtemp()
        sys.path.insert(0, root)
        if not os.path.isdir(root):
            os.mkdir(root)
        for name, contents in descr:
            comps = name.split()
            fullname = root
            for c in comps:
                fullname = os.path.join(fullname, c)
            if contents is None:
                os.mkdir(fullname)
            else:
                f = open(fullname, "w")
                f.write(contents)
                if contents and contents[-1] != '\n':
                    f.write('\n')
                f.close()
        self.root = root
        # The package name is the name of the first item in the hierarchy.
        self.pkgname = descr[0][0]
    def test_5(self):
        # Relative "from . import string" inside a package must see the
        # package-local module, not the stdlib one.
        hier = [
        ("t5", None),
        ("t5 __init__"+os.extsep+"py", "import t5.foo"),
        ("t5 string"+os.extsep+"py", "spam = 1"),
        ("t5 foo"+os.extsep+"py",
         "from . import string; assert string.spam == 1"),
        ]
        self.mkhier(hier)
        import t5
        s = """
            from t5 import *
            self.assertEqual(dir(), ['foo', 'self', 'string', 't5'])
            """
        self.run_code(s)
        import t5
        self.assertEqual(fixdir(dir(t5)),
                         ['__doc__', '__file__', '__name__',
                          '__path__', 'foo', 'string', 't5'])
        self.assertEqual(fixdir(dir(t5.foo)),
                         ['__doc__', '__file__', '__name__',
                          'string'])
        self.assertEqual(fixdir(dir(t5.string)),
                         ['__doc__', '__file__', '__name__',
                          'spam'])
# Run the package-import tests when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| true | true |
1c3f2546f93edc21097975e658b41e2c47bd89f7 | 4,392 | py | Python | BackpackTF/currency.py | Epicalert/BackpackTf-API | dca4b3e1e6b2ada5f7357c929bd729d673310b57 | [
"MIT"
] | null | null | null | BackpackTF/currency.py | Epicalert/BackpackTf-API | dca4b3e1e6b2ada5f7357c929bd729d673310b57 | [
"MIT"
] | null | null | null | BackpackTF/currency.py | Epicalert/BackpackTf-API | dca4b3e1e6b2ada5f7357c929bd729d673310b57 | [
"MIT"
] | 1 | 2020-03-15T21:11:33.000Z | 2020-03-15T21:11:33.000Z | class Currency:
#
# Documentation for the backpack.tf API https://backpack.tf/api/index.html#/
#
def __init__(self, apikey=""):
import requests
import json
if apikey == "":
print("Error, you need to specify an API key")
else:
self.api_key = apikey
#
# Function Returns A JSON of the value of currencies
#
def getCurrencies(self):
import requests
import json
currencies = requests.get(
"https://backpack.tf/api/IGetCurrencies/v1?key=" + self.api_key)
currencyJSON = json.loads(currencies.text)
if currencyJSON['response']['success'] == "1" or currencyJSON['response']['success'] == 1:
return currencyJSON['response']['currencies']
else:
raise Exception('Your API key is invalid')
#
# Gets Price History of a specific item in an array of previous values
#
# Name - The item's base name
# Quality - The item's quality, Strange, Unique, Unusual
# Craftable - Get the item's craftable or not 0 or 1
# Tradable - get the item's tradable status
# PriceIndex - Most items is 0, however particle effects is the ID of the particle effect
# for crates it corresponds to the crate series, for strangifiers/unusualifiers is the
# definition index of the item it can be used on, chemistry set is a hyphented
# definition index 1086-14 is the index for a collector's festive wrangler
# here's a link to an item http://prntscr.com/pf2s0h
#
def priceHistory(self, name="Pyromancer's Mask", quality="Unique", craftable=1, tradable=1, priceIndex=0):
import requests
import urllib.parse
import json
payload = {
"appid": "440",
"quality": str(quality),
"item": name,
"tradable": str(tradable),
"craftable": str(craftable),
"priceindex": str(priceIndex),
"key": self.api_key
}
encoded = urllib.parse.urlencode(payload)
r = requests.get(
"https://backpack.tf/api/IGetPriceHistory/v1?" + encoded)
jsondata = json.loads(r.text)
try:
if jsondata['response']['success'] == 1 or jsondata['response']['success'] == "1":
success = True
except:
return jsondata
if success:
return jsondata['response']['history']
#
# Gets Price of a specific item
#
# Name - The item's base name
# Quality - The item's quality, Strange, Unique, Unusual
# Craftable - Get the item's craftable or not 0 or 1
# Tradable - get the item's tradable status
    # PriceIndex - Item subtype index (e.g. particle effect ID or crate series); 0 for most items
#
def itemPrice(self, name="Pyromancer's Mask", quality="Unique", craftable=1, tradable=1, priceIndex=0):
import requests
import urllib.parse
import json
payload = {
"appid": "440",
"quality": str(quality),
"item": name,
"tradable": str(tradable),
"craftable": str(craftable),
"priceindex": str(priceIndex),
"key": self.api_key
}
encoded = urllib.parse.urlencode(payload)
r = requests.get(
"https://backpack.tf/api/IGetPriceHistory/v1?" + encoded)
jsondata = json.loads(r.text)
try:
if jsondata['response']['success'] == 1 or jsondata['response']['success'] == "1":
success = True
except:
return jsondata
if success:
return jsondata['response']['history'][len(jsondata['response']['history']) - 1]
#
# Gets all prices, requires an elevated API key
#
# Since - Only prices that have been updated since the unix EPOCH will be shown
#
def getAllPrices(self, raw=2, since=0):
import requests
import json
r = requests.get("https://backpack.tf/api/IGetPrices/v4?raw=" +
str(raw) + "&since=" + str(since) + "&key=" + self.api_key)
jsondata = json.loads(r.text)
try:
if jsondata['response']['success'] == 1 or jsondata['response']['success'] == "1":
success = True
except:
return jsondata
if success:
return jsondata['response']
| 32.776119 | 110 | 0.574454 | class Currency:
def __init__(self, apikey=""):
import requests
import json
if apikey == "":
print("Error, you need to specify an API key")
else:
self.api_key = apikey
def getCurrencies(self):
import requests
import json
currencies = requests.get(
"https://backpack.tf/api/IGetCurrencies/v1?key=" + self.api_key)
currencyJSON = json.loads(currencies.text)
if currencyJSON['response']['success'] == "1" or currencyJSON['response']['success'] == 1:
return currencyJSON['response']['currencies']
else:
raise Exception('Your API key is invalid')
# Quality - The item's quality, Strange, Unique, Unusual
# Tradable - get the item's tradable status
# here's a link to an item http://prntscr.com/pf2s0h
def priceHistory(self, name="Pyromancer's Mask", quality="Unique", craftable=1, tradable=1, priceIndex=0):
import requests
import urllib.parse
import json
payload = {
"appid": "440",
"quality": str(quality),
"item": name,
"tradable": str(tradable),
"craftable": str(craftable),
"priceindex": str(priceIndex),
"key": self.api_key
}
encoded = urllib.parse.urlencode(payload)
r = requests.get(
"https://backpack.tf/api/IGetPriceHistory/v1?" + encoded)
jsondata = json.loads(r.text)
try:
if jsondata['response']['success'] == 1 or jsondata['response']['success'] == "1":
success = True
except:
return jsondata
if success:
return jsondata['response']['history']
#
# Gets Price of a specific item
#
# Name - The item's base name
# Craftable - Get the item's craftable or not 0 or 1
# PriceIndex - Not really sure to be honest
#
def itemPrice(self, name="Pyromancer's Mask", quality="Unique", craftable=1, tradable=1, priceIndex=0):
import requests
import urllib.parse
import json
payload = {
"appid": "440",
"quality": str(quality),
"item": name,
"tradable": str(tradable),
"craftable": str(craftable),
"priceindex": str(priceIndex),
"key": self.api_key
}
encoded = urllib.parse.urlencode(payload)
r = requests.get(
"https://backpack.tf/api/IGetPriceHistory/v1?" + encoded)
jsondata = json.loads(r.text)
try:
if jsondata['response']['success'] == 1 or jsondata['response']['success'] == "1":
success = True
except:
return jsondata
if success:
return jsondata['response']['history'][len(jsondata['response']['history']) - 1]
def getAllPrices(self, raw=2, since=0):
import requests
import json
r = requests.get("https://backpack.tf/api/IGetPrices/v4?raw=" +
str(raw) + "&since=" + str(since) + "&key=" + self.api_key)
jsondata = json.loads(r.text)
try:
if jsondata['response']['success'] == 1 or jsondata['response']['success'] == "1":
success = True
except:
return jsondata
if success:
return jsondata['response']
| true | true |
1c3f25eeb542dd19eee2427b9f519e968190dd43 | 1,401 | py | Python | tkinter/terminal_like.py | terasakisatoshi/pythonCodes | baee095ecee96f6b5ec6431267cdc6c40512a542 | [
"MIT"
] | null | null | null | tkinter/terminal_like.py | terasakisatoshi/pythonCodes | baee095ecee96f6b5ec6431267cdc6c40512a542 | [
"MIT"
] | null | null | null | tkinter/terminal_like.py | terasakisatoshi/pythonCodes | baee095ecee96f6b5ec6431267cdc6c40512a542 | [
"MIT"
] | null | null | null | import tkinter
from tkinter import *
import subprocess
import os
from os import system as cmd
WINDOW_SIZE = "600x400"
top = tkinter.Tk()
top.geometry(WINDOW_SIZE)
def helloCallBack():
print ("Below is the output from the shell script in terminal")
subprocess.call('perl /projects/tfs/users/$USER/scripts_coverage.pl', shell=True)
def BasicCovTests():
print ("Below is the output from the shell script in terminal")
subprocess.call('perl /projects/tfs/users/$USER/basic_coverage_tests.pl', shell=True)
def FullCovTests():
print ("Below is the output from the shell script in terminal")
subprocess.call('perl /projects/tfs/users/$USER/basic_coverage_tests.pl', shell=True)
Scripts_coverage = tkinter.Button(top, text ="Scripts Coverage", command = helloCallBack)
Scripts_coverage.pack()
Basic_coverage_tests = tkinter.Button(top, text ="Basic Coverage Tests", command = BasicCovTests)
Basic_coverage_tests.pack()
Full_coverage_tests = tkinter.Button(top, text ="Full Coverage Tests", command = FullCovTests)
Full_coverage_tests.pack()
termf = Frame(top, height=100, width=500)
termf.pack(fill=BOTH, expand=YES)
wid = termf.winfo_id()
os.system('xterm -into %d -geometry 100x20 -sb &' % wid)
def send_entry_to_terminal(*args):
"""*args needed since callback may be called from no arg (button)
or one arg (entry)
"""
cmd("%s" % (BasicCovTests))
top.mainloop() | 31.840909 | 98 | 0.745896 | import tkinter
from tkinter import *
import subprocess
import os
from os import system as cmd
WINDOW_SIZE = "600x400"
top = tkinter.Tk()
top.geometry(WINDOW_SIZE)
def helloCallBack():
print ("Below is the output from the shell script in terminal")
subprocess.call('perl /projects/tfs/users/$USER/scripts_coverage.pl', shell=True)
def BasicCovTests():
print ("Below is the output from the shell script in terminal")
subprocess.call('perl /projects/tfs/users/$USER/basic_coverage_tests.pl', shell=True)
def FullCovTests():
print ("Below is the output from the shell script in terminal")
subprocess.call('perl /projects/tfs/users/$USER/basic_coverage_tests.pl', shell=True)
Scripts_coverage = tkinter.Button(top, text ="Scripts Coverage", command = helloCallBack)
Scripts_coverage.pack()
Basic_coverage_tests = tkinter.Button(top, text ="Basic Coverage Tests", command = BasicCovTests)
Basic_coverage_tests.pack()
Full_coverage_tests = tkinter.Button(top, text ="Full Coverage Tests", command = FullCovTests)
Full_coverage_tests.pack()
termf = Frame(top, height=100, width=500)
termf.pack(fill=BOTH, expand=YES)
wid = termf.winfo_id()
os.system('xterm -into %d -geometry 100x20 -sb &' % wid)
def send_entry_to_terminal(*args):
cmd("%s" % (BasicCovTests))
top.mainloop() | true | true |
1c3f28246dcadec804bcbfc32bf5b0c6e925821b | 1,238 | py | Python | Python/standardDeviation.py | giandrea77/RExercises | d435e303775b154d4cbbc25f990eb4b23272039d | [
"MIT"
] | null | null | null | Python/standardDeviation.py | giandrea77/RExercises | d435e303775b154d4cbbc25f990eb4b23272039d | [
"MIT"
] | null | null | null | Python/standardDeviation.py | giandrea77/RExercises | d435e303775b154d4cbbc25f990eb4b23272039d | [
"MIT"
] | null | null | null | #
# Exerciese from book Data Science - Sinan Ozdemir
#
# @since : Fri Apr 9 14:41:38 CEST 2021
#
### Calculate standard deviance
#
# Distanza di un punto dei dati rispetto alla media
#
import numpy
temps = [32, 32, 31, 28, 29, 31, 39, 32, 32, 35, 26, 29]
# Calculate mean of values
mean = numpy.mean(temps)
squared_differences = []
num_items = len(temps)
products = 1
for temperature in temps:
# Geometric mean
products *= temperature
geometric_mean = products ** (1./num_items)
# Distance of single point from mean
difference = temperature - mean
# Square of difference
squared_difference = difference ** 2
squared_differences.append(squared_difference)
# Calculate VARIANCE
average_squared_difference = numpy.mean(squared_differences)
# Calculate standard deviation
standard_deviation = numpy.sqrt(average_squared_difference)
print ('mean: ', mean)
print ('variance: ', average_squared_difference)
print ('standard_deviation: ', standard_deviation)
print ('geometric mean: ', geometric_mean)
# mean: 31.333333333333332
# variance: 10.388888888888888
# standard_deviation: 3.2231799343022858
# geometric mean: 31.173240057688545 | 24.76 | 60 | 0.703554 |
, 35, 26, 29]
mean = numpy.mean(temps)
squared_differences = []
num_items = len(temps)
products = 1
for temperature in temps:
products *= temperature
geometric_mean = products ** (1./num_items)
difference = temperature - mean
squared_difference = difference ** 2
squared_differences.append(squared_difference)
average_squared_difference = numpy.mean(squared_differences)
standard_deviation = numpy.sqrt(average_squared_difference)
print ('mean: ', mean)
print ('variance: ', average_squared_difference)
print ('standard_deviation: ', standard_deviation)
print ('geometric mean: ', geometric_mean)
| true | true |
1c3f283673b6f1a01e98e29d842f6d08e7c0768c | 880 | py | Python | common/ciphers/block/counter.py | lukius/mts | 96d3d8b28742a474aca67bfcb079577c878bbb4c | [
"MIT"
] | 2 | 2015-04-04T01:44:11.000Z | 2017-11-04T11:59:27.000Z | common/ciphers/block/counter.py | lukius/mts | 96d3d8b28742a474aca67bfcb079577c878bbb4c | [
"MIT"
] | null | null | null | common/ciphers/block/counter.py | lukius/mts | 96d3d8b28742a474aca67bfcb079577c878bbb4c | [
"MIT"
] | null | null | null | from modes import CTR
from common.tools.endianness import LittleEndian
class CTRModeCounter(object):
def __init__(self, block_size):
self.block_size = block_size if block_size is not None\
else CTR.DEFAULT_BLOCK_SIZE
def count(self, index):
raise NotImplementedError
class DefaultCounter(CTRModeCounter):
def count(self, index):
return LittleEndian.from_int(index, size=self.block_size).value()
class NonceBasedCounter(CTRModeCounter):
def __init__(self, nonce, block_size):
CTRModeCounter.__init__(self, block_size)
self.nonce = nonce
def count(self, index):
size = self.block_size/2
nonce = LittleEndian.from_int(self.nonce, size=size).value()
index = LittleEndian.from_int(index, size=size).value()
return nonce + index | 27.5 | 73 | 0.663636 | from modes import CTR
from common.tools.endianness import LittleEndian
class CTRModeCounter(object):
def __init__(self, block_size):
self.block_size = block_size if block_size is not None\
else CTR.DEFAULT_BLOCK_SIZE
def count(self, index):
raise NotImplementedError
class DefaultCounter(CTRModeCounter):
def count(self, index):
return LittleEndian.from_int(index, size=self.block_size).value()
class NonceBasedCounter(CTRModeCounter):
def __init__(self, nonce, block_size):
CTRModeCounter.__init__(self, block_size)
self.nonce = nonce
def count(self, index):
size = self.block_size/2
nonce = LittleEndian.from_int(self.nonce, size=size).value()
index = LittleEndian.from_int(index, size=size).value()
return nonce + index | true | true |
1c3f2a0faae6c5df9633a8467f5fe3dafec0dc15 | 1,911 | py | Python | murano/cmd/engine.py | ISCAS-VDI/murano-base | 34287bd9109b32a2bb0960c0428fe402dee6d9b2 | [
"Apache-2.0"
] | 1 | 2021-07-28T23:19:49.000Z | 2021-07-28T23:19:49.000Z | murano/cmd/engine.py | ISCAS-VDI/murano-base | 34287bd9109b32a2bb0960c0428fe402dee6d9b2 | [
"Apache-2.0"
] | null | null | null | murano/cmd/engine.py | ISCAS-VDI/murano-base | 34287bd9109b32a2bb0960c0428fe402dee6d9b2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import eventlet
if os.name == 'nt':
# eventlet monkey patching causes subprocess.Popen to fail on Windows
# when using pipes due to missing non blocking I/O support
eventlet.monkey_patch(os=False)
else:
eventlet.monkey_patch()
import sys
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_service import service
from murano.common import config
from murano.common import engine
CONF = config.CONF
# If ../murano/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
root = os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir)
if os.path.exists(os.path.join(root, 'murano', '__init__.py')):
sys.path.insert(0, root)
def main():
try:
config.parse_args()
logging.setup(CONF, 'murano')
workers = CONF.engine.workers
if not workers:
workers = processutils.get_worker_count()
launcher = service.launch(CONF,
engine.EngineService(), workers=workers)
launcher.wait()
except RuntimeError as e:
sys.stderr.write("ERROR: %s\n" % e)
sys.exit(1)
if __name__ == '__main__':
main()
| 28.954545 | 79 | 0.685505 |
import os
import eventlet
if os.name == 'nt':
eventlet.monkey_patch(os=False)
else:
eventlet.monkey_patch()
import sys
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_service import service
from murano.common import config
from murano.common import engine
CONF = config.CONF
root = os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir)
if os.path.exists(os.path.join(root, 'murano', '__init__.py')):
sys.path.insert(0, root)
def main():
try:
config.parse_args()
logging.setup(CONF, 'murano')
workers = CONF.engine.workers
if not workers:
workers = processutils.get_worker_count()
launcher = service.launch(CONF,
engine.EngineService(), workers=workers)
launcher.wait()
except RuntimeError as e:
sys.stderr.write("ERROR: %s\n" % e)
sys.exit(1)
if __name__ == '__main__':
main()
| true | true |
1c3f2a49078e66fc5cfdd4857b54f8ca97fad00f | 1,788 | py | Python | jdcloud_sdk/services/instancevoucher/apis/ModifyInstanceVoucherAttributeRequest.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 14 | 2018-04-19T09:53:56.000Z | 2022-01-27T06:05:48.000Z | jdcloud_sdk/services/instancevoucher/apis/ModifyInstanceVoucherAttributeRequest.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 15 | 2018-09-11T05:39:54.000Z | 2021-07-02T12:38:02.000Z | jdcloud_sdk/services/instancevoucher/apis/ModifyInstanceVoucherAttributeRequest.py | Tanc009/jdcloud-sdk-python | 8b045c99bc5b73ca7348e950b6f01e03a27982f5 | [
"Apache-2.0"
] | 33 | 2018-04-20T05:29:16.000Z | 2022-02-17T09:10:05.000Z | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class ModifyInstanceVoucherAttributeRequest(JDCloudRequest):
"""
修改实例抵扣券的 名称 和 描述。<br>
name 和 description 必须要指定一个
"""
def __init__(self, parameters, header=None, version="v1"):
super(ModifyInstanceVoucherAttributeRequest, self).__init__(
'/regions/{regionId}/instanceVouchers/{instanceVoucherId}:modifyInstanceVoucherAttribute', 'PATCH', header, version)
self.parameters = parameters
class ModifyInstanceVoucherAttributeParameters(object):
def __init__(self, regionId, instanceVoucherId, ):
"""
:param regionId: 地域 ID
:param instanceVoucherId: 实例抵扣券 ID
"""
self.regionId = regionId
self.instanceVoucherId = instanceVoucherId
self.name = None
self.description = None
def setName(self, name):
"""
:param name: (Optional) 实例抵扣券名称
"""
self.name = name
def setDescription(self, description):
"""
:param description: (Optional) 实例抵扣券描述
"""
self.description = description
| 29.8 | 128 | 0.69519 |
from jdcloud_sdk.core.jdcloudrequest import JDCloudRequest
class ModifyInstanceVoucherAttributeRequest(JDCloudRequest):
def __init__(self, parameters, header=None, version="v1"):
super(ModifyInstanceVoucherAttributeRequest, self).__init__(
'/regions/{regionId}/instanceVouchers/{instanceVoucherId}:modifyInstanceVoucherAttribute', 'PATCH', header, version)
self.parameters = parameters
class ModifyInstanceVoucherAttributeParameters(object):
def __init__(self, regionId, instanceVoucherId, ):
self.regionId = regionId
self.instanceVoucherId = instanceVoucherId
self.name = None
self.description = None
def setName(self, name):
self.name = name
def setDescription(self, description):
self.description = description
| true | true |
1c3f2d7330211692e7ce6d41afd076ec8b682f77 | 16,747 | py | Python | GUI/PyQt/utils/CNN_main.py | thomaskuestner/CNNArt | c2fc639dd2ce035f6ca90113290682a0ccd26fb8 | [
"Apache-2.0"
] | 22 | 2018-04-27T21:28:46.000Z | 2021-12-24T06:44:55.000Z | GUI/PyQt/utils/CNN_main.py | thomaskuestner/CNNArt | c2fc639dd2ce035f6ca90113290682a0ccd26fb8 | [
"Apache-2.0"
] | 81 | 2017-11-09T17:23:15.000Z | 2020-01-28T22:54:13.000Z | GUI/PyQt/utils/CNN_main.py | thomaskuestner/CNNArt | c2fc639dd2ce035f6ca90113290682a0ccd26fb8 | [
"Apache-2.0"
] | 18 | 2017-11-13T16:12:17.000Z | 2020-08-27T10:17:34.000Z | # -*- coding: utf-8 -*-
"""
----------------------------------
Main function for calling the CNNs
----------------------------------
Created on Wed Jan 27 16:57:10 2016
Copyright: 2016, 2017 Thomas Kuestner (thomas.kuestner@med.uni-tuebingen.de) under Apache2 license
@author: Thomas Kuestner
"""
from tensorflow.python.keras.models import load_model
from config.PATH import CNN_PATH
"""Import"""
import sys
import numpy as np # for algebraic operations, matrices
import h5py
import scipy.io as sio # I/O
import os.path # operating system
import argparse
import keras.backend as K
# networks
from networks.motion.CNN2D import *
from networks.motion.CNN3D import *
from networks.motion.MNetArt import *
from networks.motion.VNetArt import *
from networks.multiclass.DenseResNet import *
from networks.multiclass.InceptionNet import *
from networks.multiclass.SENets import *
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
"""functions"""
RUN_CNN_TRAIN_TEST_VALIDATION = 0
RUN_CNN_TRAIN_TEST = 1
RUN_CNN_PREDICT = 2
def fLoadData(conten):
# prepared in matlab
print('Loading data')
for sVarname in ['X_train', 'X_test', 'y_train', 'y_test']:
if sVarname in conten:
exec(sVarname + '=conten[sVarname]')
else:
exec(sVarname + '= None')
pIdx = np.random.permutation(np.arange(len(X_train)))
X_train = X_train[pIdx]
y_train = y_train[pIdx]
y_train = np.asarray([y_train[:, 0], np.abs(np.asarray(y_train[:, 0], dtype=np.float32) - 1)]).T
y_test = np.asarray([y_test[:, 0], np.abs(np.asarray(y_test[:, 0], dtype=np.float32) - 1)]).T
return X_train, y_train, X_test, y_test
def fRemove_entries(entries, the_dict):
for key in entries:
if key in the_dict:
del the_dict[key]
def fLoadMat(sInPath):
"""Data"""
if os.path.isfile(sInPath):
try:
conten = sio.loadmat(sInPath)
except:
f = h5py.File(sInPath, 'r')
conten = {}
conten['X_train'] = np.transpose(np.array(f['X_train']), (3, 2, 0, 1))
conten['X_test'] = np.transpose(np.array(f['X_test']), (3, 2, 0, 1))
conten['y_train'] = np.transpose(np.array(f['y_train']))
conten['y_test'] = np.transpose(np.array(f['y_test']))
conten['patchSize'] = np.transpose(np.array(f['patchSize']))
else:
sys.exit('Input file is not existing')
X_train, y_train, X_test, y_test = fLoadData(conten) # output order needed for hyperas
fRemove_entries(('X_train', 'X_test', 'y_train', 'y_test'), conten)
dData = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train, 'y_test': y_test}
dOut = dData.copy()
dOut.update(conten)
return dOut # output dictionary (similar to conten, but with reshaped X_train, ...)
def fLoadDataForOptim(sInPath):
if os.path.isfile(sInPath):
conten = sio.loadmat(sInPath)
X_train, y_train, X_test, y_test = fLoadData(conten) # output order needed for hyperas
return X_train, y_train, X_test, y_test, conten["patchSize"]
# def fLoadAddData(sInPath): # deprecated
# if os.path.isfile(sInPath):
# conten = sio.loadmat(sInPath)
# else:
# sys.exit('Input file is not existing')
# for sVarname in conten:
# if not any(x in sVarname for x in ['X_train', 'X_test', 'y_train', 'y_test'] ):
# conten[sVarname]
def fRunCNN(dData, sModelIn, lTrain, sParaOptim, sOutPath, iBatchSize, iLearningRate, iEpochs, dlart_handle=None,
usingSegmentationMasks=False):
"""CNN Models"""
# check model
sModel = sModelIn
# dynamic loading of corresponding model
cnnModel = __import__(sModel, globals(), locals(), ['createModel', 'fTrain', 'fPredict', 'load_best_model'],
0) # dynamic module loading with specified functions and with absolute importing (level=0) -> work in both Python2 and Python3
# train (w/ or w/o optimization) and predicting
if lTrain == RUN_CNN_TRAIN_TEST: # training
if sParaOptim == 'hyperas': # hyperas parameter optimization
best_run, best_model = optim.minimize(model=cnnModel.fHyperasTrain,
data=fLoadDataForOptim(args.inPath[0]),
algo=tpe.suggest,
max_evals=5,
trials=Trials())
X_train, y_train, X_test, y_test, patchSize = fLoadDataForOptim(args.inPath[0])
score_test, acc_test = best_model.evaluate(X_test, y_test)
prob_test = best_model.predict(X_test, best_run['batch_size'], 0)
_, sPath = os.path.splitdrive(sOutPath)
sPath, sFilename = os.path.split(sPath)
sFilename, sExt = os.path.splitext(sFilename)
model_name = sPath + '/' + sFilename + str(patchSize[0, 0]) + str(patchSize[0, 1]) + '_best'
weight_name = model_name + '_weights.h5'
model_json = model_name + '.json'
model_all = model_name + '_model.h5'
json_string = best_model.to_json()
open(model_json, 'w').write(json_string)
# wei = best_model.get_weights()
best_model.save_weights(weight_name)
best_model.save(model_all)
result = best_run['result']
# acc = result.history['acc']y,
loss = result.history['loss']
val_acc = result.history['val_acc']
val_loss = result.history['val_loss']
sio.savemat(model_name, {'model_settings': model_json,
'model': model_all,
'weights': weight_name,
'acc': -best_run['loss'],
'loss': loss,
'val_acc': val_acc,
'val_loss': val_loss,
'score_test': score_test,
'acc_test': acc_test,
'prob_test': prob_test})
elif sParaOptim == 'grid': # grid search << backward compatibility
cnnModel.fTrain(X_traind=dData['X_train'],
y_traind=dData['y_train'],
X_test=dData['X_test'],
y_test=dData['y_test'],
sOutPath=sOutPath,
patchSize=dData['patchSize'],
batchSizes=iBatchSize,
learningRates=iLearningRate,
iEpochs=iEpochs,
dlart_handle=dlart_handle)
else: # no optimization or grid search (if batchSize|learningRate are arrays)
if not usingSegmentationMasks:
cnnModel.fTrain(X_train=dData['X_train'],
y_train=dData['y_train'],
X_test=dData['X_test'],
y_test=dData['y_test'],
sOutPath=sOutPath,
patchSize=dData['patchSize'],
batchSizes=iBatchSize,
learningRates=iLearningRate,
iEpochs=iEpochs,
dlart_handle=dlart_handle)
else:
cnnModel.fTrain(X_train=dData['X_train'],
y_train=dData['y_train'],
Y_segMasks_train=dData['Y_segMasks_train'],
X_test=dData['X_test'],
y_test=dData['y_test'],
Y_segMasks_test=dData['Y_segMasks_test'],
sOutPath=sOutPath,
patchSize=dData['patchSize'],
batchSizes=iBatchSize,
learningRates=iLearningRate,
iEpochs=iEpochs,
dlart_handle=dlart_handle)
elif lTrain == RUN_CNN_TRAIN_TEST_VALIDATION:
if sParaOptim == 'hyperas': # hyperas parameter optimization
best_run, best_model = optim.minimize(model=cnnModel.fHyperasTrain,
data=fLoadDataForOptim(args.inPath[0]),
algo=tpe.suggest,
max_evals=5,
trials=Trials())
X_train, y_train, X_test, y_test, patchSize = fLoadDataForOptim(args.inPath[0])
score_test, acc_test = best_model.evaluate(X_test, y_test)
prob_test = best_model.predict(X_test, best_run['batch_size'], 0)
_, sPath = os.path.splitdrive(sOutPath)
sPath, sFilename = os.path.split(sPath)
sFilename, sExt = os.path.splitext(sFilename)
model_name = sPath + '/' + sFilename + str(patchSize[0, 0]) + str(patchSize[0, 1]) + '_best'
weight_name = model_name + '_weights.h5'
model_json = model_name + '.json'
model_all = model_name + '_model.h5'
json_string = best_model.to_json()
open(model_json, 'w').write(json_string)
# wei = best_model.get_weights()
best_model.save_weights(weight_name)
best_model.save(model_all)
result = best_run['result']
# acc = result.history['acc']
loss = result.history['loss']
val_acc = result.history['val_acc']
val_loss = result.history['val_loss']
sio.savemat(model_name, {'model_settings': model_json,
'model': model_all,
'weights': weight_name,
'acc': -best_run['loss'],
'loss': loss,
'val_acc': val_acc,
'val_loss': val_loss,
'score_test': score_test,
'acc_test': acc_test,
'prob_test': prob_test})
elif sParaOptim == 'grid': # grid search << backward compatibility
cnnModel.fTrain(X_traind=dData['X_train'],
y_traind=dData['y_train'],
X_valid=dData['X_valid'],
y_valid=dData['y_valid'],
X_test=dData['X_test'],
y_test=dData['y_test'],
sOutPath=sOutPath,
patchSize=dData['patchSize'],
batchSizes=iBatchSize,
learningRates=iLearningRate,
iEpochs=iEpochs,
dlart_handle=dlart_handle)
else: # no optimization or grid search (if batchSize|learningRate are arrays)
if not usingSegmentationMasks:
cnnModel.fTrain(X_train=dData['X_train'],
y_train=dData['y_train'],
X_valid=dData['X_valid'],
y_valid=dData['y_valid'],
X_test=dData['X_test'],
y_test=dData['y_test'],
sOutPath=sOutPath,
patchSize=dData['patchSize'],
batchSizes=iBatchSize,
learningRates=iLearningRate,
iEpochs=iEpochs,
dlart_handle=dlart_handle)
else:
cnnModel.fTrain(X_train=dData['X_train'],
y_train=dData['y_train'],
Y_segMasks_train=dData['Y_segMasks_train'],
X_valid=dData['X_valid'],
y_valid=dData['y_valid'],
Y_segMasks_valid=dData['Y_segMasks_validation'],
X_test=dData['X_test'],
y_test=dData['y_test'],
Y_segMasks_test=dData['Y_segMasks_test'],
sOutPath=sOutPath,
patchSize=dData['patchSize'],
batchSizes=iBatchSize,
learningRates=iLearningRate,
iEpochs=iEpochs,
dlart_handle=dlart_handle)
elif lTrain == RUN_CNN_PREDICT: # predicting
cnnModel.fPredict(dData['X_test'], dData['y_test'], dData['model_name'], sOutPath, dData['patchSize'],
iBatchSize[0])
_, sPath = os.path.splitdrive(sOutPath)
sPath, sFilename = os.path.split(sPath)
sFilename, sExt = os.path.splitext(sFilename)
model_name = sOutPath + os.sep + sFilename
model_all = model_name + '_model.h5'
try:
model = load_model(model_all)
except:
try:
def dice_coef(y_true, y_pred, epsilon=1e-5):
dice_numerator = 2.0 * K.sum(y_true * y_pred, axis=[1, 2, 3, 4])
dice_denominator = K.sum(K.square(y_true), axis=[1, 2, 3, 4]) + K.sum(K.square(y_pred),
axis=[1, 2, 3, 4])
dice_score = dice_numerator / (dice_denominator + epsilon)
return K.mean(dice_score, axis=0)
def dice_coef_loss(y_true, y_pred):
return 1 - dice_coef(y_true, y_pred)
model = load_model(model_all,
custom_objects={'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef})
except:
model = {}
return model, model_all
# Main Code
if __name__ == "__main__": # for command line call
# input parsing
# ADD new options here!
parser = argparse.ArgumentParser(description='''CNN artifact detection''',
epilog='''(c) Thomas Kuestner, thomas.kuestner@iss.uni-stuttgart.de''')
parser.add_argument('-i', '--inPath', nargs=1, type=str, help='input path to *.mat of stored patches',
default=CNN_PATH + os.sep + 'Datatmp/in.mat')
parser.add_argument('-o', '--outPath', nargs=1, type=str,
help='output path to the file used for storage (subfiles _model, _weights, ... are automatically generated)',
default=CNN_PATH + os.sep + 'Datatmp/out')
parser.add_argument('-m', '--model', nargs=1, type=str,
choices=['motion_head_CNN2D', 'motion_abd_CNN2D', 'motion_all_CNN2D', 'motion_CNN3D',
'motion_MNetArt', 'motion_VNetArt', 'multi_DenseResNet', 'multi_InceptionNet'],
help='select CNN model', default='motion_2DCNN_head')
parser.add_argument('-t', '--train', dest='train', action='store_true',
help='if set -> training | if not set -> prediction')
parser.add_argument('-p', '--paraOptim', dest='paraOptim', type=str, choices=['grid', 'hyperas', 'none'],
help='parameter optimization via grid search, hyper optimization or no optimization',
default='none')
parser.add_argument('-b', '--batchSize', nargs='*', dest='batchSize', type=int, help='batchSize', default=64)
parser.add_argument('-l', '--learningRates', nargs='*', dest='learningRate', type=int, help='learningRate',
default=0.0001)
parser.add_argument('-e', '--epochs', nargs=1, dest='epochs', type=int, help='epochs', default=300)
args = parser.parse_args()
if os.path.isfile(args.outPath[0]):
print('Warning! Output file is already existing and will be overwritten')
# load input data
dData = fLoadMat(args.inPath[0])
# save path for keras model
if 'outPath' in dData:
sOutPath = dData['outPath']
else:
sOutPath = args.outPath[0]
fRunCNN(dData, args.model[0], args.train, args.paraOptim, sOutPath, args.batchSize, args.learningRate,
args.epochs[0])
| 46.519444 | 153 | 0.526482 |
from tensorflow.python.keras.models import load_model
from config.PATH import CNN_PATH
import sys
import numpy as np
import h5py
import scipy.io as sio
import os.path
import argparse
import keras.backend as K
from networks.motion.CNN2D import *
from networks.motion.CNN3D import *
from networks.motion.MNetArt import *
from networks.motion.VNetArt import *
from networks.multiclass.DenseResNet import *
from networks.multiclass.InceptionNet import *
from networks.multiclass.SENets import *
from hyperopt import Trials, STATUS_OK, tpe
from hyperas import optim
import tensorflow as tf
from keras.backend.tensorflow_backend import set_session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))
RUN_CNN_TRAIN_TEST_VALIDATION = 0
RUN_CNN_TRAIN_TEST = 1
RUN_CNN_PREDICT = 2
def fLoadData(conten):
print('Loading data')
for sVarname in ['X_train', 'X_test', 'y_train', 'y_test']:
if sVarname in conten:
exec(sVarname + '=conten[sVarname]')
else:
exec(sVarname + '= None')
pIdx = np.random.permutation(np.arange(len(X_train)))
X_train = X_train[pIdx]
y_train = y_train[pIdx]
y_train = np.asarray([y_train[:, 0], np.abs(np.asarray(y_train[:, 0], dtype=np.float32) - 1)]).T
y_test = np.asarray([y_test[:, 0], np.abs(np.asarray(y_test[:, 0], dtype=np.float32) - 1)]).T
return X_train, y_train, X_test, y_test
def fRemove_entries(entries, the_dict):
for key in entries:
if key in the_dict:
del the_dict[key]
def fLoadMat(sInPath):
if os.path.isfile(sInPath):
try:
conten = sio.loadmat(sInPath)
except:
f = h5py.File(sInPath, 'r')
conten = {}
conten['X_train'] = np.transpose(np.array(f['X_train']), (3, 2, 0, 1))
conten['X_test'] = np.transpose(np.array(f['X_test']), (3, 2, 0, 1))
conten['y_train'] = np.transpose(np.array(f['y_train']))
conten['y_test'] = np.transpose(np.array(f['y_test']))
conten['patchSize'] = np.transpose(np.array(f['patchSize']))
else:
sys.exit('Input file is not existing')
X_train, y_train, X_test, y_test = fLoadData(conten)
fRemove_entries(('X_train', 'X_test', 'y_train', 'y_test'), conten)
dData = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train, 'y_test': y_test}
dOut = dData.copy()
dOut.update(conten)
return dOut
def fLoadDataForOptim(sInPath):
    """Load a scipy-readable .mat file for hyperparameter optimization.

    Returns (X_train, y_train, X_test, y_test, patchSize), or None when
    *sInPath* does not point to an existing file.
    """
    if not os.path.isfile(sInPath):
        return None
    mat_contents = sio.loadmat(sInPath)
    X_train, y_train, X_test, y_test = fLoadData(mat_contents)
    return X_train, y_train, X_test, y_test, mat_contents["patchSize"]
def fRunCNN(dData, sModelIn, lTrain, sParaOptim, sOutPath, iBatchSize, iLearningRate, iEpochs,
            dlart_handle=None, usingSegmentationMasks=False):
    """Train or evaluate the selected CNN model.

    Bug fix: the original line was missing the ``def`` keyword, so the file
    did not parse.

    Args:
        dData: dict with 'X_train'/'y_train'/'X_test'/'y_test'/'patchSize'
            (plus validation and segmentation-mask keys depending on mode).
        sModelIn: dotted module path of the network to import dynamically.
        lTrain: one of RUN_CNN_TRAIN_TEST_VALIDATION, RUN_CNN_TRAIN_TEST,
            RUN_CNN_PREDICT.
        sParaOptim: 'grid', 'hyperas' or anything else for plain training.
        sOutPath: output path prefix for models/weights/results.
        iBatchSize: list of batch sizes (only iBatchSize[0] is used for predict).
        iLearningRate: list of learning rates.
        iEpochs: number of training epochs.
        dlart_handle: optional GUI/driver handle forwarded to fTrain.
        usingSegmentationMasks: forward segmentation masks to fTrain when True.

    Returns:
        (model, model_all_path) in RUN_CNN_PREDICT mode; None otherwise.
    """
    sModel = sModelIn
    # Dynamically import the selected network module; it must expose fTrain/fPredict.
    cnnModel = __import__(sModel, globals(), locals(),
                          ['createModel', 'fTrain', 'fPredict', 'load_best_model'], 0)

    if lTrain == RUN_CNN_TRAIN_TEST:
        if sParaOptim == 'hyperas':
            # NOTE(review): this branch relies on the module-level `args` from
            # the CLI entry point instead of the function parameters — confirm.
            best_run, best_model = optim.minimize(model=cnnModel.fHyperasTrain,
                                                  data=fLoadDataForOptim(args.inPath[0]),
                                                  algo=tpe.suggest,
                                                  max_evals=5,
                                                  trials=Trials())
            X_train, y_train, X_test, y_test, patchSize = fLoadDataForOptim(args.inPath[0])
            score_test, acc_test = best_model.evaluate(X_test, y_test)
            prob_test = best_model.predict(X_test, best_run['batch_size'], 0)

            # Derive output file names from the requested output path and patch size.
            _, sPath = os.path.splitdrive(sOutPath)
            sPath, sFilename = os.path.split(sPath)
            sFilename, sExt = os.path.splitext(sFilename)
            model_name = sPath + '/' + sFilename + str(patchSize[0, 0]) + str(patchSize[0, 1]) + '_best'
            weight_name = model_name + '_weights.h5'
            model_json = model_name + '.json'
            model_all = model_name + '_model.h5'
            json_string = best_model.to_json()
            open(model_json, 'w').write(json_string)
            best_model.save_weights(weight_name)
            best_model.save(model_all)
            result = best_run['result']
            loss = result.history['loss']
            val_acc = result.history['val_acc']
            val_loss = result.history['val_loss']
            sio.savemat(model_name, {'model_settings': model_json,
                                     'model': model_all,
                                     'weights': weight_name,
                                     'acc': -best_run['loss'],
                                     'loss': loss,
                                     'val_acc': val_acc,
                                     'val_loss': val_loss,
                                     'score_test': score_test,
                                     'acc_test': acc_test,
                                     'prob_test': prob_test})
        elif sParaOptim == 'grid':
            # NOTE(review): grid mode passes X_traind/y_traind (trailing 'd')
            # keyword names — confirm fTrain of each network accepts them.
            cnnModel.fTrain(X_traind=dData['X_train'],
                            y_traind=dData['y_train'],
                            X_test=dData['X_test'],
                            y_test=dData['y_test'],
                            sOutPath=sOutPath,
                            patchSize=dData['patchSize'],
                            batchSizes=iBatchSize,
                            learningRates=iLearningRate,
                            iEpochs=iEpochs,
                            dlart_handle=dlart_handle)
        else:
            # Plain training, with or without segmentation masks.
            if not usingSegmentationMasks:
                cnnModel.fTrain(X_train=dData['X_train'],
                                y_train=dData['y_train'],
                                X_test=dData['X_test'],
                                y_test=dData['y_test'],
                                sOutPath=sOutPath,
                                patchSize=dData['patchSize'],
                                batchSizes=iBatchSize,
                                learningRates=iLearningRate,
                                iEpochs=iEpochs,
                                dlart_handle=dlart_handle)
            else:
                cnnModel.fTrain(X_train=dData['X_train'],
                                y_train=dData['y_train'],
                                Y_segMasks_train=dData['Y_segMasks_train'],
                                X_test=dData['X_test'],
                                y_test=dData['y_test'],
                                Y_segMasks_test=dData['Y_segMasks_test'],
                                sOutPath=sOutPath,
                                patchSize=dData['patchSize'],
                                batchSizes=iBatchSize,
                                learningRates=iLearningRate,
                                iEpochs=iEpochs,
                                dlart_handle=dlart_handle)

    elif lTrain == RUN_CNN_TRAIN_TEST_VALIDATION:
        if sParaOptim == 'hyperas':
            # Same hyperas flow as the train/test branch (see NOTE above).
            best_run, best_model = optim.minimize(model=cnnModel.fHyperasTrain,
                                                  data=fLoadDataForOptim(args.inPath[0]),
                                                  algo=tpe.suggest,
                                                  max_evals=5,
                                                  trials=Trials())
            X_train, y_train, X_test, y_test, patchSize = fLoadDataForOptim(args.inPath[0])
            score_test, acc_test = best_model.evaluate(X_test, y_test)
            prob_test = best_model.predict(X_test, best_run['batch_size'], 0)

            _, sPath = os.path.splitdrive(sOutPath)
            sPath, sFilename = os.path.split(sPath)
            sFilename, sExt = os.path.splitext(sFilename)
            model_name = sPath + '/' + sFilename + str(patchSize[0, 0]) + str(patchSize[0, 1]) + '_best'
            weight_name = model_name + '_weights.h5'
            model_json = model_name + '.json'
            model_all = model_name + '_model.h5'
            json_string = best_model.to_json()
            open(model_json, 'w').write(json_string)
            best_model.save_weights(weight_name)
            best_model.save(model_all)
            result = best_run['result']
            loss = result.history['loss']
            val_acc = result.history['val_acc']
            val_loss = result.history['val_loss']
            sio.savemat(model_name, {'model_settings': model_json,
                                     'model': model_all,
                                     'weights': weight_name,
                                     'acc': -best_run['loss'],
                                     'loss': loss,
                                     'val_acc': val_acc,
                                     'val_loss': val_loss,
                                     'score_test': score_test,
                                     'acc_test': acc_test,
                                     'prob_test': prob_test})
        elif sParaOptim == 'grid':
            cnnModel.fTrain(X_traind=dData['X_train'],
                            y_traind=dData['y_train'],
                            X_valid=dData['X_valid'],
                            y_valid=dData['y_valid'],
                            X_test=dData['X_test'],
                            y_test=dData['y_test'],
                            sOutPath=sOutPath,
                            patchSize=dData['patchSize'],
                            batchSizes=iBatchSize,
                            learningRates=iLearningRate,
                            iEpochs=iEpochs,
                            dlart_handle=dlart_handle)
        else:
            if not usingSegmentationMasks:
                cnnModel.fTrain(X_train=dData['X_train'],
                                y_train=dData['y_train'],
                                X_valid=dData['X_valid'],
                                y_valid=dData['y_valid'],
                                X_test=dData['X_test'],
                                y_test=dData['y_test'],
                                sOutPath=sOutPath,
                                patchSize=dData['patchSize'],
                                batchSizes=iBatchSize,
                                learningRates=iLearningRate,
                                iEpochs=iEpochs,
                                dlart_handle=dlart_handle)
            else:
                # NOTE(review): validation masks use the longer key
                # 'Y_segMasks_validation' (unlike 'Y_segMasks_train'/'_test').
                cnnModel.fTrain(X_train=dData['X_train'],
                                y_train=dData['y_train'],
                                Y_segMasks_train=dData['Y_segMasks_train'],
                                X_valid=dData['X_valid'],
                                y_valid=dData['y_valid'],
                                Y_segMasks_valid=dData['Y_segMasks_validation'],
                                X_test=dData['X_test'],
                                y_test=dData['y_test'],
                                Y_segMasks_test=dData['Y_segMasks_test'],
                                sOutPath=sOutPath,
                                patchSize=dData['patchSize'],
                                batchSizes=iBatchSize,
                                learningRates=iLearningRate,
                                iEpochs=iEpochs,
                                dlart_handle=dlart_handle)

    elif lTrain == RUN_CNN_PREDICT:
        cnnModel.fPredict(dData['X_test'], dData['y_test'], dData['model_name'], sOutPath, dData['patchSize'],
                          iBatchSize[0])
        _, sPath = os.path.splitdrive(sOutPath)
        sPath, sFilename = os.path.split(sPath)
        sFilename, sExt = os.path.splitext(sFilename)
        model_name = sOutPath + os.sep + sFilename
        model_all = model_name + '_model.h5'
        try:
            model = load_model(model_all)
        except:
            try:
                # Segmentation models were saved with custom dice metrics;
                # re-register them so Keras can deserialize the model.
                def dice_coef(y_true, y_pred, epsilon=1e-5):
                    dice_numerator = 2.0 * K.sum(y_true * y_pred, axis=[1, 2, 3, 4])
                    dice_denominator = K.sum(K.square(y_true), axis=[1, 2, 3, 4]) + \
                        K.sum(K.square(y_pred), axis=[1, 2, 3, 4])
                    dice_score = dice_numerator / (dice_denominator + epsilon)
                    return K.mean(dice_score, axis=0)

                def dice_coef_loss(y_true, y_pred):
                    return 1 - dice_coef(y_true, y_pred)

                model = load_model(model_all,
                                   custom_objects={'dice_coef_loss': dice_coef_loss, 'dice_coef': dice_coef})
            except:
                # Best effort: return an empty placeholder if the model
                # cannot be loaded at all.
                model = {}
        return model, model_all
if __name__ == "__main__":
    # Command-line entry point: parse options, load the .mat data and run the CNN.
    parser = argparse.ArgumentParser(description='''CNN artifact detection''',
                                     epilog='''(c) Thomas Kuestner, thomas.kuestner@iss.uni-stuttgart.de''')
    # NOTE(review): CNN_PATH is not defined in this file excerpt — presumably a
    # module-level constant defined earlier; confirm.
    parser.add_argument('-i', '--inPath', nargs=1, type=str, help='input path to *.mat of stored patches',
                        default=CNN_PATH + os.sep + 'Datatmp/in.mat')
    parser.add_argument('-o', '--outPath', nargs=1, type=str,
                        help='output path to the file used for storage (subfiles _model, _weights, ... are automatically generated)',
                        default=CNN_PATH + os.sep + 'Datatmp/out')
    # NOTE(review): the default 'motion_2DCNN_head' is not in the choices list,
    # so omitting -m and then accessing args.model will misbehave.
    parser.add_argument('-m', '--model', nargs=1, type=str,
                        choices=['motion_head_CNN2D', 'motion_abd_CNN2D', 'motion_all_CNN2D', 'motion_CNN3D',
                                 'motion_MNetArt', 'motion_VNetArt', 'multi_DenseResNet', 'multi_InceptionNet'],
                        help='select CNN model', default='motion_2DCNN_head')
    # NOTE(review): args.train is a bool but fRunCNN compares lTrain against the
    # RUN_CNN_* integer constants (True == RUN_CNN_TRAIN_TEST == 1).
    parser.add_argument('-t', '--train', dest='train', action='store_true',
                        help='if set -> training | if not set -> prediction')
    parser.add_argument('-p', '--paraOptim', dest='paraOptim', type=str, choices=['grid', 'hyperas', 'none'],
                        help='parameter optimization via grid search, hyper optimization or no optimization',
                        default='none')
    parser.add_argument('-b', '--batchSize', nargs='*', dest='batchSize', type=int, help='batchSize', default=64)
    # NOTE(review): type=int with a float default 0.0001 — any value given on
    # the command line is truncated to an integer; probably should be type=float.
    parser.add_argument('-l', '--learningRates', nargs='*', dest='learningRate', type=int, help='learningRate',
                        default=0.0001)
    parser.add_argument('-e', '--epochs', nargs=1, dest='epochs', type=int, help='epochs', default=300)
    args = parser.parse_args()
    if os.path.isfile(args.outPath[0]):
        print('Warning! Output file is already existing and will be overwritten')
    dData = fLoadMat(args.inPath[0])
    # A .mat-embedded output path overrides the command-line one.
    if 'outPath' in dData:
        sOutPath = dData['outPath']
    else:
        sOutPath = args.outPath[0]
    fRunCNN(dData, args.model[0], args.train, args.paraOptim, sOutPath, args.batchSize, args.learningRate,
            args.epochs[0])
| true | true |
1c3f2f8cea0a7ff37db06a263707542c416d7769 | 1,614 | py | Python | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/15_features/numtrees_30/rule_22.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/15_features/numtrees_30/rule_22.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/15_features/numtrees_30/rule_22.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | def findDecision(obj): #obj[0]: Passanger, obj[1]: Time, obj[2]: Coupon, obj[3]: Coupon_validity, obj[4]: Gender, obj[5]: Age, obj[6]: Children, obj[7]: Education, obj[8]: Occupation, obj[9]: Income, obj[10]: Bar, obj[11]: Coffeehouse, obj[12]: Restaurant20to50, obj[13]: Direction_same, obj[14]: Distance
# {"feature": "Income", "instances": 34, "metric_value": 0.9975, "depth": 1}
if obj[9]<=6:
# {"feature": "Passanger", "instances": 28, "metric_value": 0.9852, "depth": 2}
if obj[0]>1:
# {"feature": "Gender", "instances": 14, "metric_value": 0.7496, "depth": 3}
if obj[4]>0:
return 'True'
elif obj[4]<=0:
# {"feature": "Education", "instances": 7, "metric_value": 0.9852, "depth": 4}
if obj[7]>0:
# {"feature": "Age", "instances": 4, "metric_value": 0.8113, "depth": 5}
if obj[5]<=3:
return 'False'
elif obj[5]>3:
return 'True'
else: return 'True'
elif obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[0]<=1:
# {"feature": "Children", "instances": 14, "metric_value": 0.9403, "depth": 3}
if obj[6]<=0:
# {"feature": "Occupation", "instances": 12, "metric_value": 0.8113, "depth": 4}
if obj[8]>4:
return 'False'
elif obj[8]<=4:
# {"feature": "Direction_same", "instances": 5, "metric_value": 0.971, "depth": 5}
if obj[13]<=0:
return 'True'
elif obj[13]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[6]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[9]>6:
return 'False'
else: return 'False'
| 37.534884 | 305 | 0.581784 | def findDecision(obj):
if obj[9]<=6:
if obj[0]>1:
if obj[4]>0:
return 'True'
elif obj[4]<=0:
if obj[7]>0:
if obj[5]<=3:
return 'False'
elif obj[5]>3:
return 'True'
else: return 'True'
elif obj[7]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[0]<=1:
if obj[6]<=0:
if obj[8]>4:
return 'False'
elif obj[8]<=4:
if obj[13]<=0:
return 'True'
elif obj[13]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[6]>0:
return 'True'
else: return 'True'
else: return 'False'
elif obj[9]>6:
return 'False'
else: return 'False'
| true | true |
1c3f3071b3c658bb957cf41a58de0fac4bd47b97 | 2,081 | py | Python | tests/python/gaia-ui-tests/gaiatest/tests/functional/ftu/test_ftu_with_tour.py | woslinux/gaia | eb6766d52c64a906101e548550cf09c23dad15e8 | [
"Apache-2.0"
] | 1 | 2019-04-26T21:30:24.000Z | 2019-04-26T21:30:24.000Z | tests/python/gaia-ui-tests/gaiatest/tests/functional/ftu/test_ftu_with_tour.py | woslinux/gaia | eb6766d52c64a906101e548550cf09c23dad15e8 | [
"Apache-2.0"
] | null | null | null | tests/python/gaia-ui-tests/gaiatest/tests/functional/ftu/test_ftu_with_tour.py | woslinux/gaia | eb6766d52c64a906101e548550cf09c23dad15e8 | [
"Apache-2.0"
] | 1 | 2021-09-03T10:18:22.000Z | 2021-09-03T10:18:22.000Z | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
from gaiatest.apps.ftu.app import Ftu
from gaiatest.apps.homescreen.app import Homescreen
class TestFtu(GaiaTestCase):
    """Gaia UI test that walks through the First Time Use (FTU) tour screens."""

    def setUp(self):
        # Launch the FTU app before each test.
        GaiaTestCase.setUp(self)
        self.ftu = Ftu(self.marionette)
        self.ftu.launch()

    def test_ftu_with_tour(self):
        """
        https://moztrap.mozilla.org/manage/case/6119/
        """
        # Go through the FTU setup as quickly as possible to get to the Tour section
        self.ftu.run_ftu_setup_with_default_values()
        # Take the tour
        self.ftu.tap_take_tour()
        # Walk through the tour, checking the header text of each of the 4 steps.
        self.assertEqual(self.ftu.step1_header_text, "Swipe up and down to browse your apps and bookmarks. Tap and hold an icon to delete, move, or edit it.")
        self.ftu.tap_tour_next()
        self.assertEqual(self.ftu.step2_header_text, "Swipe down to access recent notifications, usage information and settings.")
        self.ftu.tap_tour_next()
        self.assertEqual(self.ftu.step3_header_text, "Drag from the left edge of your screen to return to recently used apps.")
        self.ftu.tap_tour_next()
        self.assertEqual(self.ftu.step4_header_text, "Tap on the search box anytime to start a search or go to a website.")
        # Try going back a step, then forward again to the same screen.
        self.ftu.tap_back()
        self.assertEqual(self.ftu.step3_header_text, "Drag from the left edge of your screen to return to recently used apps.")
        self.ftu.tap_tour_next()
        self.assertEqual(self.ftu.step4_header_text, "Tap on the search box anytime to start a search or go to a website.")
        self.ftu.tap_tour_next()
        self.ftu.wait_for_finish_tutorial_section()
        self.ftu.tap_lets_go_button()
        # Switch back to top level now that FTU app is gone
        self.wait_for_condition(lambda m: self.apps.displayed_app.name == Homescreen.name)
| 43.354167 | 158 | 0.697261 |
from gaiatest import GaiaTestCase
from gaiatest.apps.ftu.app import Ftu
from gaiatest.apps.homescreen.app import Homescreen
class TestFtu(GaiaTestCase):
def setUp(self):
GaiaTestCase.setUp(self)
self.ftu = Ftu(self.marionette)
self.ftu.launch()
def test_ftu_with_tour(self):
self.ftu.run_ftu_setup_with_default_values()
self.ftu.tap_take_tour()
self.assertEqual(self.ftu.step1_header_text, "Swipe up and down to browse your apps and bookmarks. Tap and hold an icon to delete, move, or edit it.")
self.ftu.tap_tour_next()
self.assertEqual(self.ftu.step2_header_text, "Swipe down to access recent notifications, usage information and settings.")
self.ftu.tap_tour_next()
self.assertEqual(self.ftu.step3_header_text, "Drag from the left edge of your screen to return to recently used apps.")
self.ftu.tap_tour_next()
self.assertEqual(self.ftu.step4_header_text, "Tap on the search box anytime to start a search or go to a website.")
self.ftu.tap_back()
self.assertEqual(self.ftu.step3_header_text, "Drag from the left edge of your screen to return to recently used apps.")
self.ftu.tap_tour_next()
self.assertEqual(self.ftu.step4_header_text, "Tap on the search box anytime to start a search or go to a website.")
self.ftu.tap_tour_next()
self.ftu.wait_for_finish_tutorial_section()
self.ftu.tap_lets_go_button()
self.wait_for_condition(lambda m: self.apps.displayed_app.name == Homescreen.name)
| true | true |
1c3f30fcc910ed268fb074d46de85f5729d9d8ae | 859 | py | Python | utils/vocab_utils.py | sciforce/phones-las | f95523fbbdf1dd7f1acce5b25c37b620f3eb8e9b | [
"Apache-2.0"
] | 35 | 2019-07-04T10:13:29.000Z | 2022-02-22T03:41:39.000Z | utils/vocab_utils.py | sciforce/phones-las | f95523fbbdf1dd7f1acce5b25c37b620f3eb8e9b | [
"Apache-2.0"
] | 7 | 2019-11-04T15:34:03.000Z | 2020-06-21T04:30:22.000Z | utils/vocab_utils.py | sciforce/phones-las | f95523fbbdf1dd7f1acce5b25c37b620f3eb8e9b | [
"Apache-2.0"
] | 5 | 2019-07-15T20:09:46.000Z | 2021-08-05T09:55:29.000Z | import tensorflow as tf
import pickle
# Public API of this module.
__all__ = [
    'create_vocab_table',
    'load_vocab',
    'UNK',
    'SOS',
    'EOS',
    'UNK_ID',
    'SOS_ID',
    'EOS_ID',
]
# Special vocabulary tokens; load_vocab prepends them so their ids are fixed.
UNK = '<unk>'   # unknown token
SOS = '<s>'     # start-of-sequence token
EOS = '</s>'    # end-of-sequence token
UNK_ID = 0
SOS_ID = 1
EOS_ID = 2
def load_vocab(filename):
    """Load a vocabulary list from a text file or a pickle.

    Text files are read line by line (one token per line, CR/LF stripped);
    filenames containing '.pickle' are unpickled instead and must contain a
    list of tokens. The special tokens UNK, SOS and EOS are prepended so
    that they receive the fixed ids UNK_ID=0, SOS_ID=1, EOS_ID=2.

    Args:
        filename: path readable by tf.io.gfile (local or remote).

    Returns:
        list of token strings starting with [UNK, SOS, EOS].
    """
    if '.pickle' not in filename:  # idiomatic negation (was `not '.pickle' in`)
        with tf.io.gfile.GFile(filename, 'r') as f:
            vocab_list = [vocab.strip('\r\n') for vocab in f]
    else:
        with tf.io.gfile.GFile(filename, 'rb') as f:
            vocab_list = pickle.load(f)
    # Prepend the special tokens once, for both branches.
    return [UNK, SOS, EOS] + vocab_list
def create_vocab_table(filename):
    """Build a TensorFlow lookup table mapping token string -> integer id.

    The table is created from the vocabulary file via load_vocab, with no
    out-of-vocabulary buckets; unknown tokens map to UNK_ID.
    """
    vocab_list = load_vocab(filename)
    return tf.contrib.lookup.index_table_from_tensor(
        tf.constant(vocab_list), num_oov_buckets=0, default_value=UNK_ID)
| 20.452381 | 73 | 0.605355 | import tensorflow as tf
import pickle
__all__ = [
'create_vocab_table',
'load_vocab',
'UNK',
'SOS',
'EOS',
'UNK_ID',
'SOS_ID',
'EOS_ID',
]
UNK = '<unk>'
SOS = '<s>'
EOS = '</s>'
UNK_ID = 0
SOS_ID = 1
EOS_ID = 2
def load_vocab(filename):
if not '.pickle' in filename:
with tf.io.gfile.GFile(filename, 'r') as f:
vocab_list = [vocab.strip('\r\n') for vocab in f]
vocab_list = [UNK, SOS, EOS] + vocab_list
else:
with tf.io.gfile.GFile(filename, 'rb') as f:
vocab_list = pickle.load(f)
vocab_list = [UNK, SOS, EOS] + vocab_list
return vocab_list
def create_vocab_table(filename):
vocab_list = load_vocab(filename)
return tf.contrib.lookup.index_table_from_tensor(
tf.constant(vocab_list), num_oov_buckets=0, default_value=UNK_ID)
| true | true |
1c3f334e33497dd3183a46e03be879ae0cc7ebf6 | 5,664 | py | Python | tf_slim/nets/overfeat.py | adrianc-a/tf-slim | 4d4496e5ad26747f0d9f7b8af754ed73d56cede5 | [
"Apache-2.0"
] | 4 | 2019-11-07T09:20:52.000Z | 2022-01-04T22:38:22.000Z | tf_slim/nets/overfeat.py | adrianc-a/tf-slim | 4d4496e5ad26747f0d9f7b8af754ed73d56cede5 | [
"Apache-2.0"
] | 1 | 2019-12-02T10:10:58.000Z | 2019-12-02T10:10:58.000Z | tf_slim/nets/overfeat.py | adrianc-a/tf-slim | 4d4496e5ad26747f0d9f7b8af754ed73d56cede5 | [
"Apache-2.0"
] | 6 | 2019-11-27T19:25:58.000Z | 2022-01-26T07:54:22.000Z | # coding=utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Usage:
with slim.arg_scope(overfeat.overfeat_arg_scope()):
outputs, end_points = overfeat.overfeat(inputs)
@@overfeat
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
# pylint:enable=g-direct-tensorflow-import
# Factory for a zero-mean truncated-normal weight initializer.
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def overfeat_arg_scope(weight_decay=0.0005):
  """Default arg scope for the OverFeat model.

  Conv and fully-connected layers get ReLU activations, L2 weight decay and
  zero bias init; conv layers use SAME padding while max-pools use VALID.

  Args:
    weight_decay: L2 regularization strength for conv/fc weights.

  Returns:
    An arg_scope to wrap calls to overfeat() in.
  """
  with arg_scope(
      [layers.conv2d, layers_lib.fully_connected],
      activation_fn=nn_ops.relu,
      weights_regularizer=regularizers.l2_regularizer(weight_decay),
      biases_initializer=init_ops.zeros_initializer()):
    with arg_scope([layers.conv2d], padding='SAME'):
      with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
        return arg_sc
def overfeat(inputs,
             num_classes=1000,
             is_training=True,
             dropout_keep_prob=0.5,
             spatial_squeeze=True,
             scope='overfeat'):
  """Contains the model definition for the OverFeat network.

  The definition for the network was obtained from:
    OverFeat: Integrated Recognition, Localization and Detection using
    Convolutional Networks
    Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
    Yann LeCun, 2014
    http://arxiv.org/abs/1312.6229

  Note: All the fully_connected layers have been transformed to conv2d layers.
        To use in classification mode, resize input to 231x231. To use in fully
        convolutional mode, set spatial_squeeze to false.

  Args:
    inputs: a tensor of size [batch_size, height, width, channels].
    num_classes: number of predicted classes.
    is_training: whether or not the model is being trained.
    dropout_keep_prob: the probability that activations are kept in the dropout
      layers during training.
    spatial_squeeze: whether or not should squeeze the spatial dimensions of the
      outputs. Useful to remove unnecessary dimensions for classification.
    scope: Optional scope for the variables.

  Returns:
    the last op containing the log predictions and end_points dict.
  """
  with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
    end_points_collection = sc.name + '_end_points'
    # Collect outputs for conv2d, fully_connected and max_pool2d
    with arg_scope(
        [layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
        outputs_collections=end_points_collection):
      # Feature extractor: alternating conv and max-pool stages.
      net = layers.conv2d(
          inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
      net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
      net = layers.conv2d(net, 512, [3, 3], scope='conv3')
      net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
      net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
      net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
      with arg_scope(
          [layers.conv2d],
          weights_initializer=trunc_normal(0.005),
          biases_initializer=init_ops.constant_initializer(0.1)):
        # Use conv2d instead of fully_connected layers.
        net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout6')
        net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
        net = layers_lib.dropout(
            net, dropout_keep_prob, is_training=is_training, scope='dropout7')
        # Final classifier layer: raw logits, no activation or normalizer.
        net = layers.conv2d(
            net,
            num_classes, [1, 1],
            activation_fn=None,
            normalizer_fn=None,
            biases_initializer=init_ops.zeros_initializer(),
            scope='fc8')
      # Convert end_points_collection into a end_point dict.
      end_points = utils.convert_collection_to_dict(end_points_collection)
      if spatial_squeeze:
        # Collapse the 1x1 spatial dims so the output is [batch, num_classes].
        net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
        end_points[sc.name + '/fc8'] = net
      return net, end_points
| 42.268657 | 80 | 0.707627 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib import layers
from tensorflow.contrib.framework.python.ops import arg_scope
from tensorflow.contrib.layers.python.layers import layers as layers_lib
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.contrib.layers.python.layers import utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import variable_scope
trunc_normal = lambda stddev: init_ops.truncated_normal_initializer(0.0, stddev)
def overfeat_arg_scope(weight_decay=0.0005):
with arg_scope(
[layers.conv2d, layers_lib.fully_connected],
activation_fn=nn_ops.relu,
weights_regularizer=regularizers.l2_regularizer(weight_decay),
biases_initializer=init_ops.zeros_initializer()):
with arg_scope([layers.conv2d], padding='SAME'):
with arg_scope([layers_lib.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def overfeat(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='overfeat'):
with variable_scope.variable_scope(scope, 'overfeat', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
with arg_scope(
[layers.conv2d, layers_lib.fully_connected, layers_lib.max_pool2d],
outputs_collections=end_points_collection):
net = layers.conv2d(
inputs, 64, [11, 11], 4, padding='VALID', scope='conv1')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool1')
net = layers.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool2')
net = layers.conv2d(net, 512, [3, 3], scope='conv3')
net = layers.conv2d(net, 1024, [3, 3], scope='conv4')
net = layers.conv2d(net, 1024, [3, 3], scope='conv5')
net = layers_lib.max_pool2d(net, [2, 2], scope='pool5')
with arg_scope(
[layers.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=init_ops.constant_initializer(0.1)):
net = layers.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout6')
net = layers.conv2d(net, 4096, [1, 1], scope='fc7')
net = layers_lib.dropout(
net, dropout_keep_prob, is_training=is_training, scope='dropout7')
net = layers.conv2d(
net,
num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=init_ops.zeros_initializer(),
scope='fc8')
end_points = utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = array_ops.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
| true | true |
1c3f3429032667dca3939a237e76121898898372 | 13,890 | py | Python | David and Pooja/++Validating Linked Mods/Python-3.0/Lib/binhex.py | LinkedModernismProject/web_code | 4cf6bf53d5c3249e52a75f0a3f57d106e31daf9e | [
"Apache-2.0"
] | 1 | 2015-05-21T23:47:54.000Z | 2015-05-21T23:47:54.000Z | front-end/testsuite-python-lib/Python-3.1/Lib/binhex.py | MalloyPower/parsing-python | b2bca5eed07ea2af7a2001cd4f63becdfb0570be | [
"MIT"
] | 1 | 2015-10-29T20:51:31.000Z | 2015-10-29T20:51:31.000Z | front-end/testsuite-python-lib/Python-3.1/Lib/binhex.py | MalloyPower/parsing-python | b2bca5eed07ea2af7a2001cd4f63becdfb0570be | [
"MIT"
] | 1 | 2019-04-11T11:27:01.000Z | 2019-04-11T11:27:01.000Z | """Macintosh binhex compression/decompression.
easy interface:
binhex(inputfilename, outputfilename)
hexbin(inputfilename, outputfilename)
"""
#
# Jack Jansen, CWI, August 1995.
#
# The module is supposed to be as compatible as possible. Especially the
# easy interface should work "as expected" on any platform.
# XXXX Note: currently, textfiles appear in mac-form on all platforms.
# We seem to lack a simple character-translate in python.
# (we should probably use ISO-Latin-1 on all but the mac platform).
# XXXX The simple routines are too simple: they expect to hold the complete
# files in-core. Should be fixed.
# XXXX It would be nice to handle AppleDouble format on unix
# (for servers serving macs).
# XXXX I don't understand what happens when you get 0x90 times the same byte on
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
#
import io
import os
import sys
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
    """Raised for malformed or inconsistent binhex data and misuse of the API."""
# States (what have we written)
# Writer state machine: header written -> data fork written -> resource fork written.
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
REASONABLY_LARGE = 32768  # Minimal amount we pass the rle-coder
LINELEN = 64              # characters per encoded output line
RUNCHAR = b"\x90"         # RLE escape byte in the binhex run-length encoding
#
# This code is no longer byte-order dependent
class FInfo:
    """Macintosh Finder information: file type, creator code and flags."""

    def __init__(self):
        # '????' is the conventional "unknown" four-character code.
        self.Type = self.Creator = '????'
        self.Flags = 0
def getfileinfo(name):
    """Return (filename, FInfo, data-fork-size, resource-fork-size) for *name*.

    The file is sniffed: if the first 512 bytes contain no NUL byte it is
    assumed to be a text file and typed 'TEXT'. The resource fork size is
    always 0 on non-Mac platforms. Any ':' in the basename is replaced once
    with '-' (':' is the classic-Mac path separator).
    """
    finfo = FInfo()
    # Bug fix: use a context manager so the handle is not leaked when
    # read()/seek() raise; the original called fp.close() unconditionally.
    with io.open(name, 'rb') as fp:
        # Quick check for textfile
        data = fp.read(512)
        if 0 not in data:
            finfo.Type = 'TEXT'
        fp.seek(0, 2)
        dsize = fp.tell()
    dir, file = os.path.split(name)
    file = file.replace(':', '-', 1)
    return file, finfo, dsize, 0
class openrsrc:
    """Stub resource-fork file object for platforms without resource forks.

    Accepts any constructor arguments, always reads empty bytes, and
    silently discards writes.
    """

    def __init__(self, *args):
        pass

    def read(self, *args):
        # A non-Mac file never has a resource fork.
        return b''

    def write(self, *args):
        pass

    def close(self):
        pass
class _Hqxcoderengine:
    """Write data to the coder in 3-byte chunks.

    binascii.b2a_hqx encodes 3 input bytes into 4 output characters, so
    input is buffered until a multiple of 3 is available. Output is wrapped
    into LINELEN-character lines; the very first line is one character
    shorter to account for the leading ':' written by the caller.
    """

    def __init__(self, ofp):
        self.ofp = ofp
        self.data = b''          # raw bytes not yet encoded (len < 3)
        self.hqxdata = b''       # encoded chars not yet written as full lines
        self.linelen = LINELEN - 1  # first line is shorter (leading ':')

    def write(self, data):
        """Buffer *data*, encode complete 3-byte groups and flush full lines."""
        self.data = self.data + data
        datalen = len(self.data)
        todo = (datalen // 3) * 3  # only whole 3-byte groups can be encoded
        data = self.data[:todo]
        self.data = self.data[todo:]
        if not data:
            return
        self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
        self._flush(0)

    def _flush(self, force):
        """Write out all complete lines; with *force*, also the final partial
        line followed by the closing ':'."""
        first = 0
        while first <= len(self.hqxdata) - self.linelen:
            last = first + self.linelen
            self.ofp.write(self.hqxdata[first:last] + b'\n')
            self.linelen = LINELEN  # subsequent lines are full length
            first = last
        self.hqxdata = self.hqxdata[first:]
        if force:
            self.ofp.write(self.hqxdata + b':\n')

    def close(self):
        """Encode any trailing bytes, emit the terminator and close the file."""
        if self.data:
            self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data)
        self._flush(1)
        self.ofp.close()
        del self.ofp
class _Rlecoderengine:
    """Write data to the RLE-coder in suitably large chunks.

    Buffers input until at least REASONABLY_LARGE bytes are available so the
    run-length encoder sees long runs, then forwards the compressed data to
    the underlying (hqx) writer.
    """

    def __init__(self, ofp):
        self.ofp = ofp
        self.data = b''  # pending, not-yet-compressed bytes

    def write(self, data):
        """Accumulate *data*; compress and forward once the buffer is large."""
        self.data = self.data + data
        if len(self.data) < REASONABLY_LARGE:
            return
        rledata = binascii.rlecode_hqx(self.data)
        self.ofp.write(rledata)
        self.data = b''

    def close(self):
        """Flush any remaining buffered bytes and close the downstream writer."""
        if self.data:
            rledata = binascii.rlecode_hqx(self.data)
            self.ofp.write(rledata)
        self.ofp.close()
        del self.ofp
class BinHex:
    """Writer for BinHex 4.0 files.

    Usage: construct with (name, finfo, data-length, resource-length) and an
    output file (or filename), write() exactly dlen bytes of data fork, then
    write_rsrc() exactly rlen bytes of resource fork, then close(). A CRC is
    appended after the header, the data fork and the resource fork.
    """

    def __init__(self, name_finfo_dlen_rlen, ofp):
        name, finfo, dlen, rlen = name_finfo_dlen_rlen
        if isinstance(ofp, str):
            ofname = ofp
            ofp = io.open(ofname, 'wb')
            # Classic-Mac only: tag the output file; dead code elsewhere.
            if os.name == 'mac':
                fss = FSSpec(ofname)
                fss.SetCreatorType('BnHq', 'TEXT')
        # Standard BinHex preamble, followed by the ':' that starts the data.
        ofp.write(b'(This file must be converted with BinHex 4.0)\r\r:')
        # Output pipeline: RLE-compress, then hqx-encode, then the file.
        hqxer = _Hqxcoderengine(ofp)
        self.ofp = _Rlecoderengine(hqxer)
        self.crc = 0
        if finfo is None:
            finfo = FInfo()
        self.dlen = dlen   # data-fork bytes still expected
        self.rlen = rlen   # resource-fork bytes still expected
        self._writeinfo(name, finfo)
        self.state = _DID_HEADER

    def _writeinfo(self, name, finfo):
        """Write the header record (name, type/creator, flags, fork sizes)."""
        nl = len(name)
        if nl > 63:
            raise Error('Filename too long')
        # Header layout: length-prefixed name, NUL, type, creator, flags, sizes.
        d = bytes([nl]) + name.encode("latin-1") + b'\0'
        tp, cr = finfo.Type, finfo.Creator
        if isinstance(tp, str):
            tp = tp.encode("latin-1")
        if isinstance(cr, str):
            cr = cr.encode("latin-1")
        d2 = tp + cr

        # Force all structs to be packed with big-endian
        d3 = struct.pack('>h', finfo.Flags)
        d4 = struct.pack('>ii', self.dlen, self.rlen)
        info = d + d2 + d3 + d4
        self._write(info)
        self._writecrc()

    def _write(self, data):
        """Write *data*, folding it into the running CRC."""
        self.crc = binascii.crc_hqx(data, self.crc)
        self.ofp.write(data)

    def _writecrc(self):
        """Emit the current 16-bit CRC (big-endian) and reset it."""
        # XXXX Should this be here??
        # self.crc = binascii.crc_hqx('\0\0', self.crc)
        # struct needs the matching signedness for values with the top bit set.
        if self.crc < 0:
            fmt = '>h'
        else:
            fmt = '>H'
        self.ofp.write(struct.pack(fmt, self.crc))
        self.crc = 0

    def write(self, data):
        """Write data-fork bytes; only legal directly after the header."""
        if self.state != _DID_HEADER:
            raise Error('Writing data at the wrong time')
        self.dlen = self.dlen - len(data)
        self._write(data)

    def close_data(self):
        """Finish the data fork: verify the declared size and emit its CRC."""
        if self.dlen != 0:
            raise Error('Incorrect data size, diff=%r' % (self.rlen,))
        self._writecrc()
        self.state = _DID_DATA

    def write_rsrc(self, data):
        """Write resource-fork bytes (implicitly closing the data fork)."""
        if self.state < _DID_DATA:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error('Writing resource data at the wrong time')
        self.rlen = self.rlen - len(data)
        self._write(data)

    def close(self):
        """Finish the resource fork, emit its CRC and close the pipeline."""
        if self.state < _DID_DATA:
            self.close_data()
        if self.state != _DID_DATA:
            raise Error('Close at the wrong time')
        if self.rlen != 0:
            raise Error("Incorrect resource-datasize, diff=%r" % (self.rlen,))
        self._writecrc()
        self.ofp.close()
        self.state = None
        del self.ofp
def binhex(inp, out):
    """binhex(infilename, outfilename): create binhex-encoded copy of a file"""
    finfo = getfileinfo(inp)
    ofp = BinHex(finfo, out)

    # Copy the data fork in large chunks.
    ifp = io.open(inp, 'rb')
    # XXXX Do textfile translation on non-mac systems
    while True:
        d = ifp.read(128000)
        if not d: break
        ofp.write(d)
    ofp.close_data()
    ifp.close()

    # Copy the resource fork (empty stub on non-Mac platforms).
    ifp = openrsrc(inp, 'rb')
    while True:
        d = ifp.read(128000)
        if not d: break
        ofp.write_rsrc(d)
    ofp.close()
    ifp.close()
class _Hqxdecoderengine:
    """Read data via the decoder in 4-byte chunks.

    Wraps an input file and yields decoded bytes from the hqx character
    stream, transparently skipping newlines and detecting the end marker.
    """

    def __init__(self, ifp):
        self.ifp = ifp
        self.eof = 0  # set once the hqx terminator has been seen

    def read(self, totalwtd):
        """Read at least wtd bytes (or until EOF)"""
        decdata = b''
        wtd = totalwtd
        #
        # The loop here is convoluted, since we don't really now how
        # much to decode: there may be newlines in the incoming data.
        while wtd > 0:
            if self.eof: return decdata
            # 4 encoded characters produce 3 decoded bytes.
            wtd = ((wtd + 2) // 3) * 4
            data = self.ifp.read(wtd)
            #
            # Next problem: there may not be a complete number of
            # bytes in what we pass to a2b. Solve by yet another
            # loop.
            #
            while True:
                try:
                    decdatacur, self.eof = binascii.a2b_hqx(data)
                    break
                except binascii.Incomplete:
                    pass
                # Feed one more byte at a time until a2b_hqx succeeds.
                newdata = self.ifp.read(1)
                if not newdata:
                    raise Error('Premature EOF on binhex file')
                data = data + newdata
            decdata = decdata + decdatacur
            wtd = totalwtd - len(decdata)
            if not decdata and not self.eof:
                raise Error('Premature EOF on binhex file')
        return decdata

    def close(self):
        self.ifp.close()
class _Rledecoderengine:
"""Read data via the RLE-coder"""
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = b''
self.post_buffer = b''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd - len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = b''
return
#
# Obfuscated code ahead. We have to take care that we don't
# end up with an orphaned RUNCHAR later on. So, we keep a couple
# of bytes in the buffer, depending on what the end of
# the buffer looks like:
# '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0)
# '?\220' - Keep 2 bytes: repeated something-else
# '\220\0' - Escaped \220: Keep 2 bytes.
# '?\220?' - Complete repeat sequence: decode all
# otherwise: keep 1 byte.
#
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + b'\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + b'\0':
mark = mark - 2
elif self.pre_buffer[-2] == RUNCHAR:
pass # Decode all
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if isinstance(ifp, str):
ifp = io.open(ifp, 'rb')
#
# Find initial colon.
#
while True:
ch = ifp.read(1)
if not ch:
raise Error("No binhex data found")
# Cater for \r\n terminated lines (which show up as \n\r, hence
# all lines start with \r)
if ch == b'\r':
continue
if ch == b':':
break
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
#self.crc = binascii.crc_hqx('\0\0', self.crc)
# XXXX Is this needed??
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error('CRC error, computed %x, read %x'
% (self.crc, filecrc))
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1 + 4 + 4 + 2 + 4 + 4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error('Read data at wrong time')
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = b''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error('close_data at wrong time')
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error('Read resource data at wrong time')
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
"""hexbin(infilename, outfilename) - Decode binhexed file"""
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
if os.name == 'mac':
ofss = FSSpec(out)
out = ofss.as_pathname()
ofp = io.open(out, 'wb')
# XXXX Do translation on non-mac systems
while True:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while True:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
if os.name == 'mac':
nfinfo = ofss.GetFInfo()
nfinfo.Creator = finfo.Creator
nfinfo.Type = finfo.Type
nfinfo.Flags = finfo.Flags
ofss.SetFInfo(nfinfo)
ifp.close()
| 28.9375 | 79 | 0.54838 |
# input. The resulting code (xx 90 90) would appear to be interpreted as an
# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety...
import io
import os
import sys
import struct
import binascii
__all__ = ["binhex","hexbin","Error"]
class Error(Exception):
pass
[_DID_HEADER, _DID_DATA, _DID_RSRC] = range(3)
REASONABLY_LARGE = 32768
LINELEN = 64
RUNCHAR = b"\x90"
class FInfo:
def __init__(self):
self.Type = '????'
self.Creator = '????'
self.Flags = 0
def getfileinfo(name):
finfo = FInfo()
fp = io.open(name, 'rb')
data = fp.read(512)
if 0 not in data:
finfo.Type = 'TEXT'
fp.seek(0, 2)
dsize = fp.tell()
fp.close()
dir, file = os.path.split(name)
file = file.replace(':', '-', 1)
return file, finfo, dsize, 0
class openrsrc:
def __init__(self, *args):
pass
def read(self, *args):
return b''
def write(self, *args):
pass
def close(self):
pass
class _Hqxcoderengine:
def __init__(self, ofp):
self.ofp = ofp
self.data = b''
self.hqxdata = b''
self.linelen = LINELEN - 1
def write(self, data):
self.data = self.data + data
datalen = len(self.data)
todo = (datalen // 3) * 3
data = self.data[:todo]
self.data = self.data[todo:]
if not data:
return
self.hqxdata = self.hqxdata + binascii.b2a_hqx(data)
self._flush(0)
def _flush(self, force):
first = 0
while first <= len(self.hqxdata) - self.linelen:
last = first + self.linelen
self.ofp.write(self.hqxdata[first:last] + b'\n')
self.linelen = LINELEN
first = last
self.hqxdata = self.hqxdata[first:]
if force:
self.ofp.write(self.hqxdata + b':\n')
def close(self):
if self.data:
self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data)
self._flush(1)
self.ofp.close()
del self.ofp
class _Rlecoderengine:
def __init__(self, ofp):
self.ofp = ofp
self.data = b''
def write(self, data):
self.data = self.data + data
if len(self.data) < REASONABLY_LARGE:
return
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.data = b''
def close(self):
if self.data:
rledata = binascii.rlecode_hqx(self.data)
self.ofp.write(rledata)
self.ofp.close()
del self.ofp
class BinHex:
def __init__(self, name_finfo_dlen_rlen, ofp):
name, finfo, dlen, rlen = name_finfo_dlen_rlen
if isinstance(ofp, str):
ofname = ofp
ofp = io.open(ofname, 'wb')
if os.name == 'mac':
fss = FSSpec(ofname)
fss.SetCreatorType('BnHq', 'TEXT')
ofp.write(b'(This file must be converted with BinHex 4.0)\r\r:')
hqxer = _Hqxcoderengine(ofp)
self.ofp = _Rlecoderengine(hqxer)
self.crc = 0
if finfo is None:
finfo = FInfo()
self.dlen = dlen
self.rlen = rlen
self._writeinfo(name, finfo)
self.state = _DID_HEADER
def _writeinfo(self, name, finfo):
nl = len(name)
if nl > 63:
raise Error('Filename too long')
d = bytes([nl]) + name.encode("latin-1") + b'\0'
tp, cr = finfo.Type, finfo.Creator
if isinstance(tp, str):
tp = tp.encode("latin-1")
if isinstance(cr, str):
cr = cr.encode("latin-1")
d2 = tp + cr
d3 = struct.pack('>h', finfo.Flags)
d4 = struct.pack('>ii', self.dlen, self.rlen)
info = d + d2 + d3 + d4
self._write(info)
self._writecrc()
def _write(self, data):
self.crc = binascii.crc_hqx(data, self.crc)
self.ofp.write(data)
def _writecrc(self):
if self.crc < 0:
fmt = '>h'
else:
fmt = '>H'
self.ofp.write(struct.pack(fmt, self.crc))
self.crc = 0
def write(self, data):
if self.state != _DID_HEADER:
raise Error('Writing data at the wrong time')
self.dlen = self.dlen - len(data)
self._write(data)
def close_data(self):
if self.dlen != 0:
raise Error('Incorrect data size, diff=%r' % (self.rlen,))
self._writecrc()
self.state = _DID_DATA
def write_rsrc(self, data):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error('Writing resource data at the wrong time')
self.rlen = self.rlen - len(data)
self._write(data)
def close(self):
if self.state < _DID_DATA:
self.close_data()
if self.state != _DID_DATA:
raise Error('Close at the wrong time')
if self.rlen != 0:
raise Error("Incorrect resource-datasize, diff=%r" % (self.rlen,))
self._writecrc()
self.ofp.close()
self.state = None
del self.ofp
def binhex(inp, out):
finfo = getfileinfo(inp)
ofp = BinHex(finfo, out)
ifp = io.open(inp, 'rb')
while True:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close_data()
ifp.close()
ifp = openrsrc(inp, 'rb')
while True:
d = ifp.read(128000)
if not d: break
ofp.write_rsrc(d)
ofp.close()
ifp.close()
class _Hqxdecoderengine:
def __init__(self, ifp):
self.ifp = ifp
self.eof = 0
def read(self, totalwtd):
decdata = b''
wtd = totalwtd
# much to decode: there may be newlines in the incoming data.
while wtd > 0:
if self.eof: return decdata
wtd = ((wtd + 2) // 3) * 4
data = self.ifp.read(wtd)
#
# Next problem: there may not be a complete number of
# bytes in what we pass to a2b. Solve by yet another
# loop.
#
while True:
try:
decdatacur, self.eof = binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if not newdata:
raise Error('Premature EOF on binhex file')
data = data + newdata
decdata = decdata + decdatacur
wtd = totalwtd - len(decdata)
if not decdata and not self.eof:
raise Error('Premature EOF on binhex file')
return decdata
def close(self):
self.ifp.close()
class _Rledecoderengine:
def __init__(self, ifp):
self.ifp = ifp
self.pre_buffer = b''
self.post_buffer = b''
self.eof = 0
def read(self, wtd):
if wtd > len(self.post_buffer):
self._fill(wtd - len(self.post_buffer))
rv = self.post_buffer[:wtd]
self.post_buffer = self.post_buffer[wtd:]
return rv
def _fill(self, wtd):
self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4)
if self.ifp.eof:
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer)
self.pre_buffer = b''
return
#
# Obfuscated code ahead. We have to take care that we don't
mark = len(self.pre_buffer)
if self.pre_buffer[-3:] == RUNCHAR + b'\0' + RUNCHAR:
mark = mark - 3
elif self.pre_buffer[-1] == RUNCHAR:
mark = mark - 2
elif self.pre_buffer[-2:] == RUNCHAR + b'\0':
mark = mark - 2
elif self.pre_buffer[-2] == RUNCHAR:
pass
else:
mark = mark - 1
self.post_buffer = self.post_buffer + \
binascii.rledecode_hqx(self.pre_buffer[:mark])
self.pre_buffer = self.pre_buffer[mark:]
def close(self):
self.ifp.close()
class HexBin:
def __init__(self, ifp):
if isinstance(ifp, str):
ifp = io.open(ifp, 'rb')
while True:
ch = ifp.read(1)
if not ch:
raise Error("No binhex data found")
if ch == b'\r':
continue
if ch == b':':
break
hqxifp = _Hqxdecoderengine(ifp)
self.ifp = _Rledecoderengine(hqxifp)
self.crc = 0
self._readheader()
def _read(self, len):
data = self.ifp.read(len)
self.crc = binascii.crc_hqx(data, self.crc)
return data
def _checkcrc(self):
filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff
self.crc = self.crc & 0xffff
if filecrc != self.crc:
raise Error('CRC error, computed %x, read %x'
% (self.crc, filecrc))
self.crc = 0
def _readheader(self):
len = self._read(1)
fname = self._read(ord(len))
rest = self._read(1 + 4 + 4 + 2 + 4 + 4)
self._checkcrc()
type = rest[1:5]
creator = rest[5:9]
flags = struct.unpack('>h', rest[9:11])[0]
self.dlen = struct.unpack('>l', rest[11:15])[0]
self.rlen = struct.unpack('>l', rest[15:19])[0]
self.FName = fname
self.FInfo = FInfo()
self.FInfo.Creator = creator
self.FInfo.Type = type
self.FInfo.Flags = flags
self.state = _DID_HEADER
def read(self, *n):
if self.state != _DID_HEADER:
raise Error('Read data at wrong time')
if n:
n = n[0]
n = min(n, self.dlen)
else:
n = self.dlen
rv = b''
while len(rv) < n:
rv = rv + self._read(n-len(rv))
self.dlen = self.dlen - n
return rv
def close_data(self):
if self.state != _DID_HEADER:
raise Error('close_data at wrong time')
if self.dlen:
dummy = self._read(self.dlen)
self._checkcrc()
self.state = _DID_DATA
def read_rsrc(self, *n):
if self.state == _DID_HEADER:
self.close_data()
if self.state != _DID_DATA:
raise Error('Read resource data at wrong time')
if n:
n = n[0]
n = min(n, self.rlen)
else:
n = self.rlen
self.rlen = self.rlen - n
return self._read(n)
def close(self):
if self.rlen:
dummy = self.read_rsrc(self.rlen)
self._checkcrc()
self.state = _DID_RSRC
self.ifp.close()
def hexbin(inp, out):
ifp = HexBin(inp)
finfo = ifp.FInfo
if not out:
out = ifp.FName
if os.name == 'mac':
ofss = FSSpec(out)
out = ofss.as_pathname()
ofp = io.open(out, 'wb')
while True:
d = ifp.read(128000)
if not d: break
ofp.write(d)
ofp.close()
ifp.close_data()
d = ifp.read_rsrc(128000)
if d:
ofp = openrsrc(out, 'wb')
ofp.write(d)
while True:
d = ifp.read_rsrc(128000)
if not d: break
ofp.write(d)
ofp.close()
if os.name == 'mac':
nfinfo = ofss.GetFInfo()
nfinfo.Creator = finfo.Creator
nfinfo.Type = finfo.Type
nfinfo.Flags = finfo.Flags
ofss.SetFInfo(nfinfo)
ifp.close()
| true | true |
1c3f34b102d2f0b88d8546b3a531125fc9dd1649 | 485 | py | Python | src/schemas/service_schema.py | Nardri/rbac-service | c5cf6baf60e95a7790156c85e37c76c697efd585 | [
"MIT"
] | null | null | null | src/schemas/service_schema.py | Nardri/rbac-service | c5cf6baf60e95a7790156c85e37c76c697efd585 | [
"MIT"
] | null | null | null | src/schemas/service_schema.py | Nardri/rbac-service | c5cf6baf60e95a7790156c85e37c76c697efd585 | [
"MIT"
] | null | null | null | """Service schema module"""
from marshmallow import fields, validate, post_load
from src.schemas import BaseSchema
class ServiceSchema(BaseSchema):
"""Schema class"""
name = fields.String(required=True,
validate=[validate.Length(min=3, max=100)])
@post_load
def append_service_to_name(self, data, **kwargs):
"""Append service to the service name"""
data['name'] = f'{data.get("name").upper()}_SERVICE'
return data
| 25.526316 | 68 | 0.643299 |
from marshmallow import fields, validate, post_load
from src.schemas import BaseSchema
class ServiceSchema(BaseSchema):
name = fields.String(required=True,
validate=[validate.Length(min=3, max=100)])
@post_load
def append_service_to_name(self, data, **kwargs):
data['name'] = f'{data.get("name").upper()}_SERVICE'
return data
| true | true |
1c3f34f9e49804db348754d24fea1e5a877cf275 | 315 | py | Python | venafi_vcert_gitlab_integration/version_command.py | fullstaq-labs/venafi-vcert-gitlab-integration | bb4549e1d83a4afe177665f04ca778e7c4f59d75 | [
"Apache-2.0"
] | null | null | null | venafi_vcert_gitlab_integration/version_command.py | fullstaq-labs/venafi-vcert-gitlab-integration | bb4549e1d83a4afe177665f04ca778e7c4f59d75 | [
"Apache-2.0"
] | 3 | 2021-06-07T08:08:07.000Z | 2021-08-02T09:25:56.000Z | venafi_vcert_gitlab_integration/version_command.py | fullstaq-labs/venafi-vcert-gitlab-integration | bb4549e1d83a4afe177665f04ca778e7c4f59d75 | [
"Apache-2.0"
] | null | null | null | import os
module_dir = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def read_product_version():
with open(os.path.join(module_dir, 'version.txt'), 'r', encoding='UTF-8') as f:
return f.read().strip()
def main():
print(read_product_version())
if __name__ == '__main__':
main()
| 18.529412 | 83 | 0.663492 | import os
module_dir = os.path.abspath(os.path.join(os.path.dirname(__file__)))
def read_product_version():
with open(os.path.join(module_dir, 'version.txt'), 'r', encoding='UTF-8') as f:
return f.read().strip()
def main():
print(read_product_version())
if __name__ == '__main__':
main()
| true | true |
1c3f35e8ed1199cf7a5df4b9f29f45e0ee1e147e | 852 | py | Python | CoinCounter.py | KRHS-GameProgramming-2018/The-Adventures-of-Spaceman | 030ce5006344ffab595309949ad5eef42d6f4aa9 | [
"BSD-2-Clause"
] | null | null | null | CoinCounter.py | KRHS-GameProgramming-2018/The-Adventures-of-Spaceman | 030ce5006344ffab595309949ad5eef42d6f4aa9 | [
"BSD-2-Clause"
] | 11 | 2019-01-28T13:09:29.000Z | 2019-03-12T12:19:38.000Z | CoinCounter.py | KRHS-GameProgramming-2018/The-Adventures-of-Spaceman | 030ce5006344ffab595309949ad5eef42d6f4aa9 | [
"BSD-2-Clause"
] | null | null | null | import pygame, sys, math
#HealthBar and Power Ups
class CoinCounter(pygame.sprite.Sprite):
def __init__(self, coins=0, pos = [0,0]):
pygame.sprite.Sprite.__init__(self, self.containers)
self.coin = coins
self.font = pygame.font.Font("8-Bit Madness.ttf", 48)
# ~ self.shellImage = pygame.image.load("PNG/Power-ups/c0.png")
# ~ self.shellRect = self.image.get_rect(center = [980, 150])
self.image = self.font.render(str(self.coin), True, (255,255,255))
self.rect = self.image.get_rect(center = pos)
def update(*args):
self = args[0]
coins = args[5]
self.coin = coins
self.image = self.font.render(str(self.coin), True, (255,255,255))
self.rect = self.image.get_rect(center = self.rect.center)
| 31.555556 | 74 | 0.585681 | import pygame, sys, math
class CoinCounter(pygame.sprite.Sprite):
def __init__(self, coins=0, pos = [0,0]):
pygame.sprite.Sprite.__init__(self, self.containers)
self.coin = coins
self.font = pygame.font.Font("8-Bit Madness.ttf", 48)
self.image = self.font.render(str(self.coin), True, (255,255,255))
self.rect = self.image.get_rect(center = pos)
def update(*args):
self = args[0]
coins = args[5]
self.coin = coins
self.image = self.font.render(str(self.coin), True, (255,255,255))
self.rect = self.image.get_rect(center = self.rect.center)
| true | true |
1c3f37b0d6abe80e9abf5047bd200068d3915b3b | 280 | py | Python | camper/pages/templatetags/pages_tags.py | drinks/camper | 82d9f1342886d91bf6787c1bcdb1a7cb62e53ca3 | [
"BSD-3-Clause"
] | null | null | null | camper/pages/templatetags/pages_tags.py | drinks/camper | 82d9f1342886d91bf6787c1bcdb1a7cb62e53ca3 | [
"BSD-3-Clause"
] | null | null | null | camper/pages/templatetags/pages_tags.py | drinks/camper | 82d9f1342886d91bf6787c1bcdb1a7cb62e53ca3 | [
"BSD-3-Clause"
] | null | null | null | from django import template
from camper.pages.models import Chunk
register = template.Library()
@register.simple_tag(name='chunk')
def do_chunk(slug):
try:
c = Chunk.objects.get(slug=slug)
return c.content
except Chunk.DoesNotExist:
return ''
| 17.5 | 40 | 0.682143 | from django import template
from camper.pages.models import Chunk
register = template.Library()
@register.simple_tag(name='chunk')
def do_chunk(slug):
try:
c = Chunk.objects.get(slug=slug)
return c.content
except Chunk.DoesNotExist:
return ''
| true | true |
1c3f38cdc80a2becaf809b2b5084b77fa89579c6 | 682 | py | Python | web/api/app.py | kosyachniy/dev | 39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4 | [
"Apache-2.0"
] | 13 | 2018-12-17T23:30:54.000Z | 2021-12-29T14:31:43.000Z | web/api/app.py | kosyachniy/dev | 39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4 | [
"Apache-2.0"
] | 36 | 2018-06-07T21:34:13.000Z | 2022-03-13T21:01:43.000Z | web/api/app.py | kosyachniy/dev | 39bb5c5ee10780bfcd8a59cf59cfb1a348ac52a4 | [
"Apache-2.0"
] | 2 | 2021-01-03T11:47:20.000Z | 2021-12-29T14:31:49.000Z | from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
class Input(BaseModel):
method: str
params: dict = {}
locale: str = 'en'
token: str = None
app = FastAPI(title='Web app API')
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.post('/')
async def api(data: Input, request: Request):
print(data, request.client.host, request.client.port)
return {'error': 0, 'result': {'data': 'result'}}
if __name__ == '__main__':
import uvicorn
uvicorn.run('app:app', host='0.0.0.0', port=5000, reload=True) | 21.3125 | 66 | 0.680352 | from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
class Input(BaseModel):
method: str
params: dict = {}
locale: str = 'en'
token: str = None
app = FastAPI(title='Web app API')
app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
@app.post('/')
async def api(data: Input, request: Request):
print(data, request.client.host, request.client.port)
return {'error': 0, 'result': {'data': 'result'}}
if __name__ == '__main__':
import uvicorn
uvicorn.run('app:app', host='0.0.0.0', port=5000, reload=True) | true | true |
1c3f39799557f9a44493355c1da9e2d7a9f96e9d | 50,056 | py | Python | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/interface/configure.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | null | null | null | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/interface/configure.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | null | null | null | pkgs/sdk-pkg/src/genie/libs/sdk/apis/iosxe/interface/configure.py | jbronikowski/genielibs | 200a34e5fe4838a27b5a80d5973651b2e34ccafb | [
"Apache-2.0"
] | null | null | null | """Common configure functions for interface"""
# Python
import logging
# Unicon
from unicon.core.errors import SubCommandFailure
# Steps
from pyats.aetest.steps import Steps
# Genie
from genie.conf.base import Interface
from genie.libs.conf.base import IPv4Address, IPv6Address
from genie.libs.conf.interface import IPv4Addr, IPv6Addr
from genie.harness.utils import connect_device
# Interface
from genie.libs.sdk.apis.iosxe.interface.get import (
get_interface_running_config,
)
from genie.libs.sdk.apis.iosxe.interface.get import (
get_interface_connected_adjacent_router_interfaces,
)
# utils
from genie.libs.sdk.apis.utils import mask_to_int
log = logging.getLogger(__name__)
def reset_interface(device, interface):
    """ Restore an interface to its default configuration

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    log.info(f"Defaulting interface {interface}")

    cmd = f"default interface {interface}"
    try:
        device.configure(cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not default {interface}. Error:\n{e}"
        )
def clear_interface_counters(device, interface):
    """ Clear the traffic counters of an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name

        Returns:
            None

        Raises:
            SubCommandFailure: exec command failed
    """
    log.info(f"Clearing counters on interface {interface}")

    try:
        # 'clear counters' is an exec-mode command, not a config command
        device.execute(f"clear counters {interface}")
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not clear counters on {interface}. Error:\n{e}"
        )
def config_interface_negotiation(device, interface):
    """ Enable 'negotiation auto' on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    log.info(f"Configuring negotiation auto on interface {interface}")

    config = [f"interface {interface}", "negotiation auto"]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to config negotiation auto on interface {interface}. Error:\n{e}"
        )
def remove_interface_negotiation(device, interface):
    """ Disable 'negotiation auto' on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    log.info(f"Removing negotiation auto on interface {interface}")

    config = [f"interface {interface}", "no negotiation auto"]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to unconfig negotiation auto on interface {interface}. Error:\n{e}"
        )
def shut_interface(device, interface):
    """ Administratively shut down an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    # Make sure there is an active connection before configuring
    if not device.is_connected():
        connect_device(device=device)

    config = [f"interface {interface}", "shutdown"]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not shut interface {interface} on device {device.name}. Error:\n{e}"
        )
def unshut_interface(device, interface):
    """ Bring up an administratively shut interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    # Make sure there is an active connection before configuring
    if not device.is_connected():
        connect_device(device=device)

    config = [f"interface {interface}", "no shutdown"]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not unshut interface {interface} on device {device.name}. Error:\n{e}"
        )
def shut_interface_adjacent_interfaces(
    device, link_name, adjacent_interfaces=None, steps=Steps(), num=1
):
    """ Shut down the interfaces on adjacent routers for a link

        Args:
            device ('obj'): Device object
            link_name ('str'): Interface alias in topology
            adjacent_interfaces ('list'): List of EthernetInterface objects
            steps ('obj'): Context manager object
            num ('int'): Number of interfaces to return

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    # Discover the adjacent interfaces when the caller did not supply them
    if adjacent_interfaces is None:
        adjacent_interfaces = get_interface_connected_adjacent_router_interfaces(
            device=device, link_name=link_name, num=num
        )

    for adj_intf in adjacent_interfaces:
        peer_device = adj_intf.device
        peer_intf_name = adj_intf.name

        step_msg = (
            "Shut adjacent interface {interface} on "
            "device {device}".format(
                interface=peer_intf_name, device=peer_device.name
            )
        )
        with steps.start(step_msg, continue_=True) as step:
            shut_interface(device=peer_device, interface=peer_intf_name)
def unshut_interface_adjacent_interfaces(
    device, link_name, adjacent_interfaces=None, steps=Steps(), num=1
):
    """ Bring up the interfaces on adjacent routers for a link

        Args:
            device ('obj'): Device object
            link_name ('str'): Interface alias in topology
            num ('int'): Number of interfaces to return
            adjacent_interfaces ('list'): List of EthernetInterface objects
            steps ('obj'): Context manager object

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    # Discover the adjacent interfaces when the caller did not supply them
    if adjacent_interfaces is None:
        adjacent_interfaces = get_interface_connected_adjacent_router_interfaces(
            device=device, link_name=link_name, num=num
        )

    for adj_intf in adjacent_interfaces:
        peer_device = adj_intf.device
        peer_intf_name = adj_intf.name

        step_msg = (
            "No shut adjacent interface {interface} on "
            "device {device}".format(
                interface=peer_intf_name, device=peer_device.name
            )
        )
        with steps.start(step_msg, continue_=True) as step:
            unshut_interface(device=peer_device, interface=peer_intf_name)
def config_interface_carrier_delay(device, interface, delay, delay_type):
    """ Configure carrier delay on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name
            delay (`int`): Delay time in second
            delay_type (`str`): Delay type; must be 'up' or 'down'

        Returns:
            None

        Raises:
            Exception: delay_type is not supported
            SubCommandFailure: configuration could not be applied
    """
    supported_types = ["up", "down"]
    if delay_type not in supported_types:
        raise Exception(
            f"'{delay_type}' not a supported type; only support '{supported_types}'"
        )

    cmd = f"interface {interface}\ncarrier-delay {delay_type} {delay}"
    try:
        device.configure(cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not configure carrier delay. Error:\n{e}"
        )
def remove_interface_carrier_delay(device, interface):
    """ Remove carrier delay configuration from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    # Remove both directions (up and down) in a single config session
    cmd = (
        f"interface {interface}\n"
        "no carrier-delay up\n"
        "no carrier-delay down"
    )
    try:
        device.configure(cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to remove carrier delay on {interface}. "
            f"Error:\n{e}") from e
def remove_interface_ospf_bfd(device, interface):
    """ Remove OSPF BFD configuration from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    cmd = f"interface {interface}\nno ip ospf bfd"
    try:
        device.configure(cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to remove ospf bfd on {interface}. "
            f"Error:\n{e}") from e
def config_interface_mtu(device, interface, mtu_bytes):
    """ Configure the MTU of an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name
            mtu_bytes (`int`): MTU bytes

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    log.info(f"Configuring MTU {mtu_bytes} on interface {interface}")

    config = [f"interface {interface}", f"mtu {mtu_bytes}"]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not configure MTU on {interface}. Error:\n{e}"
        )
def unconfig_interface_mtu(device, interface):
    """ Remove the MTU configuration from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    log.info(f"Removing MTU config on interface {interface}")

    config = [f"interface {interface}", "no mtu"]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not unconfigure MTU on {interface}. Error:\n{e}"
        )
def config_interface_ospf(device, interface, ospf_pid, area):
    """ Enable OSPF on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name
            ospf_pid (`str`): Ospf process id
            area ('int'): Ospf area code

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    log.info(f"Configuring OSPF on interface {interface}")

    config = [
        f"interface {interface}",
        f"ip ospf {ospf_pid} area {area}",
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not configure ospf. Error:\n{e}"
        )
def config_ip_on_interface(
    device,
    interface,
    ip_address,
    mask,
    ipv6_address=None,
    eth_encap_type=None,
    eth_encap_val=None,
    sub_interface=None,
):
    """ Configure an IPv4 (and optionally IPv6) address on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to get address
            ip_address (`str`): IP addressed to be configured on interface
            mask (`str`): Mask address to be used in configuration
            ipv6_address (`str`): IPv6 address with subnet mask
            eth_encap_type (`str`): Encapsulation type
            eth_encap_val (`str`): Encapsulation value
            sub_interface (`str`): Subinterface to be added to interface name

        Returns:
            None

        Raises:
            SubCommandFailure: configuration could not be applied
    """
    # Append the subinterface number to the interface name when given
    interface_name = (
        interface + "." + sub_interface if sub_interface else interface
    )

    # Assemble the configuration as a list of lines, then join once
    lines = [f"interface {interface_name}\n"]
    if eth_encap_type and eth_encap_val:
        lines.append(
            f"encapsulation {eth_encap_type} {eth_encap_val}\n"
        )
    lines.append(f"ip address {ip_address} {mask}\n")
    if ipv6_address:
        lines.append("ipv6 enable\n")
        lines.append(f"ipv6 address {ipv6_address}\n")
    cfg_str = "".join(lines)

    try:
        device.configure(cfg_str)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to configure IP address {ip_address} on interface "
            f"{interface_name} on device {device.name}. Error:\n{e}"
        )
def config_interface_subinterface_and_secondary_addresses(
    device,
    interface,
    sub_interface_num,
    ip_address,
    prefix,
    encap_type,
    start,
    end,
):
    """ Configure sub-interface and secondary addresses on device

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name
            sub_interface_num (`int`): Subinterface to be added to
                                       interface name
            ip_address(`str`): IP addressed to be configured on interface;
                               a template containing '{x}'
            prefix(`str`): prefix to be used in configuration
            encap_type (`str`): Encapsulation type
            start (`int`): start number on ip
            end (`int`): end number on ip

        Returns:
            config (`str`): generated configuration string

        Raises:
            Exception: configuration could not be built or applied
    """
    # Example of the generated configuration:
    # interface {interface}.999
    #  encapsulation dot1Q 999
    #  ip address 10.4.0.1 255.255.255.0
    #  ip address 1.1.x.1 255.255.255.0 secondary (x -> 1 to 15)
    name = interface + "." + str(sub_interface_num)
    sub_intf = Interface(device=device, name=name)
    sub_intf.eth_encap_type1 = encap_type
    sub_intf.eth_encap_val1 = sub_interface_num

    # Primary address: template rendered with the 'start' index
    ipv4a = IPv4Addr(device=device)
    ipv4a.ipv4 = IPv4Address(ip_address.format(x=start))
    ipv4a.prefix_length = prefix
    sub_intf.add_ipv4addr(ipv4a)

    # Secondary addresses: template rendered with x = 1 .. (end - start)
    for x in range(end - start):
        ipv4b = IPv4Addr(device=device)
        ipv4b.ipv4 = IPv4Address(ip_address.format(x=x + 1))
        ipv4b.prefix_length = prefix
        ipv4b.ipv4_secondary = True
        sub_intf.add_ipv4addr(ipv4b)

    # Predefine config so the except handler below cannot hit an
    # UnboundLocalError when build_config(apply=False) itself raises
    config = ""
    try:
        config = str(sub_intf.build_config(apply=False))
        sub_intf.build_config()
    except Exception as e:
        log.error(str(e))
        raise Exception("Failed to config \n {}".format(config))
    return config
def remove_interface_configured_service_policy(device, interface, out=None):
    """ Remove every service policy configured under an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to remove service policies from
            out (`dict`): Parsed ``show run interface <interface>`` output;
                fetched from the device when not supplied
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    if not out:
        out = get_interface_running_config(device, interface)

    # Collect one "no ..." line per configured service-policy statement
    configs = []
    for section, lines in out.items():
        if "interface" not in section:
            continue
        for line in lines:
            if "service-policy input" in line:
                configs.append("no {}".format(line))
            elif "service-policy output" in line:
                configs.append("no {}".format(line))

    if configs:
        # Enter interface mode first, then strip each policy
        configs.insert(0, "interface {interface}".format(interface=interface))
        try:
            device.configure(configs)
        except SubCommandFailure as e:
            raise SubCommandFailure(
                "Failed to unconfigure service policy"
                " in/out under interface {interface}. Error:\n{error}".format(
                    interface=interface, error=e
                )
            )
    else:
        log.info(
            "No configured service policy found under interface {interface}".format(
                interface=interface
            )
        )
def clear_interface_config(device, interface):
    """ Reset an interface to its default configuration

        Args:
            device ('obj'): device to use
            interface ('str'): interface to clear
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    log.info("Clearing {interface} config".format(interface=interface))
    cmd = "default interface {}".format(interface)
    try:
        device.configure(cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not default interface {interface}. Error:\n{error}".format(
                interface=interface, error=e
            )
        )
def configure_interface_switchport_access_vlan(device, interface, vlan):
    """ Configure a switchport access vlan on an interface

        Args:
            device ('obj'): device to use
            interface ('str'): interface to configure
            vlan ('str'): access vlan to configure
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    log.info(
        "Configuring switchport on {interface} with access_vlan = {vlan}".format(
            interface=interface, vlan=vlan
        )
    )
    config = [
        "interface {}".format(interface),
        "switchport access vlan {}".format(vlan),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not configure switchport access vlan. Error:\n{error}".format(
                error=e
            )
        )
def configure_interface_directed_broadcast(device, interfaces, configure=True):
    """ Enable or disable 'ip directed-broadcast' on a list of interfaces

        Args:
            device ('obj'): device to run on
            interfaces ('list'): list of interfaces to configure
            configure ('bool'): True to configure, False to unconfigure
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    # Select the config or unconfig form once, then emit one stanza per interface
    line = "ip directed-broadcast" if configure else "no ip directed-broadcast"
    cmd = "".join(
        "interface {}\n{}\nexit\n".format(intf, line) for intf in interfaces
    )
    try:
        device.configure(cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not configure directed broadcast. Error:\n{error}".format(
                error=e
            )
        )
def configure_interface_l3_port_channel(
    target,
    port_channel,
    neighbor_address,
    neighbor_netmask,
    interfaces,
    testbed,
):
    """ Configure a Layer-3 port channel and its member interfaces
        (Junos-style 'set' syntax)

        Args:
            target (`str`): Target device name in the testbed
            port_channel (`str`): Port channel interface
            neighbor_address (`str`): Peer IP address
            neighbor_netmask (`str`): Peer address netmask (dotted quad)
            interfaces (`list`): Member interfaces to bundle
            testbed (`obj`): Testbed object
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    ip = neighbor_address + "/" + str(mask_to_int(neighbor_netmask))
    config_cmd = [
        "set chassis aggregated-devices ethernet device-count 1",
        "set interfaces {} aggregated-ether-options lacp active".format(
            port_channel
        ),
        "set interfaces {} unit 0 family inet address {}".format(
            port_channel, ip
        ),
    ]
    # One 802.3ad membership line per member interface. The original
    # hard-coded interfaces[0..3] and raised IndexError on shorter lists;
    # iterating handles any member count while producing identical commands
    # for four-member lists.
    config_cmd.extend(
        "set interfaces {} gigether-options 802.3ad {}".format(
            intf, port_channel
        )
        for intf in interfaces
    )
    dev = testbed.devices[target]
    try:
        dev.configure(config_cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not configure l3 port channel. Error:\n{error}".format(
                error=e
            )
        )
def configure_interfaces_shutdown(device, interfaces):
    """ Shut down every interface in the given list on the device

        Args:
            device ('obj'): Device object
            interfaces ('list'): Interfaces to shut down
    """
    # Two lines per interface: enter interface mode, then shut it
    config_cmd = [
        cmd
        for intf in interfaces
        for cmd in ("int {}".format(intf), "shutdown")
    ]
    try:
        device.configure(config_cmd)
    except SubCommandFailure as e:
        log.error('Failed to shutdown interfaces on device {}: {}'.format(device.name, e))
def configure_interfaces_unshutdown(device, interfaces):
    """ Bring up every interface in the given list on the device

        Args:
            device ('obj'): Device object
            interfaces ('list'): Interfaces to enable
    """
    # Two lines per interface: enter interface mode, then 'no shutdown'
    config_cmd = [
        cmd
        for intf in interfaces
        for cmd in ("int {}".format(intf), "no shutdown")
    ]
    try:
        device.configure(config_cmd)
    except SubCommandFailure as e:
        log.error('Failed to enable interfaces on device {}: {}'.format(device.name, e))
def shutdown_interface(device, member):
    """ Shut down a bundled interface

        Args:
            device (`obj`): Device object
            member (`str`): Bundled interface
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    try:
        device.configure(["int {}".format(member), "shutdown"])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Couldn't shut down the port channel member"
            "{intf}. Error:\n{error}".format(intf=member, error=e)
        )
def configure_interface_interfaces_on_port_channel(
    device, interface, mode, channel_group, interfaces
):
    """ Add interface <interface> to a port channel

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to be added to port channel
            mode (`str`): Interface mode under Port channel
            channel_group (`obj`): Channel group
            interfaces (`list`): Full list of member interfaces; only the
                fourth member gets "lacp rate fast"
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config_cmd = [
        "interface {interface}".format(interface=interface),
        "no shutdown",
        "channel-group {channel_group} mode {mode}".format(
            mode=mode, channel_group=channel_group
        ),
    ]
    # BUGFIX: the original guarded with len(interfaces) > 2 and then indexed
    # interfaces[3], raising IndexError for exactly-3-member lists; index 3
    # requires at least 4 entries.
    if len(interfaces) > 3 and interface == interfaces[3]:
        config_cmd.append("lacp rate fast")
    try:
        device.configure(config_cmd)
        log.info(
            "Successfully added {intf} on "
            "channel-group {channel_group} in {mode} mode".format(
                intf=interface, mode=mode, channel_group=channel_group
            )
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Couldn't add {intf} on "
            "channel-group {channel_group} in {mode} mode. Error:\n{error}".format(
                intf=interface, mode=mode, channel_group=channel_group, error=e
            )
        )
def configure_lacp_on_interface(
    device, interface, min_max_bundle, minumum_bundle=False
):
    """ Configure LACP min-bundle or max-bundle on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to be added to port channel
            min_max_bundle (`int`): Number of minimum/maximum bundles
            minumum_bundle (`bool`): True to configure min-bundle,
                False (default) for max-bundle
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    # Pick the CLI keyword and the human-readable mode name together
    if minumum_bundle:
        mode, keyword = "minimum", "min-bundle"
    else:
        mode, keyword = "maximum", "max-bundle"
    config_cmd = [
        "int {}".format(interface),
        "lacp {} {}".format(keyword, min_max_bundle),
    ]
    try:
        device.configure(config_cmd)
        log.info(
            "Successfully configured {mode} number "
            "of port channel members to {max}".format(
                mode=mode, max=min_max_bundle
            )
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Couldn't configure {mode} number "
            "of port channel members to {max}. Error:\n{error}".format(
                mode=mode, max=min_max_bundle, error=e
            )
        )
def default_interface(device, interfaces):
    """ Default every interface in the given list on the device

        Args:
            device (`obj`): Device object
            interfaces (`list`): List of interfaces to be defaulted
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    for intf in interfaces:
        try:
            device.configure("default interface {}".format(intf))
            log.info("Successfully defaulted {}".format(intf))
        except SubCommandFailure as e:
            raise SubCommandFailure(
                "Couldn't default {interface}. Error:\n{error}".format(
                    interface=intf, error=e
                )
            )
def clear_interface_interfaces(device, interfaces):
    """ Clear the configuration of every interface in the list

        Args:
            device ('obj'): device to use
            interfaces ('list'): List of interfaces to be cleared
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    for interface in interfaces:
        # Subinterfaces (name contains '.') are deleted outright;
        # physical interfaces are defaulted instead
        template = "no interface {interface}" if "." in interface \
            else "default interface {interface}"
        cmd = template.format(interface=interface)
        log.info(
            'Clearing interface {interface} configuration with "{cmd}"'.format(
                interface=interface, cmd=cmd
            )
        )
        try:
            device.configure(cmd)
        except SubCommandFailure as e:
            raise SubCommandFailure(
                "Could not clear interface {interface}. Error:\n{error}".format(
                    interface=interface, error=e
                )
            )
def configure_vrf_on_interface(device, interface, vrf):
    """ Attach a VRF to an interface

        Args:
            device ('obj'): Device object
            interface ('str'): Interface
            vrf ('str'): VRF name
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        "vrf forwarding {}".format(vrf),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not configure VRF {vrf} on interface "
            "{interface}. Error:\n{error}".format(
                interface=interface, vrf=vrf, error=e
            )
        )
def configure_interface_description(device, interface, description):
    """ Configure a description on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name
            description (`str`): Description text
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        "description {}".format(description),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not configure description '{description}' on "
            "interface {interface}. Error:\n{error}".format(
                description=description, interface=interface, error=e
            )
        )
def unconfigure_interface_description(device, interface):
    """ Remove the description from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        "no description",
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not remove description from interface "
            "{interface}. Error:\n{error}".format(interface=interface, error=e)
        )
def configure_interface_monitor_session(device, monitor_config):
    """ configure monitor session on device

        Args:
            device (`obj`): Device object
            monitor_config (`list`): list of monitor session configuration
                dicts; required keys: session_name, session_type, interface,
                erspan_id, ip_address; optional keys: description,
                source_vlan, mtu, vrf
                ex.)
                    monitor_config = [{
                            'session_name': 1,
                            'session_type': 'erspan-source',
                            'interface': 'GigabitEthernet10',
                            'erspan_id': 10,
                            'ip_address': '192.168.1.1'
                        },
                        {
                            'session_name': 2,
                            'session_type': 'erspan-destination',
                            'interface': 'GigabitEthernet11',
                            'erspan_id': 10,
                            'ip_address': '192.168.1.1'
                        }
                    ]

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    # One device.configure() call per session; the CLI lines are assembled in
    # submode order, so the sequence of appends below is significant.
    for mc in monitor_config:
        config = []
        if "source" in mc["session_type"]:
            # Source session: source interface + destination submode
            config.append(
                "monitor session {} type {}\n".format(
                    mc["session_name"], mc["session_type"]
                )
            )
            config.append("source interface {}\n".format(mc["interface"]))
            config.append("destination\n")
            config.append("erspan-id {}\n".format(mc["erspan_id"]))
            config.append("ip address {}\n".format(mc["ip_address"]))
            config.append("origin ip address {}\n".format(mc["ip_address"]))
        else:
            # Destination session: the destination interface must be up first
            unshut_interface(device=device, interface=mc["interface"])
            config.append(
                "monitor session {} type {}\n".format(
                    mc["session_name"], mc["session_type"]
                )
            )
            config.append("destination interface {}\n".format(mc["interface"]))
            config.append("source\n")
            config.append("erspan-id {}\n".format(mc["erspan_id"]))
            config.append("ip address {}\n".format(mc["ip_address"]))
        # Optional knobs are appended inside the source/destination submode
        # for both session types
        if 'description' in mc:
            config.append("description {}\n".format(mc["description"]))
        if 'source_vlan' in mc:
            config.append("source vlan {}\n".format(mc["source_vlan"]))
        if 'mtu' in mc:
            config.append("mtu {}\n".format(mc["mtu"]))
        if 'vrf' in mc:
            config.append("vrf {}\n".format(mc["vrf"]))
        # Leave the submode and activate the session
        config.append("exit\n")
        config.append("no shutdown\n")
        try:
            device.configure("".join(config))
        except SubCommandFailure as e:
            raise SubCommandFailure(
                "Could not configure monitor session. Error:\n{error}".format(
                    error=e
                )
            )
def unconfigure_interface_monitor_session(device, session_name, session_type):
    """ Remove a monitor session from the device

        Args:
            device (`obj`): Device object
            session_name (`str`): session name
            session_type (`str`): session type
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    cmd = "no monitor session {session_name} type {session_type}".format(
        session_name=session_name, session_type=session_type
    )
    try:
        device.configure(cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not unconfigure monitor session. Error:\n{error}".format(
                error=e
            )
        )
def configure_subinterfaces_for_vlan_range(device, interface, vlan_id_start, vlan_id_step,
                                           vlan_id_count, network_start, network_step,
                                           host_address_step, netmask, ospf_network_type=None):
    """ Configures multiple subinterfaces looping through a vlan range

        Args:
            device ('obj'): Device to use
            interface ('str'): Physical interface to configure
            vlan_id_start ('int'): Start of vlan range
            vlan_id_step ('int'): Size of vlan range step
            vlan_id_count ('int'): How many steps for vlan range
            network_start ('str'): Start of network
            network_step ('str'): Size of network step (dotted quad,
                e.g. '0.0.1.0')
            host_address_step ('str'): Host offset added to each network to
                form the interface address (dotted quad, e.g. '0.0.0.1')
            netmask ('str'): Netmask to configure
            ospf_network_type ('str'): Ospf network type to configure
        Raises:
            SubCommandFailure
        Returns:
            list of configured interfaces
    """
    cmds = []
    vlan_id = vlan_id_start
    network = IPv4Address(network_start)
    interfaces = []

    for _ in range(vlan_id_count):
        interfaces.append('{interface}.{vlan_id}'.format(interface=interface, vlan_id=vlan_id))

        # Interface address = current network base + host offset
        ip_address = network + int(IPv4Address(host_address_step))

        cmds.extend(['interface {interface}.{vlan_id}'.format(interface=interface, vlan_id=vlan_id),
                     'encapsulation dot1q {vlan_id}'.format(vlan_id=vlan_id),
                     'ip address {ip_address} {netmask}'.format(ip_address=ip_address, netmask=netmask)])

        if ospf_network_type:
            cmds.append('ip ospf network {ospf_network_type}'.format(ospf_network_type=ospf_network_type))

        cmds.append('exit')

        # Advance to the next vlan id and network block
        vlan_id += vlan_id_step
        network += int(IPv4Address(network_step))

    # Wrap configuration errors for consistency with the rest of this module
    # (the original let device.configure exceptions propagate unwrapped)
    try:
        device.configure(cmds)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to configure subinterfaces on {interface}. Error:\n{error}".format(
                interface=interface, error=e
            )
        )
    return interfaces
def configure_ipv4_dhcp_relay_helper(device, interface, ip_address):
    """ Configure a DHCP relay helper address on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to configure
            ip_address (`str`): helper IP address to be configured on interface
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        "ip helper-address {}".format(ip_address),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to configure helper IP address {ip} on interface "
            "{interface} on device {dev}. Error:\n{error}".format(
                ip=ip_address,
                interface=interface,
                dev=device.name,
                error=e,
            )
        )
def attach_ipv6_raguard_policy_to_interface(device, interface, policy_name):
    """ Attach an IPv6 RA Guard policy to an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to attach policy
            policy_name (`str`): Policy name to be attached to interface
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        "ipv6 nd raguard attach-policy {}".format(policy_name),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to attach IPv6 RA Guard policy {policy_name} on interface "
            "{interface} on device {dev}. Error:\n{error}".format(
                policy_name=policy_name,
                interface=interface,
                dev=device.name,
                error=e,
            )
        )
def remove_interface_ip(device, interface):
    """ Remove the IP address from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface name
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    log.info(
        "Removing ip on interface {interface}".format(
            interface=interface
        )
    )
    config = [
        "interface {}".format(interface),
        "no ip address",
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to unconfig ip address on interface {interface}. Error:\n{error}".format(
                interface=interface, error=e
            )
        )
def configure_ipv6_dhcp_relay(device, interface, dest_ipv6, vlan):
    """ Configure IPv6 DHCP Relay
        Args:
            device ('obj'): device to use
            interface ('str'): name of the interface to be configured
            dest_ipv6 ('str'): IPv6 destination address
            vlan ('int'): vlan number
        Returns:
            None
        Raises:
            SubCommandFailure: Failed configuring IPv6 DHCP Relay
    """
    # BUGFIX: the original formatted these messages with int=int, printing
    # the builtin type "<class 'int'>" instead of the interface name.
    log.info(
        "Configuring IPv6 DHCP Relay on int={int}, for dest_ipv6={dest_ipv6} and vlan={vlan} "
        .format(int=interface, dest_ipv6=dest_ipv6, vlan=vlan)
    )
    try:
        device.configure(
            [
                "interface {interface}\n".format(interface=interface),
                "ipv6 dhcp relay destination {dest_ipv6} {vlan}".format(dest_ipv6=dest_ipv6, vlan=vlan)
            ]
        )
    except SubCommandFailure:
        raise SubCommandFailure(
            "Could not configure IPv6 DHCP Relay on int={int}, for dest_ipv6={dest_ipv6} and vlan={vlan} ".format(
                int=interface, dest_ipv6=dest_ipv6, vlan=vlan
            )
        )
def configure_ipv6_nd(device, interface, lifetime, pref_lifetime, router_pref, ra_lifetime,ra_interval):
    """ Configure IPv6 neighbor-discovery parameters on an interface

        Args:
            device ('obj'): device to use
            interface ('str'): name of the interface to be configured
            lifetime ('int'): Valid Lifetime in secs
            pref_lifetime ('int'): Preferred Lifetime in secs
            router_pref ('str'): default router preference
            ra_lifetime ('int'): IPv6 Router Advertisement Lifetime
            ra_interval ('int'): IPv6 Router Advertisement Interval
        Returns:
            None
        Raises:
            SubCommandFailure: Failed configuring IPv6 DHCP ND parameters
    """
    log.info(
        "Configuring IPv6 DHCP ND parameters on int={int} "
        .format(int=interface)
    )
    config = [
        "interface {interface}\n".format(interface=interface),
        "ipv6 nd prefix default {} {}".format(lifetime, pref_lifetime),
        "ipv6 nd router-preference {}".format(router_pref),
        "ipv6 nd ra lifetime {}".format(ra_lifetime),
        "ipv6 nd ra interval {}".format(ra_interval),
    ]
    try:
        device.configure(config)
    except SubCommandFailure:
        raise SubCommandFailure(
            "Could not configure IPv6 DHCP ND parameters on int={int}".format(int=interface)
        )
def attach_dhcpv6_guard_policy_to_interface(device, interface, policy_name):
    """ Attach a DHCPv6 Guard policy to an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to attach policy
            policy_name (`str`): Policy name to be attached to interface
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        "ipv6 dhcp guard attach-policy {}".format(policy_name),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to attach DHCPv6 Guard policy {policy_name} on interface "
            "{interface} on device {dev}. Error:\n{error}".format(
                policy_name=policy_name,
                interface=interface,
                dev=device.name,
                error=e,
            )
        )
def enable_ipv6_dhcp_server(device, interface, pool_name):
    """ Enable the IPv6 DHCP server on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to enable IPv6 DHCP server
            pool_name (`str`): Pool name
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        "ipv6 dhcp server {} rapid-commit".format(pool_name),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to enable IPv6 DHCP server for {pool_name} on interface "
            "{interface} on device {dev}. Error:\n{error}".format(
                pool_name=pool_name,
                interface=interface,
                dev=device.name,
                error=e,
            )
        )
def detach_dhcpv6_guard_policy_to_interface(device, interface, policy_name):
    """ Detach a DHCPv6 Guard policy from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to detach the policy from
            policy_name (`str`): Policy name to be detached
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        "no ipv6 dhcp guard attach-policy {}".format(policy_name),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to detach DHCPv6 Guard policy {policy_name} on interface "
            "{interface} on device {dev}. Error:\n{error}".format(
                policy_name=policy_name,
                interface=interface,
                dev=device.name,
                error=e,
            )
        )
def detach_ipv6_raguard_policy_to_interface(device,interface,policy_name):
    """ Detach an IPv6 RA Guard policy from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to detach policy
            policy_name (`str`): Policy name to be detached
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        "no ipv6 nd raguard attach-policy {}".format(policy_name),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to detach IPv6 RA Guard policy {policy_name} on interface "
            "{interface} on device {dev}. Error:\n{error}".format(
                policy_name=policy_name,
                interface=interface,
                dev=device.name,
                error=e,
            )
        )
def attach_ipv6_raguard_policy_to_vlan(device, vlan, policy_name):
    """ Attach an IPv6 RA Guard policy to a vlan

        Args:
            device (`obj`): Device object
            vlan (`str`): vlan to attach policy
            policy_name (`str`): Policy name to be attached
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "vlan configuration {}".format(vlan),
        "ipv6 nd raguard attach-policy {}".format(policy_name),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to attach IPv6 RA Guard policy {policy_name} on vlan "
            "{vlan} on device {dev}. Error:\n{error}".format(
                policy_name=policy_name,
                vlan=vlan,
                dev=device.name,
                error=e,
            )
        )
def detach_ipv6_raguard_policy_to_vlan(device, vlan, policy_name):
    """ Detach an IPv6 RA Guard policy from a vlan

        Args:
            device (`obj`): Device object
            vlan (`str`): vlan to detach policy
            policy_name (`str`): Policy name to be detached
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "vlan configuration {}".format(vlan),
        "no ipv6 nd raguard attach-policy {}".format(policy_name),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to detach IPv6 RA Guard policy {policy_name} on vlan "
            "{vlan} on device {dev}. Error:\n{error}".format(
                policy_name=policy_name,
                vlan=vlan,
                dev=device.name,
                error=e,
            )
        )
def remove_channel_group_from_interface(device, interface, channel_group, mode):
    """ Remove a channel-group statement from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface the channel group is removed from
            channel_group (`str`): Channel group number
            mode (`str`): Channel group mode
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        "no channel-group {} mode {}".format(channel_group, mode),
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Couldn't remove channel group {channel_group} "
            "from interface {interface}. Error:\n{error}".format(
                channel_group=channel_group, interface=interface, error=e)
        )
def remove_port_channel_interface(device, port_channel):
    """ Delete a port-channel interface from the device

        Args:
            device (`obj`): Device object
            port_channel (`str`): Port channel number to be removed
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    cmd = "no interface port-channel{}".format(port_channel)
    try:
        device.configure(cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Couldn't remove port channel {port_channel} from device. "
            "Error:\n{error}".format(port_channel=port_channel, error=e)
        )
def config_edge_trunk_on_interface(device, interface):
    """ Configure spanning-tree portfast edge trunk on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to apply the edge trunk config to
        Returns:
            None
        Raises:
            SubCommandFailure
    """
    config = [
        "interface {}".format(interface),
        # Abbreviated IOS CLI for "spanning-tree portfast edge trunk";
        # kept verbatim from the original implementation
        "spanning portf edge trunk",
    ]
    try:
        device.configure(config)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Couldn't configure spanning portf edge trunk "
            "on interface {interface}. Error:\n{error}".format(
                interface=interface, error=e)
        )
| 29.306792 | 114 | 0.567484 |
import logging
from unicon.core.errors import SubCommandFailure
from pyats.aetest.steps import Steps
from genie.conf.base import Interface
from genie.libs.conf.base import IPv4Address, IPv6Address
from genie.libs.conf.interface import IPv4Addr, IPv6Addr
from genie.harness.utils import connect_device
from genie.libs.sdk.apis.iosxe.interface.get import (
get_interface_running_config,
)
from genie.libs.sdk.apis.iosxe.interface.get import (
get_interface_connected_adjacent_router_interfaces,
)
from genie.libs.sdk.apis.utils import mask_to_int
log = logging.getLogger(__name__)
def reset_interface(device, interface):
    """ Reset an interface to its default configuration

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to default

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    log.info("Defaulting interface {interface}".format(interface=interface))
    try:
        device.configure(
            "default interface {interface}".format(interface=interface)
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not default {interface}. Error:\n{error}".format(
                interface=interface, error=e
            )
        )
def clear_interface_counters(device, interface):
    """ Clear the traffic counters of an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface whose counters are cleared

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    log.info(
        "Clearing counters on interface {interface}".format(
            interface=interface
        )
    )
    # 'clear counters' is an exec command, hence execute() not configure()
    try:
        device.execute(
            "clear counters {interface}".format(interface=interface)
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not clear counters on {interface}. Error:\n{error}".format(
                interface=interface, error=e
            )
        )
def config_interface_negotiation(device, interface):
    """ Configure 'negotiation auto' on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to configure

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    log.info(
        "Configuring negotiation auto on interface {interface}".format(
            interface=interface
        )
    )
    try:
        device.configure(
            [
                "interface {interface}".format(interface=interface),
                "negotiation auto",
            ]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to config negotiation auto on interface {interface}. Error:\n{error}".format(
                interface=interface, error=e
            )
        )
def remove_interface_negotiation(device, interface):
    """ Remove 'negotiation auto' from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to unconfigure

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    log.info(
        "Removing negotiation auto on interface {interface}".format(
            interface=interface
        )
    )
    try:
        device.configure(
            [
                "interface {interface}".format(interface=interface),
                "no negotiation auto",
            ]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to unconfig negotiation auto on interface {interface}. Error:\n{error}".format(
                interface=interface, error=e
            )
        )
def shut_interface(device, interface):
    """ Shut down an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to shut down

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    # Reconnect first if the device session dropped
    if not device.is_connected():
        connect_device(device=device)
    try:
        device.configure(
            ["interface {interface}".format(interface=interface), "shutdown"]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not shut interface {intf} on device {dev}. Error:\n{error}".format(
                intf=interface, dev=device.name, error=e
            )
        )
def unshut_interface(device, interface):
    """ Bring up ('no shutdown') an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to bring up

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    # Reconnect first if the device session dropped
    if not device.is_connected():
        connect_device(device=device)
    try:
        device.configure(
            [
                "interface {interface}".format(interface=interface),
                "no shutdown",
            ]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not unshut interface {interface} on device {dev}. Error:\n{error}".format(
                interface=interface, dev=device.name, error=e
            )
        )
def shut_interface_adjacent_interfaces(
    device, link_name, adjacent_interfaces=None, steps=Steps(), num=1
):
    """ Shut down the interfaces adjacent to a link on neighboring devices

        Args:
            device (`obj`): Device object
            link_name (`str`): Link name used to look up adjacent interfaces
            adjacent_interfaces (`list`): Interface objects (each exposing
                .device and .name) to shut; looked up from the topology when
                not supplied
            steps (`obj`): pyATS Steps object
            num (`int`): Number of adjacent interfaces to look up

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    # NOTE: the default Steps() is evaluated once at import time and is
    # shared across calls that do not pass their own steps object
    if adjacent_interfaces is None:
        adjacent_interfaces = get_interface_connected_adjacent_router_interfaces(
            device=device, link_name=link_name, num=num
        )
    # Shut each adjacent interface on its own device, one step per interface
    for interface in adjacent_interfaces:
        adjacent_device = interface.device
        interface_name = interface.name
        with steps.start(
            "Shut adjacent interface {interface} on "
            "device {device}".format(
                interface=interface_name, device=adjacent_device.name
            ),
            continue_=True,
        ) as step:
            shut_interface(device=adjacent_device, interface=interface_name)
def unshut_interface_adjacent_interfaces(
    device, link_name, adjacent_interfaces=None, steps=Steps(), num=1
):
    """ Bring up the interfaces adjacent to a link on neighboring devices

        Args:
            device (`obj`): Device object
            link_name (`str`): Link name used to look up adjacent interfaces
            adjacent_interfaces (`list`): Interface objects (each exposing
                .device and .name) to bring up; looked up from the topology
                when not supplied
            steps (`obj`): pyATS Steps object
            num (`int`): Number of adjacent interfaces to look up

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    # NOTE: the default Steps() is evaluated once at import time and is
    # shared across calls that do not pass their own steps object
    if adjacent_interfaces is None:
        adjacent_interfaces = get_interface_connected_adjacent_router_interfaces(
            device=device, link_name=link_name, num=num
        )
    # Unshut each adjacent interface on its own device, one step per interface
    for interface in adjacent_interfaces:
        adjacent_device = interface.device
        interface_name = interface.name
        with steps.start(
            "No shut adjacent interface {interface} on "
            "device {device}".format(
                interface=interface_name, device=adjacent_device.name
            ),
            continue_=True,
        ) as step:
            unshut_interface(device=adjacent_device, interface=interface_name)
def config_interface_carrier_delay(device, interface, delay, delay_type):
    """ Configure carrier delay on an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to configure
            delay (`int`): Carrier delay value
            delay_type (`str`): Delay direction; must be "up" or "down"

        Returns:
            None

        Raises:
            Exception: delay_type is not "up" or "down"
            SubCommandFailure
    """
    # Validate the direction before touching the device
    delay_types = ["up", "down"]
    if delay_type not in delay_types:
        raise Exception(
            "'{type}' not a supported type; only support '{types}'".format(
                type=delay_type, types=delay_types
            )
        )
    try:
        device.configure(
            "interface {interface}\n"
            "carrier-delay {delay_type} {delay}".format(
                interface=interface, delay_type=delay_type, delay=delay
            )
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not configure carrier delay. Error:\n{error}".format(
                error=e
            )
        )
def remove_interface_carrier_delay(device, interface):
    """ Remove carrier delay (both 'up' and 'down') from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to unconfigure

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    try:
        device.configure(
            "interface {interface}\n"
            "no carrier-delay up\n"
            "no carrier-delay down".format(interface=interface))
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to remove carrier delay on {interface}. "
            "Error:\n{e}".format(interface=interface, e=e)) from e
def remove_interface_ospf_bfd(device, interface):
    """ Remove 'ip ospf bfd' from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to unconfigure

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    try:
        device.configure(
            "interface {interface}\n"
            "no ip ospf bfd".format(interface=interface))
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to remove ospf bfd on {interface}. "
            "Error:\n{e}".format(interface=interface, e=e)) from e
def config_interface_mtu(device, interface, mtu_bytes):
    """ Configure the MTU of an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to configure
            mtu_bytes (`int`): MTU value in bytes

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    log.info(
        "Configuring MTU {mtu_bytes} on interface {interface}".format(
            mtu_bytes=mtu_bytes, interface=interface
        )
    )
    try:
        device.configure(
            [
                "interface {interface}".format(interface=interface),
                "mtu {mtu_bytes}".format(mtu_bytes=mtu_bytes),
            ]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not configure MTU on {interface}. Error:\n{error}".format(
                interface=interface, error=e
            )
        )
def unconfig_interface_mtu(device, interface):
    """ Remove the MTU configuration from an interface

        Args:
            device (`obj`): Device object
            interface (`str`): Interface to unconfigure

        Returns:
            None

        Raises:
            SubCommandFailure
    """
    log.info(
        "Removing MTU config on interface {interface}".format(
            interface=interface
        )
    )
    try:
        device.configure(
            ["interface {interface}".format(interface=interface), "no mtu"]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not unconfigure MTU on {interface}. Error:\n{error}".format(
                interface=interface, error=e
            )
        )
def config_interface_ospf(device, interface, ospf_pid, area):
    """Enable OSPF process *ospf_pid* in *area* on *interface*."""
    log.info(f"Configuring OSPF on interface {interface}")
    try:
        device.configure(
            [
                f"interface {interface}",
                f"ip ospf {ospf_pid} area {area}",
            ]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not configure ospf. Error:\n{e}"
        )
def config_ip_on_interface(
    device,
    interface,
    ip_address,
    mask,
    ipv6_address=None,
    eth_encap_type=None,
    eth_encap_val=None,
    sub_interface=None,
):
    """Configure an IPv4 (and optionally IPv6) address on an interface.

    Args:
        device: device to configure
        interface (str): base interface name
        ip_address (str): IPv4 address
        mask (str): IPv4 netmask
        ipv6_address (str, optional): IPv6 address; also enables ipv6
        eth_encap_type (str, optional): encapsulation type (e.g. dot1q)
        eth_encap_val (str, optional): encapsulation value (e.g. VLAN id)
        sub_interface (str, optional): sub-interface number; the target
            becomes "<interface>.<sub_interface>"

    Raises:
        SubCommandFailure: if the device rejects the configuration
    """
    if sub_interface:
        interface_name = interface + "." + sub_interface
    else:
        interface_name = interface
    cfg_str = "interface {intf}\n".format(intf=interface_name)
    # Encapsulation is only pushed when both type and value are supplied.
    if eth_encap_type and eth_encap_val:
        cfg_str += "encapsulation {encap_type} {encap_val}\n".format(
            encap_type=eth_encap_type, encap_val=eth_encap_val
        )
    # Fix: dropped the unused 'intf' keyword the original passed to format().
    cfg_str += "ip address {ip} {mask}\n".format(ip=ip_address, mask=mask)
    if ipv6_address:
        cfg_str += "ipv6 enable\n" \
                   "ipv6 address {ipv6}\n".format(ipv6=ipv6_address)
    try:
        device.configure(cfg_str)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Failed to configure IP address {ip} on interface "
            "{interface} on device {dev}. Error:\n{error}".format(
                ip=ip_address,
                interface=interface_name,
                dev=device.name,
                error=e,
            )
        )
def config_interface_subinterface_and_secondary_addresses(
    device,
    interface,
    sub_interface_num,
    ip_address,
    prefix,
    encap_type,
    start,
    end,
):
    """Configure a sub-interface with one primary and several secondary
    IPv4 addresses.

    Args:
        device: device to configure
        interface (str): parent interface name
        sub_interface_num (int): sub-interface / encapsulation value
        ip_address (str): address template containing an ``{x}`` placeholder
        prefix (int): prefix length applied to every address
        encap_type (str): encapsulation type (e.g. dot1q)
        start (int): value substituted into the primary address
        end (int): upper bound; ``end - start`` secondaries are added

    Returns:
        str: the generated configuration

    Raises:
        Exception: if building or applying the configuration fails
    """
    name = interface + "." + str(sub_interface_num)
    sub_intf = Interface(device=device, name=name)
    sub_intf.eth_encap_type1 = encap_type
    sub_intf.eth_encap_val1 = sub_interface_num
    ipv4a = IPv4Addr(device=device)
    ipv4a.ipv4 = IPv4Address(ip_address.format(x=start))
    ipv4a.prefix_length = prefix
    sub_intf.add_ipv4addr(ipv4a)
    # NOTE(review): secondaries are numbered 1..(end-start) regardless of
    # *start*; confirm this is the intended addressing scheme.
    for x in range(end - start):
        ipv4b = IPv4Addr(device=device)
        ipv4b.ipv4 = IPv4Address(ip_address.format(x=x + 1))
        ipv4b.prefix_length = prefix
        ipv4b.ipv4_secondary = True
        sub_intf.add_ipv4addr(ipv4b)
    # Bug fix: initialise 'config' so the except branch cannot raise
    # UnboundLocalError when build_config(apply=False) itself fails.
    config = ""
    try:
        config = str(sub_intf.build_config(apply=False))
        sub_intf.build_config()
    except Exception as e:
        log.error(str(e))
        raise Exception("Failed to config \n {}".format(config))
    return config
def remove_interface_configured_service_policy(device, interface, out=None):
    """Remove any configured input/output service policies from *interface*.

    Args:
        device: device to configure
        interface (str): interface name
        out (dict, optional): pre-fetched running config; when omitted it is
            retrieved via get_interface_running_config().

    Raises:
        SubCommandFailure: if unconfiguring a found policy fails
    """
    configs = []
    if not out:
        out = get_interface_running_config(device, interface)
    # NOTE(review): 'out' is assumed to be a dict keyed by config lines,
    # with each "interface ..." key mapping to its child lines — confirm
    # against get_interface_running_config().
    for item in out:
        if "interface" in item:
            for serv_policy in out[item]:
                if "service-policy input" in serv_policy:
                    configs.append(
                        "no {service_policy_input}".format(
                            service_policy_input=serv_policy
                        )
                    )
                elif "service-policy output" in serv_policy:
                    configs.append(
                        "no {service_policy_output}".format(
                            service_policy_output=serv_policy
                        )
                    )
    # Only push config when at least one policy line was found.
    if len(configs) >= 1:
        configs.insert(0, "interface {interface}".format(interface=interface))
        try:
            device.configure(configs)
        except SubCommandFailure as e:
            raise SubCommandFailure(
                "Failed to unconfigure service policy"
                " in/out under interface {interface}. Error:\n{error}".format(
                    interface=interface, error=e
                )
            )
    else:
        log.info(
            "No configured service policy found under interface {interface}".format(
                interface=interface
            )
        )
def clear_interface_config(device, interface):
    """Reset *interface* to its default configuration."""
    log.info(f"Clearing {interface} config")
    try:
        device.configure(f"default interface {interface}")
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not default interface {interface}. Error:\n{e}"
        )
def configure_interface_switchport_access_vlan(device, interface, vlan):
    """Put *interface* into access mode on *vlan*."""
    log.info(
        f"Configuring switchport on {interface} with access_vlan = {vlan}"
    )
    try:
        device.configure(
            [f"interface {interface}", f"switchport access vlan {vlan}"]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not configure switchport access vlan. Error:\n{e}"
        )
def configure_interface_directed_broadcast(device, interfaces, configure=True):
    """Enable (default) or disable 'ip directed-broadcast' on each interface."""
    action = "ip directed-broadcast" if configure else "no ip directed-broadcast"
    cmd = ""
    for intf in interfaces:
        cmd += f"interface {intf}\n{action}\nexit\n"
    try:
        device.configure(cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not configure directed broadcast. Error:\n{e}"
        )
def configure_interface_l3_port_channel(
    target,
    port_channel,
    neighbor_address,
    neighbor_netmask,
    interfaces,
    testbed,
):
    """Configure an L3 LACP port-channel (Junos-style 'set' commands).

    Args:
        target (str): device name inside *testbed*
        port_channel (str): aggregated-ethernet interface name
        neighbor_address (str): IPv4 address for unit 0
        neighbor_netmask (str): dotted netmask, converted to a prefix length
        interfaces (list): member interfaces to bind to the bundle
        testbed: testbed object holding the devices

    Raises:
        SubCommandFailure: if the device rejects the configuration
    """
    ip = neighbor_address + "/" + str(mask_to_int(neighbor_netmask))
    config_cmd = [
        "set chassis aggregated-devices ethernet device-count 1",
        "set interfaces {} aggregated-ether-options lacp active".format(
            port_channel
        ),
        "set interfaces {} unit 0 family inet address {}".format(
            port_channel, ip
        ),
    ]
    # Generalized: the original hard-coded interfaces[0..3], which raised
    # IndexError for fewer than four members and silently ignored extras.
    config_cmd.extend(
        "set interfaces {} gigether-options 802.3ad {}".format(
            intf, port_channel
        )
        for intf in interfaces
    )
    dev = testbed.devices[target]
    try:
        dev.configure(config_cmd)
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not configure l3 port channel. Error:\n{error}".format(
                error=e
            )
        )
def configure_interfaces_shutdown(device, interfaces):
    """Shut down every interface in *interfaces* in one config batch."""
    cmds = []
    for intf in interfaces:
        cmds.extend([f"int {intf}", "shutdown"])
    try:
        device.configure(cmds)
    except SubCommandFailure as e:
        log.error(f'Failed to shutdown interfaces on device {device.name}: {e}')
def configure_interfaces_unshutdown(device, interfaces):
    """Enable ('no shutdown') every interface in *interfaces* in one batch."""
    cmds = []
    for intf in interfaces:
        cmds.extend([f"int {intf}", "no shutdown"])
    try:
        device.configure(cmds)
    except SubCommandFailure as e:
        log.error(f'Failed to enable interfaces on device {device.name}: {e}')
def shutdown_interface(device, member):
    """Administratively shut down the single interface *member*."""
    try:
        device.configure([f"int {member}", "shutdown"])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Couldn't shut down the port channel member"
            f"{member}. Error:\n{e}"
        )
def configure_interface_interfaces_on_port_channel(
    device, interface, mode, channel_group, interfaces
):
    """Add *interface* to port-channel *channel_group* in *mode*.

    The fourth member (interfaces[3]) additionally gets 'lacp rate fast'.

    Raises:
        SubCommandFailure: if the device rejects the configuration
    """
    config_cmd = [
        "interface {interface}".format(interface=interface),
        "no shutdown",
        "channel-group {channel_group} mode {mode}".format(
            mode=mode, channel_group=channel_group
        ),
    ]
    # Bug fix: the original checked len(interfaces) > 2 before reading
    # interfaces[3], raising IndexError for exactly three members.
    if len(interfaces) > 3 and interface == interfaces[3]:
        config_cmd.append("lacp rate fast")
    try:
        device.configure(config_cmd)
        log.info(
            "Successfully added {intf} on "
            "channel-group {channel_group} in {mode} mode".format(
                intf=interface, mode=mode, channel_group=channel_group
            )
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Couldn't add {intf} on "
            "channel-group {channel_group} in {mode} mode. Error:\n{error}".format(
                intf=interface, mode=mode, channel_group=channel_group, error=e
            )
        )
def configure_lacp_on_interface(
    device, interface, min_max_bundle, minumum_bundle=False
):
    """Configure the LACP min-bundle or max-bundle value on *interface*.

    'minumum_bundle' (sic) is kept as-is for caller compatibility.
    """
    if minumum_bundle:
        keyword, mode = "min-bundle", "minimum"
    else:
        keyword, mode = "max-bundle", "maximum"
    try:
        device.configure([f"int {interface}", f"lacp {keyword} {min_max_bundle}"])
        log.info(
            f"Successfully configured {mode} number "
            f"of port channel members to {min_max_bundle}"
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Couldn't configure {mode} number "
            f"of port channel members to {min_max_bundle}. Error:\n{e}"
        )
def default_interface(device, interfaces):
    """Reset each interface in *interfaces* to its default configuration."""
    for intf in interfaces:
        try:
            device.configure(f"default interface {intf}")
            log.info(f"Successfully defaulted {intf}")
        except SubCommandFailure as e:
            raise SubCommandFailure(
                f"Couldn't default {intf}. Error:\n{e}"
            )
def clear_interface_interfaces(device, interfaces):
    """Clear each interface: sub-interfaces (containing '.') are deleted
    outright, physical interfaces are defaulted."""
    for interface in interfaces:
        cmd = (f"no interface {interface}" if "." in interface
               else f"default interface {interface}")
        log.info(f'Clearing interface {interface} configuration with "{cmd}"')
        try:
            device.configure(cmd)
        except SubCommandFailure as e:
            raise SubCommandFailure(
                f"Could not clear interface {interface}. Error:\n{e}"
            )
def configure_vrf_on_interface(device, interface, vrf):
    """Attach VRF *vrf* to *interface* via 'vrf forwarding'."""
    try:
        device.configure([f"interface {interface}", f"vrf forwarding {vrf}"])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not configure VRF {vrf} on interface "
            f"{interface}. Error:\n{e}"
        )
def configure_interface_description(device, interface, description):
    """Set the description string on *interface*."""
    try:
        device.configure(
            [f"interface {interface}", f"description {description}"]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not configure description '{description}' on "
            f"interface {interface}. Error:\n{e}"
        )
def unconfigure_interface_description(device, interface):
    """Remove the description from *interface*."""
    try:
        device.configure([f"interface {interface}", "no description"])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Could not remove description from interface "
            f"{interface}. Error:\n{e}"
        )
def configure_interface_monitor_session(device, monitor_config):
    """Configure ERSPAN monitor sessions from a list of session dicts.

    Args:
        device: device to configure
        monitor_config (list): dicts with keys 'session_name',
            'session_type', 'interface', 'erspan_id', 'ip_address' and
            optionally 'description', 'source_vlan', 'mtu', 'vrf'.

    Raises:
        SubCommandFailure: if the device rejects a session configuration
    """
    for mc in monitor_config:
        config = []
        # Source sessions mirror traffic *from* the interface; anything
        # else is treated as a destination session.
        if "source" in mc["session_type"]:
            config.append(
                "monitor session {} type {}\n".format(
                    mc["session_name"], mc["session_type"]
                )
            )
            config.append("source interface {}\n".format(mc["interface"]))
            config.append("destination\n")
            config.append("erspan-id {}\n".format(mc["erspan_id"]))
            config.append("ip address {}\n".format(mc["ip_address"]))
            config.append("origin ip address {}\n".format(mc["ip_address"]))
        else:
            # Destination sessions need the receiving interface up first.
            unshut_interface(device=device, interface=mc["interface"])
            config.append(
                "monitor session {} type {}\n".format(
                    mc["session_name"], mc["session_type"]
                )
            )
            config.append("destination interface {}\n".format(mc["interface"]))
            config.append("source\n")
            config.append("erspan-id {}\n".format(mc["erspan_id"]))
            config.append("ip address {}\n".format(mc["ip_address"]))
        # Optional per-session attributes.
        if 'description' in mc:
            config.append("description {}\n".format(mc["description"]))
        if 'source_vlan' in mc:
            config.append("source vlan {}\n".format(mc["source_vlan"]))
        if 'mtu' in mc:
            config.append("mtu {}\n".format(mc["mtu"]))
        if 'vrf' in mc:
            config.append("vrf {}\n".format(mc["vrf"]))
        config.append("exit\n")
        config.append("no shutdown\n")
        try:
            device.configure("".join(config))
        except SubCommandFailure as e:
            raise SubCommandFailure(
                "Could not configure monitor session. Error:\n{error}".format(
                    error=e
                )
            )
def unconfigure_interface_monitor_session(device, session_name, session_type):
    """Remove a monitor (SPAN/ERSPAN) session from the device."""
    try:
        device.configure(
            f"no monitor session {session_name} type {session_type}"
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Could not unconfigure monitor session. Error:\n{e}"
        )
def configure_subinterfaces_for_vlan_range(device, interface, vlan_id_start, vlan_id_step,
                                           vlan_id_count, network_start, network_step,
                                           host_address_step, netmask, ospf_network_type=None):
    """Create dot1q sub-interfaces for a VLAN range, each with an IPv4
    address derived from *network_start*/*network_step*/*host_address_step*.

    Returns the list of created sub-interface names.
    """
    cmds = []
    interfaces = []
    vlan_id = vlan_id_start
    network = IPv4Address(network_start)
    for _ in range(vlan_id_count):
        subif = f'{interface}.{vlan_id}'
        interfaces.append(subif)
        ip_address = network + int(IPv4Address(host_address_step))
        cmds += [
            f'interface {subif}',
            f'encapsulation dot1q {vlan_id}',
            f'ip address {ip_address} {netmask}',
        ]
        if ospf_network_type:
            cmds.append(f'ip ospf network {ospf_network_type}')
        cmds.append('exit')
        vlan_id += vlan_id_step
        network += int(IPv4Address(network_step))
    device.configure(cmds)
    return interfaces
def configure_ipv4_dhcp_relay_helper(device, interface, ip_address):
    """Configure an 'ip helper-address' (DHCP relay) on *interface*."""
    try:
        device.configure(
            [f"interface {interface}", f"ip helper-address {ip_address}"]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to configure helper IP address {ip_address} on interface "
            f"{interface} on device {device.name}. Error:\n{e}"
        )
def attach_ipv6_raguard_policy_to_interface(device, interface, policy_name):
    """Attach an IPv6 RA Guard policy to *interface*."""
    try:
        device.configure([
            f"interface {interface}",
            f"ipv6 nd raguard attach-policy {policy_name}",
        ])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to attach IPv6 RA Guard policy {policy_name} on interface "
            f"{interface} on device {device.name}. Error:\n{e}"
        )
def remove_interface_ip(device, interface):
    """Remove the IPv4 address configuration from *interface*."""
    log.info(f"Removing ip on interface {interface}")
    try:
        device.configure([f"interface {interface}", "no ip address"])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to unconfig ip address on interface {interface}. Error:\n{e}"
        )
def configure_ipv6_dhcp_relay(device, interface, dest_ipv6, vlan):
    """Configure an IPv6 DHCP relay destination on *interface*.

    Args:
        device: device to configure
        interface (str): interface name
        dest_ipv6 (str): relay destination IPv6 address
        vlan: output interface / VLAN for the relay destination

    Raises:
        SubCommandFailure: if the device rejects the configuration
    """
    # Bug fix: the original formatted 'int=int', putting the builtin type
    # (not the interface name) into the log and error strings.
    log.info(
        "Configuring IPv6 DHCP Relay on int={int}, for dest_ipv6={dest_ipv6} and vlan={vlan} "
        .format(int=interface, dest_ipv6=dest_ipv6, vlan=vlan)
    )
    try:
        device.configure(
            [
                "interface {interface}\n".format(interface=interface),
                "ipv6 dhcp relay destination {dest_ipv6} {vlan}".format(dest_ipv6=dest_ipv6, vlan=vlan)
            ]
        )
    except SubCommandFailure:
        raise SubCommandFailure(
            "Could not configure IPv6 DHCP Relay on int={int}, for dest_ipv6={dest_ipv6} and vlan={vlan} ".format(
                int=interface, dest_ipv6=dest_ipv6, vlan=vlan
            )
        )
def configure_ipv6_nd(device, interface, lifetime, pref_lifetime, router_pref, ra_lifetime, ra_interval):
    """Configure IPv6 ND prefix and RA parameters on *interface*."""
    log.info(
        "Configuring IPv6 DHCP ND parameters on int={int} "
        .format(int=interface)
    )
    cfg = [
        f"interface {interface}\n",
        f"ipv6 nd prefix default {lifetime} {pref_lifetime}",
        f"ipv6 nd router-preference {router_pref}",
        f"ipv6 nd ra lifetime {ra_lifetime}",
        f"ipv6 nd ra interval {ra_interval}",
    ]
    try:
        device.configure(cfg)
    except SubCommandFailure:
        raise SubCommandFailure(
            f"Could not configure IPv6 DHCP ND parameters on int={interface}"
        )
def attach_dhcpv6_guard_policy_to_interface(device, interface, policy_name):
    """Attach a DHCPv6 Guard policy to *interface*."""
    try:
        device.configure([
            f"interface {interface}",
            f"ipv6 dhcp guard attach-policy {policy_name}",
        ])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to attach DHCPv6 Guard policy {policy_name} on interface "
            f"{interface} on device {device.name}. Error:\n{e}"
        )
def enable_ipv6_dhcp_server(device, interface, pool_name):
    """Enable the IPv6 DHCP server for *pool_name* (rapid-commit) on *interface*."""
    try:
        device.configure([
            f"interface {interface}",
            f"ipv6 dhcp server {pool_name} rapid-commit",
        ])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to enable IPv6 DHCP server for {pool_name} on interface "
            f"{interface} on device {device.name}. Error:\n{e}"
        )
def detach_dhcpv6_guard_policy_to_interface(device, interface, policy_name):
    """Detach a DHCPv6 Guard policy from *interface*."""
    try:
        device.configure([
            f"interface {interface}",
            f"no ipv6 dhcp guard attach-policy {policy_name}",
        ])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to detach DHCPv6 Guard policy {policy_name} on interface "
            f"{interface} on device {device.name}. Error:\n{e}"
        )
def detach_ipv6_raguard_policy_to_interface(device, interface, policy_name):
    """Detach an IPv6 RA Guard policy from *interface*."""
    try:
        device.configure([
            f"interface {interface}",
            f"no ipv6 nd raguard attach-policy {policy_name}",
        ])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to detach IPv6 RA Guard policy {policy_name} on interface "
            f"{interface} on device {device.name}. Error:\n{e}"
        )
def attach_ipv6_raguard_policy_to_vlan(device, vlan, policy_name):
    """Attach an IPv6 RA Guard policy to a VLAN configuration."""
    try:
        device.configure([
            f"vlan configuration {vlan}",
            f"ipv6 nd raguard attach-policy {policy_name}",
        ])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to attach IPv6 RA Guard policy {policy_name} on vlan "
            f"{vlan} on device {device.name}. Error:\n{e}"
        )
def detach_ipv6_raguard_policy_to_vlan(device, vlan, policy_name):
    """Detach an IPv6 RA Guard policy from a VLAN configuration."""
    try:
        device.configure([
            f"vlan configuration {vlan}",
            f"no ipv6 nd raguard attach-policy {policy_name}",
        ])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Failed to detach IPv6 RA Guard policy {policy_name} on vlan "
            f"{vlan} on device {device.name}. Error:\n{e}"
        )
def remove_channel_group_from_interface(device, interface, channel_group, mode):
    """Remove *interface* from channel-group *channel_group* (in *mode*)."""
    try:
        device.configure([
            f"interface {interface}",
            f"no channel-group {channel_group} mode {mode}",
        ])
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Couldn't remove channel group {channel_group} "
            f"from interface {interface}. Error:\n{e}"
        )
def remove_port_channel_interface(device, port_channel):
    """Delete interface port-channel<port_channel> from the device."""
    try:
        device.configure(f"no interface port-channel{port_channel}")
    except SubCommandFailure as e:
        raise SubCommandFailure(
            f"Couldn't remove port channel {port_channel} from device. "
            f"Error:\n{e}"
        )
def config_edge_trunk_on_interface(device, interface):
    """Configure spanning-tree edge-trunk behaviour on *interface*.

    Raises:
        SubCommandFailure: if the device rejects the configuration
    """
    # NOTE(review): the CLI literal "spanning portf edge trunk" looks like a
    # truncation of "spanning-tree portfast edge trunk" — verify against the
    # target platform before relying on this API.
    try:
        device.configure(
            [
                "interface {interface}".format(interface=interface),
                "spanning portf edge trunk"
            ]
        )
    except SubCommandFailure as e:
        raise SubCommandFailure(
            "Couldn't configure spanning portf edge trunk "
            "on interface {interface}. Error:\n{error}".format(
                interface=interface, error=e)
        )
| true | true |
1c3f39a368018d62b2c631d5a8d2532741dedc90 | 6,827 | py | Python | checkTasks.py | Paola351/recurring-tasks | d05abc8d4029eee3638c18a468b607f9c548c6f6 | [
"MIT"
] | null | null | null | checkTasks.py | Paola351/recurring-tasks | d05abc8d4029eee3638c18a468b607f9c548c6f6 | [
"MIT"
] | null | null | null | checkTasks.py | Paola351/recurring-tasks | d05abc8d4029eee3638c18a468b607f9c548c6f6 | [
"MIT"
] | null | null | null | import configparser, sendMail
from datetime import datetime
import logging
logging.basicConfig(level='DEBUG', filename='logs/app.log', filemode='a', format='%(name)s - %(levelname)s - %(asctime)s - %(message)s')
taskTypeList = ["daily", "weekly", "monthly", "weekdayofmonth", "yearly", "free"]
daysOfTheWeek = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
monthsOfTheYear = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October",
"November", "December"]
occurrences = ["first", "second", "third", "fourth", "last"]
config = configparser.ConfigParser()
config.read('tasks.ini')
now = datetime.now()
day = now.strftime("%d")
month = now.strftime("%m")
def validate_task_type(tt):
    """Email an error notification and raise if *tt* is not a known task type."""
    if tt in taskTypeList:
        return
    errorMessage = "Sorry, taskType should only be in {}".format(taskTypeList)
    sendMail.sendNotification("Error", errorMessage)
    raise ValueError(errorMessage)
def validate_weekday(wd):
    """Email an error notification and raise if *wd* is not a weekday name."""
    if wd in daysOfTheWeek:
        return
    errorMessage = "Sorry, Weekday should only be in {}".format(daysOfTheWeek)
    sendMail.sendNotification("Error", errorMessage)
    raise ValueError(errorMessage)
def validate_month(m):
    """Email an error notification and raise if *m* is not a month name."""
    if m in monthsOfTheYear:
        return
    errorMessage = "Sorry, month should only be in {}".format(monthsOfTheYear)
    sendMail.sendNotification("Error", errorMessage)
    raise ValueError(errorMessage)
def validate_occurrence(o):
    """Email an error notification and raise if *o* is not a valid occurrence."""
    if o in occurrences:
        return
    errorMessage = "Sorry, occurrence should only be in {}".format(occurrences)
    sendMail.sendNotification("Error", errorMessage)
    raise ValueError(errorMessage)
def validate_day(d):
    """Validate that *d* parses as an int in [1, 31].

    On failure an error notification is emailed and ValueError is raised.
    """
    try:
        d = int(d)
    # Fix: the bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
    # only conversion failures should be treated as "not an int".
    except (TypeError, ValueError):
        errorMessage = "day should be an int"
        sendMail.sendNotification("Error", errorMessage)
        raise ValueError(errorMessage)
    if d < 1 or d > 31:
        errorMessage = "Sorry, day should only be in [1, 31]"
        sendMail.sendNotification("Error", errorMessage)
        raise ValueError(errorMessage)
# Thin accessors around the parsed tasks.ini sections; each raises
# configparser.NoOptionError when the option is missing.
def extract_type(section):
    """Return the 'Type' value of *section*."""
    return config.get(section, 'Type')
def extract_message(section):
    """Return the 'Message' value of *section*."""
    return config.get(section, 'Message')
def extract_days(section):
    """Return the 'Day' value of *section* (may be a comma list)."""
    return config.get(section, 'Day')
def extract_months(section):
    """Return the 'Month' value of *section* (may be a comma list)."""
    return config.get(section, 'Month')
def extract_weekdays(section):
    """Return the 'Weekday' value of *section* (may be a comma list)."""
    return config.get(section, 'Weekday')
def extract_occurrences(section):
    """Return the 'Occurrence' value of *section* (may be a comma list)."""
    return config.get(section, 'Occurrence')
# Main dispatch: walk every task section and send a notification when the
# current date matches its schedule.
for i in config.sections():
    taskType = extract_type(i)
    validate_task_type(taskType)
    if taskType == 'daily':
        message = extract_message(i)
        logging.info("Notification triggered \"{}\" every day ".format(message))
        sendMail.sendNotification(i, message)
    if taskType == 'weekly':
        for j in extract_weekdays(i).split(','):
            validate_weekday(j)
            if daysOfTheWeek.index(j) == now.weekday():
                message = extract_message(i)
                # NOTE(review): copy-pasted log text — says "every day" for a
                # weekly task.
                logging.info("Notification triggered \"{}\" every day ".format(message))
                sendMail.sendNotification(i, message)
    if taskType == 'monthly':
        for j in extract_days(i).split(','):
            validate_day(j)
            # NOTE(review): string comparison — 'day' is zero-padded ("05"),
            # so a config value of "5" never matches; confirm tasks.ini uses
            # zero-padded days.
            if day == j:
                message = extract_message(i)
                logging.info("Notification triggered \"{}\" every {} of the month ".format(message, j))
                sendMail.sendNotification(i, message)
    if taskType == 'weekdayofmonth':
        for j in extract_weekdays(i).split(','):
            validate_weekday(j)
            for occurrence in extract_occurrences(i).split(','):
                validate_occurrence(occurrence)
                if daysOfTheWeek.index(j) == now.weekday():
                    message = extract_message(i)
                    # Day-of-month windows: 1-7 = first occurrence of that
                    # weekday, 8-14 second, 15-21 third, 22-28 fourth.
                    if occurrence.lower() == "first" and 1 <= int(day) <= 7:
                        sendMail.sendNotification(i, message)
                        logging.info("Notification triggered \"{}\" every {} {} of the month ".format(message, occurrence, j))
                    elif occurrence.lower() == "second" and 8 <= int(day) <= 14:
                        sendMail.sendNotification(i, message)
                        logging.info("Notification triggered \"{}\" every {} {} of the month ".format(message, occurrence, j))
                    elif occurrence.lower() == "third" and 15 <= int(day) <= 21:
                        sendMail.sendNotification(i, message)
                        logging.info("Notification triggered \"{}\" every {} {} of the month ".format(message, occurrence, j))
                    elif occurrence.lower() == "fourth" and 22 <= int(day) <= 28:
                        sendMail.sendNotification(i, message)
                        logging.info("Notification triggered \"{}\" every {} {} of the month ".format(message, occurrence, j))
                    # NOTE(review): "last" approximates the final week as days
                    # 25-31, which is wrong for months shorter than 31 days
                    # (e.g. February) and overlaps "fourth" on days 25-28;
                    # calendar.monthrange would give the exact window.
                    elif occurrence.lower() == "last" and 25 <= int(day) <= 31:
                        sendMail.sendNotification(i, message)
                        logging.info("Notification triggered \"{}\" every {} {} of the month ".format(message, occurrence, j))
                    else:
                        continue
    if taskType == 'yearly':
        m = extract_months(i)
        d = extract_days(i)
        # Yearly tasks must name exactly one month and one day.
        if len(m.split(',')) > 1 or len(d.split(',')) > 1:
            errorMessage = "Sorry, yearly task should only contain one specific date"
            sendMail.sendNotification("Error", errorMessage)
            raise ValueError(errorMessage)
        validate_day(d)
        validate_month(m)
        # NOTE(review): 'd == day' is a string compare against a zero-padded
        # value — see the 'monthly' note above.
        if d == day and monthsOfTheYear.index(m) + 1 == int(month):
            message = extract_message(i)
            logging.info(
                "Notification triggered \"{}\" every year, the {} of the month {}".format(message, day,
                                                                                          month))
            sendMail.sendNotification(i, message)
    if taskType == 'free':
        for j in extract_months(i).split(','):
            validate_month(j)
            if monthsOfTheYear.index(j) + 1 == int(month):
                for k in extract_days(i).split(','):
                    validate_day(k)
                    # NOTE(review): zero-padded string compare, as above.
                    if day == k:
                        message = extract_message(i)
                        logging.info(
                            "Notification triggered \"{}\" every {} of the months {}".format(message,
                                                                                             day,
                                                                                             month))
                        sendMail.sendNotification(i, message)
| 44.045161 | 136 | 0.56174 | import configparser, sendMail
from datetime import datetime
import logging
logging.basicConfig(level='DEBUG', filename='logs/app.log', filemode='a', format='%(name)s - %(levelname)s - %(asctime)s - %(message)s')
taskTypeList = ["daily", "weekly", "monthly", "weekdayofmonth", "yearly", "free"]
daysOfTheWeek = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
monthsOfTheYear = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October",
"November", "December"]
occurrences = ["first", "second", "third", "fourth", "last"]
config = configparser.ConfigParser()
config.read('tasks.ini')
now = datetime.now()
day = now.strftime("%d")
month = now.strftime("%m")
def validate_task_type(tt):
if tt not in taskTypeList:
errorMessage = "Sorry, taskType should only be in {}".format(taskTypeList)
sendMail.sendNotification("Error", errorMessage)
raise ValueError(errorMessage)
def validate_weekday(wd):
if wd not in daysOfTheWeek:
errorMessage = "Sorry, Weekday should only be in {}".format(daysOfTheWeek)
sendMail.sendNotification("Error", errorMessage)
raise ValueError(errorMessage)
def validate_month(m):
if m not in monthsOfTheYear:
errorMessage = "Sorry, month should only be in {}".format(monthsOfTheYear)
sendMail.sendNotification("Error", errorMessage)
raise ValueError(errorMessage)
def validate_occurrence(o):
if o not in occurrences:
errorMessage = "Sorry, occurrence should only be in {}".format(occurrences)
sendMail.sendNotification("Error", errorMessage)
raise ValueError(errorMessage)
def validate_day(d):
try:
d = int(d)
except:
errorMessage = "day should be an int"
sendMail.sendNotification("Error", errorMessage)
raise ValueError(errorMessage)
if d < 1 or d > 31:
errorMessage = "Sorry, day should only be in [1, 31]"
sendMail.sendNotification("Error", errorMessage)
raise ValueError(errorMessage)
def extract_type(section):
return config.get(section, 'Type')
def extract_message(section):
return config.get(section, 'Message')
def extract_days(section):
return config.get(section, 'Day')
def extract_months(section):
return config.get(section, 'Month')
def extract_weekdays(section):
return config.get(section, 'Weekday')
def extract_occurrences(section):
return config.get(section, 'Occurrence')
for i in config.sections():
taskType = extract_type(i)
validate_task_type(taskType)
if taskType == 'daily':
message = extract_message(i)
logging.info("Notification triggered \"{}\" every day ".format(message))
sendMail.sendNotification(i, message)
if taskType == 'weekly':
for j in extract_weekdays(i).split(','):
validate_weekday(j)
if daysOfTheWeek.index(j) == now.weekday():
message = extract_message(i)
logging.info("Notification triggered \"{}\" every day ".format(message))
sendMail.sendNotification(i, message)
if taskType == 'monthly':
for j in extract_days(i).split(','):
validate_day(j)
if day == j:
message = extract_message(i)
logging.info("Notification triggered \"{}\" every {} of the month ".format(message, j))
sendMail.sendNotification(i, message)
if taskType == 'weekdayofmonth':
for j in extract_weekdays(i).split(','):
validate_weekday(j)
for occurrence in extract_occurrences(i).split(','):
validate_occurrence(occurrence)
if daysOfTheWeek.index(j) == now.weekday():
message = extract_message(i)
if occurrence.lower() == "first" and 1 <= int(day) <= 7:
sendMail.sendNotification(i, message)
logging.info("Notification triggered \"{}\" every {} {} of the month ".format(message, occurrence, j))
elif occurrence.lower() == "second" and 8 <= int(day) <= 14:
sendMail.sendNotification(i, message)
logging.info("Notification triggered \"{}\" every {} {} of the month ".format(message, occurrence, j))
elif occurrence.lower() == "third" and 15 <= int(day) <= 21:
sendMail.sendNotification(i, message)
logging.info("Notification triggered \"{}\" every {} {} of the month ".format(message, occurrence, j))
elif occurrence.lower() == "fourth" and 22 <= int(day) <= 28:
sendMail.sendNotification(i, message)
logging.info("Notification triggered \"{}\" every {} {} of the month ".format(message, occurrence, j))
elif occurrence.lower() == "last" and 25 <= int(day) <= 31:
sendMail.sendNotification(i, message)
logging.info("Notification triggered \"{}\" every {} {} of the month ".format(message, occurrence, j))
else:
continue
if taskType == 'yearly':
m = extract_months(i)
d = extract_days(i)
if len(m.split(',')) > 1 or len(d.split(',')) > 1:
errorMessage = "Sorry, yearly task should only contain one specific date"
sendMail.sendNotification("Error", errorMessage)
raise ValueError(errorMessage)
validate_day(d)
validate_month(m)
if d == day and monthsOfTheYear.index(m) + 1 == int(month):
message = extract_message(i)
logging.info(
"Notification triggered \"{}\" every year, the {} of the month {}".format(message, day,
month))
sendMail.sendNotification(i, message)
if taskType == 'free':
for j in extract_months(i).split(','):
validate_month(j)
if monthsOfTheYear.index(j) + 1 == int(month):
for k in extract_days(i).split(','):
validate_day(k)
if day == k:
message = extract_message(i)
logging.info(
"Notification triggered \"{}\" every {} of the months {}".format(message,
day,
month))
sendMail.sendNotification(i, message)
| true | true |
1c3f39cffcbd5fc2bd8e3c59a4f930fe745144cc | 2,179 | py | Python | Bot/sending_emails.py | DogsonPl/bot_for_messenger | 2d6664b52b59696dc82efb3d361b7700ebb3960b | [
"MIT"
] | 19 | 2021-03-11T12:59:00.000Z | 2022-02-12T18:50:58.000Z | Bot/sending_emails.py | DogsonPl/bot_for_messenger | 2d6664b52b59696dc82efb3d361b7700ebb3960b | [
"MIT"
] | null | null | null | Bot/sending_emails.py | DogsonPl/bot_for_messenger | 2d6664b52b59696dc82efb3d361b7700ebb3960b | [
"MIT"
] | 4 | 2021-03-10T23:07:13.000Z | 2021-09-28T18:55:30.000Z | import random as rd
import asyncio
import aiosmtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from . import parse_config
async def get_confirmation_code():
    """Return a random 5-digit confirmation code in [10000, 99999].

    Uses the 'secrets' module rather than 'random': confirmation codes are
    security-sensitive, so a cryptographically strong source is preferred.
    """
    import secrets  # local import keeps this fix self-contained
    return 10000 + secrets.randbelow(90000)
class SmptConnection:
    """Async SMTP helper: holds one aiosmtplib connection and builds the
    MIME messages the bot sends."""

    def __init__(self):
        # Implicit-TLS SMTP on port 465; host/credentials come from config.
        self.smpt_connection = aiosmtplib.SMTP(hostname=HOSTNAME, port=465, use_tls=True)

    async def connect(self):
        """Open the SMTP connection and authenticate."""
        await self.smpt_connection.connect()
        await self.smpt_connection.ehlo()
        await self.smpt_connection.login(MAIL, PASSWORD)

    async def send_mail(self, receiver, message):
        """Send *message*; on server disconnect, reconnect and retry.

        Returns a human-readable (Polish) status string.
        """
        try:
            await self.smpt_connection.send_message(message)
            return f"✅ Wysłano email z kodem do {receiver}"
        except aiosmtplib.errors.SMTPRecipientsRefused:
            return "🚫 Nie udało się wysłać emaila. Czy na pewno podałeś poprawny email?"
        except aiosmtplib.errors.SMTPServerDisconnected:
            await self.connect()
            # Bug fix: return the retry's status string — the original
            # dropped it and returned None after a reconnect.
            # NOTE(review): the retry is unbounded if the server keeps
            # disconnecting; consider a retry limit.
            return await self.send_mail(receiver, message)

    @staticmethod
    async def create_message(receiver, code):
        """Build the confirmation-code email for *receiver*."""
        message = MIMEMultipart("alternative")
        message["From"] = MAIL
        message["To"] = receiver
        message["Subject"] = "Kod potwierdzający"
        message.attach(MIMEText(f"""<h1>Twój kod to {code}</h1>
                                Wpisz komendę <b>!kod {code}</b>. Kod wygaśnie za godzinę<br>
                                Jeśli nie chciałeś połączyć tego maila z botem na Facebooku, zignoruj tego maila""",
                                "html", "utf-8"))
        return message

    @staticmethod
    async def create_traceback_message(traceback_message):
        """Build an error-report email addressed to the maintainer."""
        message = MIMEMultipart("alternative")
        message["From"] = MAIL
        message["To"] = "dogsonkrul@gmail.com"
        message["Subject"] = "Bot error"
        message.attach(MIMEText(traceback_message, "html", "utf-8"))
        return message
# Read the SMTP settings once at import time and start connecting in the
# background so the first send does not pay the handshake cost.
loop = asyncio.get_event_loop()
HOSTNAME, MAIL, PASSWORD = loop.run_until_complete(parse_config.get_smpt_config())
smpt_connection = SmptConnection()
loop.create_task(smpt_connection.connect())
| 35.721311 | 120 | 0.662689 | import random as rd
import asyncio
import aiosmtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from . import parse_config
async def get_confirmation_code():
confirmation_code = rd.randint(10000, 99999)
return confirmation_code
class SmptConnection:
def __init__(self):
self.smpt_connection = aiosmtplib.SMTP(hostname=HOSTNAME, port=465, use_tls=True)
async def connect(self):
await self.smpt_connection.connect()
await self.smpt_connection.ehlo()
await self.smpt_connection.login(MAIL, PASSWORD)
async def send_mail(self, receiver, message):
try:
await self.smpt_connection.send_message(message)
return f"✅ Wysłano email z kodem do {receiver}"
except aiosmtplib.errors.SMTPRecipientsRefused:
return "🚫 Nie udało się wysłać emaila. Czy na pewno podałeś poprawny email?"
except aiosmtplib.errors.SMTPServerDisconnected:
await self.connect()
await self.send_mail(receiver, message)
@staticmethod
async def create_message(receiver, code):
message = MIMEMultipart("alternative")
message["From"] = MAIL
message["To"] = receiver
message["Subject"] = "Kod potwierdzający"
message.attach(MIMEText(f"""<h1>Twój kod to {code}</h1>
Wpisz komendę <b>!kod {code}</b>. Kod wygaśnie za godzinę<br>
Jeśli nie chciałeś połączyć tego maila z botem na Facebooku, zignoruj tego maila""",
"html", "utf-8"))
return message
@staticmethod
async def create_traceback_message(traceback_message):
message = MIMEMultipart("alternative")
message["From"] = MAIL
message["To"] = "dogsonkrul@gmail.com"
message["Subject"] = "Bot error"
message.attach(MIMEText(traceback_message, "html", "utf-8"))
return message
loop = asyncio.get_event_loop()
HOSTNAME, MAIL, PASSWORD = loop.run_until_complete(parse_config.get_smpt_config())
smpt_connection = SmptConnection()
loop.create_task(smpt_connection.connect())
| true | true |
1c3f39e71209012af52fd19efc149b2e9bb09f5e | 8,485 | py | Python | docs/conf.py | lalmeras/clickable | 6182f8a106c202a9bb1e6d7142e2b5b4734c13f3 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | lalmeras/clickable | 6182f8a106c202a9bb1e6d7142e2b5b4734c13f3 | [
"BSD-3-Clause"
] | 297 | 2017-09-29T23:51:42.000Z | 2021-08-31T09:27:17.000Z | docs/conf.py | lalmeras/clickable | 6182f8a106c202a9bb1e6d7142e2b5b4734c13f3 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# clickable documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import clickable
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'clickable helper scripts'
copyright = u"2017, Laurent Almeras"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = clickable.__version__
# The full version, including alpha/beta/rc tags.
release = clickable.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'clickabledoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'clickable.tex',
u'clickable helper scripts Documentation',
u'Laurent Almeras', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'clickable',
u'clickable helper scripts Documentation',
[u'Laurent Almeras'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'clickable',
u'clickable helper scripts Documentation',
u'Laurent Almeras',
'clickable',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 30.742754 | 76 | 0.717384 |
import sys
import os
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
import clickable
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'clickable helper scripts'
copyright = u"2017, Laurent Almeras"
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = clickable.__version__
# The full version, including alpha/beta/rc tags.
release = clickable.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'clickabledoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'clickable.tex',
u'clickable helper scripts Documentation',
u'Laurent Almeras', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'clickable',
u'clickable helper scripts Documentation',
[u'Laurent Almeras'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'clickable',
u'clickable helper scripts Documentation',
u'Laurent Almeras',
'clickable',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
| true | true |
1c3f3a76aba94a9ad074c4c60884e68d0eb42c07 | 1,148 | py | Python | setup.py | kttian/ifcc | 017827fb175713802d3128de4841cc4d54cc7598 | [
"Apache-2.0"
] | 43 | 2020-10-21T03:25:21.000Z | 2022-03-26T08:13:06.000Z | setup.py | kttian/ifcc | 017827fb175713802d3128de4841cc4d54cc7598 | [
"Apache-2.0"
] | 8 | 2020-12-04T15:06:45.000Z | 2022-03-28T12:18:14.000Z | setup.py | kttian/ifcc | 017827fb175713802d3128de4841cc4d54cc7598 | [
"Apache-2.0"
] | 10 | 2020-11-13T03:46:09.000Z | 2022-02-05T21:39:52.000Z | import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='ifcc',
version='0.2.0',
author='Yasuhide Miura',
author_email='ysmiura@stanford.edu',
description='The code of: Improving Factual Completeness and Consistency of Image-to-text Radiology Report Generation',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/ysmiura/ifcc',
packages='clinicgen',
python_requires='>=3.7',
install_requires=[
'bert-score==0.3.0',
'bioc==1.3.4',
'bllipparser==2016.9.11',
'cachetools==4.1.0',
'flask==1.1.1',
'jpype1==0.6.3',
'networkx==1.11',
'nltk==3.4.5',
'numpy==1.18.5',
'pandas==1.0.1',
'pathlib2==2.3.5',
'ply==3.11',
'pystanforddependencies==0.3.1',
'rouge==0.3.2',
'scispacy==0.2.0',
'spacy==2.1.3',
'stanza==1.1.1',
'tensorboard==2.0.0',
'torch==1.5.0',
'torchvision==0.6.0',
'tqdm==4.45.0',
'transformers==2.9.0'
]
)
| 26.697674 | 123 | 0.550523 | import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='ifcc',
version='0.2.0',
author='Yasuhide Miura',
author_email='ysmiura@stanford.edu',
description='The code of: Improving Factual Completeness and Consistency of Image-to-text Radiology Report Generation',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/ysmiura/ifcc',
packages='clinicgen',
python_requires='>=3.7',
install_requires=[
'bert-score==0.3.0',
'bioc==1.3.4',
'bllipparser==2016.9.11',
'cachetools==4.1.0',
'flask==1.1.1',
'jpype1==0.6.3',
'networkx==1.11',
'nltk==3.4.5',
'numpy==1.18.5',
'pandas==1.0.1',
'pathlib2==2.3.5',
'ply==3.11',
'pystanforddependencies==0.3.1',
'rouge==0.3.2',
'scispacy==0.2.0',
'spacy==2.1.3',
'stanza==1.1.1',
'tensorboard==2.0.0',
'torch==1.5.0',
'torchvision==0.6.0',
'tqdm==4.45.0',
'transformers==2.9.0'
]
)
| true | true |
1c3f3acd79d785a1aab6f01a36bd71f0c6d3cbac | 39 | py | Python | protocols/protocol_7_0/coverage.py | Lucioric2000/GelReportModels | 1704cdea3242d5b46c8b81ef46553ccae2799435 | [
"Apache-2.0"
] | null | null | null | protocols/protocol_7_0/coverage.py | Lucioric2000/GelReportModels | 1704cdea3242d5b46c8b81ef46553ccae2799435 | [
"Apache-2.0"
] | null | null | null | protocols/protocol_7_0/coverage.py | Lucioric2000/GelReportModels | 1704cdea3242d5b46c8b81ef46553ccae2799435 | [
"Apache-2.0"
] | null | null | null | from protocols.coverage_0_1_0 import *
| 19.5 | 38 | 0.846154 | from protocols.coverage_0_1_0 import *
| true | true |
1c3f3b07e8b2c3bb8e69f784a981b7d41aebc9f9 | 212 | py | Python | Symbol Patterns/symbolicpattern38.py | Daksh777/Python-PatternHouse | ab801631c2e1f5ed3cc12a26c959d41a5e51273d | [
"MIT"
] | 8 | 2021-03-20T11:26:35.000Z | 2022-01-05T02:39:15.000Z | Symbol Patterns/symbolicpattern38.py | Daksh777/Python-PatternHouse | ab801631c2e1f5ed3cc12a26c959d41a5e51273d | [
"MIT"
] | 851 | 2021-04-02T09:08:15.000Z | 2022-01-12T11:26:57.000Z | Symbol Patterns/symbolicpattern38.py | Daksh777/Python-PatternHouse | ab801631c2e1f5ed3cc12a26c959d41a5e51273d | [
"MIT"
] | 15 | 2021-04-13T06:10:17.000Z | 2022-01-08T05:07:21.000Z | print("Enter the no of rows: ")
n = int(input())
for i in range(0, n):
for j in range(0, i+1):
if j % 2 == 0:
print("#", end=" ")
else:
print("*", end=" ")
print()
| 21.2 | 31 | 0.410377 | print("Enter the no of rows: ")
n = int(input())
for i in range(0, n):
for j in range(0, i+1):
if j % 2 == 0:
print("#", end=" ")
else:
print("*", end=" ")
print()
| true | true |
1c3f3b3bcfad31e4d9aa1a8ba8c130cb3fca91f0 | 925 | py | Python | mainland/_main.py | moshez/mainland | 4aadf63d6e971518940828b1cc0b648ff5629bdd | [
"MIT"
] | null | null | null | mainland/_main.py | moshez/mainland | 4aadf63d6e971518940828b1cc0b648ff5629bdd | [
"MIT"
] | 1 | 2015-06-28T04:29:16.000Z | 2015-06-28T04:29:16.000Z | mainland/_main.py | moshez/mainland | 4aadf63d6e971518940828b1cc0b648ff5629bdd | [
"MIT"
] | null | null | null | # Copyright (c) Moshe Zadka
# See LICENSE for details.
import importlib
def main(argv, root, suffix=None, marker=None):
    """Dispatch to a runnable submodule named by the first CLI argument.

    The module ``root + argv[1]`` is imported (trying each *suffix* option)
    and must expose a truthy *marker* attribute to be considered runnable;
    its ``main`` is then called with the remaining arguments.
    """
    args = list(argv)
    args.pop(0)  # drop the program name
    if not args:
        raise SystemExit('Need subcommand name')
    if not root.endswith('.'):
        root += '.'
    moduleName = root + args[0]
    if marker is None:
        # e.g. root "pkg." -> marker "PKG.MAIN_OK"
        marker = root.upper() + 'MAIN_OK'
    try:
        module = getModule(moduleName, suffix)
    except ImportError:
        raise SystemExit('Could not find command ' + moduleName)
    if not getattr(module, marker, False):
        raise SystemExit('module is not runnable ' + moduleName)
    return module.main(args)
def getModule(name, suffix=None):
    """Import and return the first importable module named ``name + option``.

    *suffix* is an iterable of candidate suffixes (default: just ``''``).
    Raises ImportError if no candidate can be imported.
    """
    if suffix is None:
        suffix = ['']
    last_error = None
    for option in suffix:
        try:
            return importlib.import_module(name + option)
        except ImportError as e:
            # Bug fix: the name bound by ``except ... as e`` is unbound when
            # the handler exits (PEP 3110), so the original ``raise e`` after
            # the loop raised UnboundLocalError instead of the ImportError.
            # Keep a reference that survives the handler.
            last_error = e
    if last_error is not None:
        raise last_error
    # Also covers an empty *suffix* iterable, which previously crashed too.
    raise ImportError('No module named ' + name)
| 25.694444 | 64 | 0.616216 |
import importlib
def main(argv, root, suffix=None, marker=None):
argv = list(argv)
argv.pop(0)
if not argv:
raise SystemExit('Need subcommand name')
moduleName = argv[0]
if not root.endswith('.'):
root += '.'
moduleName = root + moduleName
if marker is None:
marker = root.upper() + 'MAIN_OK'
try:
module = getModule(moduleName, suffix)
except ImportError:
raise SystemExit('Could not find command ' + moduleName)
if not getattr(module, marker, False):
raise SystemExit('module is not runnable ' + moduleName)
return module.main(argv)
def getModule(name, suffix=None):
if suffix is None:
suffix = ['']
for option in suffix:
try:
return importlib.import_module(name + option)
except ImportError as e:
pass
raise e
| true | true |
1c3f3cab650e18c49a607e9a58896887699da0e5 | 6,288 | py | Python | train.py | deep-spin/SIGMORPHON2019 | 60cf3b53be42e76238e7928405b2916cd9aed6c4 | [
"MIT"
] | 2 | 2019-07-30T06:50:21.000Z | 2020-02-05T17:42:06.000Z | train.py | deep-spin/SIGMORPHON2019 | 60cf3b53be42e76238e7928405b2916cd9aed6c4 | [
"MIT"
] | 1 | 2019-08-20T08:57:21.000Z | 2019-08-21T08:49:48.000Z | train.py | deep-spin/SIGMORPHON2019 | 60cf3b53be42e76238e7928405b2916cd9aed6c4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import configargparse
import onmt.opts as opts
import os
import random
from itertools import chain
import torch
import torchtext
# from torchtext.data.iterator import Iterator
from onmt.model_builder import build_model
from onmt.trainer import build_trainer
from onmt.utils.logging import init_logger, logger
from onmt.utils.misc import use_gpu
class OrderedIterator(torchtext.data.Iterator):
    """torchtext iterator that sorts examples by length within each batch.

    Training mode additionally pools 100 batches worth of examples, sorts the
    pool, cuts it into batches, and yields those batches in shuffled order.
    """

    def create_batches(self):
        if not self.train:
            # Evaluation: deterministic batches, each internally sorted.
            self.batches = [
                sorted(minibatch, key=self.sort_key)
                for minibatch in torchtext.data.batch(
                    self.data(), self.batch_size, self.batch_size_fn)
            ]
            return

        def pooled(data, shuffler):
            # Sort within a large pool so batches contain similar lengths,
            # then shuffle the batches so training order still varies.
            for pool in torchtext.data.batch(data, self.batch_size * 100):
                chunks = torchtext.data.batch(
                    sorted(pool, key=self.sort_key),
                    self.batch_size, self.batch_size_fn)
                for minibatch in shuffler(list(chunks)):
                    yield minibatch

        self.batches = pooled(self.data(), self.random_shuffler)
def _check_save_model_path(opt):
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
else:
dec += param.nelement()
return n_params, enc, dec
def training_opt_postprocessing(opt, device_id):
    """Expand shorthand options, seed the RNGs, and pick the CUDA device.

    Returns the (mutated) *opt* namespace.
    """
    # A single word_vec_size / layers / rnn_size value (-1 means "unset")
    # fans out to the encoder- and decoder-specific options.
    if opt.word_vec_size != -1:
        opt.src_word_vec_size = opt.tgt_word_vec_size = opt.word_vec_size
    if opt.layers != -1:
        opt.enc_layers = opt.dec_layers = opt.layers
    if opt.rnn_size != -1:
        opt.enc_rnn_size = opt.dec_rnn_size = opt.rnn_size

    if opt.seed > 0:
        torch.manual_seed(opt.seed)
        # torchtext's shuffled iterator uses the stdlib RNG; seeding it keeps
        # dataset read order identical across (multi-gpu) processes.
        random.seed(opt.seed)
        # Some cudnn kernels are nondeterministic even with a fixed seed
        # unless explicitly told to be deterministic.
        torch.backends.cudnn.deterministic = True

    if device_id >= 0:
        torch.cuda.set_device(device_id)
        if opt.seed > 0:
            # Ensures the same initialization in multi-gpu mode.
            torch.cuda.manual_seed(opt.seed)

    return opt
def main(opt):
    """Train a morphology model end-to-end from parsed command-line options.

    Loads (or resumes from a checkpoint) the vocab, model, and optimizer,
    builds train/valid iterators from preprocessed ``.pt`` datasets, and runs
    the trainer for ``opt.epochs`` epochs.
    """
    if opt.gpuid:
        raise AssertionError("gpuid is deprecated \
            see world_size and gpu_ranks")
    assert opt.world_size <= 1, "you don't need multi-gpu for morphology"
    # Single GPU (device 0) when exactly one gpu rank is given, else CPU (-1).
    device_id = 0 if len(opt.gpu_ranks) == 1 else -1
    opt = training_opt_postprocessing(opt, device_id)
    init_logger(opt.log_file)
    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        logger.info('Loading checkpoint from %s' % opt.train_from)
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)
        # Load default opts values then overwrite it with opts from
        # the checkpoint. It's useful in order to re-train a model
        # after adding a new option (not set in checkpoint)
        dummy_parser = configargparse.ArgumentParser()
        opts.model_opts(dummy_parser)
        default_opt = dummy_parser.parse_known_args([])[0]
        model_opt = default_opt
        model_opt.__dict__.update(checkpoint['opt'].__dict__)
        logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
        fields = checkpoint['vocab']
    else:
        checkpoint = None
        model_opt = opt
        fields = torch.load(opt.data + '.vocab.pt')
    # Log the vocabulary size of every field that carries a vocab.
    for key, values in fields.items():
        for name, f in values:
            if f.use_vocab:
                logger.info(' * %s vocab size = %d' % (name, len(f.vocab)))
    # Build model.
    logger.info('Building model...')
    model = build_model(model_opt, fields, use_gpu(opt), checkpoint)
    logger.info(model)
    n_params, enc, dec = _tally_parameters(model)
    logger.info('encoder: %d' % enc)
    logger.info('decoder: %d' % dec)
    logger.info('* number of parameters: %d' % n_params)
    _check_save_model_path(opt)
    # Build optimizer.
    params = model.parameters()
    optim_args = {"lr": opt.learning_rate}
    if opt.optim == "adam":
        # no need to mess with the default betas
        optim_args["eps"] = 1e-9
    elif opt.optim == "adagrad":
        optim_args["initial_accumulator_value"] = opt.adagrad_accumulator_init
    # Look the optimizer class up by title-cased name, e.g. "adam" -> Adam.
    optim = getattr(torch.optim, opt.optim.title())(params, **optim_args)
    print(optim)
    trainer = build_trainer(opt, model_opt, device_id, model, fields, optim)
    # this line is kind of a temporary kludge because different objects expect
    # fields to have a different structure
    dataset_fields = dict(chain.from_iterable(fields.values()))
    device = "cuda" if opt.gpu_ranks else "cpu"
    train_dataset = torch.load(opt.data + '.train.pt')
    train_dataset.fields = dataset_fields
    train_iter = OrderedIterator(
        train_dataset, opt.batch_size, sort_within_batch=True,
        device=device, repeat=False, shuffle=not opt.no_shuffle)
    valid_dataset = torch.load(opt.data + '.valid.pt')
    valid_dataset.fields = dataset_fields
    valid_iter = OrderedIterator(
        valid_dataset, opt.valid_batch_size, train=False,
        sort_within_batch=True, device=device)
    logger.info('Starting training on {}'.format(device))
    trainer.train(train_iter, valid_iter, opt.epochs)
if __name__ == "__main__":
    # Assemble the full option parser (config / model / train groups); values
    # may also come from a YAML config file via configargparse.
    parser = configargparse.ArgumentParser(
        description='train.py',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)
    opts.config_opts(parser)
    opts.add_md_help_argument(parser)
    opts.model_opts(parser)
    opts.train_opts(parser)
    opt = parser.parse_args()
    main(opt)
| 33.625668 | 78 | 0.655057 |
import configargparse
import onmt.opts as opts
import os
import random
from itertools import chain
import torch
import torchtext
from onmt.model_builder import build_model
from onmt.trainer import build_trainer
from onmt.utils.logging import init_logger, logger
from onmt.utils.misc import use_gpu
class OrderedIterator(torchtext.data.Iterator):
def create_batches(self):
if self.train:
def _pool(data, random_shuffler):
for p in torchtext.data.batch(data, self.batch_size * 100):
p_batch = torchtext.data.batch(
sorted(p, key=self.sort_key),
self.batch_size, self.batch_size_fn)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = _pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in torchtext.data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
def _check_save_model_path(opt):
save_model_path = os.path.abspath(opt.save_model)
model_dirname = os.path.dirname(save_model_path)
if not os.path.exists(model_dirname):
os.makedirs(model_dirname)
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
enc = 0
dec = 0
for name, param in model.named_parameters():
if 'encoder' in name:
enc += param.nelement()
else:
dec += param.nelement()
return n_params, enc, dec
def training_opt_postprocessing(opt, device_id):
if opt.word_vec_size != -1:
opt.src_word_vec_size = opt.word_vec_size
opt.tgt_word_vec_size = opt.word_vec_size
if opt.layers != -1:
opt.enc_layers = opt.layers
opt.dec_layers = opt.layers
if opt.rnn_size != -1:
opt.enc_rnn_size = opt.rnn_size
opt.dec_rnn_size = opt.rnn_size
if opt.seed > 0:
torch.manual_seed(opt.seed)
random.seed(opt.seed)
torch.backends.cudnn.deterministic = True
if device_id >= 0:
torch.cuda.set_device(device_id)
if opt.seed > 0:
torch.cuda.manual_seed(opt.seed)
return opt
def main(opt):
    """Single-process training entry point.

    Loads (or resumes) vocab/model/optimizer from ``opt`` and runs the
    trainer over the preprocessed ``*.train.pt`` / ``*.valid.pt`` data.
    """
    if opt.gpuid:
        raise AssertionError("gpuid is deprecated \
              see world_size and gpu_ranks")

    assert opt.world_size <= 1, "you don't need multi-gpu for morphology"
    # device_id 0 when exactly one GPU rank is configured, otherwise CPU (-1).
    device_id = 0 if len(opt.gpu_ranks) == 1 else -1

    opt = training_opt_postprocessing(opt, device_id)
    init_logger(opt.log_file)
    # Load checkpoint if we resume from a previous training.
    if opt.train_from:
        logger.info('Loading checkpoint from %s' % opt.train_from)
        # map_location keeps tensors on CPU regardless of where they were saved.
        checkpoint = torch.load(opt.train_from,
                                map_location=lambda storage, loc: storage)

        # Load default opts values then overwrite it with opts from
        # the checkpoint. It's useful in order to re-train a model
        dummy_parser = configargparse.ArgumentParser()
        opts.model_opts(dummy_parser)
        default_opt = dummy_parser.parse_known_args([])[0]

        model_opt = default_opt
        model_opt.__dict__.update(checkpoint['opt'].__dict__)
        logger.info('Loading vocab from checkpoint at %s.' % opt.train_from)
        fields = checkpoint['vocab']
    else:
        checkpoint = None
        model_opt = opt
        fields = torch.load(opt.data + '.vocab.pt')

    # Report the vocabulary size of every field that builds a vocab.
    for key, values in fields.items():
        for name, f in values:
            if f.use_vocab:
                logger.info(' * %s vocab size = %d' % (name, len(f.vocab)))

    logger.info('Building model...')
    model = build_model(model_opt, fields, use_gpu(opt), checkpoint)
    logger.info(model)
    n_params, enc, dec = _tally_parameters(model)
    logger.info('encoder: %d' % enc)
    logger.info('decoder: %d' % dec)
    logger.info('* number of parameters: %d' % n_params)
    _check_save_model_path(opt)

    # Resolve the optimizer class from torch.optim by name ("adam" -> Adam).
    params = model.parameters()
    optim_args = {"lr": opt.learning_rate}
    if opt.optim == "adam":
        optim_args["eps"] = 1e-9
    elif opt.optim == "adagrad":
        optim_args["initial_accumulator_value"] = opt.adagrad_accumulator_init
    optim = getattr(torch.optim, opt.optim.title())(params, **optim_args)
    print(optim)

    trainer = build_trainer(opt, model_opt, device_id, model, fields, optim)

    # Both datasets share a single flattened field dict.
    dataset_fields = dict(chain.from_iterable(fields.values()))

    device = "cuda" if opt.gpu_ranks else "cpu"

    train_dataset = torch.load(opt.data + '.train.pt')
    train_dataset.fields = dataset_fields
    train_iter = OrderedIterator(
        train_dataset, opt.batch_size, sort_within_batch=True,
        device=device, repeat=False, shuffle=not opt.no_shuffle)

    valid_dataset = torch.load(opt.data + '.valid.pt')
    valid_dataset.fields = dataset_fields
    valid_iter = OrderedIterator(
        valid_dataset, opt.valid_batch_size, train=False,
        sort_within_batch=True, device=device)

    logger.info('Starting training on {}'.format(device))
    trainer.train(train_iter, valid_iter, opt.epochs)
if __name__ == "__main__":
    # Command-line entry point: collect every option group (config file,
    # markdown help, model and training options) and run training.
    parser = configargparse.ArgumentParser(
        description='train.py',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)

    opts.config_opts(parser)
    opts.add_md_help_argument(parser)
    opts.model_opts(parser)
    opts.train_opts(parser)

    opt = parser.parse_args()
    main(opt)
| true | true |
1c3f3d09d01c34a2e0a05784c79668ab8345ab5d | 16,772 | py | Python | datacube/_version.py | Zac-HD/datacube-core | ebc2025b6fb9d22fb406cdf5f79eba6d144c57e3 | [
"Apache-2.0"
] | 27 | 2016-08-16T18:22:47.000Z | 2018-08-25T17:18:15.000Z | datacube/_version.py | cronosnull/agdc-v2 | 596923779d3650c47a6b43276b3369a5ec619158 | [
"Apache-2.0"
] | 103 | 2018-03-21T15:00:05.000Z | 2020-06-04T05:40:25.000Z | datacube/_version.py | cronosnull/agdc-v2 | 596923779d3650c47a6b43276b3369a5ec619158 | [
"Apache-2.0"
] | 27 | 2016-08-26T18:14:40.000Z | 2021-12-24T08:41:29.000Z |
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.16 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
# pylint: skip-file
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Return the git-archive substitution keywords as a dict.

    The "$Format:...$" placeholders are expanded by ``git archive`` via
    the export-subst attribute; in a plain checkout they stay literal.
    """
    # NOTE: keep each assignment on its own line -- setup.py/versioneer
    # greps this file for the variable names (see git_get_keywords).
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    return {"refnames": git_refnames, "full": git_full}
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # Plain attribute bag: VCS, style, tag_prefix, parentdir_prefix,
    # versionfile_source and verbose are assigned by get_config().
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values were baked into the file by `setup.py versioneer`.
    settings = {
        "VCS": "git",
        "style": "pep440",
        "tag_prefix": "datacube-",
        "parentdir_prefix": "None",
        "versionfile_source": "datacube/_version.py",
        "verbose": False,
    }
    cfg = VersioneerConfig()
    for attr, value in settings.items():
        setattr(cfg, attr, value)
    return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator factory marking a function as HANDLERS[vcs][method]."""
    def decorate(f):
        """Register f under HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first launchable command from *commands* with *args*.

    Each entry of *commands* is an alternative spelling of the same tool
    (e.g. ["git.cmd", "git.exe"] on Windows).  Returns the stripped
    stdout text on success, or None when no command could be started or
    the process exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        display = str([candidate] + args)
        try:
            # shell=False on purpose: Windows callers pass git.cmd/git.exe
            # explicitly instead of relying on shell resolution.
            process = subprocess.Popen(
                [candidate] + args, cwd=cwd, stdout=subprocess.PIPE,
                stderr=(subprocess.PIPE if hide_stderr else None))
        except EnvironmentError:
            exc = sys.exc_info()[1]
            if exc.errno == errno.ENOENT:
                continue  # this spelling is not installed; try the next
            if verbose:
                print("unable to run %s" % display)
                print(exc)
            return None
        break
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    output = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        output = output.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % display)
        return None
    return output
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive the version from the name of the parent directory.

    Source tarballs conventionally unpack into "<project>-<version>/",
    so the version is whatever follows *parentdir_prefix* in the
    basename of *root*.  Raises NotThisMethod when the prefix does not
    match.
    """
    dirname = os.path.basename(root)
    if not dirname.startswith(parentdir_prefix):
        if verbose:
            print("guessing rootdir is '%s', but '%s' doesn't start with "
                  "prefix '%s'" % (root, dirname, parentdir_prefix))
        raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
    return {
        "version": dirname[len(parentdir_prefix):],
        "full-revisionid": None,
        "dirty": False,
        "error": None,
    }
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version keywords ("refnames", "full") from the given file.

    Parses the ``git_refnames = "..."`` / ``git_full = "..."`` lines with
    a regexp rather than importing the module.  Returns whatever subset
    of keywords was found; an unreadable file yields an empty dict.
    """
    keywords = {}
    try:
        # The original opened the file without a context manager, leaking
        # the handle if anything raised before f.close(); `with` always
        # closes it.
        with open(versionfile_abs, "r") as fobj:
            for line in fobj:
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Derive version info from expanded git-archive keywords.

    Raises NotThisMethod when the keywords are missing or still contain
    the unexpanded "$Format" placeholders (i.e. not a git-archive
    tarball).
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = {ref.strip() for ref in refnames.strip("()").split(",")}
    # git >= 1.8.3 lists tags as "tag: foo-1.0"; prefer those markers.
    TAG = "tag: "
    tags = {ref[len(TAG):] for ref in refs if ref.startswith(TAG)}
    if not tags:
        # Old git (< 1.8.3) strips the refs/tags/ prefix, so tags and
        # branches are indistinguishable.  Heuristic: keep only refnames
        # containing a digit, which filters out "HEAD", "master",
        # "release" and other common branch names.
        tags = {ref for ref in refs if re.search(r'\d', ref)}
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorted() prefers e.g. "2.0" over "2.0rc1"
        if not ref.startswith(tag_prefix):
            continue
        version = ref[len(tag_prefix):]
        if verbose:
            print("picking %s" % version)
        return {"version": version,
                "full-revisionid": keywords["full"].strip(),
                "dirty": False, "error": None}
    # No suitable tag: version is "0+unknown" but the full hex survives.
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, dirty,
    closest-tag (None when no tag matched) and distance.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")

    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long",
                                      "--match", "%s*" % tag_prefix],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()

    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None

    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out

    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]

    # now we have TAG-NUM-gHEX or HEX

    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces

        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]

        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))

        # commit: short hex revision ID
        pieces["short"] = mo.group(3)

    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits

    return pieces
def plus_or_dot(pieces):
    """Return the local-version separator: "." if a "+" is already present, else "+"."""
    # `closest-tag` may be absent *or* None; the original did
    # pieces.get("closest-tag", ""), which still returns None for an
    # explicit None value and makes `"+" in None` raise TypeError.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build TAG[+DISTANCE.gHEX[.dirty]] -- PEP 440 with a local version id.

    A dirtied tagged build renders as TAG+0.gHEX.dirty; with no tag at
    all the result is 0+untagged.DISTANCE.gHEX[.dirty].
    """
    suffix = "%d.g%s" % (pieces["distance"], pieces["short"])
    if pieces["dirty"]:
        suffix += ".dirty"
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags at all
        return "0+untagged." + suffix
    if pieces["distance"] or pieces["dirty"]:
        return tag + plus_or_dot(pieces) + suffix
    return tag
def render_pep440_pre(pieces):
    """Build TAG[.post.devDISTANCE] -- never carries a -dirty marker.

    With no tag at all the result is 0.post.devDISTANCE.
    """
    tag = pieces["closest-tag"]
    if tag and not pieces["distance"]:
        return tag
    # Untagged trees always get the .post.dev suffix, even at distance 0.
    return (tag or "0") + ".post.dev%d" % pieces["distance"]
def render_pep440_post(pieces):
    """Build TAG[.postDISTANCE[.dev0]+gHEX].

    ".dev0" marks a dirty tree.  Note that .dev0 sorts *before* the
    corresponding clean release, so a dirty tree appears "older" -- but
    releasing from a dirty tree is discouraged anyway.  Untagged trees
    render as 0.postDISTANCE[.dev0]+gHEX.
    """
    tag = pieces["closest-tag"]
    if tag and not (pieces["distance"] or pieces["dirty"]):
        return tag
    rendered = (tag or "0") + ".post%d" % pieces["distance"]
    if pieces["dirty"]:
        rendered += ".dev0"
    separator = plus_or_dot(pieces) if tag else "+"
    return rendered + separator + "g%s" % pieces["short"]
def render_pep440_old(pieces):
    """Build TAG[.postDISTANCE[.dev0]] (no local version id).

    ".dev0" marks a dirty tree; untagged trees render as
    0.postDISTANCE[.dev0].
    """
    tag = pieces["closest-tag"]
    if tag and not (pieces["distance"] or pieces["dirty"]):
        return tag
    rendered = (tag or "0") + ".post%d" % pieces["distance"]
    return rendered + ".dev0" if pieces["dirty"] else rendered
def render_git_describe(pieces):
    """Build TAG[-DISTANCE-gHEX][-dirty].

    Mimics `git describe --tags --dirty --always`: the distance/hash
    part is omitted when the commit is exactly on the tag, and with no
    tag the bare short hash is used (note: no 'g' prefix).
    """
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags at all
        rendered = pieces["short"]
    return rendered + "-dirty" if pieces["dirty"] else rendered
def render_git_describe_long(pieces):
    """Build TAG-DISTANCE-gHEX[-dirty].

    Mimics `git describe --tags --dirty --always --long`: the
    distance/hash part is unconditional; with no tag the bare short hash
    is used (note: no 'g' prefix).
    """
    if pieces["closest-tag"]:
        rendered = "%s-%d-g%s" % (
            pieces["closest-tag"], pieces["distance"], pieces["short"])
    else:
        # exception #1: no tags at all
        rendered = pieces["short"]
    return rendered + "-dirty" if pieces["dirty"] else rendered
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}

    if not style or style == "default":
        style = "pep440"  # the default

    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    rendered = renderers[style](pieces)

    return {"version": rendered, "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None}
def get_versions():
    """Get version information or return default if unable to do so.

    Resolution order: expanded git-archive keywords, then `git describe`
    on the checked-out tree, then the parent-directory name; falls back
    to "0+unknown" with an explanatory error string.
    """
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.

    cfg = get_config()
    verbose = cfg.verbose

    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass

    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for i in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree"}

    try:
        pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
        return render(pieces, cfg.style)
    except NotThisMethod:
        pass

    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
    except NotThisMethod:
        pass

    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version"}
| 34.439425 | 79 | 0.590866 |
import errno
import os
import re
import subprocess
import sys
def get_keywords():
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
def get_config():
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = "datacube-"
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "datacube/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method):
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
    """Return the local-version separator: "." if a "+" is already present, else "+"."""
    # `closest-tag` may be absent *or* explicitly None; `pieces.get(...,
    # "")` still returns None in the latter case and `"+" in None`
    # raises TypeError, so coalesce with `or ""`.
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
| true | true |
1c3f3d27fb37722a95dadc0a104c89f03af4fc1c | 1,652 | py | Python | app/distribution/forms/settings.py | RinKiruko/MatHelp | 5178f83fdf46c92be1014e18e63d64f36e80082e | [
"MIT"
] | null | null | null | app/distribution/forms/settings.py | RinKiruko/MatHelp | 5178f83fdf46c92be1014e18e63d64f36e80082e | [
"MIT"
] | null | null | null | app/distribution/forms/settings.py | RinKiruko/MatHelp | 5178f83fdf46c92be1014e18e63d64f36e80082e | [
"MIT"
] | null | null | null | from django import forms
from django.db.models import QuerySet
from django.utils import timezone
from crud.models import StatementCategory, Statement
from general.forms import *
# (value, label) choices for the month selector; labels are Russian month names.
MONTH_CHOICES = (
    (1, 'Январь'),
    (2, 'Февраль'),
    (3, 'Март'),
    (4, 'Апрель'),
    (5, 'Май'),
    (6, 'Июнь'),
    (7, 'Июль'),
    (8, 'Август'),
    (9, 'Сентябрь'),
    (10, 'Октябрь'),
    (11, 'Ноябрь'),
    (12, 'Декабрь'),  # fixed user-facing typo: was 'Декбрь'
)
class SettingsValidationMixin:
    """Form mixin requiring at least one statement category to be selected."""

    def clean(self):
        cleaned_data = self.cleaned_data if not hasattr(super(), 'clean') else super().clean()
        # Two fixes versus the original:
        #  * the condition was inverted -- it raised "at least one category
        #    must be selected" exactly when one WAS selected;
        #  * the checkbox fields are keyed by str(category.id) (see
        #    settings_form_factory), but the lookup used the int id, so
        #    the check could never see a selected category at all.
        if not any(cleaned_data.get(str(cat.id), False)
                   for cat in StatementCategory.objects.all()):
            raise forms.ValidationError("Хотя бы одна категория должна быть выбрана")
        return cleaned_data
def settings_form_factory(categories: QuerySet = None):
    """Build a budget-distribution settings Form class.

    The form has budget/year/month fields plus one checkbox per statement
    category; checkbox fields are keyed by str(category.id).
    """
    if categories is None:
        # Resolve the default lazily: evaluating the queryset inside the
        # argument default binds it at import time, before apps/DB are
        # guaranteed to be ready.
        categories = StatementCategory.objects.all()
    today = timezone.now().date()
    fields = {
        'budget': forms.FloatField(required=True, label='Размер бюджета', min_value=1000),
        'distribution_year': forms.IntegerField(
            required=True,
            label='Год',
            initial=today.year,
        ),
        'distribution_month': forms.TypedChoiceField(
            required=True,
            label='Месяц',
            coerce=int,
            empty_value=1,
            choices=MONTH_CHOICES
        )
    }
    fields.update({
        str(category.id): forms.BooleanField(
            label=category,
            initial=True,
            required=False,
        ) for category in categories
    })
    result = type(
        'SettingsForm',
        (SettingsValidationMixin, BaseBoostrapFormMixin, forms.Form),
        fields
    )
    return result
| 24.656716 | 91 | 0.591404 | from django import forms
from django.db.models import QuerySet
from django.utils import timezone
from crud.models import StatementCategory, Statement
from general.forms import *
# (value, label) choices for the month selector; labels are Russian month names.
MONTH_CHOICES = (
    (1, 'Январь'),
    (2, 'Февраль'),
    (3, 'Март'),
    (4, 'Апрель'),
    (5, 'Май'),
    (6, 'Июнь'),
    (7, 'Июль'),
    (8, 'Август'),
    (9, 'Сентябрь'),
    (10, 'Октябрь'),
    (11, 'Ноябрь'),
    (12, 'Декабрь'),  # fixed user-facing typo: was 'Декбрь'
)
class SettingsValidationMixin:
    """Form mixin requiring at least one statement category to be selected."""

    def clean(self):
        cleaned_data = super().clean()
        # Two fixes versus the original:
        #  * the condition was inverted -- it raised "at least one category
        #    must be selected" exactly when one WAS selected;
        #  * the checkbox fields are keyed by str(category.id) (see
        #    settings_form_factory), but the lookup used the int id, so
        #    the check could never see a selected category at all.
        if not any(cleaned_data.get(str(cat.id), False)
                   for cat in StatementCategory.objects.all()):
            raise forms.ValidationError("Хотя бы одна категория должна быть выбрана")
        return cleaned_data
def settings_form_factory(categories: QuerySet = None):
    """Build a budget-distribution settings Form class.

    The form has budget/year/month fields plus one checkbox per statement
    category; checkbox fields are keyed by str(category.id).
    """
    if categories is None:
        # Resolve the default lazily: evaluating the queryset inside the
        # argument default binds it at import time, before apps/DB are
        # guaranteed to be ready.
        categories = StatementCategory.objects.all()
    today = timezone.now().date()
    fields = {
        'budget': forms.FloatField(required=True, label='Размер бюджета', min_value=1000),
        'distribution_year': forms.IntegerField(
            required=True,
            label='Год',
            initial=today.year,
        ),
        'distribution_month': forms.TypedChoiceField(
            required=True,
            label='Месяц',
            coerce=int,
            empty_value=1,
            choices=MONTH_CHOICES
        )
    }
    fields.update({
        str(category.id): forms.BooleanField(
            label=category,
            initial=True,
            required=False,
        ) for category in categories
    })
    result = type(
        'SettingsForm',
        (SettingsValidationMixin, BaseBoostrapFormMixin, forms.Form),
        fields
    )
    return result
| true | true |
1c3f3daf361120283f4d06efaa952302acb7fa00 | 16,093 | py | Python | tests/test_adt.py | deztructor/pycor | f77e3f197ddda276932af9d3a19e89a590971d3d | [
"MIT"
] | null | null | null | tests/test_adt.py | deztructor/pycor | f77e3f197ddda276932af9d3a19e89a590971d3d | [
"MIT"
] | null | null | null | tests/test_adt.py | deztructor/pycor | f77e3f197ddda276932af9d3a19e89a590971d3d | [
"MIT"
] | null | null | null | from collections import namedtuple
from enum import Enum
from functools import partial
import types
import pytest
from cor.adt.error import (
AccessError,
InvalidFieldError,
MissingFieldError,
RecordError,
)
from cor.adt.hook import (
HooksFactory,
field_invariant,
Target,
)
from cor.adt.record import (
as_basic_type,
ExtensibleRecord,
Factory,
Record,
RecordMixin,
subrecord,
record_factory,
)
from cor.adt.operation import (
anything,
ContractInfo,
convert,
default_conversion,
expect_type,
expect_types,
get_contract_info,
not_empty,
only_if,
provide_missing,
should_be,
skip_missing,
something,
Tag,
)
from cor.util import split_args
class Input(Enum):
    """Tag splitting mixed test data into expected-pass / expected-fail groups."""
    Good = 'good'
    Bad = 'bad'
def _prepare_test_args(*data, good=list(), bad=list()):
    """Merge Input-tagged positional data with keyword data into (good, bad).

    *data* is a flat sequence of Input.Good / Input.Bad markers each
    followed by its cases (parsed by split_args); the keyword lists are
    appended once.
    """
    args, kwargs = split_args(Input, *data)
    assert not args
    good = list(good) + kwargs.get('good', [])
    bad = list(bad) + kwargs.get('bad', [])
    # Bug fix: the original returned `good + kwargs.get('good', [])`
    # (likewise for bad), appending the keyword cases a second time.
    return good, bad
def _test_good_bad(info, convert, good, bad):
    """Drive *convert* through positive and negative cases.

    good: iterable of (args, expected) pairs -- convert(*args) must equal
    expected.  bad: iterable of (args, exception_type) pairs --
    convert(*args) must raise that exception type.
    """
    for input_data, expected in good:
        test_info = '{}: Correct input: {}'.format(info, input_data)
        res = convert(*input_data)
        assert res == expected, test_info

    for input_data, err in bad:
        test_info = '{}: Should cause exception: {}'.format(info, input_data)
        with pytest.raises(err):
            convert(*input_data)
            # Reached only when convert() did NOT raise; pytest.fail's
            # Failed exception propagates through pytest.raises and
            # reports test_info.
            pytest.fail(test_info)
def _test_conversion(conversion, *args, **kwargs):
    """Exercise conversion.convert over prepared (value, expected) cases."""
    good, bad = _prepare_test_args(*args, **kwargs)
    _test_good_bad(
        conversion.info,
        conversion.convert,
        [([value], expected) for value, expected in good],
        [([value], error) for value, error in bad],
    )
def _test_prepare_field(conversion, *args, **kwargs):
good, bad = _prepare_test_args(*args, **kwargs)
good = [([name, value], res) for name, value, res in good]
bad = [([name, value], res) for name, value, res in bad]
_test_good_bad(conversion.info, conversion.prepare_field, good, bad)
def test_convert():
conversion = convert(int)
_test_conversion(
conversion,
Input.Good, ('1', 1), (2, 2),
Input.Bad, (None, TypeError), ('s', ValueError),
)
def test_provide_missing():
_test_conversion(
provide_missing('foo'),
Input.Good, (13, 13), ('', ''), (None, 'foo'),
)
_test_conversion(
provide_missing({'a': 1, 'b': 2}),
Input.Good, (13, 13), ('', ''), (None, {'a': 1, 'b': 2}),
)
def test_only_if():
conversion = only_if(lambda x: x < 10, 'less than 10')
_test_conversion(
conversion,
Input.Good, (9, 9),
Input.Bad, (10, ValueError)
)
_test_prepare_field(
conversion,
Input.Good,
('foo', {'foo': 9}, 9),
Input.Bad,
('foo', {'foo': 10}, InvalidFieldError),
('foo', {'bar': 10}, MissingFieldError),
)
def test_skip_missing():
conversion = skip_missing
_test_prepare_field(
conversion,
Input.Good,
('foo', {}, None),
('foo', {'bar': 1, 'foo': 2}, 2),
)
def test_something():
conversion = something
_test_prepare_field(
conversion,
Input.Good,
('foo', {'foo': 1}, 1),
('foo', {'foo': '1'}, '1'),
Input.Bad,
('foo', None, TypeError),
('foo', {}, KeyError),
('foo', {'bar': 1}, KeyError),
)
def test_anything():
conversion = anything
_test_prepare_field(
conversion,
Input.Good,
('foo', {}, None),
('foo', {'foo': 1}, 1),
('foo', {'foo': '1'}, '1'),
Input.Bad,
('foo', None, AttributeError),
)
def test_expect_types():
conversion = expect_types(str, float)
_test_conversion(
conversion,
good=(
(v, v) for v in
['', 'foo', 1.1]
),
bad=(
(v, TypeError) for v in
[b'', 1, None]
)
)
conversion = expect_type(bytes)
_test_conversion(conversion, Input.Good, (b'bar', b'bar'))
_test_prepare_field(
conversion,
Input.Good, ('foo', {'foo': b'bar'}, b'bar'),
Input.Bad, ('foo', 1, InvalidFieldError),
)
def test_should_be():
v = dict()
conversion = should_be(v)
_test_conversion(
conversion,
Input.Good, (v, v),
Input.Bad, (dict(), ValueError),
)
def _test_binop_conversion(conversion, *args, **kwargs):
'''binary op provides only prepare_field method'''
good, bad = _prepare_test_args(*args, **kwargs)
good = [
('foo', {'foo': value}, res)
for value, res in good
]
bad = [
('foo', {'foo': value}, err)
for value, err in bad
]
_test_prepare_field(conversion, good=good, bad=bad)
def test_or():
conversion = convert(int) | convert(str)
class NoStr:
def __str__(self):
raise OverflowError()
no_str_conversion = NoStr()
_test_binop_conversion(
conversion,
Input.Good,
('1', 1),
('1.1', '1.1'),
('s', 's'),
(None, 'None'),
Input.Bad,
(no_str_conversion, InvalidFieldError)
)
conversion = conversion | only_if(lambda v: isinstance(v, NoStr), 'is NoStr')
_test_binop_conversion(
conversion,
Input.Good,
('1', 1),
('1.1', '1.1'),
('s', 's'),
(None, 'None'),
(no_str_conversion, no_str_conversion),
)
conversion = provide_missing(42) | int
_test_prepare_field(
conversion,
Input.Good,
('foo', {}, 42),
('foo', {'foo': 13}, 13),
)
def test_and():
conversion = convert(int) >> convert(str)
_test_binop_conversion(
conversion,
Input.Good,
('1', '1'),
(True, '1'),
Input.Bad,
('s', InvalidFieldError),
(None, InvalidFieldError),
)
conversion = conversion >> convert(float)
_test_binop_conversion(
conversion,
Input.Good,
('1', 1.0),
(1.1, 1.0),
Input.Bad,
('s', InvalidFieldError),
(None, InvalidFieldError),
)
conversion = skip_missing >> convert(int)
_test_prepare_field(
conversion,
Input.Good,
('foo', {'foo': '1'}, 1),
('foo', {}, None),
Input.Bad,
('foo', {'foo': 's'}, InvalidFieldError),
)
conversion = provide_missing(42) >> convert(int)
_test_prepare_field(
conversion,
Input.Good,
('foo', {}, 42),
('foo', {'foo': 13}, 13),
Input.Bad,
('foo', {'foo': 'bar'}, InvalidFieldError),
)
def test_empty_record():
class Foo(Record):
pass
foo = Foo()
assert isinstance(foo, Record)
assert list(foo.gen_names()) == []
assert list(foo.gen_fields()) == []
assert as_basic_type(foo) == {}
@as_basic_type.register(Foo)
def _(v):
return v.__class__.__name__
assert as_basic_type(foo) == 'Foo'
pytest.raises(AccessError, setattr, foo, 'bar', 1)
pytest.raises(AttributeError, getattr, foo, 'bar')
foo_factory = record_factory('Foo')
assert isinstance(foo_factory, Factory)
foo2 = foo_factory()
assert isinstance(foo2, Record)
assert list(foo2.gen_fields()) == []
assert as_basic_type(foo2) == {}
def test_minimal_record():
class Foo(Record):
id = expect_type(int)
pytest.raises(RecordError, Foo)
foo = Foo(id=12)
assert list(foo.gen_names()) == ['id',]
assert list(foo.gen_fields()) == [('id', 12)]
assert foo.id == 12
assert foo == {'id': 12}
assert Foo(id=11) != foo
with pytest.raises(AccessError):
foo.id = 13
pytest.fail("Shouldn't allow to change fields")
foo2 = Foo.get_factory()(id=12)
assert as_basic_type(foo2) == {'id': 12}
assert foo2 == foo
class LT24(Record):
id = convert(int) >> only_if(lambda v: v < 24, '< 24')
assert LT24(id=12) == foo2
assert LT24(id=13) != foo2
class Duet(Record):
id = convert(int)
name = convert(str)
assert Duet(id=99, name='foo') == Duet(id=99, name='foo')
assert Duet(id=99, name=1) == Duet(id='99', name='1')
assert Duet(id=99, name='foo') != Duet(id=100, name='foo')
assert Duet(id=99, name='bar') != Duet(id=99, name='foo')
assert foo2 != Duet(id=12, name='')
class WheelerType(Tag):
Bicycle = 'bicycle'
Car = 'car'
Truck = 'truck'
def test_extensible_record():
class Wheeler(ExtensibleRecord):
vehicle_type = convert(WheelerType)
model = expect_type(str)
wheels = expect_type(int)
pytest.raises(RecordError, Wheeler, vehicle_type='table', model='choo', wheels=4)
car_data = dict(vehicle_type='car', model='choo', wheels=4, doors=5)
vehicle = Wheeler(car_data)
assert as_basic_type(vehicle) == car_data
car_dict = dict(vehicle_type=WheelerType.Car, model='choo', wheels=4, doors=5)
assert dict(vehicle) == car_dict
class Car(Record):
vehicle_type = should_be(WheelerType.Car)
model = expect_type(str)
wheels = expect_type(int)
doors = expect_type(int)
car = Car(vehicle)
assert as_basic_type(car) == car_data
class BicycleBreakType(Tag):
Disk = 'disk'
Rim = 'rim'
class Bicycle(Record):
vehicle_type = should_be(WheelerType.Bicycle)
model = expect_type(str)
wheels = expect_type(int)
breaks = convert(BicycleBreakType)
bicycle_data = dict(vehicle_type='bicycle', model='DIY', wheels=2, breaks='disk')
vehicle2 = Wheeler(bicycle_data)
assert vehicle2 != vehicle
pytest.raises(RecordError, Bicycle, vehicle)
bicycle = Bicycle(vehicle2)
assert as_basic_type(bicycle) == bicycle_data
class Truck(Wheeler):
vehicle_type = should_be(WheelerType.Truck)
capacity = expect_type(float)
truck_data = dict(vehicle_type='truck', model='DIY', wheels=8, capacity=20.5, power=400)
truck_wheeler = Wheeler(truck_data)
truck = Truck(truck_wheeler)
assert as_basic_type(truck) == truck_data, \
"Truck is still extensible, should return all passed data"
assert isinstance(truck, Wheeler)
class PowerTruck(Record, Truck):
power = expect_type(int)
def get_truck_data(self):
return (self.capacity, self.power)
power_truck = PowerTruck({**truck, 'breaks': 'disk'})
assert as_basic_type(power_truck) == truck_data, \
"PowerTruck is not extensible, should drop unknown fields"
assert power_truck.get_truck_data() == (20.5, 400)
class BicycleOwner(Record):
name = expect_type(str)
transport = subrecord(Bicycle)
bicycle_owner = BicycleOwner(name='bob', transport=bicycle)
assert as_basic_type(bicycle_owner) == {'name': 'bob', 'transport': bicycle_data}
def test_subrecord():
import ipaddress
class Host(Record):
name = expect_type(str) >> not_empty
connection = record_factory(
'Connection',
ip=convert(ipaddress.ip_address),
mask=expect_type(int),
gateway=convert(ipaddress.ip_address)
)
@as_basic_type.register(ipaddress.IPv4Address)
def ipv4_as_basic_type(v):
return str(v)
connection_data = dict(ip='1.2.3.4', mask=24, gateway='1.2.3.1')
host_data = dict(name='foo', connection=connection_data)
host = Host(host_data)
assert as_basic_type(host) == host_data
pytest.raises(
RecordError,
Host, dict(name='bar', connection={**connection_data, 'gateway': 's'})
)
class Host2(Record):
hostname = expect_type(str)
connection = Host.get_field_converter('connection')
host2 = Host2(hostname='bar', connection=connection_data)
def test_hooks():
identity = lambda *args: args
factory = field_invariant(identity)
invariants = list(factory.gen_hooks('foo'))
assert len(invariants) == 1
identity_invariant = invariants[0]
assert identity_invariant.hook_target == Target.PostInit
obj = types.SimpleNamespace(foo=5)
res = identity_invariant(obj)
assert res == (obj, 'foo', 5)
merge_name_value = lambda _1, name, value: '{}-{}'.format(name, value)
factory2 = factory << field_invariant(merge_name_value)
invariants = list(factory2.gen_hooks('bar'))
assert len(invariants) == 2
assert all(i.hook_target == Target.PostInit for i in invariants)
obj = types.SimpleNamespace(bar=6)
assert [i(obj) for i in invariants] == [(obj, 'bar', 6), 'bar-6']
convert_int = convert(int)
factory_op_int = convert_int << factory
assert isinstance(factory_op_int, HooksFactory)
assert factory.operation == None, \
"Original factory should remain the same"
assert factory_op_int.operation == convert_int
convert_str = convert(str)
factory_op_str = convert_str << factory << factory_op_int
assert isinstance(factory_op_str, HooksFactory)
assert factory.operation == None, \
"Original factory should remain the same"
assert factory_op_int.operation == convert_int, \
"Original factory should remain the same"
assert factory_op_str.operation == convert_str, \
"Factory should use the leftmost operation"
with pytest.raises(TypeError):
factory << convert_int, \
"HooksFactory should be added (after) on top of operation tree"
pytest.raises(ValueError, default_conversion, factory)
with pytest.raises(ValueError):
convert_str >> factory, "HooksFactory can't be used in conversion pipe"
def test_invariant():
import ipaddress
class Host(Record):
ip = convert(ipaddress.ip_address)
mask = expect_type(int)
@property
def network(self):
return ipaddress.ip_network("{}/{}".format(self.ip, self.mask), strict=False)
@as_basic_type.register(ipaddress.IPv4Address)
def ipv4_as_basic_type(v):
return str(v)
h = Host(ip='1.1.1.1', mask=24)
assert as_basic_type(h) == {'ip': '1.1.1.1', 'mask': 24}
def check_gateway(host, _, field_value):
if not field_value in host.network:
raise ValueError()
class NetHost(Host):
gateway = (
convert(ipaddress.ip_address)
<< field_invariant(check_gateway)
)
h = NetHost(ip='1.1.1.1', mask=24, gateway='1.1.1.2')
assert as_basic_type(h) == {'gateway': '1.1.1.2', 'ip': '1.1.1.1', 'mask': 24}
pytest.raises(RecordError, NetHost, ip='1.1.1.1', mask=24, gateway='1.2.1.2')
def test_field_aggregate():
print('TODO')
def test_contract_info():
data = (
(convert(int), "convert to int"),
(convert(str), "convert to str"),
(convert(WheelerType), 'convert to WheelerType("bicycle", "car", "truck")'),
(expect_type(int), "accept only if has type int"),
(not_empty, "accept only if not empty"),
(provide_missing(42) >> convert(int), "provide 42 if missing then convert to int"),
(skip_missing >> convert(int), "skip missing then convert to int"),
(expect_type(int) | expect_type(str), "accept only if has type int or accept only if has type str"),
(
convert(int) >> only_if(lambda v: v > 10, 'value > 10'),
"convert to int then accept only if value > 10"
),
)
for conversion, expected_info in data:
assert get_contract_info(conversion) == expected_info
def test_record_mixin():
class T(RecordMixin):
a = expect_type(int)
b = expect_type(str)
class A(ExtensibleRecord, T):
pass
class B(Record, T):
c = convert(WheelerType)
assert list(A.gen_record_names()) == ['a', 'b']
assert list(B.gen_record_names()) == ['a', 'b', 'c']
a = A(a=1, b='foo', c='car')
b = B(a)
pytest.raises(RecordError, B, c='car')
| 26.956449 | 108 | 0.600199 | from collections import namedtuple
from enum import Enum
from functools import partial
import types
import pytest
from cor.adt.error import (
AccessError,
InvalidFieldError,
MissingFieldError,
RecordError,
)
from cor.adt.hook import (
HooksFactory,
field_invariant,
Target,
)
from cor.adt.record import (
as_basic_type,
ExtensibleRecord,
Factory,
Record,
RecordMixin,
subrecord,
record_factory,
)
from cor.adt.operation import (
anything,
ContractInfo,
convert,
default_conversion,
expect_type,
expect_types,
get_contract_info,
not_empty,
only_if,
provide_missing,
should_be,
skip_missing,
something,
Tag,
)
from cor.util import split_args
class Input(Enum):
Good = 'good'
Bad = 'bad'
def _prepare_test_args(*data, good=list(), bad=list()):
args, kwargs = split_args(Input, *data)
assert not args
good = list(good) + kwargs.get('good', [])
bad = list(bad) + kwargs.get('bad', [])
return good + kwargs.get('good', []), bad + kwargs.get('bad', []),
def _test_good_bad(info, convert, good, bad):
for input_data, expected in good:
test_info = '{}: Correct input: {}'.format(info, input_data)
res = convert(*input_data)
assert res == expected, test_info
for input_data, err in bad:
test_info = '{}: Should cause exception: {}'.format(info, input_data)
with pytest.raises(err):
convert(*input_data)
pytest.fail(test_info)
def _test_conversion(conversion, *args, **kwargs):
good, bad = _prepare_test_args(*args, **kwargs)
good = [([value], res) for value, res in good]
bad = [([value], res) for value, res in bad]
_test_good_bad(conversion.info, conversion.convert, good, bad)
def _test_prepare_field(conversion, *args, **kwargs):
good, bad = _prepare_test_args(*args, **kwargs)
good = [([name, value], res) for name, value, res in good]
bad = [([name, value], res) for name, value, res in bad]
_test_good_bad(conversion.info, conversion.prepare_field, good, bad)
def test_convert():
conversion = convert(int)
_test_conversion(
conversion,
Input.Good, ('1', 1), (2, 2),
Input.Bad, (None, TypeError), ('s', ValueError),
)
def test_provide_missing():
_test_conversion(
provide_missing('foo'),
Input.Good, (13, 13), ('', ''), (None, 'foo'),
)
_test_conversion(
provide_missing({'a': 1, 'b': 2}),
Input.Good, (13, 13), ('', ''), (None, {'a': 1, 'b': 2}),
)
def test_only_if():
conversion = only_if(lambda x: x < 10, 'less than 10')
_test_conversion(
conversion,
Input.Good, (9, 9),
Input.Bad, (10, ValueError)
)
_test_prepare_field(
conversion,
Input.Good,
('foo', {'foo': 9}, 9),
Input.Bad,
('foo', {'foo': 10}, InvalidFieldError),
('foo', {'bar': 10}, MissingFieldError),
)
def test_skip_missing():
conversion = skip_missing
_test_prepare_field(
conversion,
Input.Good,
('foo', {}, None),
('foo', {'bar': 1, 'foo': 2}, 2),
)
def test_something():
conversion = something
_test_prepare_field(
conversion,
Input.Good,
('foo', {'foo': 1}, 1),
('foo', {'foo': '1'}, '1'),
Input.Bad,
('foo', None, TypeError),
('foo', {}, KeyError),
('foo', {'bar': 1}, KeyError),
)
def test_anything():
conversion = anything
_test_prepare_field(
conversion,
Input.Good,
('foo', {}, None),
('foo', {'foo': 1}, 1),
('foo', {'foo': '1'}, '1'),
Input.Bad,
('foo', None, AttributeError),
)
def test_expect_types():
conversion = expect_types(str, float)
_test_conversion(
conversion,
good=(
(v, v) for v in
['', 'foo', 1.1]
),
bad=(
(v, TypeError) for v in
[b'', 1, None]
)
)
conversion = expect_type(bytes)
_test_conversion(conversion, Input.Good, (b'bar', b'bar'))
_test_prepare_field(
conversion,
Input.Good, ('foo', {'foo': b'bar'}, b'bar'),
Input.Bad, ('foo', 1, InvalidFieldError),
)
def test_should_be():
v = dict()
conversion = should_be(v)
_test_conversion(
conversion,
Input.Good, (v, v),
Input.Bad, (dict(), ValueError),
)
def _test_binop_conversion(conversion, *args, **kwargs):
good, bad = _prepare_test_args(*args, **kwargs)
good = [
('foo', {'foo': value}, res)
for value, res in good
]
bad = [
('foo', {'foo': value}, err)
for value, err in bad
]
_test_prepare_field(conversion, good=good, bad=bad)
def test_or():
conversion = convert(int) | convert(str)
class NoStr:
def __str__(self):
raise OverflowError()
no_str_conversion = NoStr()
_test_binop_conversion(
conversion,
Input.Good,
('1', 1),
('1.1', '1.1'),
('s', 's'),
(None, 'None'),
Input.Bad,
(no_str_conversion, InvalidFieldError)
)
conversion = conversion | only_if(lambda v: isinstance(v, NoStr), 'is NoStr')
_test_binop_conversion(
conversion,
Input.Good,
('1', 1),
('1.1', '1.1'),
('s', 's'),
(None, 'None'),
(no_str_conversion, no_str_conversion),
)
conversion = provide_missing(42) | int
_test_prepare_field(
conversion,
Input.Good,
('foo', {}, 42),
('foo', {'foo': 13}, 13),
)
def test_and():
conversion = convert(int) >> convert(str)
_test_binop_conversion(
conversion,
Input.Good,
('1', '1'),
(True, '1'),
Input.Bad,
('s', InvalidFieldError),
(None, InvalidFieldError),
)
conversion = conversion >> convert(float)
_test_binop_conversion(
conversion,
Input.Good,
('1', 1.0),
(1.1, 1.0),
Input.Bad,
('s', InvalidFieldError),
(None, InvalidFieldError),
)
conversion = skip_missing >> convert(int)
_test_prepare_field(
conversion,
Input.Good,
('foo', {'foo': '1'}, 1),
('foo', {}, None),
Input.Bad,
('foo', {'foo': 's'}, InvalidFieldError),
)
conversion = provide_missing(42) >> convert(int)
_test_prepare_field(
conversion,
Input.Good,
('foo', {}, 42),
('foo', {'foo': 13}, 13),
Input.Bad,
('foo', {'foo': 'bar'}, InvalidFieldError),
)
def test_empty_record():
class Foo(Record):
pass
foo = Foo()
assert isinstance(foo, Record)
assert list(foo.gen_names()) == []
assert list(foo.gen_fields()) == []
assert as_basic_type(foo) == {}
@as_basic_type.register(Foo)
def _(v):
return v.__class__.__name__
assert as_basic_type(foo) == 'Foo'
pytest.raises(AccessError, setattr, foo, 'bar', 1)
pytest.raises(AttributeError, getattr, foo, 'bar')
foo_factory = record_factory('Foo')
assert isinstance(foo_factory, Factory)
foo2 = foo_factory()
assert isinstance(foo2, Record)
assert list(foo2.gen_fields()) == []
assert as_basic_type(foo2) == {}
def test_minimal_record():
class Foo(Record):
id = expect_type(int)
pytest.raises(RecordError, Foo)
foo = Foo(id=12)
assert list(foo.gen_names()) == ['id',]
assert list(foo.gen_fields()) == [('id', 12)]
assert foo.id == 12
assert foo == {'id': 12}
assert Foo(id=11) != foo
with pytest.raises(AccessError):
foo.id = 13
pytest.fail("Shouldn't allow to change fields")
foo2 = Foo.get_factory()(id=12)
assert as_basic_type(foo2) == {'id': 12}
assert foo2 == foo
class LT24(Record):
id = convert(int) >> only_if(lambda v: v < 24, '< 24')
assert LT24(id=12) == foo2
assert LT24(id=13) != foo2
class Duet(Record):
id = convert(int)
name = convert(str)
assert Duet(id=99, name='foo') == Duet(id=99, name='foo')
assert Duet(id=99, name=1) == Duet(id='99', name='1')
assert Duet(id=99, name='foo') != Duet(id=100, name='foo')
assert Duet(id=99, name='bar') != Duet(id=99, name='foo')
assert foo2 != Duet(id=12, name='')
class WheelerType(Tag):
Bicycle = 'bicycle'
Car = 'car'
Truck = 'truck'
def test_extensible_record():
class Wheeler(ExtensibleRecord):
vehicle_type = convert(WheelerType)
model = expect_type(str)
wheels = expect_type(int)
pytest.raises(RecordError, Wheeler, vehicle_type='table', model='choo', wheels=4)
car_data = dict(vehicle_type='car', model='choo', wheels=4, doors=5)
vehicle = Wheeler(car_data)
assert as_basic_type(vehicle) == car_data
car_dict = dict(vehicle_type=WheelerType.Car, model='choo', wheels=4, doors=5)
assert dict(vehicle) == car_dict
class Car(Record):
vehicle_type = should_be(WheelerType.Car)
model = expect_type(str)
wheels = expect_type(int)
doors = expect_type(int)
car = Car(vehicle)
assert as_basic_type(car) == car_data
class BicycleBreakType(Tag):
Disk = 'disk'
Rim = 'rim'
class Bicycle(Record):
vehicle_type = should_be(WheelerType.Bicycle)
model = expect_type(str)
wheels = expect_type(int)
breaks = convert(BicycleBreakType)
bicycle_data = dict(vehicle_type='bicycle', model='DIY', wheels=2, breaks='disk')
vehicle2 = Wheeler(bicycle_data)
assert vehicle2 != vehicle
pytest.raises(RecordError, Bicycle, vehicle)
bicycle = Bicycle(vehicle2)
assert as_basic_type(bicycle) == bicycle_data
class Truck(Wheeler):
vehicle_type = should_be(WheelerType.Truck)
capacity = expect_type(float)
truck_data = dict(vehicle_type='truck', model='DIY', wheels=8, capacity=20.5, power=400)
truck_wheeler = Wheeler(truck_data)
truck = Truck(truck_wheeler)
assert as_basic_type(truck) == truck_data, \
"Truck is still extensible, should return all passed data"
assert isinstance(truck, Wheeler)
class PowerTruck(Record, Truck):
power = expect_type(int)
def get_truck_data(self):
return (self.capacity, self.power)
power_truck = PowerTruck({**truck, 'breaks': 'disk'})
assert as_basic_type(power_truck) == truck_data, \
"PowerTruck is not extensible, should drop unknown fields"
assert power_truck.get_truck_data() == (20.5, 400)
class BicycleOwner(Record):
name = expect_type(str)
transport = subrecord(Bicycle)
bicycle_owner = BicycleOwner(name='bob', transport=bicycle)
assert as_basic_type(bicycle_owner) == {'name': 'bob', 'transport': bicycle_data}
def test_subrecord():
import ipaddress
class Host(Record):
name = expect_type(str) >> not_empty
connection = record_factory(
'Connection',
ip=convert(ipaddress.ip_address),
mask=expect_type(int),
gateway=convert(ipaddress.ip_address)
)
@as_basic_type.register(ipaddress.IPv4Address)
def ipv4_as_basic_type(v):
return str(v)
connection_data = dict(ip='1.2.3.4', mask=24, gateway='1.2.3.1')
host_data = dict(name='foo', connection=connection_data)
host = Host(host_data)
assert as_basic_type(host) == host_data
pytest.raises(
RecordError,
Host, dict(name='bar', connection={**connection_data, 'gateway': 's'})
)
class Host2(Record):
hostname = expect_type(str)
connection = Host.get_field_converter('connection')
host2 = Host2(hostname='bar', connection=connection_data)
def test_hooks():
identity = lambda *args: args
factory = field_invariant(identity)
invariants = list(factory.gen_hooks('foo'))
assert len(invariants) == 1
identity_invariant = invariants[0]
assert identity_invariant.hook_target == Target.PostInit
obj = types.SimpleNamespace(foo=5)
res = identity_invariant(obj)
assert res == (obj, 'foo', 5)
merge_name_value = lambda _1, name, value: '{}-{}'.format(name, value)
factory2 = factory << field_invariant(merge_name_value)
invariants = list(factory2.gen_hooks('bar'))
assert len(invariants) == 2
assert all(i.hook_target == Target.PostInit for i in invariants)
obj = types.SimpleNamespace(bar=6)
assert [i(obj) for i in invariants] == [(obj, 'bar', 6), 'bar-6']
convert_int = convert(int)
factory_op_int = convert_int << factory
assert isinstance(factory_op_int, HooksFactory)
assert factory.operation == None, \
"Original factory should remain the same"
assert factory_op_int.operation == convert_int
convert_str = convert(str)
factory_op_str = convert_str << factory << factory_op_int
assert isinstance(factory_op_str, HooksFactory)
assert factory.operation == None, \
"Original factory should remain the same"
assert factory_op_int.operation == convert_int, \
"Original factory should remain the same"
assert factory_op_str.operation == convert_str, \
"Factory should use the leftmost operation"
with pytest.raises(TypeError):
factory << convert_int, \
"HooksFactory should be added (after) on top of operation tree"
pytest.raises(ValueError, default_conversion, factory)
with pytest.raises(ValueError):
convert_str >> factory, "HooksFactory can't be used in conversion pipe"
def test_invariant():
import ipaddress
class Host(Record):
ip = convert(ipaddress.ip_address)
mask = expect_type(int)
@property
def network(self):
return ipaddress.ip_network("{}/{}".format(self.ip, self.mask), strict=False)
@as_basic_type.register(ipaddress.IPv4Address)
def ipv4_as_basic_type(v):
return str(v)
h = Host(ip='1.1.1.1', mask=24)
assert as_basic_type(h) == {'ip': '1.1.1.1', 'mask': 24}
def check_gateway(host, _, field_value):
if not field_value in host.network:
raise ValueError()
class NetHost(Host):
gateway = (
convert(ipaddress.ip_address)
<< field_invariant(check_gateway)
)
h = NetHost(ip='1.1.1.1', mask=24, gateway='1.1.1.2')
assert as_basic_type(h) == {'gateway': '1.1.1.2', 'ip': '1.1.1.1', 'mask': 24}
pytest.raises(RecordError, NetHost, ip='1.1.1.1', mask=24, gateway='1.2.1.2')
def test_field_aggregate():
print('TODO')
def test_contract_info():
data = (
(convert(int), "convert to int"),
(convert(str), "convert to str"),
(convert(WheelerType), 'convert to WheelerType("bicycle", "car", "truck")'),
(expect_type(int), "accept only if has type int"),
(not_empty, "accept only if not empty"),
(provide_missing(42) >> convert(int), "provide 42 if missing then convert to int"),
(skip_missing >> convert(int), "skip missing then convert to int"),
(expect_type(int) | expect_type(str), "accept only if has type int or accept only if has type str"),
(
convert(int) >> only_if(lambda v: v > 10, 'value > 10'),
"convert to int then accept only if value > 10"
),
)
for conversion, expected_info in data:
assert get_contract_info(conversion) == expected_info
def test_record_mixin():
class T(RecordMixin):
a = expect_type(int)
b = expect_type(str)
class A(ExtensibleRecord, T):
pass
class B(Record, T):
c = convert(WheelerType)
assert list(A.gen_record_names()) == ['a', 'b']
assert list(B.gen_record_names()) == ['a', 'b', 'c']
a = A(a=1, b='foo', c='car')
b = B(a)
pytest.raises(RecordError, B, c='car')
| true | true |
1c3f3dba7e5615f38b36309c6c6af4695a82f1c3 | 222 | py | Python | python_class/mylist.py | wasit7/cs402 | 5a0f945eb7c9944edc0a423d5c37bc4ef867b950 | [
"MIT"
] | null | null | null | python_class/mylist.py | wasit7/cs402 | 5a0f945eb7c9944edc0a423d5c37bc4ef867b950 | [
"MIT"
] | null | null | null | python_class/mylist.py | wasit7/cs402 | 5a0f945eb7c9944edc0a423d5c37bc4ef867b950 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 19 11:03:23 2015
@author: Wasit
"""
mylist=[]
mylist.append(7)
mylist.append("my string")
mylist.append('my string again')
another_list=[1,2,3]
mylist.append(another_list) | 14.8 | 35 | 0.68018 |
mylist=[]
mylist.append(7)
mylist.append("my string")
mylist.append('my string again')
another_list=[1,2,3]
mylist.append(another_list) | true | true |
1c3f3eaad60aae87f0b648d03b91a17e4f0ba778 | 82,340 | py | Python | sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/operations/_volumes_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | 2 | 2019-08-23T21:14:00.000Z | 2021-09-07T18:32:34.000Z | sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/operations/_volumes_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | null | null | null | sdk/netapp/azure-mgmt-netapp/azure/mgmt/netapp/operations/_volumes_operations.py | mohamedshabanofficial/azure-sdk-for-python | 81c585f310cd2ec23d2ad145173958914a075a58 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VolumesOperations(object):
"""VolumesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.netapp.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
resource_group_name, # type: str
account_name, # type: str
pool_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VolumeList"]
"""Describe all volumes.
List all volumes within the capacity pool.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
:param pool_name: The name of the capacity pool.
:type pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VolumeList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.netapp.models.VolumeList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VolumeList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VolumeList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes'} # type: ignore
def get(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> "_models.Volume"
    """Describe a volume.

    Get the details of the specified volume.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param account_name: The name of the NetApp account.
    :type account_name: str
    :param pool_name: The name of the capacity pool.
    :type pool_name: str
    :param volume_name: The name of the volume.
    :type volume_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Volume, or the result of cls(response)
    :rtype: ~azure.mgmt.netapp.models.Volume
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Volume"]
    # Default HTTP-status -> exception mapping, merged with any caller override.
    status_errors = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    status_errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    accept = "application/json"

    # Expand the URL template with the validated path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    request_url = self._client.format_url(self.get.metadata['url'], **path_args)  # type: ignore

    # Query string and request headers.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(request_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=status_errors)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    volume = self._deserialize('Volume', pipeline_response)
    if cls:
        return cls(pipeline_response, volume, {})
    return volume
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    body,  # type: "_models.Volume"
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.Volume"]
    """Send the initial PUT of the create-or-update long-running operation.

    Returns the deserialized Volume for a 200/201 response and ``None``
    for a 202 (accepted, result pending); raises on any other status.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.Volume"]]
    # Default HTTP-status -> exception mapping, merged with any caller override.
    status_errors = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    status_errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the validated path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    request_url = self._client.format_url(self._create_or_update_initial.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.put(request_url, query, headers, content=self._serialize.body(body, 'Volume'))
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200, 201, 202]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=status_errors)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    # 202 carries no body; only 200 and 201 are deserialized.
    deserialized = None
    if http_resp.status_code in (200, 201):
        deserialized = self._deserialize('Volume', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    body,  # type: "_models.Volume"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.Volume"]
    """Create or Update a volume.

    Create or update the specified volume within the capacity pool.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param account_name: The name of the NetApp account.
    :type account_name: str
    :param pool_name: The name of the capacity pool.
    :type pool_name: str
    :param volume_name: The name of the volume.
    :type volume_name: str
    :param body: Volume object supplied in the body of the operation.
    :type body: ~azure.mgmt.netapp.models.Volume
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either Volume or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.netapp.models.Volume]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Volume"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # No continuation token: issue the initial PUT now.  With a token, the
    # poller is instead rebuilt from the saved state further below.
    if cont_token is None:
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            pool_name=pool_name,
            volume_name=volume_name,
            body=body,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )
    # Consumed by the initial call; remove so polling requests don't resend them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the terminal polling response into a Volume
        # (or hand both through to the caller-supplied cls).
        deserialized = self._deserialize('Volume', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    # Select the polling strategy: default ARM polling, no polling, or custom.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
def _update_initial(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    body,  # type: "_models.VolumePatch"
    **kwargs  # type: Any
):
    # type: (...) -> Optional["_models.Volume"]
    """Send the initial PATCH of the update long-running operation.

    Returns the deserialized Volume for a 200 response and ``None`` for a
    202 (accepted, result pending); raises on any other status.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.Volume"]]
    # Default HTTP-status -> exception mapping, merged with any caller override.
    status_errors = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    status_errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template with the validated path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    request_url = self._client.format_url(self._update_initial.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.patch(request_url, query, headers, content=self._serialize.body(body, 'VolumePatch'))
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200, 202]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=status_errors)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    # 202 carries no body; only a 200 response is deserialized.
    deserialized = None
    if http_resp.status_code == 200:
        deserialized = self._deserialize('Volume', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
def begin_update(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    body,  # type: "_models.VolumePatch"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller["_models.Volume"]
    """Update a volume.

    Patch the specified volume.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param account_name: The name of the NetApp account.
    :type account_name: str
    :param pool_name: The name of the capacity pool.
    :type pool_name: str
    :param volume_name: The name of the volume.
    :type volume_name: str
    :param body: Volume object supplied in the body of the operation.
    :type body: ~azure.mgmt.netapp.models.VolumePatch
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either Volume or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.netapp.models.Volume]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Volume"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # No continuation token: issue the initial PATCH now.  With a token, the
    # poller is instead rebuilt from the saved state further below.
    if cont_token is None:
        raw_result = self._update_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            pool_name=pool_name,
            volume_name=volume_name,
            body=body,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )
    # Consumed by the initial call; remove so polling requests don't resend them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the terminal polling response into a Volume
        # (or hand both through to the caller-supplied cls).
        deserialized = self._deserialize('Volume', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    # Select the polling strategy: default ARM polling, no polling, or custom.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
def _delete_initial(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the initial DELETE of the delete long-running operation.

    Expects 202 (accepted) or 204 (already gone); raises on anything else.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default HTTP-status -> exception mapping, merged with any caller override.
    status_errors = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    status_errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"

    # Expand the URL template with the validated path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    request_url = self._client.format_url(self._delete_initial.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {}  # type: Dict[str, Any]

    request = self._client.delete(request_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [202, 204]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=status_errors)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
def begin_delete(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Delete a volume.

    Delete the specified volume.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param account_name: The name of the NetApp account.
    :type account_name: str
    :param pool_name: The name of the capacity pool.
    :type pool_name: str
    :param volume_name: The name of the volume.
    :type volume_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # No continuation token: issue the initial DELETE now.  With a token, the
    # poller is instead rebuilt from the saved state further below.
    if cont_token is None:
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            pool_name=pool_name,
            volume_name=volume_name,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )
    # Consumed by the initial call; remove so polling requests don't resend them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Delete returns no body; only invoke the caller-supplied cls hook.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    # Select the polling strategy: default ARM polling, no polling, or custom.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
def _revert_initial(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    body,  # type: "_models.VolumeRevert"
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the initial POST of the revert-to-snapshot long-running operation.

    Expects 200 or 202; raises on any other status.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default HTTP-status -> exception mapping, merged with any caller override.
    status_errors = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    status_errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    content_type = kwargs.pop("content_type", "application/json")

    # Expand the URL template with the validated path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    request_url = self._client.format_url(self._revert_initial.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Content-Type': self._serialize.header("content_type", content_type, 'str')}  # type: Dict[str, Any]

    request = self._client.post(request_url, query, headers, content=self._serialize.body(body, 'VolumeRevert'))
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200, 202]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=status_errors)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_revert_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/revert'}  # type: ignore
def begin_revert(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    body,  # type: "_models.VolumeRevert"
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Revert a volume to one of its snapshots.

    Revert a volume to the snapshot specified in the body.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param account_name: The name of the NetApp account.
    :type account_name: str
    :param pool_name: The name of the capacity pool.
    :type pool_name: str
    :param volume_name: The name of the volume.
    :type volume_name: str
    :param body: Object for snapshot to revert supplied in the body of the operation.
    :type body: ~azure.mgmt.netapp.models.VolumeRevert
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # No continuation token: issue the initial POST now.  With a token, the
    # poller is instead rebuilt from the saved state further below.
    if cont_token is None:
        raw_result = self._revert_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            pool_name=pool_name,
            volume_name=volume_name,
            body=body,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )
    # Consumed by the initial call; remove so polling requests don't resend them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Revert returns no body; only invoke the caller-supplied cls hook.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    # Select the polling strategy: default ARM polling, no polling, or custom.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_revert.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/revert'}  # type: ignore
def _break_replication_initial(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    body=None,  # type: Optional["_models.BreakReplicationRequest"]
    **kwargs  # type: Any
):
    # type: (...) -> None
    """Send the initial POST of the break-replication long-running operation.

    The request body is optional; when omitted, ``None`` is posted.
    Expects 200 or 202; raises on any other status.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default HTTP-status -> exception mapping, merged with any caller override.
    status_errors = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    status_errors.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    content_type = kwargs.pop("content_type", "application/json")

    # Expand the URL template with the validated path segments.
    path_args = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    request_url = self._client.format_url(self._break_replication_initial.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Content-Type': self._serialize.header("content_type", content_type, 'str')}  # type: Dict[str, Any]

    # Serialize the optional force-break payload only when one was supplied.
    payload = self._serialize.body(body, 'BreakReplicationRequest') if body is not None else None
    request = self._client.post(request_url, query, headers, content=payload)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200, 202]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=status_errors)
        raise HttpResponseError(response=http_resp, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_break_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/breakReplication'}  # type: ignore
def begin_break_replication(
    self,
    resource_group_name,  # type: str
    account_name,  # type: str
    pool_name,  # type: str
    volume_name,  # type: str
    body=None,  # type: Optional["_models.BreakReplicationRequest"]
    **kwargs  # type: Any
):
    # type: (...) -> LROPoller[None]
    """Break volume replication.

    Break the replication connection on the destination volume.

    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param account_name: The name of the NetApp account.
    :type account_name: str
    :param pool_name: The name of the capacity pool.
    :type pool_name: str
    :param volume_name: The name of the volume.
    :type volume_name: str
    :param body: Optional body to force break the replication.
    :type body: ~azure.mgmt.netapp.models.BreakReplicationRequest
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: True for ARMPolling, False for no polling, or a
     polling object for personal polling strategy
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
    # No continuation token: issue the initial POST now.  With a token, the
    # poller is instead rebuilt from the saved state further below.
    if cont_token is None:
        raw_result = self._break_replication_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            pool_name=pool_name,
            volume_name=volume_name,
            body=body,
            cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
            **kwargs
        )
    # Consumed by the initial call; remove so polling requests don't resend them.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Break-replication returns no body; only invoke the caller-supplied cls hook.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    # Select the polling strategy: default ARM polling, no polling, or custom.
    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_break_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/breakReplication'}  # type: ignore
def replication_status(
self,
resource_group_name, # type: str
account_name, # type: str
pool_name, # type: str
volume_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ReplicationStatus"
"""Get volume replication status.
Get the status of the replication.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param account_name: The name of the NetApp account.
:type account_name: str
:param pool_name: The name of the capacity pool.
:type pool_name: str
:param volume_name: The name of the volume.
:type volume_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ReplicationStatus, or the result of cls(response)
:rtype: ~azure.mgmt.netapp.models.ReplicationStatus
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ReplicationStatus"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
accept = "application/json"
# Construct URL
url = self.replication_status.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ReplicationStatus', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
replication_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/replicationStatus'} # type: ignore
def _resync_replication_initial(
self,
resource_group_name, # type: str
account_name, # type: str
pool_name, # type: str
volume_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
# Construct URL
url = self._resync_replication_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_resync_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/resyncReplication'} # type: ignore
    def begin_resync_replication(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Resync volume replication.

        Resync the connection on the destination volume. If the operation is ran on the source volume
        it will reverse-resync the connection and sync from destination to source.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial POST and keep the raw pipeline
            # response (cls identity lambda) so the poller can track the LRO.
            raw_result = self._resync_replication_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                volume_name=volume_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs apply only to the initial request; drop them before
        # the remaining kwargs are forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Operation has no response body; only honor a custom cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_resync_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/resyncReplication'}  # type: ignore
def _delete_replication_initial(
self,
resource_group_name, # type: str
account_name, # type: str
pool_name, # type: str
volume_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
# Construct URL
url = self._delete_replication_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/deleteReplication'} # type: ignore
    def begin_delete_replication(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Delete volume replication.

        Delete the replication connection on the destination volume, and send release to the source
        replication.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial POST and keep the raw pipeline
            # response (cls identity lambda) so the poller can track the LRO.
            raw_result = self._delete_replication_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                volume_name=volume_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs apply only to the initial request; drop them before
        # the remaining kwargs are forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Operation has no response body; only honor a custom cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/deleteReplication'}  # type: ignore
def _authorize_replication_initial(
self,
resource_group_name, # type: str
account_name, # type: str
pool_name, # type: str
volume_name, # type: str
body, # type: "_models.AuthorizeRequest"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._authorize_replication_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'AuthorizeRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_authorize_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/authorizeReplication'} # type: ignore
    def begin_authorize_replication(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        body,  # type: "_models.AuthorizeRequest"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Authorize source volume replication.

        Authorize the replication connection on the source volume.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :param body: Authorize request object supplied in the body of the operation.
        :type body: ~azure.mgmt.netapp.models.AuthorizeRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial POST and keep the raw pipeline
            # response (cls identity lambda) so the poller can track the LRO.
            raw_result = self._authorize_replication_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                volume_name=volume_name,
                body=body,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs apply only to the initial request; drop them before
        # the remaining kwargs are forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Operation has no response body; only honor a custom cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_authorize_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/authorizeReplication'}  # type: ignore
def _re_initialize_replication_initial(
self,
resource_group_name, # type: str
account_name, # type: str
pool_name, # type: str
volume_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
# Construct URL
url = self._re_initialize_replication_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_re_initialize_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/reinitializeReplication'} # type: ignore
    def begin_re_initialize_replication(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """ReInitialize volume replication.

        Re-Initializes the replication connection on the destination volume.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial POST and keep the raw pipeline
            # response (cls identity lambda) so the poller can track the LRO.
            raw_result = self._re_initialize_replication_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                volume_name=volume_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs apply only to the initial request; drop them before
        # the remaining kwargs are forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Operation has no response body; only honor a custom cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_re_initialize_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/reinitializeReplication'}  # type: ignore
def _pool_change_initial(
self,
resource_group_name, # type: str
account_name, # type: str
pool_name, # type: str
volume_name, # type: str
body, # type: "_models.PoolChangeRequest"
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-11-01"
content_type = kwargs.pop("content_type", "application/json")
# Construct URL
url = self._pool_change_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'PoolChangeRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_pool_change_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/poolChange'} # type: ignore
    def begin_pool_change(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        body,  # type: "_models.PoolChangeRequest"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Change pool for volume.

        Moves volume to another pool.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :param body: Move volume to the pool supplied in the body of the operation.
        :type body: ~azure.mgmt.netapp.models.PoolChangeRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: fire the initial POST and keep the raw pipeline
            # response (cls identity lambda) so the poller can track the LRO.
            raw_result = self._pool_change_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                volume_name=volume_name,
                body=body,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs apply only to the initial request; drop them before
        # the remaining kwargs are forwarded to the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Operation has no response body; only honor a custom cls callback.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_pool_change.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/poolChange'}  # type: ignore
# (removed stray dataset-statistics artifact that was not valid Python)
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VolumesOperations(object):
models = _models
    def __init__(self, client, config, serializer, deserializer):
        """Initialize the operations class.

        :param client: Client for issuing REST requests.
        :param config: Configuration of the service client (holds e.g. subscription_id, polling_interval).
        :param serializer: An object model serializer.
        :param deserializer: An object model deserializer.
        """
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        **kwargs  # type: Any
    ):
        """Describe all volumes.

        List all volumes within the capacity pool.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VolumeList or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.netapp.models.VolumeList]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # First page: construct URL from the operation template.
                url = self.list.metadata['url']
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'accountName': self._serialize.url("account_name", account_name, 'str'),
                    'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Subsequent pages: the service-provided next link is already complete.
                url = next_link
                query_parameters = {}
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Deserialize one page and hand back (next link, items iterator).
            deserialized = self._deserialize('VolumeList', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        **kwargs  # type: Any
    ):
        """Describe a volume.

        Get the details of the specified volume.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Volume, or the result of cls(response)
        :rtype: ~azure.mgmt.netapp.models.Volume
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('Volume', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        body,  # type: "_models.Volume"
        **kwargs  # type: Any
    ):
        """Issue the initial PUT of the create-or-update long-running operation.

        Internal helper for :meth:`begin_create_or_update`; returns the
        deserialized ``Volume`` when the service responds with a body
        (200/201), or ``None`` on 202.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Construct and send request
        body_content_kwargs = {}
        body_content = self._serialize.body(body, 'Volume')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Volume', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('Volume', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        body,  # type: "_models.Volume"
        **kwargs  # type: Any
    ):
        """Create or Update a volume.

        Create or update the specified volume within the capacity pool.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :param body: Volume object supplied in the body of the operation.
        :type body: ~azure.mgmt.netapp.models.Volume
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :return: An instance of LROPoller that returns either Volume or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.netapp.models.Volume]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # Fire the initial PUT; the identity lambda preserves the raw
            # pipeline response so the poller can read the LRO headers.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                volume_name=volume_name,
                body=body,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not reach the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once the LRO completes.
            deserialized = self._deserialize('Volume', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
    def _update_initial(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        body,  # type: "_models.VolumePatch"
        **kwargs  # type: Any
    ):
        """Issue the initial PATCH of the update long-running operation.

        Internal helper for :meth:`begin_update`; returns the deserialized
        ``Volume`` on 200, ``None`` on 202.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._update_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Construct and send request
        body_content_kwargs = {}
        body_content = self._serialize.body(body, 'VolumePatch')
        body_content_kwargs['content'] = body_content
        request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('Volume', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
    def begin_update(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        body,  # type: "_models.VolumePatch"
        **kwargs  # type: Any
    ):
        """Update a volume.

        Patch the specified volume.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :param body: Volume object supplied in the body of the operation.
        :type body: ~azure.mgmt.netapp.models.VolumePatch
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :return: An instance of LROPoller that returns either Volume or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.netapp.models.Volume]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # Fire the initial PATCH; keep the raw response for the poller.
            raw_result = self._update_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                volume_name=volume_name,
                body=body,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not reach the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response once the LRO completes.
            deserialized = self._deserialize('Volume', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        **kwargs  # type: Any
    ):
        """Issue the initial DELETE of the delete long-running operation.

        Internal helper for :meth:`begin_delete`; returns ``None`` (the
        service replies 202/204 with no body).
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        # Construct URL
        url = self._delete_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        **kwargs  # type: Any
    ):
        """Delete a volume.

        Delete the specified volume.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # Fire the initial DELETE; keep the raw response for the poller.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                volume_name=volume_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not reach the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # No body to deserialize; only invoke the caller-supplied hook.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}'}  # type: ignore
    def _revert_initial(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        body,  # type: "_models.VolumeRevert"
        **kwargs  # type: Any
    ):
        """Issue the initial POST of the revert long-running operation.

        Internal helper for :meth:`begin_revert`; the service replies
        200/202 with no body.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self._revert_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        # Construct and send request
        body_content_kwargs = {}
        body_content = self._serialize.body(body, 'VolumeRevert')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _revert_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/revert'}  # type: ignore
    def begin_revert(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        body,  # type: "_models.VolumeRevert"
        **kwargs  # type: Any
    ):
        """Revert a volume to one of its snapshots.

        Revert a volume to the snapshot specified in the body.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :param body: Object for snapshot to revert supplied in the body of the operation.
        :type body: ~azure.mgmt.netapp.models.VolumeRevert
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # Fire the initial POST; keep the raw response for the poller.
            raw_result = self._revert_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                volume_name=volume_name,
                body=body,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not reach the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # No body to deserialize; only invoke the caller-supplied hook.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_revert.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/revert'}  # type: ignore
    def _break_replication_initial(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        body=None,  # type: Optional["_models.BreakReplicationRequest"]
        **kwargs  # type: Any
    ):
        """Issue the initial POST of the break-replication long-running operation.

        Internal helper for :meth:`begin_break_replication`; ``body`` is
        optional and the service replies 200/202 with no body.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        content_type = kwargs.pop("content_type", "application/json")
        # Construct URL
        url = self._break_replication_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        # Construct and send request; the request body is optional.
        body_content_kwargs = {}
        if body is not None:
            body_content = self._serialize.body(body, 'BreakReplicationRequest')
        else:
            body_content = None
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _break_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/breakReplication'}  # type: ignore
    def begin_break_replication(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        body=None,  # type: Optional["_models.BreakReplicationRequest"]
        **kwargs  # type: Any
    ):
        """Break volume replication.

        Break the replication connection on the destination volume.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :param body: Optional body to force break the replication.
        :type body: ~azure.mgmt.netapp.models.BreakReplicationRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # Fire the initial POST; keep the raw response for the poller.
            raw_result = self._break_replication_initial(
                resource_group_name=resource_group_name,
                account_name=account_name,
                pool_name=pool_name,
                volume_name=volume_name,
                body=body,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # Consumed by the initial call; must not reach the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # No body to deserialize; only invoke the caller-supplied hook.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_break_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/breakReplication'}  # type: ignore
    def replication_status(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        **kwargs  # type: Any
    ):
        """Get volume replication status.

        Get the status of the replication for the specified volume.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param account_name: The name of the NetApp account.
        :type account_name: str
        :param pool_name: The name of the capacity pool.
        :type pool_name: str
        :param volume_name: The name of the volume.
        :type volume_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ReplicationStatus, or the result of cls(response)
        :rtype: ~azure.mgmt.netapp.models.ReplicationStatus
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        accept = "application/json"
        # Construct URL
        url = self.replication_status.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ReplicationStatus', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    replication_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/replicationStatus'}  # type: ignore
    def _resync_replication_initial(
        self,
        resource_group_name,  # type: str
        account_name,  # type: str
        pool_name,  # type: str
        volume_name,  # type: str
        **kwargs  # type: Any
    ):
        """Issue the initial POST of the resync-replication long-running operation.

        Internal helper for :meth:`begin_resync_replication`; the service
        replies 200/202 with no body.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-11-01"
        # Construct URL
        url = self._resync_replication_initial.metadata['url']
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
            'accountName': self._serialize.url("account_name", account_name, 'str'),
            'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
            'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}
        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _resync_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/resyncReplication'}  # type: ignore
def begin_resync_replication(
    self,
    resource_group_name,
    account_name,
    pool_name,
    volume_name,
    **kwargs
):
    """Start the ``resyncReplication`` long-running operation.

    Returns an ``LROPoller``.  Supports the standard LRO keyword
    arguments: ``polling`` (True / False / custom polling method),
    ``polling_interval`` and ``continuation_token`` (resumes a
    previously started operation without re-issuing the request).
    """
    polling = kwargs.pop('polling', True)
    cls = kwargs.pop('cls', None)
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)
    if cont_token is None:
        # Only issue the initial request when not resuming.
        raw_result = self._resync_replication_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            pool_name=pool_name,
            volume_name=volume_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # This operation has no response body to deserialize.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_resync_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/resyncReplication'}
def _delete_replication_initial(
    self,
    resource_group_name,
    account_name,
    pool_name,
    volume_name,
    **kwargs
):
    """Send the raw ``deleteReplication`` POST request.

    Internal helper for ``begin_delete_replication``.  Raises
    ``HttpResponseError`` for any status code other than 200 or 202.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"

    # Construct the URL from the template attached below the function.
    url = self._delete_replication_initial.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query and header parameters (no request body).
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    header_parameters = {}

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_delete_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/deleteReplication'}
def begin_delete_replication(
    self,
    resource_group_name,
    account_name,
    pool_name,
    volume_name,
    **kwargs
):
    """Start the ``deleteReplication`` long-running operation.

    Returns an ``LROPoller``.  Supports the standard LRO keyword
    arguments: ``polling``, ``polling_interval`` and
    ``continuation_token`` (resumes a previously started operation).
    """
    polling = kwargs.pop('polling', True)
    cls = kwargs.pop('cls', None)
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)
    if cont_token is None:
        # Only issue the initial request when not resuming.
        raw_result = self._delete_replication_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            pool_name=pool_name,
            volume_name=volume_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # This operation has no response body to deserialize.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/deleteReplication'}
def _authorize_replication_initial(
    self,
    resource_group_name,
    account_name,
    pool_name,
    volume_name,
    body,
    **kwargs
):
    """Send the raw ``authorizeReplication`` POST request.

    ``body`` is serialized as an ``AuthorizeRequest`` model.  Internal
    helper for ``begin_authorize_replication``.  Raises
    ``HttpResponseError`` for any status code other than 200 or 202.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    content_type = kwargs.pop("content_type", "application/json")

    # Construct the URL from the template attached below the function.
    url = self._authorize_replication_initial.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query and header parameters plus the serialized body.
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    header_parameters = {}
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    body_content_kwargs = {}
    body_content = self._serialize.body(body, 'AuthorizeRequest')
    body_content_kwargs['content'] = body_content

    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_authorize_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/authorizeReplication'}
def begin_authorize_replication(
    self,
    resource_group_name,
    account_name,
    pool_name,
    volume_name,
    body,
    **kwargs
):
    """Start the ``authorizeReplication`` long-running operation.

    ``body`` is the authorize request payload.  Returns an
    ``LROPoller``.  Supports the standard LRO keyword arguments:
    ``polling``, ``polling_interval`` and ``continuation_token``.
    """
    polling = kwargs.pop('polling', True)
    cls = kwargs.pop('cls', None)
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)
    if cont_token is None:
        # Only issue the initial request when not resuming.
        raw_result = self._authorize_replication_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            pool_name=pool_name,
            volume_name=volume_name,
            body=body,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # This operation has no response body to deserialize.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_authorize_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/authorizeReplication'}
def _re_initialize_replication_initial(
    self,
    resource_group_name,
    account_name,
    pool_name,
    volume_name,
    **kwargs
):
    """Send the raw ``reinitializeReplication`` POST request.

    Internal helper for ``begin_re_initialize_replication``.  Raises
    ``HttpResponseError`` for any status code other than 200 or 202.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"

    # Construct the URL from the template attached below the function.
    url = self._re_initialize_replication_initial.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query and header parameters (no request body).
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    header_parameters = {}

    request = self._client.post(url, query_parameters, header_parameters)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_re_initialize_replication_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/reinitializeReplication'}
def begin_re_initialize_replication(
    self,
    resource_group_name,
    account_name,
    pool_name,
    volume_name,
    **kwargs
):
    """Start the ``reinitializeReplication`` long-running operation.

    Returns an ``LROPoller``.  Supports the standard LRO keyword
    arguments: ``polling``, ``polling_interval`` and
    ``continuation_token`` (resumes a previously started operation).
    """
    polling = kwargs.pop('polling', True)
    cls = kwargs.pop('cls', None)
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)
    if cont_token is None:
        # Only issue the initial request when not resuming.
        raw_result = self._re_initialize_replication_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            pool_name=pool_name,
            volume_name=volume_name,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # This operation has no response body to deserialize.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_re_initialize_replication.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/reinitializeReplication'}
def _pool_change_initial(
    self,
    resource_group_name,
    account_name,
    pool_name,
    volume_name,
    body,
    **kwargs
):
    """Send the raw ``poolChange`` POST request.

    ``body`` is serialized as a ``PoolChangeRequest`` model.  Internal
    helper for ``begin_pool_change``.  Raises ``HttpResponseError`` for
    any status code other than 200 or 202.
    """
    cls = kwargs.pop('cls', None)
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-11-01"
    content_type = kwargs.pop("content_type", "application/json")

    # Construct the URL from the template attached below the function.
    url = self._pool_change_initial.metadata['url']
    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query and header parameters plus the serialized body.
    query_parameters = {}
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
    header_parameters = {}
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    body_content_kwargs = {}
    body_content = self._serialize.body(body, 'PoolChangeRequest')
    body_content_kwargs['content'] = body_content

    request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        raise HttpResponseError(response=response, error_format=ARMErrorFormat)

    if cls:
        return cls(pipeline_response, None, {})
_pool_change_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/poolChange'}
def begin_pool_change(
    self,
    resource_group_name,
    account_name,
    pool_name,
    volume_name,
    body,
    **kwargs
):
    """Start the ``poolChange`` long-running operation.

    ``body`` is the pool-change request payload.  Returns an
    ``LROPoller``.  Supports the standard LRO keyword arguments:
    ``polling``, ``polling_interval`` and ``continuation_token``.
    """
    polling = kwargs.pop('polling', True)
    cls = kwargs.pop('cls', None)
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None)
    if cont_token is None:
        # Only issue the initial request when not resuming.
        raw_result = self._pool_change_initial(
            resource_group_name=resource_group_name,
            account_name=account_name,
            pool_name=pool_name,
            volume_name=volume_name,
            body=body,
            cls=lambda x,y,z: x,
            **kwargs
        )

    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)

    def get_long_running_output(pipeline_response):
        # This operation has no response body to deserialize.
        if cls:
            return cls(pipeline_response, None, {})

    path_format_arguments = {
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
        'accountName': self._serialize.url("account_name", account_name, 'str'),
        'poolName': self._serialize.url("pool_name", pool_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z0-9][a-zA-Z0-9\-_]{0,63}$'),
        'volumeName': self._serialize.url("volume_name", volume_name, 'str', max_length=64, min_length=1, pattern=r'^[a-zA-Z][a-zA-Z0-9\-_]{0,63}$'),
    }

    if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_pool_change.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.NetApp/netAppAccounts/{accountName}/capacityPools/{poolName}/volumes/{volumeName}/poolChange'}
| true | true |
1c3f406dfd935bf28aae35459dc00f6c4f609492 | 16,352 | bzl | Python | scala/scala_maven_import_external.bzl | SeaJaredCode/rules_scala | c291447cabd35fd051989cad000b6ec6490285fd | [
"Apache-2.0"
] | null | null | null | scala/scala_maven_import_external.bzl | SeaJaredCode/rules_scala | c291447cabd35fd051989cad000b6ec6490285fd | [
"Apache-2.0"
] | null | null | null | scala/scala_maven_import_external.bzl | SeaJaredCode/rules_scala | c291447cabd35fd051989cad000b6ec6490285fd | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
'jvm_import_external' offers additional functionality above what maven_jar has to offer.
In addition to downloading the jars, it allows to define this jar's dependencies.
thus it enables the explicit definition of the entire transitive dependency graph.
The rule achieves this by writing 'import' build rules in BUILD files next to the downloaded jars.
The name of the underlying 'import' rule needs to be specified.
An optional 'load' statement can also be provided, along with any other relevant custom attribute.
These import rules must have the following attributes:
- "jars"
- "deps"
- "runtime_deps"
- "exports"
the code here is solely based on `java_import_external` from the bazelbuild/bazel repository and is proposed to be upstreamed back.
the following macros are defined below that utilize jvm_import_external:
- scala_import_external - uses `scala_import` as the underlying build rule
- jvm_maven_import_external - offers a 'maven' like api for identifying jars using 'artifact' format
- scala_maven_import_external - combination of scala_import_external and jvm_maven_import_external
- java_import_external - to demonstrate that the original functionality of `java_import_external` stayed intact.
"""
# Marker written at the top of every generated BUILD file.
_HEADER = "# DO NOT EDIT: generated by jvm_import_external()"

# Attributes copied verbatim from the repository rule onto the generated
# import rule.  "testonly_" loses its trailing underscore when rendered.
_PASS_PROPS = (
    "neverlink",
    "testonly_",
    "visibility",
    "exports",
    "runtime_deps",
    "deps",
    "tags",
)
def _jvm_import_external(repository_ctx):
    """Implementation of `java_import_external` rule.

    Downloads the jar (and optional source jar), then writes a BUILD
    file containing the configured import rule plus a `//jar` alias.
    """
    if (repository_ctx.attr.generated_linkable_rule_name and
        not repository_ctx.attr.neverlink):
        fail("Only use generated_linkable_rule_name if neverlink is set")
    name = repository_ctx.attr.generated_rule_name or repository_ctx.name
    urls = repository_ctx.attr.jar_urls
    sha = repository_ctx.attr.jar_sha256
    # Default download file name; prefer the basename of the first
    # URL that ends in ".jar".
    path = repository_ctx.name + ".jar"
    for url in urls:
        if url.endswith(".jar"):
            path = url[url.rindex("/") + 1:]
            break
    srcurls = repository_ctx.attr.srcjar_urls
    srcsha = repository_ctx.attr.srcjar_sha256
    srcpath = repository_ctx.name + "-src.jar" if srcurls else ""
    for url in srcurls:
        if url.endswith(".jar"):
            srcpath = url[url.rindex("/") + 1:].replace("-sources.jar", "-src.jar")
            break
    # Assemble the generated BUILD file line by line.
    lines = [_HEADER, ""]
    if repository_ctx.attr.rule_load:
        lines.append(repository_ctx.attr.rule_load)
        lines.append("")
    if repository_ctx.attr.default_visibility:
        lines.append("package(default_visibility = %s)" %
                     (repository_ctx.attr.default_visibility))
        lines.append("")
    lines.append("licenses(%s)" % repr(repository_ctx.attr.licenses))
    lines.append("")
    lines.extend(
        _serialize_given_rule_import(
            repository_ctx.attr.rule_name,
            name,
            path,
            srcpath,
            repository_ctx.attr,
            _PASS_PROPS,
            repository_ctx.attr.additional_rule_attrs,
        ),
    )
    # Optionally emit a second, linkable copy of the rule for
    # neverlink repositories.
    if (repository_ctx.attr.neverlink and
        repository_ctx.attr.generated_linkable_rule_name):
        lines.extend(
            _serialize_given_rule_import(
                repository_ctx.attr.rule_name,
                repository_ctx.attr.generated_linkable_rule_name,
                path,
                srcpath,
                repository_ctx.attr,
                [p for p in _PASS_PROPS if p != "neverlink"],
                repository_ctx.attr.additional_rule_attrs,
            ),
        )
    extra = repository_ctx.attr.extra_build_file_content
    if extra:
        lines.append(extra)
        if not extra.endswith("\n"):
            lines.append("")
    repository_ctx.download(urls, path, sha)
    if srcurls:
        repository_ctx.download(srcurls, srcpath, srcsha)
    repository_ctx.file("BUILD", "\n".join(lines))
    # `@repo//jar` alias for compatibility with http_jar-style labels.
    repository_ctx.file("jar/BUILD", "\n".join([
        _HEADER,
        "",
        "package(default_visibility = %r)" %
        (repository_ctx.attr.visibility or
         repository_ctx.attr.default_visibility),
        "",
        "alias(",
        " name = \"jar\",",
        " actual = \"@%s\"," % repository_ctx.name,
        ")",
        "",
    ]))
def _decode_maven_coordinates(artifact):
    """Parse a `group:artifact:[packaging:[classifier:]]version` string.

    Returns a struct with group_id, artifact_id, version, classifier
    (None unless five fields are given) and packaging ("jar" unless
    given explicitly).
    """
    parts = artifact.split(":")
    group_id, artifact_id = parts[0], parts[1]
    version = parts[2]
    packaging = "jar"
    classifier = None
    if len(parts) == 4:
        # group:artifact:packaging:version
        packaging, version = parts[2], parts[3]
    elif len(parts) == 5:
        # group:artifact:packaging:classifier:version
        packaging, classifier, version = parts[2], parts[3], parts[4]
    return struct(
        group_id = group_id,
        artifact_id = artifact_id,
        version = version,
        classifier = classifier,
        packaging = packaging,
    )
def _convert_coordinates_to_urls(coordinates, server_urls):
    """Build one download URL per server for the given coordinates.

    The Maven repository layout is used:
    `<group path>/<artifact>/<version>/<artifact>-<version>[-<classifier>].<packaging>`.
    """
    group_path = coordinates.group_id.replace(".", "/")
    classifier_suffix = "-" + coordinates.classifier if coordinates.classifier else ""
    final_name = coordinates.artifact_id + "-" + coordinates.version + classifier_suffix + "." + coordinates.packaging
    url_suffix = group_path + "/" + coordinates.artifact_id + "/" + coordinates.version + "/" + final_name
    # Join each server URL with the suffix, adding "/" only when the
    # server URL does not already end with one.
    return [
        server_url + url_suffix if server_url.endswith("/") else server_url + "/" + url_suffix
        for server_url in server_urls
    ]
def _concat_with_needed_slash(server_url, url_suffix):
    """Join a server URL and a path, inserting "/" only when missing."""
    separator = "" if server_url.endswith("/") else "/"
    return server_url + separator + url_suffix
def _serialize_given_rule_import(
        rule_name,
        name,
        path,
        srcpath,
        attrs,
        props,
        additional_rule_attrs):
    """Render one import-rule invocation as a list of BUILD-file lines."""
    lines = [
        "%s(" % rule_name,
        " name = %s," % repr(name),
        " jars = [%s]," % repr(path),
    ]
    if srcpath:
        lines.append(" srcjar = %s," % repr(srcpath))
    for prop in props:
        value = getattr(attrs, prop, None)
        if value:
            # A trailing underscore avoids clashing with reserved
            # names (e.g. "testonly_"); strip it when rendering.
            attr_name = prop[:-1] if prop.endswith("_") else prop
            lines.append(" %s = %s," % (attr_name, repr(value)))
    # Additional attributes are emitted verbatim: their values are
    # expected to be pre-escaped Starlark expressions.
    lines.extend([" %s = %s," % (key, additional_rule_attrs[key]) for key in additional_rule_attrs])
    lines.append(")")
    lines.append("")
    return lines
jvm_import_external = repository_rule(
    implementation = _jvm_import_external,
    attrs = {
        # Name of the underlying import rule (e.g. "java_import",
        # "scala_import") and the load statement that provides it.
        "rule_name": attr.string(mandatory = True),
        "licenses": attr.string_list(mandatory = True, allow_empty = False),
        # Jar location and integrity checksum.
        "jar_urls": attr.string_list(mandatory = True, allow_empty = False),
        "jar_sha256": attr.string(),
        "rule_load": attr.string(),
        # Extra attributes rendered verbatim into the generated rule;
        # values must already be escaped Starlark expressions.
        "additional_rule_attrs": attr.string_dict(),
        "srcjar_urls": attr.string_list(),
        "srcjar_sha256": attr.string(),
        # Attributes forwarded onto the generated import rule
        # (see _PASS_PROPS).
        "deps": attr.string_list(),
        "runtime_deps": attr.string_list(),
        "testonly_": attr.bool(),
        "exports": attr.string_list(),
        "neverlink": attr.bool(),
        # Overrides for the generated rule names; the linkable variant
        # is only allowed together with neverlink = True.
        "generated_rule_name": attr.string(),
        "generated_linkable_rule_name": attr.string(),
        "default_visibility": attr.string_list(
            default = ["//visibility:public"],
        ),
        "extra_build_file_content": attr.string(),
    },
)
def scala_maven_import_external(
        artifact,
        server_urls,
        rule_load = "load(\"@io_bazel_rules_scala//scala:scala_import.bzl\", \"scala_import\")",
        fetch_sources = False,
        **kwargs):
    """Fetch a Maven artifact and expose it as a `scala_import` rule."""
    jvm_maven_import_external(
        artifact = artifact,
        server_urls = server_urls,
        fetch_sources = fetch_sources,
        rule_name = "scala_import",
        rule_load = rule_load,
        # Values of additional string attributes must be pre-escaped to
        # accommodate non-string types, e.g.
        # additional_rule_attrs = {"foo": "'bar'"}.
        **kwargs
    )
def jvm_maven_import_external(
        artifact,
        server_urls,
        fetch_sources = False,
        **kwargs):
    """Resolve a Maven coordinate to jar (and optional source-jar) URLs
    and delegate to jvm_import_external."""
    if fetch_sources and kwargs.get("srcjar_urls"):
        fail("Either use srcjar_urls or fetch_sources but not both")
    coordinates = _decode_maven_coordinates(artifact)
    srcjar_urls = kwargs.pop("srcjar_urls", None)
    if fetch_sources:
        # Derive the "-sources" classifier variant of the coordinate.
        srcjar_urls = _convert_coordinates_to_urls(
            struct(
                group_id = coordinates.group_id,
                artifact_id = coordinates.artifact_id,
                version = coordinates.version,
                classifier = "sources",
                packaging = "jar",
            ),
            server_urls,
        )
    jvm_import_external(
        jar_urls = _convert_coordinates_to_urls(coordinates, server_urls),
        srcjar_urls = srcjar_urls,
        **kwargs
    )
def scala_import_external(
        rule_load = "load(\"@io_bazel_rules_scala//scala:scala_import.bzl\", \"scala_import\")",
        **kwargs):
    """jvm_import_external pre-configured for the scala_import rule."""
    jvm_import_external(
        rule_load = rule_load,
        rule_name = "scala_import",
        **kwargs
    )
"""Rules for defining external Java dependencies.
java_import_external() replaces `maven_jar` and `http_jar`. It is the
recommended solution for defining third party Java dependencies that are
obtained from web servers.
This solution offers high availability, low latency, and repository
scalability at the cost of simplicity. Tooling can be used to generate
these definitions automatically.
The default target in this BUILD file will always have the same name as
the repository itself. This means that other Bazel rules can depend on
it as `@repo//:repo` or `@repo` for short.
### Setup
Add the following to your `WORKSPACE` file:
```python
load("@bazel_tools//tools/build_defs/repo:java.bzl", "java_import_external")
```
### Best Practices
#### Downloading
The recommended best practices for downloading Maven jars are as follows:
1. Always follow release versions or pinned revisions.
2. Permanently mirror all dependencies to GCS or S3 as the first URL
3. Put the original URL in the GCS or S3 object name
4. Make the second URL the original repo1.maven.org URL
5. Make the third URL the maven.ibiblio.org mirror, if it isn't 404
6. Always specify the sha256 checksum
Bazel has one of the most sophisticated systems for downloading files of any
build system. Following these best practices will ensure that your codebase
takes full advantage of the level of reliability that Bazel able to offer. See
https://goo.gl/uQOE11 for more information.
#### Selection
Avoid using jars that bundle their dependencies. For example, a Maven jar for
the artifact com.initech:tps:1.0 should not contain a classes named
com.fakecorp.foo. Try to see if Initech distributes a tps jar that doesn't
bundle its dependencies. Then create a separate java_import_external() for each
one and have the first depend on the second.
Sometimes jars are distributed with their dependencies shaded. What this means
is that com.initech.tps will contain classes like
com.initech.tps.shade.com.fakecorp.foo. This is less problematic, since it
won't lead to mysterious classpath conflicts. But it can lead to inefficient
use of space and make the license of the end product more difficult to
determine.
#### Licensing
The following values for the licenses field are typically used. If a jar
contains multiple works with different licenses, then only the most
restrictive one is listed, and the rest are noted in accompanying comments.
The following are examples of how licenses could be categorized, ordered
by those with terms most permissive to least:
- **unencumbered**: CC0, Unlicense
- **permissive**: Beerware
- **notice**: Apache, MIT, X11, BSD, ISC, ZPL, Unicode, JSON, Artistic
- **reciprocal**: MPL, CPL, EPL, Eclipse, APSL, IBMPL, CDDL
- **restricted**: GPL, LGPL, OSL, Sleepycat, QTPL, Java, QMail, NPL
- **by_exception_only**: AGPL, WTFPL
### Naming
Bazel repository names must match the following pattern: `[_0-9A-Za-z]+`. To
choose an appropriate name based on a Maven group and artifact ID, we recommend
an algorithm https://gist.github.com/jart/41bfd977b913c2301627162f1c038e55 which
can be best explained by the following examples:
- com.google.guava:guava becomes com_google_guava
- commons-logging:commons-logging becomes commons_logging
- junit:junit becomes junit
Adopting this naming convention will help maximize the chances that your
codebase will be able to successfully interoperate with other Bazel codebases
using Java.
### Example
Here is an example of a best practice definition of Google's Guava library:
```python
java_import_external(
name = "com_google_guava",
licenses = ["notice"], # Apache 2.0
jar_urls = [
"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/com/google/guava/guava/20.0/guava-20.0.jar",
"http://repo1.maven.org/maven2/com/google/guava/guava/20.0/guava-20.0.jar",
"http://maven.ibiblio.org/maven2/com/google/guava/guava/20.0/guava-20.0.jar",
],
jar_sha256 = "36a666e3b71ae7f0f0dca23654b67e086e6c93d192f60ba5dfd5519db6c288c8",
deps = [
"@com_google_code_findbugs_jsr305",
"@com_google_errorprone_error_prone_annotations",
],
)
java_import_external(
name = "com_google_code_findbugs_jsr305",
licenses = ["notice"], # BSD 3-clause
jar_urls = [
"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar",
"http://repo1.maven.org/maven2/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar",
"http://maven.ibiblio.org/maven2/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar",
],
jar_sha256 = "905721a0eea90a81534abb7ee6ef4ea2e5e645fa1def0a5cd88402df1b46c9ed",
)
java_import_external(
name = "com_google_errorprone_error_prone_annotations",
licenses = ["notice"], # Apache 2.0
jar_sha256 = "e7749ffdf03fb8ebe08a727ea205acb301c8791da837fee211b99b04f9d79c46",
jar_urls = [
"http://bazel-mirror.storage.googleapis.com/repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.0.15/error_prone_annotations-2.0.15.jar",
"http://maven.ibiblio.org/maven2/com/google/errorprone/error_prone_annotations/2.0.15/error_prone_annotations-2.0.15.jar",
"http://repo1.maven.org/maven2/com/google/errorprone/error_prone_annotations/2.0.15/error_prone_annotations-2.0.15.jar",
],
)
```
### Annotation Processors
Defining jars that contain annotation processors requires a certain level of
trickery, which is best done by copying and pasting from codebases that have
already done it before. Please see the Google Nomulus and Bazel Closure Rules
codebases for examples in which java_import_external has been used to define
Dagger 2.0, AutoValue, and AutoFactory.
Please note that certain care needs to be taken into consideration regarding
whether or not these annotation processors generate actual API, or simply
generate code that implements them. See the Bazel documentation for further
information.
### Test Dependencies
It is strongly recommended that the `testonly_` attribute be specified on
libraries that are intended for testing purposes. This is passed along to the
generated `java_library` rule in order to ensure that test code remains
disjoint from production code.
### Provided Dependencies
The feature in Bazel most analogous to Maven's provided scope is the neverlink
attribute. This should be used in rare circumstances when a distributed jar
will be loaded into a runtime environment where certain dependencies can be
reasonably expected to already be provided.
"""
def java_import_external(jar_sha256, **kwargs):
    """Downloads a jar and wraps it in a generated ``java_import`` rule.

    Args:
      jar_sha256: SHA-256 checksum that the downloaded jar must match.
      **kwargs: forwarded unchanged to ``jvm_import_external`` (e.g.
        ``jar_urls``, ``licenses``, ``deps``, ``exports``).
    """
    jvm_import_external(
        rule_name = "java_import",
        jar_sha256 = jar_sha256,
        **kwargs
    )
| 37.248292 | 164 | 0.694777 |
# Header written at the top of every generated BUILD file so readers know
# the file is machine-generated.
_HEADER = "# DO NOT EDIT: generated by jvm_import_external()"

# Repository-rule attributes that are copied verbatim onto the generated
# import rule (see _serialize_given_rule_import).
_PASS_PROPS = (
    "neverlink",
    "testonly_",
    "visibility",
    "exports",
    "runtime_deps",
    "deps",
    "tags",
)
def _jvm_import_external(repository_ctx):
    """Implementation of the ``jvm_import_external`` repository rule.

    Downloads the jar (and optional sources jar), then writes a BUILD file
    that exposes it via the configured import rule, plus a ``jar`` alias
    package for ``@repo//jar`` compatibility.
    """
    if (repository_ctx.attr.generated_linkable_rule_name and
        not repository_ctx.attr.neverlink):
        fail("Only use generated_linkable_rule_name if neverlink is set")
    name = repository_ctx.attr.generated_rule_name or repository_ctx.name
    urls = repository_ctx.attr.jar_urls
    sha = repository_ctx.attr.jar_sha256
    # Default the local jar filename to the repo name; prefer the basename
    # of the first URL that actually ends in ".jar".
    path = repository_ctx.name + ".jar"
    for url in urls:
        if url.endswith(".jar"):
            path = url[url.rindex("/") + 1:]
            break
    srcurls = repository_ctx.attr.srcjar_urls
    srcsha = repository_ctx.attr.srcjar_sha256
    # Same naming scheme for the sources jar; Maven's "-sources.jar" suffix
    # is normalized to "-src.jar".
    srcpath = repository_ctx.name + "-src.jar" if srcurls else ""
    for url in srcurls:
        if url.endswith(".jar"):
            srcpath = url[url.rindex("/") + 1:].replace("-sources.jar", "-src.jar")
            break
    # Assemble the generated BUILD file line by line.
    lines = [_HEADER, ""]
    if repository_ctx.attr.rule_load:
        lines.append(repository_ctx.attr.rule_load)
        lines.append("")
    if repository_ctx.attr.default_visibility:
        lines.append("package(default_visibility = %s)" %
                     (repository_ctx.attr.default_visibility))
        lines.append("")
    lines.append("licenses(%s)" % repr(repository_ctx.attr.licenses))
    lines.append("")
    lines.extend(
        _serialize_given_rule_import(
            repository_ctx.attr.rule_name,
            name,
            path,
            srcpath,
            repository_ctx.attr,
            _PASS_PROPS,
            repository_ctx.attr.additional_rule_attrs,
        ),
    )
    # A neverlink target may additionally request a second, linkable rule
    # (same jar, "neverlink" dropped from the copied attributes).
    if (repository_ctx.attr.neverlink and
        repository_ctx.attr.generated_linkable_rule_name):
        lines.extend(
            _serialize_given_rule_import(
                repository_ctx.attr.rule_name,
                repository_ctx.attr.generated_linkable_rule_name,
                path,
                srcpath,
                repository_ctx.attr,
                [p for p in _PASS_PROPS if p != "neverlink"],
                repository_ctx.attr.additional_rule_attrs,
            ),
        )
    extra = repository_ctx.attr.extra_build_file_content
    if extra:
        lines.append(extra)
        if not extra.endswith("\n"):
            lines.append("")
    # Fetch the artifacts, then materialize both BUILD files.
    repository_ctx.download(urls, path, sha)
    if srcurls:
        repository_ctx.download(srcurls, srcpath, srcsha)
    repository_ctx.file("BUILD", "\n".join(lines))
    # "jar" subpackage aliases the main target, mirroring maven_jar's
    # historical @repo//jar label.
    repository_ctx.file("jar/BUILD", "\n".join([
        _HEADER,
        "",
        "package(default_visibility = %r)" %
        (repository_ctx.attr.visibility or
         repository_ctx.attr.default_visibility),
        "",
        "alias(",
        "    name = \"jar\",",
        "    actual = \"@%s\"," % repository_ctx.name,
        ")",
        "",
    ]))
def _decode_maven_coordinates(artifact):
    """Splits a Maven coordinate string into its component fields.

    Accepted forms:
      group:artifact:version
      group:artifact:packaging:version
      group:artifact:packaging:classifier:version
    """
    fields = artifact.split(":")
    group_id, artifact_id = fields[0], fields[1]
    # Three-part coordinates put the version third; the longer forms below
    # override these defaults.
    version = fields[2]
    packaging = "jar"
    classifier = None
    if len(fields) == 4:
        packaging = fields[2]
        version = fields[3]
    elif len(fields) == 5:
        packaging = fields[2]
        classifier = fields[3]
        version = fields[4]
    return struct(
        group_id = group_id,
        artifact_id = artifact_id,
        version = version,
        classifier = classifier,
        packaging = packaging,
    )
def _convert_coordinates_to_urls(coordinates, server_urls):
    """Builds the candidate download URL for the artifact on each server."""
    group_path = coordinates.group_id.replace(".", "/")
    # Classifier (if any) is appended to the filename with a "-" separator.
    classifier_suffix = "-" + coordinates.classifier if coordinates.classifier else ""
    final_name = "%s-%s%s.%s" % (
        coordinates.artifact_id,
        coordinates.version,
        classifier_suffix,
        coordinates.packaging,
    )
    url_suffix = "/".join([
        group_path,
        coordinates.artifact_id,
        coordinates.version,
        final_name,
    ])
    return [_concat_with_needed_slash(base, url_suffix) for base in server_urls]
def _concat_with_needed_slash(server_url, url_suffix):
    """Joins a base URL and a path with exactly one "/" between them."""
    separator = "" if server_url.endswith("/") else "/"
    return server_url + separator + url_suffix
def _serialize_given_rule_import(
        rule_name,
        name,
        path,
        srcpath,
        attrs,
        props,
        additional_rule_attrs):
    """Renders one import-rule invocation as a list of BUILD-file lines.

    ``props`` names attributes copied (via repr) from ``attrs`` when truthy;
    a trailing underscore is stripped from the emitted attribute name.
    ``additional_rule_attrs`` values are inserted verbatim (pre-serialized).
    """
    out = ["%s(" % rule_name]
    out.append("    name = %s," % repr(name))
    out.append("    jars = [%s]," % repr(path))
    if srcpath:
        out.append("    srcjar = %s," % repr(srcpath))
    for prop in props:
        value = getattr(attrs, prop, None)
        if not value:
            continue
        label = prop[:-1] if prop.endswith("_") else prop
        out.append("    %s = %s," % (label, repr(value)))
    for attr_key in additional_rule_attrs:
        out.append("    %s = %s," % (attr_key, additional_rule_attrs[attr_key]))
    out.append(")")
    # Trailing empty entry yields a newline after the closing paren.
    out.append("")
    return out
# Repository rule that downloads a jar (and optionally its sources jar) and
# generates a BUILD file exposing it via the configured import rule
# ("java_import", "scala_import", ...). Implementation: _jvm_import_external.
jvm_import_external = repository_rule(
    implementation = _jvm_import_external,
    attrs = {
        # Name of the import rule to emit in the generated BUILD file.
        "rule_name": attr.string(mandatory = True),
        "licenses": attr.string_list(mandatory = True, allow_empty = False),
        "jar_urls": attr.string_list(mandatory = True, allow_empty = False),
        "jar_sha256": attr.string(),
        # Optional load() statement placed at the top of the BUILD file.
        "rule_load": attr.string(),
        # Extra attributes (pre-serialized values) copied onto the rule.
        "additional_rule_attrs": attr.string_dict(),
        "srcjar_urls": attr.string_list(),
        "srcjar_sha256": attr.string(),
        # Attributes below are forwarded to the generated rule (_PASS_PROPS).
        "deps": attr.string_list(),
        "runtime_deps": attr.string_list(),
        "testonly_": attr.bool(),
        "exports": attr.string_list(),
        "neverlink": attr.bool(),
        "generated_rule_name": attr.string(),
        # Name for an extra linkable twin rule; requires neverlink = True.
        "generated_linkable_rule_name": attr.string(),
        "default_visibility": attr.string_list(
            default = ["//visibility:public"],
        ),
        # Raw text appended to the generated BUILD file.
        "extra_build_file_content": attr.string(),
    },
)
def scala_maven_import_external(
        artifact,
        server_urls,
        rule_load = "load(\"@io_bazel_rules_scala//scala:scala_import.bzl\", \"scala_import\")",
        fetch_sources = False,
        **kwargs):
    """Fetches a Maven artifact and exposes it via ``scala_import``.

    Args:
      artifact: Maven coordinate, e.g. "org.scalactic:scalactic_2.12:3.0.5".
      server_urls: base repository URLs to try, in order.
      rule_load: load() statement providing the ``scala_import`` symbol.
      fetch_sources: also download the "-sources" classifier jar.
      **kwargs: forwarded to ``jvm_maven_import_external``.
    """
    jvm_maven_import_external(
        rule_name = "scala_import",
        rule_load = rule_load,
        artifact = artifact,
        server_urls = server_urls,
        fetch_sources = fetch_sources,
        # Example of passing extra serialized attributes through:
        # additional_rule_attrs = {"foo": "'bar'"},
        **kwargs
    )
def jvm_maven_import_external(
        artifact,
        server_urls,
        fetch_sources = False,
        **kwargs):
    """Resolves a Maven coordinate to URLs and delegates to jvm_import_external.

    Args:
      artifact: Maven coordinate string (see _decode_maven_coordinates).
      server_urls: base repository URLs; each is combined with the artifact
        path to produce a mirror list.
      fetch_sources: if True, also derive URLs for the "sources" classifier.
      **kwargs: forwarded to ``jvm_import_external``; may not contain
        ``srcjar_urls`` when ``fetch_sources`` is set.
    """
    if kwargs.get("srcjar_urls") and fetch_sources:
        fail("Either use srcjar_urls or fetch_sources but not both")
    coordinates = _decode_maven_coordinates(artifact)
    jar_urls = _convert_coordinates_to_urls(coordinates, server_urls)
    # Explicit srcjar_urls (if any) win; fetch_sources derives them instead.
    srcjar_urls = kwargs.pop("srcjar_urls", None)
    if fetch_sources:
        src_coordinates = struct(
            group_id = coordinates.group_id,
            artifact_id = coordinates.artifact_id,
            version = coordinates.version,
            classifier = "sources",
            packaging = "jar",
        )
        srcjar_urls = _convert_coordinates_to_urls(src_coordinates, server_urls)
    jvm_import_external(jar_urls = jar_urls, srcjar_urls = srcjar_urls, **kwargs)
def scala_import_external(
        rule_load = "load(\"@io_bazel_rules_scala//scala:scala_import.bzl\", \"scala_import\")",
        **kwargs):
    """Downloads a jar by explicit URLs and wraps it in ``scala_import``.

    Args:
      rule_load: load() statement providing the ``scala_import`` symbol.
      **kwargs: forwarded to ``jvm_import_external`` (e.g. ``jar_urls``,
        ``jar_sha256``, ``licenses``).
    """
    jvm_import_external(
        rule_name = "scala_import",
        rule_load = rule_load,
        **kwargs
    )
def java_import_external(jar_sha256, **kwargs):
    """Downloads a jar and wraps it in a generated ``java_import`` rule.

    Args:
      jar_sha256: SHA-256 checksum that the downloaded jar must match.
      **kwargs: forwarded unchanged to ``jvm_import_external`` (e.g.
        ``jar_urls``, ``licenses``, ``deps``, ``exports``).
    """
    jvm_import_external(
        rule_name = "java_import",
        jar_sha256 = jar_sha256,
        **kwargs
    )
| true | true |
1c3f40ba3671355bb84bec525b3ec2763f6c7c75 | 3,097 | py | Python | backend/backend/settings.py | RebornBeat/CodeEmerge | 81066b1be8690c4600e3c656d7da45f035ab2ad7 | [
"MIT"
] | null | null | null | backend/backend/settings.py | RebornBeat/CodeEmerge | 81066b1be8690c4600e3c656d7da45f035ab2ad7 | [
"MIT"
] | null | null | null | backend/backend/settings.py | RebornBeat/CodeEmerge | 81066b1be8690c4600e3c656d7da45f035ab2ad7 | [
"MIT"
] | null | null | null | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'pzfu+ar#2)!-)(ug%hz)*_d@%e@g$%r!k)*doy$8imp&26!n$o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list is acceptable while DEBUG is True; production must enumerate
# every hostname this site serves.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'course.apps.CourseConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
# NOTE(review): SQLite is the development default; swap for a server-grade
# backend before production use.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| 25.385246 | 91 | 0.693252 |
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any production deployment.
SECRET_KEY = 'pzfu+ar#2)!-)(ug%hz)*_d@%e@g$%r!k)*doy$8imp&26!n$o'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Production must enumerate every hostname this site serves.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'course.apps.CourseConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| true | true |
1c3f414f148b99f54adabdd14a531a20e819bfe9 | 863 | py | Python | ooobuild/dyn/configuration/default_provider.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/configuration/default_provider.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/dyn/configuration/default_provider.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Service Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.configuration
from ...lo.configuration.default_provider import DefaultProvider as DefaultProvider
__all__ = ['DefaultProvider']
| 33.192308 | 83 | 0.763615 |
from ...lo.configuration.default_provider import DefaultProvider as DefaultProvider
__all__ = ['DefaultProvider']
| true | true |
1c3f421debe446f13f349bd6fae64340c34dcb36 | 1,677 | py | Python | setup.py | kikitrade/dubbo-python3 | c8f721d2b7e73909f283c7cdca3b5449892ca400 | [
"Apache-2.0"
] | null | null | null | setup.py | kikitrade/dubbo-python3 | c8f721d2b7e73909f283c7cdca3b5449892ca400 | [
"Apache-2.0"
] | null | null | null | setup.py | kikitrade/dubbo-python3 | c8f721d2b7e73909f283c7cdca3b5449892ca400 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
from setuptools import setup, find_packages
# Packaging metadata for the dubbo-python3 client library.
setup(
    name='dubbo-python3',
    version='1.0.3',
    url='https://github.com/kikitrade/dubbo-python3',
    author='holly',
    author_email='hao.holly@gmail.com',
    description='Python3 Dubbo Client.',
    license='Apache License 2.0',
    # Ship every package except the test suite and developer tooling.
    packages=find_packages(exclude=['tests', 'tools']),
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Natural Language :: Chinese (Simplified)',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    # kazoo provides the ZooKeeper registry client; pinned exactly.
    install_requires=[
        'kazoo==2.8.0'
    ],
)
| 35.680851 | 75 | 0.670841 |
from setuptools import setup, find_packages
# Packaging metadata for the dubbo-python3 client library.
setup(
    name='dubbo-python3',
    version='1.0.3',
    url='https://github.com/kikitrade/dubbo-python3',
    author='holly',
    author_email='hao.holly@gmail.com',
    description='Python3 Dubbo Client.',
    license='Apache License 2.0',
    # Ship every package except the test suite and developer tooling.
    packages=find_packages(exclude=['tests', 'tools']),
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Natural Language :: Chinese (Simplified)',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
    # kazoo provides the ZooKeeper registry client; pinned exactly.
    install_requires=[
        'kazoo==2.8.0'
    ],
)
| true | true |
1c3f422e6389aefe6aac0be4a15ed6fdbe434335 | 11,028 | py | Python | pygmt/src/project.py | Test-Organization-6/pygmt | 0aa04d79dfd5d1aeaec9e4b2e4b43850bd6c0299 | [
"BSD-3-Clause"
] | null | null | null | pygmt/src/project.py | Test-Organization-6/pygmt | 0aa04d79dfd5d1aeaec9e4b2e4b43850bd6c0299 | [
"BSD-3-Clause"
] | null | null | null | pygmt/src/project.py | Test-Organization-6/pygmt | 0aa04d79dfd5d1aeaec9e4b2e4b43850bd6c0299 | [
"BSD-3-Clause"
] | null | null | null | """
project - Project data onto lines or great circles, or generate tracks.
"""
import pandas as pd
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
@fmt_docstring
@use_alias(
A="azimuth",
C="center",
E="endpoint",
F="convention",
G="generate",
L="length",
N="flat_earth",
Q="unit",
S="sort",
T="pole",
V="verbose",
W="width",
Z="ellipse",
f="coltypes",
)
@kwargs_to_strings(E="sequence", L="sequence", T="sequence", W="sequence", C="sequence")
def project(data=None, x=None, y=None, z=None, outfile=None, **kwargs):
r"""
Project data onto lines or great circles, or generate tracks.
Project reads arbitrary :math:`(x, y [, z])` data and returns any
combination of :math:`(x, y, z, p, q, r, s)`, where :math:`(p, q)` are the
coordinates in the projection, :math:`(r, s)` is the position in the
:math:`(x, y)` coordinate system of the point on the profile (:math:`q = 0`
path) closest to :math:`(x, y)`, and :math:`z` is all remaining columns in
the input (beyond the required :math:`x` and :math:`y` columns).
Alternatively, ``project`` may be used to generate
:math:`(r, s, p)` triples at equal increments along a profile using the
``generate`` parameter. In this case, the value of ``data`` is ignored
(you can use, e.g., ``data=None``).
Projections are defined in any (but only) one of three ways:
1. By a ``center`` and an ``azimuth`` in degrees clockwise from North.
2. By a ``center`` and ``endpoint`` of the projection path.
3. By a ``center`` and a ``pole`` position.
To spherically project data along a great circle path, an oblique
coordinate system is created which has its equator along that path, and the
zero meridian through the Center. Then the oblique longitude (:math:`p`)
corresponds to the distance from the Center along the great circle, and the
oblique latitude (:math:`q`) corresponds to the distance perpendicular to
the great circle path. When moving in the increasing (:math:`p`) direction,
(toward B or in the azimuth direction), the positive (:math:`q`) direction
is to your left. If a Pole has been specified, then the positive
(:math:`q`) direction is toward the pole.
To specify an oblique projection, use the ``pole`` option to set
the pole. Then the equator of the projection is already determined and the
``center`` option is used to locate the :math:`p = 0` meridian. The center
*cx/cy* will be taken as a point through which the :math:`p = 0` meridian
passes. If you do not care to choose a particular point, use the South pole
(*cx* = 0, *cy* = -90).
Data can be selectively windowed by using the ``length`` and ``width``
options. If ``width`` is used, the projection width is set to use only
data with :math:`w_{{min}} < q < w_{{max}}`. If ``length`` is set, then
the length is set to use only those data with
:math:`l_{{min}} < p < l_{{max}}`. If the ``endpoint`` option
has been used to define the projection, then ``length="w"`` may be used to
window the length of the projection to exactly the span from O to B.
Flat Earth (Cartesian) coordinate transformations can also be made. Set
``flat_earth=True`` and remember that azimuth is clockwise from North (the
y axis), NOT the usual cartesian theta, which is counterclockwise from the
x axis. azimuth = 90 - theta.
No assumptions are made regarding the units for
:math:`x, y, r, s, p, q, dist, l_{{min}}, l_{{max}}, w_{{min}}, w_{{max}}`.
    If ``unit`` is set (GMT's **-Q**), map units are assumed and :math:`x, y, r, s` must be in
degrees and :math:`p, q, dist, l_{{min}}, l_{{max}}, w_{{min}}, w_{{max}}`
will be in km.
Calculations of specific great-circle and geodesic distances or for
back-azimuths or azimuths are better done using :gmt-docs:`mapproject` as
project is strictly spherical.
Full option list at :gmt-docs:`project.html`
{aliases}
Parameters
----------
data : str or {table-like}
Pass in (x, y, z) or (longitude, latitude, elevation) values by
providing a file name to an ASCII data table, a 2D
{table-classes}.
center : str or list
*cx*/*cy*.
Set the origin of the projection, in Definition 1 or 2. If
Definition 3 is used, then *cx/cy* are the coordinates of a
point through which the oblique zero meridian (:math:`p = 0`) should
pass. The *cx/cy* is not required to be 90 degrees from the pole.
azimuth : float or str
Define the azimuth of the projection (Definition 1).
endpoint : str or list
*bx*/*by*.
Define the end point of the projection path (Definition 2).
convention : str
Specify the desired output using any combination of **xyzpqrs**, in
any order [Default is **xypqrsz**]. Do not space between the letters.
Use lower case. The output will be columns of values corresponding to
your ``convention``. The **z** flag is special and refers to all
numerical columns beyond the leading **x** and **y** in your input
record. The **z** flag also includes any trailing text (which is
placed at the end of the record regardless of the order of **z** in
``convention``). **Note**: If ``generate`` is True, then the output
order is hardwired to be **rsp** and ``convention`` is not allowed.
generate : str
*dist* [/*colat*][**+c**\|\ **h**].
Create :math:`(r, s, p)` output data every *dist* units of :math:`p`
(See `unit` option). Alternatively, append */colat* for a small
circle instead [Default is a colatitude of 90, i.e., a great circle].
If setting a pole with ``pole`` and you want the small circle to go
through *cx*/*cy*, append **+c** to compute the required colatitude.
Use ``center`` and ``endpoint`` to generate a circle that goes
through the center and end point. Note, in this case the center and
end point cannot be farther apart than :math:`2|\mbox{{colat}}|`.
Finally, if you append **+h** then we will report the position of
the pole as part of the segment header [Default is no header].
Note: No input is read and the value of ``data``, ``x``, ``y``,
and ``z`` is ignored if ``generate`` is used.
length : str or list
[**w**\|\ *l_min*/*l_max*].
Project only those data whose *p* coordinate is
within :math:`l_{{min}} < p < l_{{max}}`. If ``endpoint`` has been set,
then you may alternatively use **w** to stay within the distance from
``center`` to ``endpoint``.
flat_earth : bool
Make a Cartesian coordinate transformation in the plane.
[Default is ``False``; plane created with spherical trigonometry.]
unit : bool
Set units for :math:`x, y, r, s` degrees and
:math:`p, q, dist, l_{{min}}, l_{{max}}, w_{{min}}, {{w_max}}` to km.
[Default is ``False``; all arguments use the same units]
sort : bool
Sort the output into increasing :math:`p` order. Useful when projecting
random data into a sequential profile.
pole : str or list
*px*/*py*.
Set the position of the rotation pole of the projection.
(Definition 3).
{V}
width : str or list
*w_min*/*w_max*.
Project only those data whose :math:`q` coordinate is
within :math:`w_{{min}} < q < w_{{max}}`.
ellipse : str
*major*/*minor*/*azimuth* [**+e**\|\ **n**].
Used in conjunction with ``center`` (sets its center) and ``generate``
(sets the distance increment) to create the coordinates of an ellipse
with *major* and *minor* axes given in km (unless ``flat_earth`` is
given for a Cartesian ellipse) and the *azimuth* of the major axis in
degrees. Append **+e** to adjust the increment set via ``generate`` so
        that the ellipse has equal distance increments [Default uses the
given increment and closes the ellipse]. Instead, append **+n** to set
a specific number of unique equidistant data via ``generate``. For
degenerate ellipses you can just supply a single *diameter* instead. A
geographic diameter may be specified in any desired unit other than km
by appending the unit (e.g., 3d for degrees) [Default is km];
the increment is assumed to be in the same unit. **Note**:
For the Cartesian ellipse (which requires ``flat_earth``), the
*direction* is counter-clockwise from the horizontal instead of an
*azimuth*.
outfile : str
The file name for the output ASCII file.
{f}
Returns
-------
track: pandas.DataFrame or None
Return type depends on whether the ``outfile`` parameter is set:
- :class:`pandas.DataFrame` table with (x, y, ..., newcolname) if
``outfile`` is not set
- None if ``outfile`` is set (output will be stored in file set
by ``outfile``)
"""
if "C" not in kwargs:
raise GMTInvalidInput("The `center` parameter must be specified.")
if "G" not in kwargs and data is None:
raise GMTInvalidInput(
"The `data` parameter must be specified unless `generate` is used."
)
if "G" in kwargs and "F" in kwargs:
raise GMTInvalidInput(
"The `convention` parameter is not allowed with `generate`."
)
with GMTTempFile(suffix=".csv") as tmpfile:
if outfile is None: # Output to tmpfile if outfile is not set
outfile = tmpfile.name
with Session() as lib:
if "G" not in kwargs:
# Choose how data will be passed into the module
table_context = lib.virtualfile_from_data(
check_kind="vector", data=data, x=x, y=y, z=z, required_z=False
)
# Run project on the temporary (csv) data table
with table_context as infile:
arg_str = " ".join(
[infile, build_arg_string(kwargs), "->" + outfile]
)
else:
arg_str = " ".join([build_arg_string(kwargs), "->" + outfile])
lib.call_module(module="project", args=arg_str)
# if user did not set outfile, return pd.DataFrame
if outfile == tmpfile.name:
if "G" in kwargs:
column_names = list("rsp")
result = pd.read_csv(tmpfile.name, sep="\t", names=column_names)
else:
result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">")
# return None if outfile set, output in outfile
elif outfile != tmpfile.name:
result = None
return result
| 43.247059 | 88 | 0.622416 | import pandas as pd
from pygmt.clib import Session
from pygmt.exceptions import GMTInvalidInput
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
@fmt_docstring
@use_alias(
    A="azimuth",
    C="center",
    E="endpoint",
    F="convention",
    G="generate",
    L="length",
    N="flat_earth",
    Q="unit",
    S="sort",
    T="pole",
    V="verbose",
    W="width",
    Z="ellipse",
    f="coltypes",
)
@kwargs_to_strings(E="sequence", L="sequence", T="sequence", W="sequence", C="sequence")
def project(data=None, x=None, y=None, z=None, outfile=None, **kwargs):
    """Project data onto lines or great circles, or generate tracks.

    Wraps GMT's ``project`` module. Input is ignored when ``generate`` (-G)
    is used; otherwise ``data`` or ``x``/``y``[/``z``] is projected.

    Returns a :class:`pandas.DataFrame` when ``outfile`` is not given,
    otherwise writes to ``outfile`` and returns None.
    """
    # ``center`` (-C) is always required by the underlying GMT module.
    if "C" not in kwargs:
        raise GMTInvalidInput("The `center` parameter must be specified.")
    if "G" not in kwargs and data is None:
        raise GMTInvalidInput(
            "The `data` parameter must be specified unless `generate` is used."
        )
    # -G hardwires the output columns, so a custom -F convention conflicts.
    if "G" in kwargs and "F" in kwargs:
        raise GMTInvalidInput(
            "The `convention` parameter is not allowed with `generate`."
        )
    with GMTTempFile(suffix=".csv") as tmpfile:
        if outfile is None:
            # No user outfile: capture output in the temp file for parsing.
            outfile = tmpfile.name
        with Session() as lib:
            if "G" not in kwargs:
                table_context = lib.virtualfile_from_data(
                    check_kind="vector", data=data, x=x, y=y, z=z, required_z=False
                )
                with table_context as infile:
                    arg_str = " ".join(
                        [infile, build_arg_string(kwargs), "->" + outfile]
                    )
            else:
                # generate mode reads no input table.
                arg_str = " ".join([build_arg_string(kwargs), "->" + outfile])
            lib.call_module(module="project", args=arg_str)
        if outfile == tmpfile.name:
            if "G" in kwargs:
                # -G output order is fixed: r, s, p.
                column_names = list("rsp")
                result = pd.read_csv(tmpfile.name, sep="\t", names=column_names)
            else:
                result = pd.read_csv(tmpfile.name, sep="\t", header=None, comment=">")
        elif outfile != tmpfile.name:
            # User-specified outfile: data already written; nothing to return.
            result = None
    return result
| true | true |
1c3f434c07380dbc6d20fcddd44cfbd2e197eca8 | 14,185 | py | Python | napari/_qt/dialogs/preferences_dialog.py | marlene09/napari | d3284b5df2efc0fad2664f954cbc52cca9daa105 | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/dialogs/preferences_dialog.py | marlene09/napari | d3284b5df2efc0fad2664f954cbc52cca9daa105 | [
"BSD-3-Clause"
] | null | null | null | napari/_qt/dialogs/preferences_dialog.py | marlene09/napari | d3284b5df2efc0fad2664f954cbc52cca9daa105 | [
"BSD-3-Clause"
] | null | null | null | import json
from qtpy.QtCore import QSize, Signal
from qtpy.QtWidgets import (
QDialog,
QHBoxLayout,
QListWidget,
QPushButton,
QStackedWidget,
QVBoxLayout,
)
from ..._vendor.qt_json_builder.qt_jsonschema_form import WidgetBuilder
from ...utils.settings import get_settings
from ...utils.translations import trans
from .qt_message_dialogs import ConfirmDialog, ResetNapariInfoDialog
class PreferencesDialog(QDialog):
"""Preferences Dialog for Napari user settings."""
valueChanged = Signal()
updatedValues = Signal()
ui_schema = {
"call_order": {"ui:widget": "plugins"},
"highlight_thickness": {"ui:widget": "highlight"},
"shortcuts": {"ui:widget": "shortcuts"},
}
resized = Signal(QSize)
closed = Signal()
    def __init__(self, parent=None):
        """Build the two-pane preferences dialog.

        Parameters
        ----------
        parent : QWidget, optional
            Parent widget for the dialog.
        """
        super().__init__(parent)
        # Left pane lists the setting pages; right pane stacks their forms.
        self._list = QListWidget(self)
        self._stack = QStackedWidget(self)
        self._list.setObjectName("Preferences")
        # Set up buttons
        self._button_cancel = QPushButton(trans._("Cancel"))
        self._button_ok = QPushButton(trans._("OK"))
        self._default_restore = QPushButton(trans._("Restore defaults"))
        # Setup
        self.setWindowTitle(trans._("Preferences"))
        self._button_ok.setDefault(True)
        # Layout
        left_layout = QVBoxLayout()
        left_layout.addWidget(self._list)
        left_layout.addStretch()
        left_layout.addWidget(self._default_restore)
        left_layout.addWidget(self._button_cancel)
        left_layout.addWidget(self._button_ok)
        main_layout = QHBoxLayout()
        main_layout.addLayout(left_layout, 1)
        main_layout.addWidget(self._stack, 3)
        self.setLayout(main_layout)
        # Signals
        # Selecting a row in the list shows the matching stacked page.
        self._list.currentRowChanged.connect(
            lambda index: self._stack.setCurrentIndex(index)
        )
        self._button_cancel.clicked.connect(self.on_click_cancel)
        self._button_ok.clicked.connect(self.on_click_ok)
        self._default_restore.clicked.connect(self.restore_defaults)
        # Escape / programmatic reject also routes through the cancel handler.
        self.rejected.connect(self.on_click_cancel)
        # Make widget
        self.make_dialog()
        self._list.setCurrentRow(0)
def _restart_dialog(self, event=None, extra_str=""):
"""Displays the dialog informing user a restart is required.
Paramters
---------
event : Event
extra_str : str
Extra information to add to the message about needing a restart.
"""
text_str = trans._(
"napari requires a restart for image rendering changes to apply."
)
widget = ResetNapariInfoDialog(
parent=self,
text=text_str,
)
widget.exec_()
    def accept(self):
        """Override to emit signal."""
        # Announce the dialog is going away before the default accept logic.
        self.closed.emit()
        super().accept()
    def closeEvent(self, event):
        """Override to emit signal."""
        # Emit ``closed`` for any close path (window X, close() calls).
        self.closed.emit()
        super().closeEvent(event)
    def reject(self):
        """Override to handle Escape."""
        # close() triggers closeEvent, which emits the ``closed`` signal.
        super().reject()
        self.close()
    def resizeEvent(self, event):
        """Override to emit signal."""
        # Broadcast the new size to ``resized`` listeners.
        self.resized.emit(event.size())
        super().resizeEvent(event)
def make_dialog(self):
"""Removes settings not to be exposed to user and creates dialog pages."""
settings = get_settings()
# Because there are multiple pages, need to keep a dictionary of values dicts.
# One set of keywords are for each page, then in each entry for a page, there are dicts
# of setting and its value.
self._values_orig_dict = {}
self._values_dict = {}
self._setting_changed_dict = {}
for page, setting in settings.schemas().items():
schema, values, properties = self.get_page_dict(setting)
self._setting_changed_dict[page] = {}
self._values_orig_dict[page] = values
self._values_dict[page] = values
# Only add pages if there are any properties to add.
if properties:
self.add_page(schema, values)
    def get_page_dict(self, setting):
        """Return the json schema, current values and properties for one page.

        Parameters
        ----------
        setting : dict
            Settings entry for a page; must provide a ``'json_schema'``
            string and a ``'model'`` object exposing ``dict()``.

        Returns
        -------
        schema : dict
            Json schema of the setting page, with ``allOf`` references
            resolved and GUI-excluded properties removed.
        values : dict
            Current value for each kept parameter.
        properties : dict
            The (possibly reduced) ``properties`` mapping; also re-attached
            as ``schema['properties']``.
        """
        schema = json.loads(setting['json_schema'])
        # Resolve allOf references: replace each "allOf" list of $ref pointers
        # with the referenced definition's enum, forcing the field type to
        # "string" so the form builder renders it as a dropdown.
        definitions = schema.get("definitions", {})
        if definitions:
            for key, data in schema["properties"].items():
                if "allOf" in data:
                    allof = data["allOf"]
                    # NOTE(review): assumes every allOf entry carries a
                    # "$ref"; an inline sub-schema here would raise KeyError.
                    allof = [d["$ref"].rsplit("/")[-1] for d in allof]
                    for definition in allof:
                        local_def = definitions[definition]
                        schema["properties"][key]["enum"] = local_def["enum"]
                        schema["properties"][key]["type"] = "string"
        # Remove properties that must not be displayed on the GUI, as listed
        # in the model's NapariConfig.preferences_exclude (when present).
        properties = schema.pop('properties')
        model = setting['model']
        values = model.dict()
        napari_config = getattr(model, "NapariConfig", None)
        if napari_config is not None:
            for val in napari_config.preferences_exclude:
                properties.pop(val)
                values.pop(val)
        schema['properties'] = properties
        return schema, values, properties
    def restore_defaults(self):
        """Ask for confirmation before restoring default settings.

        The actual reset happens in :meth:`_reset_widgets`, which is
        connected to the confirmation dialog's ``valueChanged`` signal.
        """
        self._reset_dialog = ConfirmDialog(
            parent=self,
            text=trans._("Are you sure you want to restore default settings?"),
        )
        self._reset_dialog.valueChanged.connect(self._reset_widgets)
        # Blocks until the user answers the confirmation dialog.
        self._reset_dialog.exec_()
def _reset_widgets(self, event=None):
"""Deletes the widgets and rebuilds with defaults.
Parameter
---------
event: bool
Indicates whether to restore the defaults. When a user clicks "Restore", the signal
event emitted will be True. If "Cancel" is selected, event will be False and nothing
is done.
"""
if event is True:
get_settings().reset()
self.accept()
self.valueChanged.emit()
self._list.clear()
for n in range(self._stack.count()):
widget = self._stack.removeWidget( # noqa: F841
self._stack.currentWidget()
)
del widget
self.make_dialog()
self._list.setCurrentRow(0)
self.show()
    def on_click_ok(self):
        """Accept the dialog, keeping every change made in this session."""
        self.updatedValues.emit()
        self.accept()
    def on_click_cancel(self):
        """Revert all pages to the values captured when the dialog opened."""
        # Need to check differences for each page.
        settings = get_settings()
        for n in range(self._stack.count()):
            # check_differences() derives the page name from the currently
            # selected list row, so the row must be set before each call.
            self._list.setCurrentRow(n)
            page = self._list.currentItem().text().split(" ")[0].lower()
            # Re-read the *current* values from the settings models and pass
            # the opening snapshot as the "new" values: writing the snapshot
            # back through check_differences() reverts any changes. Reading
            # from settings is what keeps the plugin call-order checkboxes
            # consistent.
            setting = settings.schemas()[page]
            schema, new_values, properties = self.get_page_dict(setting)
            self.check_differences(self._values_orig_dict[page], new_values)
        # Ask the main window to reset plugin_manager to defaults and restore
        # keybindings in action_manager.
        self.valueChanged.emit()
        self._list.setCurrentRow(0)
        self.close()
    def add_page(self, schema, values):
        """Build one settings page and register it in the sidebar and stack.

        Parameters
        ----------
        schema : dict
            Json schema for the page; ``schema["title"]`` becomes the
            sidebar list entry.
        values : dict
            Current values used to populate the page's widgets.
        """
        widget = self.build_page_dialog(schema, values)
        self._list.addItem(schema["title"])
        self._stack.addWidget(widget)
    def build_page_dialog(self, schema, values):
        """Build the form widget for one page from its json schema.

        Parameters
        ----------
        schema : dict
            Json schema including all information to build the page.
        values : dict
            Current values set in preferences.

        Returns
        -------
        form
            The form object produced by the json-schema WidgetBuilder.
        """
        settings = get_settings()
        builder = WidgetBuilder()
        form = builder.create_form(schema, self.ui_schema)
        # Disable widgets whose values were loaded from environment
        # variables: those must not be editable from the GUI.
        section = schema["section"]
        form_layout = form.widget.layout()
        for row in range(form.widget.layout().rowCount()):
            widget = form_layout.itemAt(row, form_layout.FieldRole).widget()
            # NOTE(review): relies on the private ``_name`` attribute set by
            # the vendored qt_json_builder widgets.
            name = widget._name
            disable = bool(
                settings._env_settings.get(section, {}).get(name, None)
            )
            widget.setDisabled(disable)
            try:
                # Dim disabled widgets to 30% opacity as a visual cue.
                widget.opacity.setOpacity(0.3 if disable else 1)
            except AttributeError:
                # some widgets may not have opacity (such as the QtPluginSorter)
                pass
        # Populate the form with the current setting values.
        form.widget.state = values
        if section == 'experimental':
            # The async checkbox cannot be toggled while octree is enabled.
            if values['octree'] is True:
                form = self._disable_async(form, values)
        # Re-check for changed settings whenever the user edits the form.
        form.widget.on_changed.connect(
            lambda d: self.check_differences(
                d,
                self._values_dict[schema["title"].lower()],
            )
        )
        return form
    def _disable_async(self, form, values, disable=True, state=True):
        """Grey out and (un)disable the ``async_`` checkbox on *form*.

        Parameters
        ----------
        form
            Form object returned by the schema WidgetBuilder; mutated in
            place and returned.
        values : dict
            Page values; only used to locate the row index of ``'async_'``.
        disable : bool, optional
            Whether to disable (True) or re-enable (False) the widget.
            Forced to True when ``async_`` comes from an environment
            variable, which must not be overridden from the GUI.
        state : bool, optional
            Unused; kept for signature compatibility.

        Returns
        -------
        form
            The same form object.
        """
        settings = get_settings()
        # If async_ is set via an environment variable, make sure we don't
        # re-enable the widget here.
        if (
            settings._env_settings['experimental'].get('async_', None)
            is not None
        ):
            disable = True
        # The form's rows follow the key ordering of ``values``.
        idx = list(values.keys()).index('async_')
        form_layout = form.widget.layout()
        widget = form_layout.itemAt(idx, form_layout.FieldRole).widget()
        widget.opacity.setOpacity(0.3 if disable else 1)
        widget.setDisabled(disable)
        return form
def _values_changed(self, page, new_dict, old_dict):
"""Loops through each setting in a page to determine if it changed.
Parameters
----------
new_dict : dict
Dict that has the most recent changes by user. Each key is a setting value
and each item is the value.
old_dict : dict
Dict wtih values set at the begining of preferences dialog session.
"""
for setting_name, value in new_dict.items():
if value != old_dict[setting_name]:
self._setting_changed_dict[page][setting_name] = value
elif (
value == old_dict[setting_name]
and setting_name in self._setting_changed_dict[page]
):
self._setting_changed_dict[page].pop(setting_name)
    def set_current_index(self, index: int):
        """
        Set the current page on the preferences by index.

        Parameters
        ----------
        index : int
            Index of page to set as current one.
        """
        self._list.setCurrentRow(index)
    def check_differences(self, new_dict, old_dict):
        """Write changed settings from the dialog into the settings manager.

        The page acted on is derived from the currently selected list row.

        Parameters
        ----------
        new_dict : dict
            Most recent values from the dialog, keyed by setting name.
        old_dict : dict
            Values set at the beginning of the preferences dialog session.
        """
        settings = get_settings()
        # Page name is the first word of the sidebar entry, lower-cased.
        page = self._list.currentItem().text().split(" ")[0].lower()
        self._values_changed(page, new_dict, old_dict)
        different_values = self._setting_changed_dict[page]
        if len(different_values) > 0:
            # change the values in settings
            for setting_name, value in different_values.items():
                try:
                    setattr(settings._settings[page], setting_name, value)
                    self._values_dict[page] = new_dict
                    if page == 'experimental':
                        if setting_name == 'octree':
                            # disable/enable async checkbox
                            widget = self._stack.currentWidget()
                            cstate = True if value is True else False
                            self._disable_async(
                                widget, new_dict, disable=cstate
                            )
                            # need to inform user that napari restart needed.
                            self._restart_dialog()
                        elif setting_name == 'async_':
                            # need to inform user that napari restart needed.
                            self._restart_dialog()
                except: # noqa: E722
                    # NOTE(review): bare except deliberately swallows any
                    # failure (presumably settings-model validation — confirm)
                    # so the remaining changed settings still get applied.
                    continue
| 34.513382 | 97 | 0.585407 | import json
from qtpy.QtCore import QSize, Signal
from qtpy.QtWidgets import (
QDialog,
QHBoxLayout,
QListWidget,
QPushButton,
QStackedWidget,
QVBoxLayout,
)
from ..._vendor.qt_json_builder.qt_jsonschema_form import WidgetBuilder
from ...utils.settings import get_settings
from ...utils.translations import trans
from .qt_message_dialogs import ConfirmDialog, ResetNapariInfoDialog
class PreferencesDialog(QDialog):
valueChanged = Signal()
updatedValues = Signal()
ui_schema = {
"call_order": {"ui:widget": "plugins"},
"highlight_thickness": {"ui:widget": "highlight"},
"shortcuts": {"ui:widget": "shortcuts"},
}
resized = Signal(QSize)
closed = Signal()
def __init__(self, parent=None):
super().__init__(parent)
self._list = QListWidget(self)
self._stack = QStackedWidget(self)
self._list.setObjectName("Preferences")
self._button_cancel = QPushButton(trans._("Cancel"))
self._button_ok = QPushButton(trans._("OK"))
self._default_restore = QPushButton(trans._("Restore defaults"))
self.setWindowTitle(trans._("Preferences"))
self._button_ok.setDefault(True)
left_layout = QVBoxLayout()
left_layout.addWidget(self._list)
left_layout.addStretch()
left_layout.addWidget(self._default_restore)
left_layout.addWidget(self._button_cancel)
left_layout.addWidget(self._button_ok)
main_layout = QHBoxLayout()
main_layout.addLayout(left_layout, 1)
main_layout.addWidget(self._stack, 3)
self.setLayout(main_layout)
self._list.currentRowChanged.connect(
lambda index: self._stack.setCurrentIndex(index)
)
self._button_cancel.clicked.connect(self.on_click_cancel)
self._button_ok.clicked.connect(self.on_click_ok)
self._default_restore.clicked.connect(self.restore_defaults)
self.rejected.connect(self.on_click_cancel)
self.make_dialog()
self._list.setCurrentRow(0)
def _restart_dialog(self, event=None, extra_str=""):
text_str = trans._(
"napari requires a restart for image rendering changes to apply."
)
widget = ResetNapariInfoDialog(
parent=self,
text=text_str,
)
widget.exec_()
def accept(self):
self.closed.emit()
super().accept()
def closeEvent(self, event):
self.closed.emit()
super().closeEvent(event)
def reject(self):
super().reject()
self.close()
def resizeEvent(self, event):
self.resized.emit(event.size())
super().resizeEvent(event)
def make_dialog(self):
settings = get_settings()
self._values_orig_dict = {}
self._values_dict = {}
self._setting_changed_dict = {}
for page, setting in settings.schemas().items():
schema, values, properties = self.get_page_dict(setting)
self._setting_changed_dict[page] = {}
self._values_orig_dict[page] = values
self._values_dict[page] = values
if properties:
self.add_page(schema, values)
def get_page_dict(self, setting):
schema = json.loads(setting['json_schema'])
definitions = schema.get("definitions", {})
if definitions:
for key, data in schema["properties"].items():
if "allOf" in data:
allof = data["allOf"]
allof = [d["$ref"].rsplit("/")[-1] for d in allof]
for definition in allof:
local_def = definitions[definition]
schema["properties"][key]["enum"] = local_def["enum"]
schema["properties"][key]["type"] = "string"
properties = schema.pop('properties')
model = setting['model']
values = model.dict()
napari_config = getattr(model, "NapariConfig", None)
if napari_config is not None:
for val in napari_config.preferences_exclude:
properties.pop(val)
values.pop(val)
schema['properties'] = properties
return schema, values, properties
def restore_defaults(self):
self._reset_dialog = ConfirmDialog(
parent=self,
text=trans._("Are you sure you want to restore default settings?"),
)
self._reset_dialog.valueChanged.connect(self._reset_widgets)
self._reset_dialog.exec_()
def _reset_widgets(self, event=None):
if event is True:
get_settings().reset()
self.accept()
self.valueChanged.emit()
self._list.clear()
for n in range(self._stack.count()):
widget = self._stack.removeWidget(
self._stack.currentWidget()
)
del widget
self.make_dialog()
self._list.setCurrentRow(0)
self.show()
def on_click_ok(self):
self.updatedValues.emit()
self.accept()
def on_click_cancel(self):
settings = get_settings()
for n in range(self._stack.count()):
self._list.setCurrentRow(n)
page = self._list.currentItem().text().split(" ")[0].lower()
setting = settings.schemas()[page]
schema, new_values, properties = self.get_page_dict(setting)
self.check_differences(self._values_orig_dict[page], new_values)
self.valueChanged.emit()
self._list.setCurrentRow(0)
self.close()
def add_page(self, schema, values):
widget = self.build_page_dialog(schema, values)
self._list.addItem(schema["title"])
self._stack.addWidget(widget)
def build_page_dialog(self, schema, values):
settings = get_settings()
builder = WidgetBuilder()
form = builder.create_form(schema, self.ui_schema)
section = schema["section"]
form_layout = form.widget.layout()
for row in range(form.widget.layout().rowCount()):
widget = form_layout.itemAt(row, form_layout.FieldRole).widget()
name = widget._name
disable = bool(
settings._env_settings.get(section, {}).get(name, None)
)
widget.setDisabled(disable)
try:
widget.opacity.setOpacity(0.3 if disable else 1)
except AttributeError:
pass
form.widget.state = values
if section == 'experimental':
if values['octree'] is True:
form = self._disable_async(form, values)
form.widget.on_changed.connect(
lambda d: self.check_differences(
d,
self._values_dict[schema["title"].lower()],
)
)
return form
def _disable_async(self, form, values, disable=True, state=True):
settings = get_settings()
# enable it here.
if (
settings._env_settings['experimental'].get('async_', None)
is not None
):
disable = True
idx = list(values.keys()).index('async_')
form_layout = form.widget.layout()
widget = form_layout.itemAt(idx, form_layout.FieldRole).widget()
widget.opacity.setOpacity(0.3 if disable else 1)
widget.setDisabled(disable)
return form
def _values_changed(self, page, new_dict, old_dict):
for setting_name, value in new_dict.items():
if value != old_dict[setting_name]:
self._setting_changed_dict[page][setting_name] = value
elif (
value == old_dict[setting_name]
and setting_name in self._setting_changed_dict[page]
):
self._setting_changed_dict[page].pop(setting_name)
def set_current_index(self, index: int):
self._list.setCurrentRow(index)
def check_differences(self, new_dict, old_dict):
settings = get_settings()
page = self._list.currentItem().text().split(" ")[0].lower()
self._values_changed(page, new_dict, old_dict)
different_values = self._setting_changed_dict[page]
if len(different_values) > 0:
# change the values in settings
for setting_name, value in different_values.items():
try:
setattr(settings._settings[page], setting_name, value)
self._values_dict[page] = new_dict
if page == 'experimental':
if setting_name == 'octree':
# disable/enable async checkbox
widget = self._stack.currentWidget()
cstate = True if value is True else False
self._disable_async(
widget, new_dict, disable=cstate
)
# need to inform user that napari restart needed.
self._restart_dialog()
elif setting_name == 'async_':
# need to inform user that napari restart needed.
self._restart_dialog()
except: # noqa: E722
continue
| true | true |
1c3f43546d2c6e7b55e1780c060faf8bc8c3afc1 | 636 | py | Python | backend/manage.py | crowdbotics-apps/muddy-term-29546 | 6e530c79087dbd3657982886fc0fe77de21f4adc | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/manage.py | crowdbotics-apps/muddy-term-29546 | 6e530c79087dbd3657982886fc0fe77de21f4adc | [
"FTL",
"AML",
"RSA-MD"
] | 42 | 2021-08-06T02:56:25.000Z | 2021-12-26T17:40:42.000Z | backend/manage.py | crowdbotics-apps/muddy-term-29546 | 6e530c79087dbd3657982886fc0fe77de21f4adc | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Entry point for Django's command-line administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'muddy_term_29546.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as import_error:
        # Chain the original failure so the real cause stays visible.
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from import_error
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| 28.909091 | 80 | 0.687107 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'muddy_term_29546.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| true | true |
1c3f440a7420883459bb7d4819ae2fbbb608a658 | 425 | py | Python | src/girard/series_convergence.py | gnsantos/solidus | ea4ffcf391ee0e9cf775b984a1aa6776c55ae67e | [
"Apache-2.0"
] | null | null | null | src/girard/series_convergence.py | gnsantos/solidus | ea4ffcf391ee0e9cf775b984a1aa6776c55ae67e | [
"Apache-2.0"
] | null | null | null | src/girard/series_convergence.py | gnsantos/solidus | ea4ffcf391ee0e9cf775b984a1aa6776c55ae67e | [
"Apache-2.0"
] | null | null | null | import numpy as np
def convergence_matrix(spanning_matrix):
    """Return the convergence matrix for *spanning_matrix*.

    The Gram ("grammian") matrix ``G = A.T @ A`` is computed with a true
    matrix product; the previous element-wise ``*`` only produced the
    intended Gram matrix for ``np.matrix`` inputs, not plain ndarrays.
    The result is ``-G`` with its diagonal forced to 1.
    """
    grammian_matrix = spanning_matrix.T @ spanning_matrix
    cm = (-1) * grammian_matrix
    np.fill_diagonal(cm, 1)
    return cm
def check_convergence(spanning_matrix):
    """Return True when the convergence matrix is positive definite
    (all eigenvalues strictly positive).
    """
    matrix_for_convergence = convergence_matrix(spanning_matrix)
    # Bug fix: eigvals was previously called on the *function*
    # ``convergence_matrix`` instead of the computed matrix, which
    # raised at runtime.
    convergence_matrix_eigenvalues = np.linalg.eigvals(matrix_for_convergence)
    return min(convergence_matrix_eigenvalues) > 0
| 32.692308 | 74 | 0.785882 | import numpy as np
def convergence_matrix(spanning_matrix):
grammian_matrix = spanning_matrix.T * spanning_matrix
cm = (-1) * grammian_matrix
np.fill_diagonal(cm, 1)
return cm
def check_convergence(spanning_matrix):
matrix_for_convergence = convergence_matrix(spanning_matrix)
convergence_matrix_eigenvalues = np.linalg.eigvals(convergence_matrix)
return min(convergence_matrix_eigenvalues) > 0
| true | true |
1c3f443cdb3a48c534a1d5fa069ce25e9b56958e | 2,812 | py | Python | OPTOSTools/Visualization_CNN/Print_Features.py | Vengadore/Segmentation_OPTOS | d15b6480a567c987b10f7bf680672356e68b7e5b | [
"Apache-2.0"
] | 1 | 2020-10-31T21:01:26.000Z | 2020-10-31T21:01:26.000Z | OPTOSTools/Visualization_CNN/Print_Features.py | Vengadore/Segmentation_OPTOS | d15b6480a567c987b10f7bf680672356e68b7e5b | [
"Apache-2.0"
] | null | null | null | OPTOSTools/Visualization_CNN/Print_Features.py | Vengadore/Segmentation_OPTOS | d15b6480a567c987b10f7bf680672356e68b7e5b | [
"Apache-2.0"
] | null | null | null | import cv2
from tensorflow.keras.models import Model
class Model_CNN:
    """Wrap a Keras CNN to expose its convolutional feature maps.

    Scans ``model.layers`` for layers whose name contains ``"conv"`` and
    re-wires the model so its outputs are those layers' outputs; a single
    ``predict`` call then returns every convolutional feature map.

    Attributes
    ----------
    model : the re-wired multi-output keras Model.
    conv_index : indices of layers whose name contains "conv".
    conv_shapes : (index, layer name, output shape) per conv layer.
    conv_weights : (index, layer name, weights) per conv layer.
    """
    def __init__(self,model):
        # Keep the original model; replaced by the re-wired one below.
        self.model = model
        # Indices of layers detected as convolutional by name.
        self.conv_index = [ind for (ind,layer) in enumerate(model.layers) if "conv" in layer.name]
        # (index, layer name, output shape) for each conv layer.
        self.conv_shapes = [(ind,model.layers[ind].name,model.layers[ind].output.shape) for ind in self.conv_index]
        outputs = [self.model.layers[i].output for i in self.conv_index]
        # Multi-output model: one output per convolutional layer.
        self.model = Model(inputs=self.model.inputs, outputs = outputs)
        # Kernel weights come from the original ``model`` argument, which
        # shares its layers with the re-wired model.
        self.conv_weights = [(ind,model.layers[ind].name,model.layers[ind].get_weights()) for ind in self.conv_index]
        #self.model.summary()
        # NOTE(review): prints layer 0's *output* shape labelled as the
        # input shape — usually identical for an Input layer; confirm.
        print(f"Input shape of visualization model {model.layers[0].output.shape}")
    def feature_map(self,image):
        """Return the feature maps for *image*, one array per conv layer.

        ``image`` is assumed to be batched/preprocessed to the model's
        input shape — TODO confirm against callers.
        """
        return self.model.predict(image)
class ImageT:
    """Color-space / size transformations for images.

    Parameters
    ----------
    Reescale : bool
        When True, pixel values are min-max normalized into [0, 1].
        (The original doc said "dividing by 255", but the code actually
        uses ``cv2.normalize`` with NORM_MINMAX — per-image scaling.)
    Resize : bool or tuple
        When a (width, height) tuple, images are resized to it; the
        ``False`` default disables resizing.
    """
    def __init__(self,Reescale = False, Resize = False):
        self.R = Reescale
        self.size = Resize
        # NOTE(review): the bare string below is a no-op expression
        # statement left over from editing; kept to preserve the code.
        ""
    def BGR2RGB(self,image):
        """Swap the B and R channels of *image* (cv2 color-conversion
        code 4 — presumably cv2.COLOR_BGR2RGB, confirm), then optionally
        normalize to [0, 1] and resize.

        :param image: image array in OpenCV (BGR) channel order.
        :return: transformed image.
        """
        image = cv2.cvtColor(image, 4)
        # Min-max normalize to float32 in [0, 1] when Reescale was requested.
        if self.R:
            image = cv2.normalize(image, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
        # Resize only when ``size`` is a tuple (the False default is skipped).
        if type((1,1)) == type(self.size):
            image = cv2.resize(image,self.size)
        return image
def RGB2BGR(self,image):
"""
:param image:
:return:
"""
image = cv2.cvtColor(image, 4)
# If reescale parameter is true the image values are divided by 255 to fit values between 0 and 1
if self.R:
image = cv2.normalize(image, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
# If Resize is a tuple then the image is resized
if type((1,1)) == type(self.size):
image = cv2.resize(image, self.size)
return image | 34.716049 | 137 | 0.625178 | import cv2
from tensorflow.keras.models import Model
class Model_CNN:
def __init__(self,model):
self.model = model
self.conv_index = [ind for (ind,layer) in enumerate(model.layers) if "conv" in layer.name]
self.conv_shapes = [(ind,model.layers[ind].name,model.layers[ind].output.shape) for ind in self.conv_index]
outputs = [self.model.layers[i].output for i in self.conv_index]
self.model = Model(inputs=self.model.inputs, outputs = outputs)
self.conv_weights = [(ind,model.layers[ind].name,model.layers[ind].get_weights()) for ind in self.conv_index]
print(f"Input shape of visualization model {model.layers[0].output.shape}")
def feature_map(self,image):
return self.model.predict(image)
class ImageT:
def __init__(self,Reescale = False, Resize = False):
self.R = Reescale
self.size = Resize
def BGR2RGB(self,image):
image = cv2.cvtColor(image, 4)
if self.R:
image = cv2.normalize(image, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
if type((1,1)) == type(self.size):
image = cv2.resize(image,self.size)
return image
def RGB2BGR(self,image):
image = cv2.cvtColor(image, 4)
if self.R:
image = cv2.normalize(image, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
if type((1,1)) == type(self.size):
image = cv2.resize(image, self.size)
return image | true | true |
1c3f444859a21cabd7337ae0ebbff4509045aa69 | 505 | py | Python | servio-backend-app/servio/user/migrations/0007_alter_user_service.py | emreerkaslan/SWE573 | 086f44bfbf6feb9629148de820d76aef1088c909 | [
"MIT"
] | null | null | null | servio-backend-app/servio/user/migrations/0007_alter_user_service.py | emreerkaslan/SWE573 | 086f44bfbf6feb9629148de820d76aef1088c909 | [
"MIT"
] | 19 | 2021-10-21T12:43:36.000Z | 2021-12-05T14:21:55.000Z | servio-backend-app/servio/user/migrations/0007_alter_user_service.py | emreerkaslan/Servio | 086f44bfbf6feb9629148de820d76aef1088c909 | [
"MIT"
] | null | null | null | # Generated by Django 4.0 on 2022-01-02 13:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``User.service``: optional many-to-many to ``service.Service``."""
    # Must apply after the latest migrations of the apps it touches.
    dependencies = [
        ('service', '0004_alter_service_requests'),
        ('user', '0006_alter_user_feedbacks_alter_user_following'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='service',
            # blank=True makes the relation optional in forms/admin; the
            # reverse accessor on Service is named ``services``.
            field=models.ManyToManyField(blank=True, related_name='services', to='service.Service'),
        ),
    ]
| 25.25 | 100 | 0.637624 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('service', '0004_alter_service_requests'),
('user', '0006_alter_user_feedbacks_alter_user_following'),
]
operations = [
migrations.AlterField(
model_name='user',
name='service',
field=models.ManyToManyField(blank=True, related_name='services', to='service.Service'),
),
]
| true | true |
1c3f45dbc1e43153985a7940de0973749caed8f1 | 8,176 | py | Python | examples/basic-tour.py | se-hwan/dynamicCostMPC | f461fe1f9c23783db53dbfe362a26fb33c20a695 | [
"MIT"
] | null | null | null | examples/basic-tour.py | se-hwan/dynamicCostMPC | f461fe1f9c23783db53dbfe362a26fb33c20a695 | [
"MIT"
] | null | null | null | examples/basic-tour.py | se-hwan/dynamicCostMPC | f461fe1f9c23783db53dbfe362a26fb33c20a695 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Basic tour of the Bayesian Optimization package
#
# This is a constrained global optimization package built upon bayesian inference and gaussian process, that attempts to find the maximum value of an unknown function in as few iterations as possible. This technique is particularly suited for optimization of high cost functions, situations where the balance between exploration and exploitation is important.
#
# Bayesian optimization works by constructing a posterior distribution of functions (gaussian process) that best describes the function you want to optimize. As the number of observations grows, the posterior distribution improves, and the algorithm becomes more certain of which regions in parameter space are worth exploring and which are not, as seen in the picture below.
#
# As you iterate over and over, the algorithm balances its needs of exploration and exploitation taking into account what it knows about the target function. At each step a Gaussian Process is fitted to the known samples (points previously explored), and the posterior distribution, combined with a exploration strategy (such as UCB (Upper Confidence Bound), or EI (Expected Improvement)), are used to determine the next point that should be explored (see the gif below).
#
# This process is designed to minimize the number of steps required to find a combination of parameters that are close to the optimal combination. To do so, this method uses a proxy optimization problem (finding the maximum of the acquisition function) that, albeit still a hard problem, is cheaper (in the computational sense) and common tools can be employed. Therefore Bayesian Optimization is most adequate for situations where sampling the function to be optimized is a very expensive endeavor. See the references for a proper discussion of this method.
# ## 1. Specifying the function to be optimized
#
# This is a function optimization package, therefore the first and most important ingredient is, of course, the function to be optimized.
#
# **DISCLAIMER:** We know exactly how the output of the function below depends on its parameter. Obviously this is just an example, and you shouldn't expect to know it in a real scenario. However, it should be clear that you don't need to. All you need in order to use this package (and more generally, this technique) is a function `f` that takes a known set of parameters and outputs a real number.
# In[1]:
def black_box_function(x, y):
    """Toy objective with a known maximum of 1 at (x, y) = (0, 1).

    Stands in for an expensive black-box function: for the purposes of
    the tutorial, treat the process that generates its output values
    as unknown.
    """
    return 1 - x ** 2 - (y - 1) ** 2
# ## 2. Getting Started
#
# All we need to get started is to instantiate a `BayesianOptimization` object specifying a function to be optimized `f`, and its parameters with their corresponding bounds, `pbounds`. This is a constrained optimization technique, so you must specify the minimum and maximum values that can be probed for each parameter in order for it to work
# In[2]:
from bayes_opt import BayesianOptimization
# In[3]:
# Bounded region of parameter space
pbounds = {'x': (2, 4), 'y': (-3, 3)}
# In[4]:
optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds=pbounds,
    verbose=2, # verbose = 1 prints only when a maximum is observed, verbose = 0 is silent
    random_state=1,
)
# The BayesianOptimization object will work out of the box without much tuning needed. The main method you should be aware of is `maximize`, which does exactly what you think it does.
#
# There are many parameters you can pass to maximize, nonetheless, the most important ones are:
# - `n_iter`: How many steps of bayesian optimization you want to perform. The more steps the more likely to find a good maximum you are.
# - `init_points`: How many steps of **random** exploration you want to perform. Random exploration can help by diversifying the exploration space.
# In[5]:
optimizer.maximize(
    init_points=2,
    n_iter=3,
)
# The best combination of parameters and target value found can be accessed via the property `bo.max`.
# In[6]:
print(optimizer.max)
# While the list of all parameters probed and their corresponding target values is available via the property `bo.res`.
# In[7]:
for i, res in enumerate(optimizer.res):
    print("Iteration {}: \n\t{}".format(i, res))
# ### 2.1 Changing bounds
#
# During the optimization process you may realize the bounds chosen for some parameters are not adequate. For these situations you can invoke the method `set_bounds` to alter them. You can pass any combination of **existing** parameters and their associated new bounds.
# In[8]:
optimizer.set_bounds(new_bounds={"x": (-2, 3)})
# In[9]:
optimizer.maximize(
    init_points=0,
    n_iter=5,
)
# ## 3. Guiding the optimization
#
# It is often the case that we have an idea of regions of the parameter space where the maximum of our function might lie. For these situations the `BayesianOptimization` object allows the user to specify specific points to be probed. By default these will be explored lazily (`lazy=True`), meaning these points will be evaluated only the next time you call `maximize`. This probing process happens before the gaussian process takes over.
#
# Parameters can be passed as dictionaries such as below:
# In[10]:
optimizer.probe(
    params={"x": 0.5, "y": 0.7},
    lazy=True,
)
# Or as an iterable. Beware that the order has to be alphabetical. You can use `optimizer.space.keys` for guidance
# In[11]:
print(optimizer.space.keys)
# In[12]:
optimizer.probe(
    params=[-0.3, 0.1],
    lazy=True,
)
# In[13]:
optimizer.maximize(init_points=0, n_iter=0)
# ## 4. Saving, loading and restarting
#
# By default you can follow the progress of your optimization by setting `verbose>0` when instantiating the `BayesianOptimization` object. If you need more control over logging/alerting you will need to use an observer. For more information about observers checkout the advanced tour notebook. Here we will only see how to use the native `JSONLogger` object to save to and load progress from files.
#
# ### 4.1 Saving progress
# In[14]:
from bayes_opt.logger import JSONLogger
from bayes_opt.event import Events
# The observer paradigm works by:
# 1. Instantiating an observer object.
# 2. Tying the observer object to a particular event fired by an optimizer.
#
# The `BayesianOptimization` object fires a number of internal events during optimization, in particular, every time it probes the function and obtains a new parameter-target combination it will fire an `Events.OPTIMIZATION_STEP` event, which our logger will listen to.
#
# **Caveat:** The logger will not look back at previously probed points.
# In[15]:
logger = JSONLogger(path="./logs.json")
optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
# In[16]:
optimizer.maximize(
    init_points=2,
    n_iter=3,
)
# ### 4.2 Loading progress
#
# Naturally, if you stored progress you will be able to load that onto a new instance of `BayesianOptimization`. The easiest way to do it is by invoking the `load_logs` function, from the `util` submodule.
# In[17]:
from bayes_opt.util import load_logs
# In[18]:
new_optimizer = BayesianOptimization(
    f=black_box_function,
    pbounds={"x": (-2, 2), "y": (-2, 2)},
    verbose=2,
    random_state=7,
)
print(len(new_optimizer.space))
# In[19]:
load_logs(new_optimizer, logs=["./logs.json"]);
# In[20]:
print("New optimizer is now aware of {} points.".format(len(new_optimizer.space)))
# In[21]:
new_optimizer.maximize(
    init_points=0,
    n_iter=10,
)
# ## Next Steps
#
# This tour should be enough to cover most usage scenarios of this package. If, however, you feel like you need to know more, please checkout the `advanced-tour` notebook. There you will be able to find other, more advanced features of this package that could be what you're looking for. Also, browse the examples folder for implementation tips and ideas.
| 35.090129 | 558 | 0.74841 |
3)}
optimizer = BayesianOptimization(
f=black_box_function,
pbounds=pbounds,
verbose=2,
random_state=1,
)
optimizer.maximize(
init_points=2,
n_iter=3,
)
print(optimizer.max)
for i, res in enumerate(optimizer.res):
print("Iteration {}: \n\t{}".format(i, res))
zer.maximize(
init_points=0,
n_iter=5,
)
lazy=True,
)
print(optimizer.space.keys)
optimizer.probe(
params=[-0.3, 0.1],
lazy=True,
)
optimizer.maximize(init_points=0, n_iter=0)
optimizer.subscribe(Events.OPTIMIZATION_STEP, logger)
optimizer.maximize(
init_points=2,
n_iter=3,
)
sianOptimization(
f=black_box_function,
pbounds={"x": (-2, 2), "y": (-2, 2)},
verbose=2,
random_state=7,
)
print(len(new_optimizer.space))
load_logs(new_optimizer, logs=["./logs.json"]);
print("New optimizer is now aware of {} points.".format(len(new_optimizer.space)))
new_optimizer.maximize(
init_points=0,
n_iter=10,
)
| true | true |
1c3f472f9aa622d94beff234a175e42926e0ed64 | 2,813 | py | Python | testing/__init__.py | weltonrodrigo/dumb-init | a0e0776bec98e9a332385b5c320f978b67db193e | [
"MIT"
] | null | null | null | testing/__init__.py | weltonrodrigo/dumb-init | a0e0776bec98e9a332385b5c320f978b67db193e | [
"MIT"
] | null | null | null | testing/__init__.py | weltonrodrigo/dumb-init | a0e0776bec98e9a332385b5c320f978b67db193e | [
"MIT"
] | null | null | null | import errno
import os
import re
import signal
import sys
import time
from contextlib import contextmanager
from subprocess import PIPE
from subprocess import Popen
from py._path.local import LocalPath
# these signals cause dumb-init to suspend itself
SUSPEND_SIGNALS = frozenset([
    signal.SIGTSTP,
    signal.SIGTTOU,
    signal.SIGTTIN,
])
# Every catchable signal except SIGCHLD and the suspend signals above.
# SIGKILL and SIGSTOP cannot be caught or forwarded at all, so they are
# excluded as well.
NORMAL_SIGNALS = frozenset(
    set(range(1, 32)) -
    {signal.SIGKILL, signal.SIGSTOP, signal.SIGCHLD} -
    SUSPEND_SIGNALS
)
@contextmanager
def print_signals(args=()):
    """Launch ``testing.print_signals`` under dumb-init.

    Yields ``(proc, pid)`` where ``proc`` is the dumb-init Popen object
    and ``pid`` is the print_signals child's PID as an ASCII string.
    On exit, every process remaining in dumb-init's descendant tree is
    killed with SIGKILL.
    """
    command = ['dumb-init']
    command.extend(args)
    command.extend([sys.executable, '-m', 'testing.print_signals'])
    proc = Popen(tuple(command), stdout=PIPE)

    # The child announces itself on stdout once it is ready; parse its
    # PID out of that banner line.
    banner = proc.stdout.readline()
    match = re.match(b'^ready \\(pid: ([0-9]+)\\)\n$', banner)
    assert match, banner

    yield proc, match.group(1).decode('ascii')

    # Tear down the whole descendant tree, not just the direct child.
    for descendant in pid_tree(proc.pid):
        os.kill(descendant, signal.SIGKILL)
def child_pids(pid):
    """Return the set of direct child PIDs of *pid*.

    Scans every entry under /proc and parses the parent PID out of the
    process' ``stat`` file.  Entries that vanish (or are not process
    directories at all) are silently skipped.
    """
    children = set()
    for entry in LocalPath('/proc').listdir():
        try:
            stat_line = open(entry.join('stat').strpath).read()
            # stat format: "<pid> (<comm>) <state> <ppid> ..." — comm may
            # contain spaces, hence the non-greedy parenthesized group.
            match = re.match(r'^\d+ \(.+?\) [a-zA-Z] (\d+) ', stat_line)
            assert match, stat_line
            if int(match.group(1)) == pid:
                children.add(int(entry.basename))
        except IOError:
            # The process exited after we listed it, or between opening
            # stat and reading it.
            continue
    return children
def pid_tree(pid):
    """Return the set of all descendant PIDs of *pid*.

    Includes children, grandchildren, and so on, found by recursively
    expanding ``child_pids``.
    """
    descendants = set()
    for child in child_pids(pid):
        descendants.add(child)
        descendants.update(pid_tree(child))
    return descendants
def is_alive(pid):
    """Return whether a process with the given PID is currently running."""
    # A live PID always has a directory under /proc.
    proc_entry = LocalPath('/proc').join(str(pid))
    return proc_entry.isdir()
def process_state(pid):
    """Return a process' state name, such as "stopped" or "running".

    Parses the long-form state label out of /proc/<pid>/status, e.g.
    "T (stopped)" -> "stopped".
    """
    status_text = LocalPath('/proc').join(str(pid), 'status').read()
    state_match = re.search(
        r'^State:\s+[A-Z] \(([a-z]+)\)$', status_text, re.MULTILINE)
    return state_match.group(1)
def sleep_until(fn, timeout=1.5):
    """Poll *fn* until it stops raising, or time out.

    Retries every 10ms.  Once roughly *timeout* seconds of sleeping have
    accumulated, the last exception from *fn* propagates to the caller.
    """
    poll_interval = 0.01
    slept = 0
    while True:
        try:
            fn()
            return
        except Exception:
            # Give up (re-raise) only after the time budget is spent.
            if slept >= timeout:
                raise
        time.sleep(poll_interval)
        slept += poll_interval
def kill_if_alive(pid, signum=signal.SIGKILL):
    """Send *signum* to *pid*, ignoring "no such process" errors.

    Any other OSError (e.g. a permissions failure) still propagates.
    """
    try:
        os.kill(pid, signum)
    except OSError as err:
        # ESRCH means the process is already gone, which is fine.
        if err.errno == errno.ESRCH:
            return
        raise
| 25.116071 | 80 | 0.586562 | import errno
import os
import re
import signal
import sys
import time
from contextlib import contextmanager
from subprocess import PIPE
from subprocess import Popen
from py._path.local import LocalPath
SUSPEND_SIGNALS = frozenset([
signal.SIGTSTP,
signal.SIGTTOU,
signal.SIGTTIN,
])
NORMAL_SIGNALS = frozenset(
set(range(1, 32)) -
{signal.SIGKILL, signal.SIGSTOP, signal.SIGCHLD} -
SUSPEND_SIGNALS
)
@contextmanager
def print_signals(args=()):
proc = Popen(
(
('dumb-init',) +
tuple(args) +
(sys.executable, '-m', 'testing.print_signals')
),
stdout=PIPE,
)
line = proc.stdout.readline()
m = re.match(b'^ready \\(pid: ([0-9]+)\\)\n$', line)
assert m, line
yield proc, m.group(1).decode('ascii')
for pid in pid_tree(proc.pid):
os.kill(pid, signal.SIGKILL)
def child_pids(pid):
children = set()
for p in LocalPath('/proc').listdir():
try:
stat = open(p.join('stat').strpath).read()
m = re.match(r'^\d+ \(.+?\) [a-zA-Z] (\d+) ', stat)
assert m, stat
ppid = int(m.group(1))
if ppid == pid:
children.add(int(p.basename))
except IOError:
pass
return children
def pid_tree(pid):
children = child_pids(pid)
return {
pid
for child in children
for pid in pid_tree(child)
} | children
def is_alive(pid):
return LocalPath('/proc').join(str(pid)).isdir()
def process_state(pid):
status = LocalPath('/proc').join(str(pid), 'status').read()
m = re.search(r'^State:\s+[A-Z] \(([a-z]+)\)$', status, re.MULTILINE)
return m.group(1)
def sleep_until(fn, timeout=1.5):
interval = 0.01
so_far = 0
while True:
try:
fn()
except Exception:
if so_far >= timeout:
raise
else:
break
time.sleep(interval)
so_far += interval
def kill_if_alive(pid, signum=signal.SIGKILL):
try:
os.kill(pid, signum)
except OSError as ex:
if ex.errno != errno.ESRCH:
raise
| true | true |
1c3f475195fe3b3bf6ec44debfc1ea20d2c4a46b | 132 | py | Python | Operator and String/3.5.3.py | ferrerinicolas/python_samples | 107cead4fbee30b275a5e2be1257833129ce5e46 | [
"MIT"
] | null | null | null | Operator and String/3.5.3.py | ferrerinicolas/python_samples | 107cead4fbee30b275a5e2be1257833129ce5e46 | [
"MIT"
] | null | null | null | Operator and String/3.5.3.py | ferrerinicolas/python_samples | 107cead4fbee30b275a5e2be1257833129ce5e46 | [
"MIT"
] | null | null | null | print("hello " + ", world!")
# Demonstrate string concatenation and repetition (same output text).
print("".join(["a", "b", "c"]))
print("hi" + "hi" + "hi")
print("hi{0}".format(3))
print("My bike has " + str(6) + " gears.") | 22 | 42 | 0.492424 | print("hello " + ", world!")
print("a" + "b" + "c")
print("hi" * 3)
print("hi" + str(3))
print("My bike has " + str(6) + " gears.") | true | true |
1c3f47f1ed2669ba90d5a94b8c0f1e2af675c37d | 4,933 | py | Python | Message/InfoMessage_pb2.py | qikkDB/qikkdb-python-network-client | 3e5c6ed3e13957dbc16b5bf9fdefe92e5cf054d3 | [
"Apache-2.0"
] | 5 | 2020-06-30T11:55:26.000Z | 2021-04-24T00:05:35.000Z | Message/InfoMessage_pb2.py | qikkDB/qikkdb-python-network-client | 3e5c6ed3e13957dbc16b5bf9fdefe92e5cf054d3 | [
"Apache-2.0"
] | null | null | null | Message/InfoMessage_pb2.py | qikkDB/qikkdb-python-network-client | 3e5c6ed3e13957dbc16b5bf9fdefe92e5cf054d3 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: Message/InfoMessage.proto
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# File-level descriptor for Message/InfoMessage.proto.  serialized_pb is
# the compiled FileDescriptorProto emitted by protoc; do not edit by hand.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='Message/InfoMessage.proto',
  package='QikkDB.NetworkClient.Message',
  syntax='proto3',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x19Message/InfoMessage.proto\x12\x1cQikkDB.NetworkClient.Message\"\xec\x01\n\x0bInfoMessage\x12\x42\n\x04\x43ode\x18\x01 \x01(\x0e\x32\x34.QikkDB.NetworkClient.Message.InfoMessage.StatusCode\x12\x0f\n\x07Message\x18\x02 \x01(\t\"\x87\x01\n\nStatusCode\x12\x06\n\x02OK\x10\x00\x12\x08\n\x04WAIT\x10\x01\x12\x13\n\x0fGET_NEXT_RESULT\x10\x06\x12\x0f\n\x0bQUERY_ERROR\x10\x02\x12\x10\n\x0cIMPORT_ERROR\x10\x03\x12\x12\n\x0e\x43ONN_ESTABLISH\x10\x04\x12\x0c\n\x08\x43ONN_END\x10\x05\x12\r\n\tHEARTBEAT\x10\x07\x62\x06proto3'
)
# EnumDescriptor for InfoMessage.StatusCode.  Note that wire numbers are
# not in declaration order (GET_NEXT_RESULT=6 and HEARTBEAT=7 come after
# the error/connection codes in the .proto file).
_INFOMESSAGE_STATUSCODE = _descriptor.EnumDescriptor(
  name='StatusCode',
  full_name='QikkDB.NetworkClient.Message.InfoMessage.StatusCode',
  filename=None,
  file=DESCRIPTOR,
  create_key=_descriptor._internal_create_key,
  values=[
    _descriptor.EnumValueDescriptor(
      name='OK', index=0, number=0,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='WAIT', index=1, number=1,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='GET_NEXT_RESULT', index=2, number=6,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='QUERY_ERROR', index=3, number=2,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='IMPORT_ERROR', index=4, number=3,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CONN_ESTABLISH', index=5, number=4,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='CONN_END', index=6, number=5,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
    _descriptor.EnumValueDescriptor(
      name='HEARTBEAT', index=7, number=7,
      serialized_options=None,
      type=None,
      create_key=_descriptor._internal_create_key),
  ],
  containing_type=None,
  serialized_options=None,
  # Byte offsets of this enum within DESCRIPTOR.serialized_pb.
  serialized_start=161,
  serialized_end=296,
)
_sym_db.RegisterEnumDescriptor(_INFOMESSAGE_STATUSCODE)
# Descriptor for the InfoMessage message: field 1 is the StatusCode enum,
# field 2 is a human-readable string.
_INFOMESSAGE = _descriptor.Descriptor(
  name='InfoMessage',
  full_name='QikkDB.NetworkClient.Message.InfoMessage',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='Code', full_name='QikkDB.NetworkClient.Message.InfoMessage.Code', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
    _descriptor.FieldDescriptor(
      name='Message', full_name='QikkDB.NetworkClient.Message.InfoMessage.Message', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _INFOMESSAGE_STATUSCODE,
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=60,
  serialized_end=296,
)
# Wire the enum to its containing message and register everything with
# the default symbol database so the concrete class can be generated.
_INFOMESSAGE.fields_by_name['Code'].enum_type = _INFOMESSAGE_STATUSCODE
_INFOMESSAGE_STATUSCODE.containing_type = _INFOMESSAGE
DESCRIPTOR.message_types_by_name['InfoMessage'] = _INFOMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Concrete message class, built at import time from the descriptor above.
InfoMessage = _reflection.GeneratedProtocolMessageType('InfoMessage', (_message.Message,), {
  'DESCRIPTOR' : _INFOMESSAGE,
  '__module__' : 'Message.InfoMessage_pb2'
  # @@protoc_insertion_point(class_scope:QikkDB.NetworkClient.Message.InfoMessage)
  })
_sym_db.RegisterMessage(InfoMessage)
# @@protoc_insertion_point(module_scope)
| 36.272059 | 540 | 0.759376 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='Message/InfoMessage.proto',
package='QikkDB.NetworkClient.Message',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x19Message/InfoMessage.proto\x12\x1cQikkDB.NetworkClient.Message\"\xec\x01\n\x0bInfoMessage\x12\x42\n\x04\x43ode\x18\x01 \x01(\x0e\x32\x34.QikkDB.NetworkClient.Message.InfoMessage.StatusCode\x12\x0f\n\x07Message\x18\x02 \x01(\t\"\x87\x01\n\nStatusCode\x12\x06\n\x02OK\x10\x00\x12\x08\n\x04WAIT\x10\x01\x12\x13\n\x0fGET_NEXT_RESULT\x10\x06\x12\x0f\n\x0bQUERY_ERROR\x10\x02\x12\x10\n\x0cIMPORT_ERROR\x10\x03\x12\x12\n\x0e\x43ONN_ESTABLISH\x10\x04\x12\x0c\n\x08\x43ONN_END\x10\x05\x12\r\n\tHEARTBEAT\x10\x07\x62\x06proto3'
)
_INFOMESSAGE_STATUSCODE = _descriptor.EnumDescriptor(
name='StatusCode',
full_name='QikkDB.NetworkClient.Message.InfoMessage.StatusCode',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='OK', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='WAIT', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='GET_NEXT_RESULT', index=2, number=6,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='QUERY_ERROR', index=3, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='IMPORT_ERROR', index=4, number=3,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CONN_ESTABLISH', index=5, number=4,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='CONN_END', index=6, number=5,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='HEARTBEAT', index=7, number=7,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=161,
serialized_end=296,
)
_sym_db.RegisterEnumDescriptor(_INFOMESSAGE_STATUSCODE)
_INFOMESSAGE = _descriptor.Descriptor(
name='InfoMessage',
full_name='QikkDB.NetworkClient.Message.InfoMessage',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='Code', full_name='QikkDB.NetworkClient.Message.InfoMessage.Code', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='Message', full_name='QikkDB.NetworkClient.Message.InfoMessage.Message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_INFOMESSAGE_STATUSCODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=60,
serialized_end=296,
)
_INFOMESSAGE.fields_by_name['Code'].enum_type = _INFOMESSAGE_STATUSCODE
_INFOMESSAGE_STATUSCODE.containing_type = _INFOMESSAGE
DESCRIPTOR.message_types_by_name['InfoMessage'] = _INFOMESSAGE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InfoMessage = _reflection.GeneratedProtocolMessageType('InfoMessage', (_message.Message,), {
'DESCRIPTOR' : _INFOMESSAGE,
'__module__' : 'Message.InfoMessage_pb2'
})
_sym_db.RegisterMessage(InfoMessage)
| true | true |
1c3f4846e299df5f15a74d0917ad5deaac68416d | 3,827 | bzl | Python | kythe/cxx/extractor/proto/testdata/proto_extractor_test.bzl | bef0/kythe | 2adcb540ae9dbd61879315a5ade8d3716ee3d3d8 | [
"Apache-2.0"
] | null | null | null | kythe/cxx/extractor/proto/testdata/proto_extractor_test.bzl | bef0/kythe | 2adcb540ae9dbd61879315a5ade8d3716ee3d3d8 | [
"Apache-2.0"
] | null | null | null | kythe/cxx/extractor/proto/testdata/proto_extractor_test.bzl | bef0/kythe | 2adcb540ae9dbd61879315a5ade8d3716ee3d3d8 | [
"Apache-2.0"
] | null | null | null | """Rules for testing the proto extractor"""
# Copyright 2018 The Kythe Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_skylib//lib:dicts.bzl", "dicts")
def _extract_kzip_impl(ctx):
    """Run the extractor over srcs and capture the resulting kzip."""
    # NOTE(review): the command is joined unquoted, so source paths with
    # shell metacharacters would break — confirm inputs are always plain
    # Bazel paths.
    command_parts = [ctx.executable.extractor.path]
    command_parts.extend([src.path for src in ctx.files.srcs])
    command_parts.extend(ctx.attr.opts)
    ctx.actions.run_shell(
        mnemonic = "Extract",
        command = " ".join(command_parts),
        # The extractor writes its output wherever KYTHE_OUTPUT_FILE points.
        env = dicts.add(ctx.attr.extra_env, {"KYTHE_OUTPUT_FILE": ctx.outputs.kzip.path}),
        outputs = [ctx.outputs.kzip],
        tools = [ctx.executable.extractor],
        inputs = ctx.files.srcs + ctx.files.deps,
    )
    return [DefaultInfo(runfiles = ctx.runfiles(files = [ctx.outputs.kzip]))]
extract_kzip = rule(
    implementation = _extract_kzip_impl,
    attrs = {
        # Files passed to the extractor on the command line.
        "srcs": attr.label_list(allow_files = True, mandatory = True),
        # Additional files the extraction depends on (not passed as args).
        "deps": attr.label_list(allow_files = True),
        # Extractor binary; defaults to the in-tree proto extractor.
        "extractor": attr.label(
            cfg = "host",
            executable = True,
            default = Label("//kythe/cxx/extractor/proto:proto_extractor"),
        ),
        # Extra command-line arguments for the extractor.
        "opts": attr.string_list(),
        # Extra environment variables for the extractor process.
        "extra_env": attr.string_dict(),
    },
    # The single output: <name>.kzip with the extracted compilation.
    outputs = {"kzip": "%{name}.kzip"},
)
def _kzip_diff_test_impl(ctx):
    """Emit the test script that diffs a kzip against its golden file."""
    # `bazel test` executes this single command line.  For attributes
    # declared allow_single_file, ctx.file.X is the same as ctx.files.X[0].
    args = [
        ctx.executable.diff_bin.short_path,
        ctx.executable.kindex_tool.short_path,
        ctx.file.kzip.short_path,
        ctx.file.golden_file.short_path,
    ]
    ctx.actions.write(
        output = ctx.outputs.executable,
        content = " ".join(args),
    )
    # The script needs its tools and data available at test runtime.
    return [DefaultInfo(runfiles = ctx.runfiles(files = [
        ctx.executable.diff_bin,
        ctx.executable.kindex_tool,
        ctx.file.kzip,
        ctx.file.golden_file,
    ]))]
kzip_diff_test = rule(
    implementation = _kzip_diff_test_impl,
    attrs = {
        # Expected extraction output (a .UNIT file) to compare against.
        "golden_file": attr.label(mandatory = True, allow_single_file = True),
        # The kzip produced by extract_kzip.
        "kzip": attr.label(mandatory = True, allow_single_file = True),
        # Script that performs the comparison.
        "diff_bin": attr.label(
            cfg = "host",
            executable = True,
            default = Label("//kythe/cxx/extractor/proto/testdata:kzip_diff_test"),
        ),
        # Tool used to dump kzip contents for the diff.
        "kindex_tool": attr.label(
            cfg = "host",
            executable = True,
            default = Label("//kythe/cxx/tools:kindex_tool"),
        ),
    },
    test = True,
)
def extractor_golden_test(
        name,
        srcs,
        deps = [],
        opts = [],
        extra_env = {},
        extractor = "//kythe/cxx/extractor/proto:proto_extractor"):
    """Extract srcs and diff the resulting kzip against `<name>.UNIT`.

    Args:
      name: base name; the generated test target is `<name>_test`
      srcs: files to extract
      deps: any other files the extraction requires
      opts: arguments to pass to the extractor
      extra_env: environment variables to configure extractor behavior
      extractor: the extractor binary to use
    """
    kzip_target = name + "_kzip"
    extract_kzip(
        name = kzip_target,
        srcs = srcs,
        deps = deps,
        opts = opts,
        extra_env = extra_env,
        extractor = extractor,
        testonly = True,
    )
    kzip_diff_test(
        name = name + "_test",
        kzip = kzip_target,
        golden_file = name + ".UNIT",
    )
| 31.891667 | 92 | 0.617455 |
load("@bazel_skylib//lib:dicts.bzl", "dicts")
def _extract_kzip_impl(ctx):
cmd = [ctx.executable.extractor.path] + [p.path for p in ctx.files.srcs] + ctx.attr.opts
ctx.actions.run_shell(
mnemonic = "Extract",
command = " ".join(cmd),
env = dicts.add(ctx.attr.extra_env, {"KYTHE_OUTPUT_FILE": ctx.outputs.kzip.path}),
outputs = [ctx.outputs.kzip],
tools = [ctx.executable.extractor],
inputs = ctx.files.srcs + ctx.files.deps,
)
return [DefaultInfo(runfiles = ctx.runfiles(files = [ctx.outputs.kzip]))]
extract_kzip = rule(
implementation = _extract_kzip_impl,
attrs = {
"srcs": attr.label_list(allow_files = True, mandatory = True),
"deps": attr.label_list(allow_files = True),
"extractor": attr.label(
cfg = "host",
executable = True,
default = Label("//kythe/cxx/extractor/proto:proto_extractor"),
),
"opts": attr.string_list(),
"extra_env": attr.string_dict(),
},
outputs = {"kzip": "%{name}.kzip"},
)
def _kzip_diff_test_impl(ctx):
script = " ".join([
ctx.executable.diff_bin.short_path,
ctx.executable.kindex_tool.short_path,
ctx.files.kzip[0].short_path,
ctx.files.golden_file[0].short_path,
])
ctx.actions.write(
output = ctx.outputs.executable,
content = script,
)
runfiles = ctx.runfiles(files = [
ctx.executable.diff_bin,
ctx.executable.kindex_tool,
ctx.file.kzip,
ctx.file.golden_file,
])
return [DefaultInfo(runfiles = runfiles)]
kzip_diff_test = rule(
implementation = _kzip_diff_test_impl,
attrs = {
"golden_file": attr.label(mandatory = True, allow_single_file = True),
"kzip": attr.label(mandatory = True, allow_single_file = True),
"diff_bin": attr.label(
cfg = "host",
executable = True,
default = Label("//kythe/cxx/extractor/proto/testdata:kzip_diff_test"),
),
"kindex_tool": attr.label(
cfg = "host",
executable = True,
default = Label("//kythe/cxx/tools:kindex_tool"),
),
},
test = True,
)
def extractor_golden_test(
name,
srcs,
deps = [],
opts = [],
extra_env = {},
extractor = "//kythe/cxx/extractor/proto:proto_extractor"):
kzip = name + "_kzip"
extract_kzip(
name = kzip,
opts = opts,
deps = deps,
srcs = srcs,
extra_env = extra_env,
extractor = extractor,
testonly = True,
)
kzip_diff_test(
name = name + "_test",
kzip = kzip,
golden_file = name + ".UNIT",
)
| true | true |
1c3f48aae9ca9fd09823987c78cc87743fd28899 | 13,330 | py | Python | kubernetes_asyncio/client/models/v1_node_system_info.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_node_system_info.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | kubernetes_asyncio/client/models/v1_node_system_info.py | PidgeyBE/kubernetes_asyncio | 14d15dc309890253c26b6274a022e84441e05217 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
OpenAPI spec version: v1.13.5
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
def _required_str_property(name):
    """Build a property for *name* whose setter rejects None.

    The value is stored on the instance as ``_<name>``; assigning None
    raises ValueError, matching the generated OpenAPI model contract.
    """
    storage = '_' + name

    def _getter(self):
        return getattr(self, storage)

    def _setter(self, value):
        if value is None:
            raise ValueError(
                "Invalid value for `%s`, must not be `None`" % name)
        setattr(self, storage, value)

    return property(_getter, _setter)


def _to_plain(value):
    """Recursively convert *value* into plain dict/list data.

    Mirrors the serialization of OpenAPI-generated models: any object
    exposing ``to_dict`` is expanded, inside lists and dicts too.
    """
    if isinstance(value, list):
        return [item.to_dict() if hasattr(item, 'to_dict') else item
                for item in value]
    if hasattr(value, 'to_dict'):
        return value.to_dict()
    if isinstance(value, dict):
        return {key: (val.to_dict() if hasattr(val, 'to_dict') else val)
                for key, val in value.items()}
    return value


class V1NodeSystemInfo(object):
    """Auto-generated OpenAPI model describing a node's system info.

    All ten attributes are required strings; assigning None to any of
    them raises ValueError.  ``openapi_types`` and ``attribute_map``
    describe the wire format, as in every generated Kubernetes model.
    """

    # Attribute name -> OpenAPI type.
    openapi_types = {
        'architecture': 'str',
        'boot_id': 'str',
        'container_runtime_version': 'str',
        'kernel_version': 'str',
        'kube_proxy_version': 'str',
        'kubelet_version': 'str',
        'machine_id': 'str',
        'operating_system': 'str',
        'os_image': 'str',
        'system_uuid': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'architecture': 'architecture',
        'boot_id': 'bootID',
        'container_runtime_version': 'containerRuntimeVersion',
        'kernel_version': 'kernelVersion',
        'kube_proxy_version': 'kubeProxyVersion',
        'kubelet_version': 'kubeletVersion',
        'machine_id': 'machineID',
        'operating_system': 'operatingSystem',
        'os_image': 'osImage',
        'system_uuid': 'systemUUID'
    }

    # Validated accessors; each stores its value on `_<name>`.
    architecture = _required_str_property('architecture')
    boot_id = _required_str_property('boot_id')
    container_runtime_version = _required_str_property('container_runtime_version')
    kernel_version = _required_str_property('kernel_version')
    kube_proxy_version = _required_str_property('kube_proxy_version')
    kubelet_version = _required_str_property('kubelet_version')
    machine_id = _required_str_property('machine_id')
    operating_system = _required_str_property('operating_system')
    os_image = _required_str_property('os_image')
    system_uuid = _required_str_property('system_uuid')

    def __init__(self, architecture=None, boot_id=None,
                 container_runtime_version=None, kernel_version=None,
                 kube_proxy_version=None, kubelet_version=None,
                 machine_id=None, operating_system=None, os_image=None,
                 system_uuid=None):
        """V1NodeSystemInfo - a model defined in OpenAPI.

        Every argument is effectively required: leaving one as None
        raises ValueError from the corresponding property setter.
        """
        self.discriminator = None
        provided = {
            'architecture': architecture,
            'boot_id': boot_id,
            'container_runtime_version': container_runtime_version,
            'kernel_version': kernel_version,
            'kube_proxy_version': kube_proxy_version,
            'kubelet_version': kubelet_version,
            'machine_id': machine_id,
            'operating_system': operating_system,
            'os_image': os_image,
            'system_uuid': system_uuid,
        }
        # Route every value through its property setter so that the
        # not-None validation always runs, in declaration order.
        for attribute, value in provided.items():
            setattr(self, attribute, value)

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        return {attr: _to_plain(getattr(self, attr))
                for attr in self.openapi_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when all their attributes are equal."""
        if not isinstance(other, V1NodeSystemInfo):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 35.35809 | 267 | 0.64051 |
import pprint
import re
import six
class V1NodeSystemInfo(object):
    """Kubernetes ``V1NodeSystemInfo`` model (system info reported by a node).

    Generated-client style model: ``openapi_types`` maps attribute names to
    their OpenAPI types and ``attribute_map`` maps them to the JSON keys.
    Every field is a required string; each setter raises ``ValueError`` when
    given ``None``.
    """

    openapi_types = {
        'architecture': 'str',
        'boot_id': 'str',
        'container_runtime_version': 'str',
        'kernel_version': 'str',
        'kube_proxy_version': 'str',
        'kubelet_version': 'str',
        'machine_id': 'str',
        'operating_system': 'str',
        'os_image': 'str',
        'system_uuid': 'str'
    }

    attribute_map = {
        'architecture': 'architecture',
        'boot_id': 'bootID',
        'container_runtime_version': 'containerRuntimeVersion',
        'kernel_version': 'kernelVersion',
        'kube_proxy_version': 'kubeProxyVersion',
        'kubelet_version': 'kubeletVersion',
        'machine_id': 'machineID',
        'operating_system': 'operatingSystem',
        'os_image': 'osImage',
        'system_uuid': 'systemUUID'
    }

    def _required_str(name):  # noqa: N805 -- class-body helper, removed below
        """Build a property storing ``_<name>`` whose setter rejects ``None``.

        The ValueError message is byte-identical to the generated client's
        per-field setters, so callers catching/matching it are unaffected.
        """
        private = '_' + name

        def fget(self):
            return getattr(self, private)

        def fset(self, value):
            if value is None:
                raise ValueError(
                    "Invalid value for `%s`, must not be `None`" % name)
            setattr(self, private, value)

        return property(fget, fset,
                        doc="The %s of this V1NodeSystemInfo." % name)

    architecture = _required_str('architecture')
    boot_id = _required_str('boot_id')
    container_runtime_version = _required_str('container_runtime_version')
    kernel_version = _required_str('kernel_version')
    kube_proxy_version = _required_str('kube_proxy_version')
    kubelet_version = _required_str('kubelet_version')
    machine_id = _required_str('machine_id')
    operating_system = _required_str('operating_system')
    os_image = _required_str('os_image')
    system_uuid = _required_str('system_uuid')

    del _required_str  # the factory is not part of the public interface

    def __init__(self, architecture=None, boot_id=None,
                 container_runtime_version=None, kernel_version=None,
                 kube_proxy_version=None, kubelet_version=None,
                 machine_id=None, operating_system=None, os_image=None,
                 system_uuid=None):
        """V1NodeSystemInfo -- every argument is required.

        :raises ValueError: if any argument is ``None``
        """
        # Prime the private slots first so a failed validation still leaves
        # a fully-formed (if partially populated) instance behind, matching
        # the generated client's initialization order.
        for name in self.openapi_types:
            setattr(self, '_' + name, None)
        self.discriminator = None

        # Property setters validate each value (ValueError on None).
        self.architecture = architecture
        self.boot_id = boot_id
        self.container_runtime_version = container_runtime_version
        self.kernel_version = kernel_version
        self.kube_proxy_version = kube_proxy_version
        self.kubelet_version = kubelet_version
        self.machine_id = machine_id
        self.operating_system = operating_system
        self.os_image = os_image
        self.system_uuid = system_uuid

    def to_dict(self):
        """Return the model properties as a plain ``dict``.

        Nested model objects (anything exposing ``to_dict``) are serialized
        recursively, including inside list and dict values.
        """
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return a pretty-printed string form of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """Used by `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Equality is field-by-field over the instance state."""
        if not isinstance(other, V1NodeSystemInfo):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| true | true |
1c3f49404e6dd0c5ad52a00fb049a2addc5ae17b | 725 | py | Python | udsoncan/configs.py | marchcui/pythUDS | 3012c716299730c23f58d7e545d5bb22f301d1c7 | [
"MIT"
] | null | null | null | udsoncan/configs.py | marchcui/pythUDS | 3012c716299730c23f58d7e545d5bb22f301d1c7 | [
"MIT"
] | null | null | null | udsoncan/configs.py | marchcui/pythUDS | 3012c716299730c23f58d7e545d5bb22f301d1c7 | [
"MIT"
] | null | null | null | default_client_config = {
	# Default configuration merged under any user-supplied Client config.
	# The p2/p2* timeout names follow UDS (ISO 14229) terminology; values
	# are presumably in seconds -- TODO confirm against the client code.
	'exception_on_negative_response' : True,
	'exception_on_invalid_response' : True,
	'exception_on_unexpected_response' : True,
	'security_algo' : None,
	'security_algo_params' : None,
	'tolerate_zero_padding' : True,
	'ignore_all_zero_dtc' : True,
	'dtc_snapshot_did_size' : 2, # Not specified in standard. 2 bytes matches other services format.
	'server_address_format' : None, # 8,16,24,32,40
	'server_memorysize_format' : None, # 8,16,24,32,40
	'data_identifiers' : {},
	'input_output' : {},
	'request_timeout' : 5,
	'p2_timeout' : 1,
	'p2_star_timeout' : 5,
	}
| 40.277778 | 106 | 0.594483 | default_client_config = {
	# Default configuration merged under any user-supplied Client config.
	'exception_on_negative_response' : True,
	'exception_on_invalid_response' : True,
	'exception_on_unexpected_response' : True,
	'security_algo' : None,
	'security_algo_params' : None,
	'tolerate_zero_padding' : True,
	'ignore_all_zero_dtc' : True,
	'dtc_snapshot_did_size' : 2,  # Not specified in standard. 2 bytes matches other services format.
	'server_address_format' : None,  # 8,16,24,32,40
	'server_memorysize_format' : None,  # 8,16,24,32,40
	'data_identifiers' : {},
	'input_output' : {},
	'request_timeout' : 5,  # presumably seconds -- confirm against client code
	'p2_timeout' : 1,
	'p2_star_timeout' : 5,
	}
| true | true |
1c3f49ed42a717f7d956ed34ca2195e2690c3b1b | 2,118 | py | Python | tests/test_plugin_collector.py | AdamGleave/pytest-notebook | 94df07bb0138bc677de9842aca8f5acd44c58677 | [
"BSD-3-Clause"
] | null | null | null | tests/test_plugin_collector.py | AdamGleave/pytest-notebook | 94df07bb0138bc677de9842aca8f5acd44c58677 | [
"BSD-3-Clause"
] | null | null | null | tests/test_plugin_collector.py | AdamGleave/pytest-notebook | 94df07bb0138bc677de9842aca8f5acd44c58677 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Test the plugin collection and direct invocation of notebooks."""
import os
PATH = os.path.dirname(os.path.realpath(__file__))
def copy_nb_to_tempdir(in_name="different_outputs.ipynb", out_name="test_nb.ipynb"):
    """Copy a notebook from the ``raw_files`` fixtures into the current directory."""
    source_path = os.path.join(PATH, "raw_files", in_name)
    with open(source_path, "rb") as source:
        contents = source.read()
    with open(out_name, "wb") as target:
        target.write(contents)
def test_collection(testdir):
    """Notebook files should be collected as JupyterNb test items."""
    copy_nb_to_tempdir()
    result = testdir.runpytest("--nb-test-files", "--collect-only")
    # fnmatch_lines asserts internally on the captured stdout
    expected = [
        "*<JupyterNbCollector*test_nb.ipynb>*",
        "*<JupyterNbTest nbregression(test_nb)>*",
    ]
    result.stdout.fnmatch_lines(expected)
def test_setup_with_skip_meta(testdir):
    """A notebook carrying skip metadata is reported as skipped with its reason."""
    copy_nb_to_tempdir("nb_with_skip_meta.ipynb")
    result = testdir.runpytest("--nb-test-files", "--setup-plan", "-rs")
    # fnmatch_lines asserts internally on the captured stdout
    expected = ["*test_nb.ipynb*s*", "*I have my reasons*", "*1 skipped*"]
    result.stdout.fnmatch_lines(expected)
def test_run_fail(testdir):
    """An altered notebook must fail regression with a CellExecutionError."""
    copy_nb_to_tempdir("different_outputs_altered.ipynb")
    result = testdir.runpytest(
        "--nb-exec-cwd", os.path.join(PATH, "raw_files"), "--nb-test-files", "-v"
    )
    # fnmatch_lines asserts internally on the captured stdout
    expected = ["*::nbregression(test_nb) FAILED*", "*CellExecutionError:*"]
    result.stdout.fnmatch_lines(expected)
    # The failing notebook must produce a non-zero exit code for the suite.
    assert result.ret != 0
def test_run_pass_with_meta(testdir):
    """A notebook whose metadata permits the output differences must pass."""
    copy_nb_to_tempdir("different_outputs_with_metadata.ipynb")
    result = testdir.runpytest(
        "--nb-exec-cwd", os.path.join(PATH, "raw_files"), "--nb-test-files", "-v"
    )
    # fnmatch_lines asserts internally on the captured stdout
    result.stdout.fnmatch_lines(["*::nbregression(test_nb) PASSED*"])
    # A fully passing run exits with status 0.
    assert result.ret == 0
| 32.584615 | 84 | 0.655807 |
import os
PATH = os.path.dirname(os.path.realpath(__file__))
def copy_nb_to_tempdir(in_name="different_outputs.ipynb", out_name="test_nb.ipynb"):
    """Copy a notebook from the ``raw_files`` fixtures into the current directory."""
    with open(os.path.join(PATH, "raw_files", in_name), "rb") as handle:
        data = handle.read()
    with open(out_name, "wb") as handle:
        handle.write(data)
def test_collection(testdir):
    """Notebook files should be collected as JupyterNb test items."""
    copy_nb_to_tempdir()
    result = testdir.runpytest("--nb-test-files", "--collect-only")
    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines(
        [
            "*<JupyterNbCollector*test_nb.ipynb>*",
            "*<JupyterNbTest nbregression(test_nb)>*",
        ]
    )
def test_setup_with_skip_meta(testdir):
    """A notebook carrying skip metadata is reported as skipped with its reason."""
    copy_nb_to_tempdir("nb_with_skip_meta.ipynb")
    result = testdir.runpytest("--nb-test-files", "--setup-plan", "-rs")
    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines(
        ["*test_nb.ipynb*s*", "*I have my reasons*", "*1 skipped*"]
    )
def test_run_fail(testdir):
    """An altered notebook must fail regression with a CellExecutionError."""
    copy_nb_to_tempdir("different_outputs_altered.ipynb")
    result = testdir.runpytest(
        "--nb-exec-cwd", os.path.join(PATH, "raw_files"), "--nb-test-files", "-v"
    )
    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines(
        ["*::nbregression(test_nb) FAILED*", "*CellExecutionError:*"]
    )
    # make sure we get a non-zero exit code for the test suite
    assert result.ret != 0
def test_run_pass_with_meta(testdir):
    """A notebook whose metadata permits the output differences must pass."""
    copy_nb_to_tempdir("different_outputs_with_metadata.ipynb")
    result = testdir.runpytest(
        "--nb-exec-cwd", os.path.join(PATH, "raw_files"), "--nb-test-files", "-v"
    )
    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines(["*::nbregression(test_nb) PASSED*"])
    # make sure we get a zero exit code for the test suite
    assert result.ret == 0
| true | true |
1c3f4a8ff00b30d0100ea2e67dc64c1c4a865a9a | 662 | py | Python | setup.py | JartC0ding/Encrypto | 1a094b8e657d48d335b1b9a2d419edbd311e1cc9 | [
"Apache-2.0"
] | null | null | null | setup.py | JartC0ding/Encrypto | 1a094b8e657d48d335b1b9a2d419edbd311e1cc9 | [
"Apache-2.0"
] | null | null | null | setup.py | JartC0ding/Encrypto | 1a094b8e657d48d335b1b9a2d419edbd311e1cc9 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup, find_packages
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Education",
    "Operating System :: Microsoft :: Windows :: Windows 10",
    # Fixed to a valid trove classifier (the bare "License :: Apache License
    # 2.0" form is rejected by PyPI/newer setuptools).
    "License :: OSI Approved :: Apache Software License",
    "Programming Language :: Python :: 3"
]

# ``long_description`` expects a string; the previous code passed the open
# file object itself (and leaked the handle). Read the README up front.
with open("README.md", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="Encrypto",
    version="0.0.1",
    description="A Encrypt/Decrypt Library",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="",
    author="Moritz Schittenhelm",
    author_email="moritz5911@gmail.com",
    license="Apache License 2.0",
    classifiers=classifiers,
    keywords="encryption",
    packages=find_packages(),
    # NOTE(review): ``random`` is part of the standard library; depending on
    # the PyPI package "random.py" is almost certainly unintended -- confirm
    # before publishing.
    install_requires=["random.py"]
)
| 26.48 | 59 | 0.676737 | from setuptools import setup, find_packages
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Education",
    "Operating System :: Microsoft :: Windows :: Windows 10",
    # Fixed to a valid trove classifier (the bare "License :: Apache License
    # 2.0" form is rejected by PyPI/newer setuptools).
    "License :: OSI Approved :: Apache Software License",
    "Programming Language :: Python :: 3"
]

# ``long_description`` expects a string; the previous code passed the open
# file object itself (and leaked the handle). Read the README up front.
with open("README.md", encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="Encrypto",
    version="0.0.1",
    description="A Encrypt/Decrypt Library",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="",
    author="Moritz Schittenhelm",
    author_email="moritz5911@gmail.com",
    license="Apache License 2.0",
    classifiers=classifiers,
    keywords="encryption",
    packages=find_packages(),
    # NOTE(review): ``random`` is part of the standard library; depending on
    # the PyPI package "random.py" is almost certainly unintended -- confirm
    # before publishing.
    install_requires=["random.py"]
)
| true | true |
1c3f4b0354fb050dc4cc0435b92018ab52be6e22 | 477 | py | Python | wooey/__init__.py | 8dspaces/Wooey-Flask | 44d3ce02474859cdd8d6f1138ba48ce62b739524 | [
"BSD-3-Clause"
] | 1 | 2020-11-05T15:04:33.000Z | 2020-11-05T15:04:33.000Z | wooey/__init__.py | 8dspaces/Wooey-Flask | 44d3ce02474859cdd8d6f1138ba48ce62b739524 | [
"BSD-3-Clause"
] | null | null | null | wooey/__init__.py | 8dspaces/Wooey-Flask | 44d3ce02474859cdd8d6f1138ba48ce62b739524 | [
"BSD-3-Clause"
] | null | null | null | from . import version
import os
if version.DJANGO_VERSION >= version.DJ17:
    # Django >= 1.7: the AppConfig machinery calls WooeyConfig.ready() for us.
    default_app_config = 'wooey.apps.WooeyConfig'
else:
    # Pre-1.7 Django has no app-ready hook, so bootstrap manually at import
    # time (skipped under TESTING so the test harness controls settings).
    if os.environ.get('TESTING') != 'True':
        from . import settings as wooey_settings
        # we need to call from within wooey_settings so the celery/etc vars are setup
        if not wooey_settings.settings.configured:
            wooey_settings.settings.configure()
        from .apps import WooeyConfig
        WooeyConfig().ready()
| 36.692308 | 85 | 0.69392 | from . import version
import os
if version.DJANGO_VERSION >= version.DJ17:
    # Django >= 1.7: the AppConfig machinery calls WooeyConfig.ready() for us.
    default_app_config = 'wooey.apps.WooeyConfig'
else:
    # Pre-1.7 Django has no app-ready hook, so bootstrap manually at import
    # time (skipped under TESTING so the test harness controls settings).
    if os.environ.get('TESTING') != 'True':
        # we need to import from within wooey_settings so the celery/etc vars are setup
        from . import settings as wooey_settings
        if not wooey_settings.settings.configured:
            wooey_settings.settings.configure()
        from .apps import WooeyConfig
        WooeyConfig().ready()
| true | true |
1c3f4c8e6de51b62e6abfebe7b9516db38d53f2d | 279 | py | Python | tests/test_run_times/__init__.py | James-Montgomery/platea | 96188d34293d46ddc3f9935fe1349f83f72c13a8 | [
"MIT"
] | null | null | null | tests/test_run_times/__init__.py | James-Montgomery/platea | 96188d34293d46ddc3f9935fe1349f83f72c13a8 | [
"MIT"
] | null | null | null | tests/test_run_times/__init__.py | James-Montgomery/platea | 96188d34293d46ddc3f9935fe1349f83f72c13a8 | [
"MIT"
] | null | null | null | """
Run Times
=========
Run time evaluation of functions in Platea.
"""
# # Example of per line run time diagnostic
# import cProfile
# import platea.random_number_generators as rng
# ran = rng.Ran0(seed=-99999)
# ran_draw = ran.draw
# cProfile.run("ran_draw(100000)", sort="time")
| 21.461538 | 47 | 0.713262 | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.