hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f70742822966dd1f094d39360d4edf203161ebbe
| 2,033
|
py
|
Python
|
efls-console/console/resource/service.py
|
universe-hcy/Elastic-Federated-Learning-Solution
|
4e047fbbe6ae9809cd631499b7d3a3855dfe2208
|
[
"Apache-2.0"
] | 65
|
2021-09-30T01:54:34.000Z
|
2022-03-26T13:57:15.000Z
|
efls-console/console/resource/service.py
|
universe-hcy/Elastic-Federated-Learning-Solution
|
4e047fbbe6ae9809cd631499b7d3a3855dfe2208
|
[
"Apache-2.0"
] | 24
|
2021-09-30T09:25:43.000Z
|
2022-03-29T06:33:44.000Z
|
efls-console/console/resource/service.py
|
universe-hcy/Elastic-Federated-Learning-Solution
|
4e047fbbe6ae9809cd631499b7d3a3855dfe2208
|
[
"Apache-2.0"
] | 18
|
2021-09-30T09:04:08.000Z
|
2022-03-31T10:17:27.000Z
|
# -*- coding: utf8 -*-
import json
from console.models import Resource, resource_repo
from console.exceptions import NotFound, AlreadyExist, PermissionDenied
from console.factory import logger
class ResourceService:
    """Service layer for CRUD operations on ``Resource`` records.

    An instance optionally pre-loads one resource, either by primary id
    (``rid``) or by the (``task_intra_id``, ``name``) pair.
    """

    # Repository handle shared by all instances (module-level singleton
    # imported from console.models).
    resource_repo = resource_repo

    def __init__(self, rid: str = None, task_intra_id: str = None, name: str = None):
        """Optionally load a resource by id, or by (task_intra_id, name).

        Bug fix: ``self.resource`` is now always initialised. Previously,
        constructing the service without a lookup key left the attribute
        unset, so ``update_resource``/``delete_resource`` raised
        AttributeError instead of the intended NotFound / no-op handling.
        """
        self.resource = None
        if rid:
            self.resource = self.resource_repo.get(rid)
        elif task_intra_id and name:
            self.resource = self.resource_repo.filter(task_intra_id=task_intra_id, name=name)

    def create_resource(self, owner_id: str, task_intra_id: str, name: str, uri: str, comment: str, config: str):
        """Create and persist a new resource.

        Raises:
            AlreadyExist: when a resource with the same
                (task_intra_id, name) pair already exists.
        """
        if self.resource_repo.filter(task_intra_id=task_intra_id, name=name) is not None:
            raise AlreadyExist(message='resource does already exist')
        self.resource = Resource(owner_id=owner_id, task_intra_id=task_intra_id, name=name, uri=uri, comment=comment,
                                 config=config)
        self.resource_repo.insert_or_update(self.resource)
        return self.resource

    def update_resource(self, request_data: dict) -> Resource:
        """Apply 'comment' and/or 'config' from request_data and persist.

        'config' entries are merged key-wise into the stored JSON config
        rather than replacing it wholesale.

        Raises:
            NotFound: when no resource was loaded during __init__.
        """
        if self.resource is None:
            raise NotFound(message='resource object init failed')
        need_update = False
        if 'comment' in request_data:
            self.resource.comment = request_data['comment']
            need_update = True
        if 'config' in request_data:
            # Merge into the existing JSON-encoded config, not overwrite.
            config = json.loads(self.resource.config) if self.resource.config else {}
            config.update(request_data['config'])
            self.resource.config = json.dumps(config)
            need_update = True
        if need_update:
            self.resource_repo.insert_or_update(self.resource)
        return self.resource

    def delete_resource(self):
        """Delete the loaded resource; silently no-op when none is loaded."""
        if self.resource is None:
            return
        self.resource_repo.delete(self.resource)

    def get_resource_list(self, task_intra_id: str):
        """Return all resources belonging to the given task."""
        return self.resource_repo.get_all(task_intra_id=task_intra_id)
| 36.303571
| 117
| 0.672897
|
import json
from console.models import Resource, resource_repo
from console.exceptions import NotFound, AlreadyExist, PermissionDenied
from console.factory import logger
class ResourceService:
    """CRUD service layer for ``Resource`` records."""

    # Repository handle shared by all instances (imported from console.models).
    resource_repo = resource_repo

    def __init__(self, rid: str = None, task_intra_id: str = None, name: str = None):
        """Pre-load one resource, by primary id or by (task_intra_id, name).

        NOTE(review): when neither lookup key is supplied, ``self.resource``
        is never assigned, so later method calls raise AttributeError —
        confirm whether callers always pass a key.
        """
        if rid:
            self.resource = self.resource_repo.get(rid)
        elif task_intra_id and name:
            self.resource = self.resource_repo.filter(task_intra_id=task_intra_id, name=name)

    def create_resource(self, owner_id: str, task_intra_id: str, name: str, uri: str, comment: str, config: str):
        """Create and persist a new resource.

        Raises:
            AlreadyExist: when a resource with the same
                (task_intra_id, name) pair already exists.
        """
        if self.resource_repo.filter(task_intra_id=task_intra_id, name=name) is not None:
            raise AlreadyExist(message='resource does already exist')
        self.resource = Resource(owner_id=owner_id, task_intra_id=task_intra_id, name=name, uri=uri, comment=comment,
                                 config=config)
        self.resource_repo.insert_or_update(self.resource)
        return self.resource

    def update_resource(self, request_data: dict) -> Resource:
        """Update 'comment' and/or 'config' from request_data and persist.

        'config' entries are merged into the stored JSON config rather than
        replacing it wholesale.

        Raises:
            NotFound: when no resource was loaded during __init__.
        """
        if self.resource is None:
            raise NotFound(message='resource object init failed')
        need_update = False
        if 'comment' in request_data:
            self.resource.comment = request_data['comment']
            need_update = True
        if 'config' in request_data:
            # Merge key-wise into the existing JSON-encoded config.
            config = json.loads(self.resource.config) if self.resource.config else {}
            config.update(request_data['config'])
            self.resource.config = json.dumps(config)
            need_update = True
        if need_update:
            self.resource_repo.insert_or_update(self.resource)
        return self.resource

    def delete_resource(self):
        """Delete the loaded resource; no-op when none is loaded."""
        if self.resource is None:
            return
        self.resource_repo.delete(self.resource)

    def get_resource_list(self, task_intra_id: str):
        """Return all resources for the given task."""
        return self.resource_repo.get_all(task_intra_id=task_intra_id)
| true
| true
|
f70743d48387633092e6468a3ee10f04cbf592ae
| 3,799
|
py
|
Python
|
simple-harmonic-oscillator/gui/sho.py
|
guiltygyoza/rk4-starknet
|
76d3205a9c480be902066704abd8a2e0ff8ac7e9
|
[
"MIT"
] | 18
|
2021-10-17T01:29:25.000Z
|
2022-02-04T03:33:03.000Z
|
simple-harmonic-oscillator/gui/sho.py
|
HZ2078/rk4-starknet
|
76d3205a9c480be902066704abd8a2e0ff8ac7e9
|
[
"MIT"
] | null | null | null |
simple-harmonic-oscillator/gui/sho.py
|
HZ2078/rk4-starknet
|
76d3205a9c480be902066704abd8a2e0ff8ac7e9
|
[
"MIT"
] | 4
|
2021-11-25T21:57:55.000Z
|
2022-03-29T03:17:46.000Z
|
import pygame, sys
import numpy as np
import subprocess
import time
import json
from timeit import default_timer as timer
# text box initialization
def update_message(message):
    """Render *message* centred in the 100px status strip at the bottom."""
    status_font = pygame.font.Font(None, 24)
    rendered = status_font.render(message, 1, (0, 0, 0))
    rect = rendered.get_rect(center=(WIDTH / 2, HEIGHT - 50))
    # Wipe the strip with white before blitting the new text.
    screen.fill((255, 255, 255), (0, HEIGHT - 100, WIDTH, 100))
    screen.blit(rendered, rect)
    pygame.display.update()
# utility function to execute shell command and parse result
def subprocess_run(cmd):
    """Run *cmd* and return its decoded stdout without a trailing newline.

    Args:
        cmd: sequence of program arguments passed to subprocess.run.

    Returns:
        Captured stdout decoded as UTF-8. Exactly one trailing newline is
        stripped when present. Bug fix: the old ``[:-1]`` slice removed the
        last character unconditionally, corrupting output that did not end
        with a newline.
    """
    completed = subprocess.run(cmd, stdout=subprocess.PIPE)
    text = completed.stdout.decode('utf-8')
    if text.endswith('\n'):
        text = text[:-1]
    return text
# redraw the screen and the mass at new x location
def update_figures(ball_x):
    """Redraw the play area: the point mass at *ball_x* plus a spring."""
    # Clear everything above the 100px message strip.
    screen.fill(BG_COLOR, (0, 0, WIDTH, HEIGHT - 100))
    # The point mass, drawn filled (width 0).
    pygame.draw.circle(screen, BALL_COLOR, (ball_x, BALL_Y), BALL_RADIUS, 0)
    # The spring: N_SPRING_CIRCLE outlined circles spaced evenly between
    # the left wall and the left edge of the ball.
    first_x = SPRING_CIRCLE_RADIUS
    last_x = ball_x - BALL_RADIUS - SPRING_CIRCLE_RADIUS
    step = (last_x - first_x) / (N_SPRING_CIRCLE - 1)
    circle_x = first_x
    for _ in range(N_SPRING_CIRCLE):
        pygame.draw.circle(screen, SPRING_COLOR, (circle_x, BALL_Y),
                           SPRING_CIRCLE_RADIUS, 1)
        circle_x += step
    pygame.display.update()
# scene setup
WIDTH = 600
HEIGHT = 600+100
BALL_RADIUS = 30
SPRING_CIRCLE_RADIUS = 10
N_SPRING_CIRCLE = 20
MESSAGE = "Simple Harmonic Oscillator on StarkNet LFG"
BG_COLOR = (25, 25, 112)
BALL_COLOR = (239, 231, 200)
SPRING_COLOR = (239, 231, 200)
BALL_X_OFFSET = 300
BALL_Y = 300
# contract setup
CONTRACT_ADDRESS = '0x3280705f884bb08c0fd6c53f67e51d1b06c8118397f68234072a78a63b13c9c'
SCALE_FP = 10000 # for fixed-point arithmetic
PRIME = 3618502788666131213697322783095070105623107215331596699973092056135872020481
# NOTE(review): PRIME_HALF is defined but never used — retrieved felts are
# never mapped back to negative values; confirm the contract only returns
# values the caller treats as non-negative.
PRIME_HALF = PRIME//2
pygame.init()
screen = pygame.display.set_mode( (WIDTH, HEIGHT) )
pygame.display.set_caption( 'SHO' )
screen.fill( BG_COLOR )
AMPLITUDE = 100
SCALE_X = (WIDTH/2.-BALL_RADIUS*2)/100
x = AMPLITUDE
xd = 0 # stationary at start
t_fp = int( 0 * SCALE_FP )
dt_fp = int( 0.02 * SCALE_FP )
x_fp = int( x * SCALE_FP )
xd_fp = int( xd * SCALE_FP )
update_figures(ball_x = BALL_X_OFFSET + x*SCALE_X)
update_message(MESSAGE)
while True:
    # retrieve N sample from contract
    N = 100
    print(f'> Begin retrieval of {N} coordinates from StarkNet rk4 integrator.')
    x_fp_s = [x_fp]
    xd_fp_s = [xd_fp]
    for i in range(N):
        # sending side must mod P to send only positive felt values; receiving side could receive negative value
        x_fp = x_fp if x_fp>=0 else x_fp+PRIME
        xd_fp = xd_fp if xd_fp>=0 else xd_fp+PRIME
        cmd = f"starknet call --network=alpha --address {CONTRACT_ADDRESS} --abi sho_contract_abi.json " + \
              f"--function query_next_given_coordinates --inputs {t_fp} {dt_fp} {x_fp} {xd_fp}"
        cmd = cmd.split(' ')
        result = subprocess_run(cmd)
        result = result.split(' ')
        x_fp = int(result[0])
        xd_fp = int(result[1])
        t_fp += dt_fp
        x_fp_s.append( x_fp )
        xd_fp_s.append( xd_fp )
        print(f'> {i+1}th/{N} coordinate retrieved from StarkNet rk4 integrator.')
    print()
    x_s = [x_fp/SCALE_FP for x_fp in x_fp_s]
    # Bug fix: this print was a plain string literal missing the f prefix,
    # so the placeholder "{x_s}" was printed verbatim instead of the values.
    print(f'> printing all retrieved coordinates: {x_s}\n')
    # saving the last coordinate for next loop
    x_fp = x_fp_s[-1]
    xd_fp = xd_fp_s[-1]
    # render animation from retrieved coordinates
    print('>>> Begin animation rendering.')
    for i in range(N):
        update_figures(ball_x = BALL_X_OFFSET + x_s[i]*SCALE_X)
        update_message(MESSAGE)
        time.sleep(0.05)
    # check for quit() event
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
| 26.943262
| 106
| 0.733088
|
import pygame, sys
import numpy as np
import subprocess
import time
import json
from timeit import default_timer as timer
def update_message (message):
    """Draw *message* centred in the bottom 100px status strip."""
    font = pygame.font.Font(None, 24)
    text = font.render(message, 1, (0, 0, 0))
    text_rect = text.get_rect(center =(WIDTH / 2, HEIGHT-50))
    # Clear the strip with white before drawing the new text.
    screen.fill ((255,255,255), (0, HEIGHT-100, WIDTH, 100))
    screen.blit(text, text_rect)
    pygame.display.update()
def subprocess_run (cmd):
    """Execute *cmd* and return its stdout decoded as UTF-8.

    NOTE(review): the ``[:-1]`` slice assumes output always ends with a
    newline; if it does not, the last real character is dropped — confirm.
    """
    result = subprocess.run(cmd, stdout=subprocess.PIPE)
    result = result.stdout.decode('utf-8')[:-1]  # drop trailing newline
    return result
def update_figures(ball_x):
    """Redraw the play area: filled ball at *ball_x* plus a drawn spring."""
    # Clear everything above the 100px message strip.
    screen.fill (BG_COLOR, (0, 0, WIDTH, HEIGHT-100))
    # The point mass (width 0 => filled circle).
    pygame.draw.circle(
        screen,
        BALL_COLOR,
        (ball_x, BALL_Y),
        BALL_RADIUS,
        0
    )
    # The spring: evenly spaced outlined circles from the left wall to the
    # left edge of the ball.
    spring_circle_first_x = SPRING_CIRCLE_RADIUS
    spring_circle_last_x = ball_x - BALL_RADIUS - SPRING_CIRCLE_RADIUS
    spring_circle_distance = (spring_circle_last_x - spring_circle_first_x)/(N_SPRING_CIRCLE-1)
    spring_circle_x = spring_circle_first_x
    for i in range(N_SPRING_CIRCLE):
        pygame.draw.circle(
            screen,
            SPRING_COLOR,
            (spring_circle_x, BALL_Y),
            SPRING_CIRCLE_RADIUS,
            1  # width 1 => outline only
        )
        spring_circle_x += spring_circle_distance
    pygame.display.update()
# Scene constants.
WIDTH = 600
HEIGHT = 600+100
BALL_RADIUS = 30
SPRING_CIRCLE_RADIUS = 10
N_SPRING_CIRCLE = 20
MESSAGE = "Simple Harmonic Oscillator on StarkNet LFG"
BG_COLOR = (25, 25, 112)
BALL_COLOR = (239, 231, 200)
SPRING_COLOR = (239, 231, 200)
BALL_X_OFFSET = 300
BALL_Y = 300
# Contract constants (fixed-point arithmetic over the StarkNet field).
CONTRACT_ADDRESS = '0x3280705f884bb08c0fd6c53f67e51d1b06c8118397f68234072a78a63b13c9c'
SCALE_FP = 10000
PRIME = 3618502788666131213697322783095070105623107215331596699973092056135872020481
PRIME_HALF = PRIME//2
pygame.init()
screen = pygame.display.set_mode( (WIDTH, HEIGHT) )
pygame.display.set_caption( 'SHO' )
screen.fill( BG_COLOR )
AMPLITUDE = 100
SCALE_X = (WIDTH/2.-BALL_RADIUS*2)/100
x = AMPLITUDE
xd = 0
t_fp = int( 0 * SCALE_FP )
dt_fp = int( 0.02 * SCALE_FP )
x_fp = int( x * SCALE_FP )
xd_fp = int( xd * SCALE_FP )
update_figures(ball_x = BALL_X_OFFSET + x*SCALE_X)
update_message(MESSAGE)
while True:
    # Retrieve N coordinate samples from the on-chain integrator.
    N = 100
    print(f'> Begin retrieval of {N} coordinates from StarkNet rk4 integrator.')
    x_fp_s = [x_fp]
    xd_fp_s = [xd_fp]
    for i in range(N):
        # Map negative fixed-point values into the positive felt range.
        x_fp = x_fp if x_fp>=0 else x_fp+PRIME
        xd_fp = xd_fp if xd_fp>=0 else xd_fp+PRIME
        cmd = f"starknet call --network=alpha --address {CONTRACT_ADDRESS} --abi sho_contract_abi.json " + \
              f"--function query_next_given_coordinates --inputs {t_fp} {dt_fp} {x_fp} {xd_fp}"
        cmd = cmd.split(' ')
        result = subprocess_run(cmd)
        result = result.split(' ')
        x_fp = int(result[0])
        xd_fp = int(result[1])
        t_fp += dt_fp
        x_fp_s.append( x_fp )
        xd_fp_s.append( xd_fp )
        print(f'> {i+1}th/{N} coordinate retrieved from StarkNet rk4 integrator.')
    print()
    x_s = [x_fp/SCALE_FP for x_fp in x_fp_s]
    # Bug fix: the f prefix was missing, so "{x_s}" was printed literally.
    print(f'> printing all retrieved coordinates: {x_s}\n')
    # Keep the last coordinate as the seed for the next retrieval loop.
    x_fp = x_fp_s[-1]
    xd_fp = xd_fp_s[-1]
    print('>>> Begin animation rendering.')
    for i in range(N):
        update_figures(ball_x = BALL_X_OFFSET + x_s[i]*SCALE_X)
        update_message(MESSAGE)
        time.sleep(0.05)
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
| true
| true
|
f707449b00cb36d59a800350ebf6712735bf5f72
| 122
|
py
|
Python
|
DST/datasets/__init__.py
|
DunZhang/DomainSpecificThesaurus
|
539dcdbe618ade1864e56423667f28afb800e1e1
|
[
"MIT"
] | 13
|
2019-02-21T23:16:55.000Z
|
2022-03-03T16:27:24.000Z
|
DST/datasets/__init__.py
|
EzioQR/DomainSpecificThesaurus
|
a7233b039e8e0d1250b9fc1ebe147deb02efb502
|
[
"MIT"
] | 3
|
2021-04-28T11:49:20.000Z
|
2022-02-25T17:31:06.000Z
|
DST/datasets/__init__.py
|
EzioQR/DomainSpecificThesaurus
|
a7233b039e8e0d1250b9fc1ebe147deb02efb502
|
[
"MIT"
] | 6
|
2019-02-23T16:01:36.000Z
|
2021-09-05T10:27:54.000Z
|
from ..datasets import CleanData,DownloadData
# from .CleanData import cleanEngXml,cleanMathXml,CleanDataWiki,CleanDataSO
| 40.666667
| 75
| 0.860656
|
from ..datasets import CleanData,DownloadData
| true
| true
|
f70745fb4433febb663cfc4f6924bdf4b71efdcf
| 359
|
py
|
Python
|
app.py
|
bjongbloedt/menu
|
800b2045a28d1b3937ebefdda0b7a2ad45c09bb6
|
[
"MIT"
] | null | null | null |
app.py
|
bjongbloedt/menu
|
800b2045a28d1b3937ebefdda0b7a2ad45c09bb6
|
[
"MIT"
] | 1
|
2021-06-01T21:44:43.000Z
|
2021-06-01T21:44:43.000Z
|
app.py
|
bjongbloedt/menu
|
800b2045a28d1b3937ebefdda0b7a2ad45c09bb6
|
[
"MIT"
] | null | null | null |
from apistar.backends import sqlalchemy_backend
from apistar.frameworks.wsgi import WSGIApp as App
from project.routes import routes
from project.settings import settings
# Assemble the WSGI application with SQLAlchemy-backed commands/components.
_app_config = {
    'routes': routes,
    'settings': settings,
    'commands': sqlalchemy_backend.commands,
    'components': sqlalchemy_backend.components,
}
app = App(**_app_config)

if __name__ == '__main__':
    # Delegate to API Star's built-in command-line entry point.
    app.main()
| 21.117647
| 50
| 0.777159
|
from apistar.backends import sqlalchemy_backend
from apistar.frameworks.wsgi import WSGIApp as App
from project.routes import routes
from project.settings import settings
# WSGI application wired with the project's routes/settings and the
# SQLAlchemy backend's management commands and injectable components.
app = App(
    routes=routes,
    settings=settings,
    commands=sqlalchemy_backend.commands,
    components=sqlalchemy_backend.components
)
# Running the module directly hands control to API Star's CLI entry point.
if __name__ == '__main__':
    app.main()
| true
| true
|
f707466d66d09b7bd0ac6ab4c68d70ddf32d14c9
| 6,794
|
py
|
Python
|
perfkitbenchmarker/providers/aws/flags.py
|
dongbinghua/PerfKitBenchmarker
|
d3424af4b4d60b4a5c19009b8aee29ceab7132d4
|
[
"Apache-2.0"
] | null | null | null |
perfkitbenchmarker/providers/aws/flags.py
|
dongbinghua/PerfKitBenchmarker
|
d3424af4b4d60b4a5c19009b8aee29ceab7132d4
|
[
"Apache-2.0"
] | null | null | null |
perfkitbenchmarker/providers/aws/flags.py
|
dongbinghua/PerfKitBenchmarker
|
d3424af4b4d60b4a5c19009b8aee29ceab7132d4
|
[
"Apache-2.0"
] | 1
|
2022-02-20T14:46:56.000Z
|
2022-02-20T14:46:56.000Z
|
# Copyright 2015 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing flags applicable across benchmark run on AWS."""
from absl import flags
from perfkitbenchmarker.providers.aws import util
flags.DEFINE_string(
    'aws_user_name', '', 'This determines the user name that Perfkit will '
    'attempt to use. Defaults are OS specific.')
flags.DEFINE_integer('aws_provisioned_iops', None,
                     'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
flags.DEFINE_integer('aws_provisioned_throughput', None,
                     'Provisioned throughput (MB/s) for (SSD) volumes in AWS.')
flags.DEFINE_string('aws_dax_node_type', 'dax.r4.large',
                    'The node type used for creating AWS DAX cluster.')
flags.DEFINE_integer('aws_dax_replication_factor', 3,
                     'The replication factor of AWS DAX cluster.')
flags.DEFINE_string('aws_emr_loguri', None,
                    'The log-uri parameter to pass to AWS when creating a '
                    'cluster. If not set, a bucket will be created.')
flags.DEFINE_integer('aws_emr_job_wait_time', 18000,
                     'The time to wait for an EMR job to finish, in seconds')
flags.DEFINE_boolean('aws_spot_instances', False,
                     'Whether to use AWS spot instances for any AWS VMs.')
flags.DEFINE_float('aws_spot_price', None,
                   'The spot price to bid for AWS spot instances. Defaults '
                   'to on-demand price when left as None.')
flags.DEFINE_enum('aws_spot_block_duration_minutes', None,
                  ['60', '120', '180', '240', '300', '360'], 'The required '
                  'duration for the Spot Instances (also known as Spot blocks),'
                  ' in minutes. This value must be a multiple of 60.')
flags.DEFINE_integer('aws_boot_disk_size', None,
                     'The boot disk size in GiB for AWS VMs.')
flags.DEFINE_string('kops', 'kops',
                    'The path to the kops binary.')
flags.DEFINE_string('aws_image_name_filter', None,
                    'The filter to use when searching for an image for a VM. '
                    'See usage details in aws_virtual_machine.py around '
                    'IMAGE_NAME_FILTER.')
flags.DEFINE_string('aws_image_name_regex', None,
                    'The Python regex to use to further filter images for a '
                    'VM. This applies after the aws_image_name_filter. See '
                    'usage details in aws_virtual_machine.py around '
                    'IMAGE_NAME_REGEX.')
flags.DEFINE_string('aws_preprovisioned_data_bucket', None,
                    'AWS bucket where pre-provisioned data has been copied.')
flags.DEFINE_string('cache_node_type',
                    'cache.m4.large',
                    'The AWS cache node type to use for elasticache clusters.')
flags.DEFINE_string('aws_elasticache_failover_zone',
                    None,
                    'AWS elasticache failover zone')
flags.DEFINE_string('aws_efs_token', None,
                    'The creation token used to create the EFS resource. '
                    'If the file system already exists, it will use that '
                    'instead of creating a new one.')
flags.DEFINE_boolean('aws_delete_file_system', True,
                     'Whether to delete the EFS file system.')
# Help-text fix: a space was missing after 'instead.' at the implicit string
# concatenation, rendering as "instead.The single ..." in --help output.
flags.DEFINE_list('eks_zones', [],
                  'DEPRECATED: Set container_cluster.vm_spec.AWS.zone instead. '
                  'The single region or multiple zones into which the EKS '
                  'cluster will be deployed. If a region is passed zones will '
                  'be decided by EKS. All zones must be from the same region.')
flags.register_validator('eks_zones',
                         util.EksZonesValidator)
flags.DEFINE_enum('efs_throughput_mode', 'provisioned',
                  ['provisioned', 'bursting'],
                  'The throughput mode to use for EFS.')
flags.DEFINE_float('efs_provisioned_throughput', 1024.0,
                   'The throughput limit of EFS (in MiB/s) when run in '
                   'provisioned mode.')
flags.DEFINE_boolean('provision_athena', False,
                     'Whether to provision the Athena database.')
flags.DEFINE_boolean('teardown_athena', True,
                     'Whether to teardown the Athena database.')
flags.DEFINE_string(
    'athena_output_location_prefix', 'athena-cli-results',
    'Prefix of the S3 bucket name for Athena Query Output. Suffix will be the '
    'region and the run URI, and the bucket will be dynamically created and '
    'deleted during the test.')
flags.DEFINE_string('eksctl', 'eksctl', 'Path to eksctl.')
flags.DEFINE_enum('redshift_client_interface', 'JDBC', ['JDBC'],
                  'The Runtime Interface used when interacting with Redshift.')
flags.DEFINE_enum('athena_client_interface', 'JAVA', ['JAVA'],
                  'The Runtime Interface used when interacting with Athena.')
flags.DEFINE_string('athena_query_timeout', '600', 'Query timeout in seconds.')
flags.DEFINE_string('athena_workgroup', '',
                    'Use athena workgroup to separate applications and choose '
                    'execution configuration like the engine version.')
flags.DEFINE_boolean(
    'athena_metrics_collection', False,
    'Should the cloud watch metrics be collected for Athena query executions.')
flags.DEFINE_boolean(
    'athena_workgroup_delete', True,
    'Should the dedicated athena workgroups be deleted or kept alive for investigations.'
)
flags.DEFINE_enum('aws_credit_specification', None,
                  ['CpuCredits=unlimited', 'CpuCredits=standard'],
                  'Credit specification for burstable vms.')
# Help-text fix: a space was missing between 'vm' and 'instance.' at the
# implicit string concatenation ("aws vminstance.").
flags.DEFINE_boolean('aws_vm_hibernate', False,
                     'Whether to hibernate(suspend) an aws vm '
                     'instance.')
flags.DEFINE_string(
    'aws_glue_crawler_role', None,
    "Role's ARN to be used by the crawler. Must have policies that grant "
    'permission for using AWS Glue and read access to S3.')
flags.DEFINE_integer(
    'aws_glue_crawler_sample_size', None,
    'Sets how many files will be crawled in each leaf directory. If left '
    'unset, all the files will be crawled. May range from 1 to 249.',
    1, 249
)
| 53.920635
| 89
| 0.656609
|
from absl import flags
from perfkitbenchmarker.providers.aws import util
flags.DEFINE_string(
    'aws_user_name', '', 'This determines the user name that Perfkit will '
    'attempt to use. Defaults are OS specific.')
flags.DEFINE_integer('aws_provisioned_iops', None,
                     'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
flags.DEFINE_integer('aws_provisioned_throughput', None,
                     'Provisioned throughput (MB/s) for (SSD) volumes in AWS.')
flags.DEFINE_string('aws_dax_node_type', 'dax.r4.large',
                    'The node type used for creating AWS DAX cluster.')
flags.DEFINE_integer('aws_dax_replication_factor', 3,
                     'The replication factor of AWS DAX cluster.')
flags.DEFINE_string('aws_emr_loguri', None,
                    'The log-uri parameter to pass to AWS when creating a '
                    'cluster. If not set, a bucket will be created.')
flags.DEFINE_integer('aws_emr_job_wait_time', 18000,
                     'The time to wait for an EMR job to finish, in seconds')
flags.DEFINE_boolean('aws_spot_instances', False,
                     'Whether to use AWS spot instances for any AWS VMs.')
flags.DEFINE_float('aws_spot_price', None,
                   'The spot price to bid for AWS spot instances. Defaults '
                   'to on-demand price when left as None.')
flags.DEFINE_enum('aws_spot_block_duration_minutes', None,
                  ['60', '120', '180', '240', '300', '360'], 'The required '
                  'duration for the Spot Instances (also known as Spot blocks),'
                  ' in minutes. This value must be a multiple of 60.')
flags.DEFINE_integer('aws_boot_disk_size', None,
                     'The boot disk size in GiB for AWS VMs.')
flags.DEFINE_string('kops', 'kops',
                    'The path to the kops binary.')
flags.DEFINE_string('aws_image_name_filter', None,
                    'The filter to use when searching for an image for a VM. '
                    'See usage details in aws_virtual_machine.py around '
                    'IMAGE_NAME_FILTER.')
flags.DEFINE_string('aws_image_name_regex', None,
                    'The Python regex to use to further filter images for a '
                    'VM. This applies after the aws_image_name_filter. See '
                    'usage details in aws_virtual_machine.py around '
                    'IMAGE_NAME_REGEX.')
flags.DEFINE_string('aws_preprovisioned_data_bucket', None,
                    'AWS bucket where pre-provisioned data has been copied.')
flags.DEFINE_string('cache_node_type',
                    'cache.m4.large',
                    'The AWS cache node type to use for elasticache clusters.')
flags.DEFINE_string('aws_elasticache_failover_zone',
                    None,
                    'AWS elasticache failover zone')
flags.DEFINE_string('aws_efs_token', None,
                    'The creation token used to create the EFS resource. '
                    'If the file system already exists, it will use that '
                    'instead of creating a new one.')
flags.DEFINE_boolean('aws_delete_file_system', True,
                     'Whether to delete the EFS file system.')
# Help-text fix: added the missing space after 'instead.' (implicit string
# concatenation previously rendered "instead.The single ...").
flags.DEFINE_list('eks_zones', [],
                  'DEPRECATED: Set container_cluster.vm_spec.AWS.zone instead. '
                  'The single region or multiple zones into which the EKS '
                  'cluster will be deployed. If a region is passed zones will '
                  'be decided by EKS. All zones must be from the same region.')
flags.register_validator('eks_zones',
                         util.EksZonesValidator)
flags.DEFINE_enum('efs_throughput_mode', 'provisioned',
                  ['provisioned', 'bursting'],
                  'The throughput mode to use for EFS.')
flags.DEFINE_float('efs_provisioned_throughput', 1024.0,
                   'The throughput limit of EFS (in MiB/s) when run in '
                   'provisioned mode.')
flags.DEFINE_boolean('provision_athena', False,
                     'Whether to provision the Athena database.')
flags.DEFINE_boolean('teardown_athena', True,
                     'Whether to teardown the Athena database.')
flags.DEFINE_string(
    'athena_output_location_prefix', 'athena-cli-results',
    'Prefix of the S3 bucket name for Athena Query Output. Suffix will be the '
    'region and the run URI, and the bucket will be dynamically created and '
    'deleted during the test.')
flags.DEFINE_string('eksctl', 'eksctl', 'Path to eksctl.')
flags.DEFINE_enum('redshift_client_interface', 'JDBC', ['JDBC'],
                  'The Runtime Interface used when interacting with Redshift.')
flags.DEFINE_enum('athena_client_interface', 'JAVA', ['JAVA'],
                  'The Runtime Interface used when interacting with Athena.')
flags.DEFINE_string('athena_query_timeout', '600', 'Query timeout in seconds.')
flags.DEFINE_string('athena_workgroup', '',
                    'Use athena workgroup to separate applications and choose '
                    'execution configuration like the engine version.')
flags.DEFINE_boolean(
    'athena_metrics_collection', False,
    'Should the cloud watch metrics be collected for Athena query executions.')
flags.DEFINE_boolean(
    'athena_workgroup_delete', True,
    'Should the dedicated athena workgroups be deleted or kept alive for investigations.'
)
flags.DEFINE_enum('aws_credit_specification', None,
                  ['CpuCredits=unlimited', 'CpuCredits=standard'],
                  'Credit specification for burstable vms.')
# Help-text fix: added the missing space between 'vm' and 'instance.'.
flags.DEFINE_boolean('aws_vm_hibernate', False,
                     'Whether to hibernate(suspend) an aws vm '
                     'instance.')
flags.DEFINE_string(
    'aws_glue_crawler_role', None,
    "Role's ARN to be used by the crawler. Must have policies that grant "
    'permission for using AWS Glue and read access to S3.')
flags.DEFINE_integer(
    'aws_glue_crawler_sample_size', None,
    'Sets how many files will be crawled in each leaf directory. If left '
    'unset, all the files will be crawled. May range from 1 to 249.',
    1, 249
)
| true
| true
|
f70746b84258f285a896a73614aec7d279d5502b
| 145
|
py
|
Python
|
pedal/__main__.py
|
acbart/python-analysis
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 14
|
2019-08-22T03:40:23.000Z
|
2022-03-13T00:30:53.000Z
|
pedal/__main__.py
|
pedal-edu/pedal
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 74
|
2019-09-12T04:35:56.000Z
|
2022-01-26T19:21:32.000Z
|
pedal/__main__.py
|
acbart/python-analysis
|
3cd2cc22d50a414ae6b62c74d2643be4742238d4
|
[
"MIT"
] | 2
|
2021-01-11T06:34:00.000Z
|
2021-07-21T12:48:07.000Z
|
"""
Runs pedal as a toplevel module
"""
import sys
from pedal.command_line.command_line import parse_args, main
# Parse command-line arguments and dispatch to Pedal's main entry point.
# Guarded so importing pedal.__main__ (e.g. from tooling) has no side
# effects; `python -m pedal` still executes because runpy sets __name__
# to "__main__".
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 14.5
| 60
| 0.758621
|
import sys
from pedal.command_line.command_line import parse_args, main
# Guarded entry point: `python -m pedal` still runs (runpy sets __name__
# to "__main__"), but merely importing this module no longer executes.
if __name__ == "__main__":
    args = parse_args()
    main(args)
| true
| true
|
f70746e2a0f4f5440fe577c9e2560ccce6a8d8da
| 1,709
|
py
|
Python
|
app/core/migrations/0001_initial.py
|
said017/satucan-app-api
|
cc4973aef3eeac2fd91e6b871480b6c0c5550d90
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
said017/satucan-app-api
|
cc4973aef3eeac2fd91e6b871480b6c0c5550d90
|
[
"MIT"
] | null | null | null |
app/core/migrations/0001_initial.py
|
said017/satucan-app-api
|
cc4973aef3eeac2fd91e6b871480b6c0c5550d90
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2020-04-21 12:08
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the project's custom ``User`` table."""

    # First migration for this app.
    initial = True

    dependencies = [
        # Requires django.contrib.auth's Group/Permission tables at this
        # schema version.
        ('auth', '0009_alter_user_last_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Unique email; note there is no username column on this model.
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                # The concrete model is not abstract.
                'abstract': False,
            },
        ),
    ]
| 50.264706
| 266
| 0.63897
|
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration creating the custom ``User`` model."""

    # Marks this as the app's first migration.
    initial = True

    dependencies = [
        # Depends on contrib.auth for the Group/Permission relations below.
        ('auth', '0009_alter_user_last_name_max_length'),
    ]

    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                # Email is unique; the model defines no username column.
                ('email', models.EmailField(max_length=255, unique=True)),
                ('name', models.CharField(max_length=255)),
                ('is_active', models.BooleanField(default=True)),
                ('is_staff', models.BooleanField(default=False)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                # The concrete model is not abstract.
                'abstract': False,
            },
        ),
    ]
| true
| true
|
f70746e6dd5575877a660963cdcf29b89a4f5106
| 1,438
|
py
|
Python
|
pcat2py/class/2301f556-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/2301f556-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
pcat2py/class/2301f556-5cc5-11e4-af55-00155d01fe08.py
|
phnomcobra/PCAT2PY
|
937c3b365cdc5ac69b78f59070be0a21bdb53db0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
################################################################################
# 2301f556-5cc5-11e4-af55-00155d01fe08
#
# Justin Dierking
# justindierking@hardbitsolutions.com
# phnomcobra@gmail.com
#
# 10/24/2014 Original Construction
################################################################################
class Finding:
    """Compliance check for the IE 'EnableAutoUpgrade' registry policy."""

    def __init__(self):
        # Human-readable evidence lines produced by the last check().
        self.output = []
        # Result of the last check(); starts pessimistic.
        self.is_compliant = False
        self.uuid = "2301f556-5cc5-11e4-af55-00155d01fe08"

    def check(self, cli):
        """Read the registry DWORD and record whether it equals 0."""
        key_path = r'HKLM:\Software\Policies\Microsoft\Internet Explorer\Main'
        dword = cli.get_reg_dword(key_path, 'EnableAutoUpgrade')
        # Evidence: the key that was inspected and the value found.
        self.output = [key_path, ('EnableAutoUpgrade=' + str(dword))]
        # Compliant exactly when the value is 0.
        self.is_compliant = dword == 0
        return self.is_compliant

    def fix(self, cli):
        """Create the policy key chain and force EnableAutoUpgrade to 0."""
        remediation_commands = (
            r"New-Item -path 'HKLM:\Software\Policies\Microsoft'",
            r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Internet Explorer'",
            r"New-Item -path 'HKLM:\Software\Policies\Microsoft\Internet Explorer\Main'",
            r"Set-ItemProperty -path 'HKLM:\Software\Policies\Microsoft\Internet Explorer\Main' -name 'EnableAutoUpgrade' -value 0 -Type DWord",
        )
        for command in remediation_commands:
            cli.powershell(command)
| 37.842105
| 155
| 0.605007
| true
| true
|
|
f707477c13e4afcc374ebc5e785cec3cea21660d
| 3,708
|
py
|
Python
|
lib/galaxy/tool_util/cwl/cwltool_deps.py
|
patrice-dehais/galaxy
|
63ab54651ae2a65bd656b10302213085237a3b86
|
[
"CC-BY-3.0"
] | null | null | null |
lib/galaxy/tool_util/cwl/cwltool_deps.py
|
patrice-dehais/galaxy
|
63ab54651ae2a65bd656b10302213085237a3b86
|
[
"CC-BY-3.0"
] | 2
|
2017-05-18T16:12:55.000Z
|
2022-03-08T12:08:43.000Z
|
lib/galaxy/tool_util/cwl/cwltool_deps.py
|
patrice-dehais/galaxy
|
63ab54651ae2a65bd656b10302213085237a3b86
|
[
"CC-BY-3.0"
] | null | null | null |
"""Logic for dealing with cwltool as an optional dependency.
Use this as the import interface for cwltool and just call
:func:`ensure_cwltool_available` before using any of the imported
functionality at runtime.
"""
import re
import warnings
warnings.filterwarnings("ignore", message=r"[\n.]DEPRECATION: Python 2", module="cwltool")
import requests
try:
from cwltool import (
job,
main,
pathmapper,
process,
workflow,
)
except ImportError:
main = None # type: ignore[assignment]
workflow = None # type: ignore[assignment]
job = None # type: ignore[assignment]
process = None # type: ignore[assignment]
pathmapper = None # type: ignore[assignment]
try:
from cwltool.context import (
getdefault,
LoadingContext,
RuntimeContext,
)
from cwltool.job import relink_initialworkdir
from cwltool.stdfsaccess import StdFsAccess
except ImportError:
getdefault = None # type: ignore[assignment]
LoadingContext = None # type: ignore[assignment,misc]
relink_initialworkdir = None # type: ignore[assignment]
RuntimeContext = None # type: ignore[assignment,misc]
StdFsAccess = None # type: ignore[assignment,misc]
try:
from cwltool import load_tool
from cwltool.load_tool import (
default_loader,
resolve_and_validate_document,
)
except ImportError:
default_loader = None # type: ignore[assignment]
load_tool = None # type: ignore[assignment]
resolve_and_validate_document = None # type: ignore[assignment]
try:
from cwltool import command_line_tool
except ImportError:
command_line_tool = None # type: ignore[assignment]
try:
from cwltool.load_tool import resolve_and_validate_document
except ImportError:
resolve_and_validate_document = None # type: ignore[assignment]
try:
import shellescape
except ImportError:
shellescape = None
try:
import schema_salad
from schema_salad import (
ref_resolver,
sourceline,
)
except ImportError:
schema_salad = None # type: ignore[assignment]
ref_resolver = None # type: ignore[assignment]
sourceline = None # type: ignore[assignment]
# Predicate: does this argument contain characters that require shell quoting
# (empty string, whitespace, or common shell metacharacters)?
needs_shell_quoting = re.compile(r"""(^$|[\s|&;()<>\'"$@])""").search
# if set to True, file format checking is not performed.
beta_relaxed_fmt_check = True
def ensure_cwltool_available():
    """Assert optional dependencies proxied via this module are available at runtime.

    Throw an ImportError with a description of the problem if they do not exist.
    """
    # main/workflow/shellescape are set to None by the try/except import guards
    # above when the corresponding package is missing.
    if main is None or workflow is None or shellescape is None:
        message = "This feature requires cwltool and dependencies to be available, they are not."
        if main is None:
            # Fixed double negative: the previous text read "is not unavailable".
            message += " cwltool is not available."
        elif resolve_and_validate_document is None:
            message += " cwltool.load_tool.resolve_and_validate_document is unavailable - cwltool version is too old."
        if requests is None:
            message += " Library 'requests' unavailable."
        if shellescape is None:
            message += " Library 'shellescape' unavailable."
        if schema_salad is None:
            message += " Library 'schema_salad' unavailable."
        raise ImportError(message)
# Public re-export surface of this optional-dependency shim; names are None
# when the underlying package could not be imported.
__all__ = (
    "default_loader",
    "ensure_cwltool_available",
    "getdefault",
    "load_tool",
    "LoadingContext",
    "main",
    "needs_shell_quoting",
    "pathmapper",
    "process",
    "ref_resolver",
    "relink_initialworkdir",
    "resolve_and_validate_document",
    "RuntimeContext",
    "schema_salad",
    "shellescape",
    "sourceline",
    "StdFsAccess",
    "workflow",
)
| 28.96875
| 118
| 0.685275
|
import re
import warnings
warnings.filterwarnings("ignore", message=r"[\n.]DEPRECATION: Python 2", module="cwltool")
import requests
try:
from cwltool import (
job,
main,
pathmapper,
process,
workflow,
)
except ImportError:
main = None
workflow = None
job = None
process = None
pathmapper = None
try:
from cwltool.context import (
getdefault,
LoadingContext,
RuntimeContext,
)
from cwltool.job import relink_initialworkdir
from cwltool.stdfsaccess import StdFsAccess
except ImportError:
getdefault = None
LoadingContext = None
relink_initialworkdir = None
RuntimeContext = None
StdFsAccess = None
try:
from cwltool import load_tool
from cwltool.load_tool import (
default_loader,
resolve_and_validate_document,
)
except ImportError:
default_loader = None
load_tool = None
resolve_and_validate_document = None
try:
from cwltool import command_line_tool
except ImportError:
command_line_tool = None
try:
from cwltool.load_tool import resolve_and_validate_document
except ImportError:
resolve_and_validate_document = None
try:
import shellescape
except ImportError:
shellescape = None
try:
import schema_salad
from schema_salad import (
ref_resolver,
sourceline,
)
except ImportError:
schema_salad = None
ref_resolver = None
sourceline = None
needs_shell_quoting = re.compile(r"""(^$|[\s|&;()<>\'"$@])""").search
# if set to True, file format checking is not performed.
beta_relaxed_fmt_check = True
def ensure_cwltool_available():
if main is None or workflow is None or shellescape is None:
message = "This feature requires cwltool and dependencies to be available, they are not."
if main is None:
message += " cwltool is not unavailable."
elif resolve_and_validate_document is None:
message += " cwltool.load_tool.resolve_and_validate_document is unavailable - cwltool version is too old."
if requests is None:
message += " Library 'requests' unavailable."
if shellescape is None:
message += " Library 'shellescape' unavailable."
if schema_salad is None:
message += " Library 'schema_salad' unavailable."
raise ImportError(message)
__all__ = (
"default_loader",
"ensure_cwltool_available",
"getdefault",
"load_tool",
"LoadingContext",
"main",
"needs_shell_quoting",
"pathmapper",
"process",
"ref_resolver",
"relink_initialworkdir",
"resolve_and_validate_document",
"RuntimeContext",
"schema_salad",
"shellescape",
"sourceline",
"StdFsAccess",
"workflow",
)
| true
| true
|
f707493cff486a675ac1a9a45c41170b717423f0
| 1,004
|
py
|
Python
|
polls/models.py
|
ibowditch/polls
|
1428b87bf3a83c40083b0e248c1718a89078f11f
|
[
"MIT"
] | null | null | null |
polls/models.py
|
ibowditch/polls
|
1428b87bf3a83c40083b0e248c1718a89078f11f
|
[
"MIT"
] | 1
|
2021-06-10T21:51:24.000Z
|
2021-06-10T21:51:24.000Z
|
polls/models.py
|
ibowditch/polls
|
1428b87bf3a83c40083b0e248c1718a89078f11f
|
[
"MIT"
] | null | null | null |
import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Question(models.Model):
    """A poll question with its publication timestamp."""
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        return self.question_text

    def was_published_recently(self):
        """Return True if pub_date is within the last day and not in the future."""
        now = timezone.now()
        return now - datetime.timedelta(days=1) <= self.pub_date <= now
    # Admin list-display metadata for the method above (sort key, boolean icon, column header).
    # (Removed a commented-out duplicate of was_published_recently that was dead code.)
    was_published_recently.admin_order_field = 'pub_date'
    was_published_recently.boolean = True
    was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
    """One selectable answer for a Question, with a running vote tally."""
    # Deleting the parent Question removes its choices as well.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)
    def __str__(self):
        # Human-readable representation (used e.g. by the admin).
        return self.choice_text
| 33.466667
| 73
| 0.721116
|
import datetime
from django.db import models
from django.utils import timezone
class Question(models.Model):
question_text = models.CharField(max_length=200)
pub_date = models.DateTimeField('date published')
def __str__(self):
return self.question_text
def was_published_recently(self):
now = timezone.now()
return now - datetime.timedelta(days=1) <= self.pub_date <= now
was_published_recently.admin_order_field = 'pub_date'
was_published_recently.boolean = True
was_published_recently.short_description = 'Published recently?'
class Choice(models.Model):
question = models.ForeignKey(Question, on_delete=models.CASCADE)
choice_text = models.CharField(max_length=200)
votes = models.IntegerField(default=0)
def __str__(self):
return self.choice_text
| true
| true
|
f707494e82d8073d9df728440f3917ae002d3325
| 380
|
py
|
Python
|
tests/plugins/test_ard_live.py
|
xcgx/streamlink
|
b635e0d9d0fe9363817a96ec7d31faefed95cb57
|
[
"BSD-2-Clause"
] | 10
|
2017-04-10T18:25:41.000Z
|
2021-09-15T20:14:58.000Z
|
tests/plugins/test_ard_live.py
|
xcgx/streamlink
|
b635e0d9d0fe9363817a96ec7d31faefed95cb57
|
[
"BSD-2-Clause"
] | 9
|
2020-04-04T09:49:52.000Z
|
2020-04-21T01:52:02.000Z
|
tests/plugins/test_ard_live.py
|
xcgx/streamlink
|
b635e0d9d0fe9363817a96ec7d31faefed95cb57
|
[
"BSD-2-Clause"
] | 12
|
2022-01-30T23:34:18.000Z
|
2022-03-26T17:09:43.000Z
|
from streamlink.plugins.ard_live import ARDLive
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlARDLive(PluginCanHandleUrl):
    """URL-matching tests for the ARDLive plugin, driven by PluginCanHandleUrl."""
    __plugin__ = ARDLive
    # URLs the plugin must claim.
    should_match = [
        'https://daserste.de/live/index.html',
        'https://www.daserste.de/live/index.html',
    ]
    # URLs the plugin must not claim.
    should_not_match = [
        'http://mediathek.daserste.de/live',
    ]
| 23.75
| 56
| 0.697368
|
from streamlink.plugins.ard_live import ARDLive
from tests.plugins import PluginCanHandleUrl
class TestPluginCanHandleUrlARDLive(PluginCanHandleUrl):
__plugin__ = ARDLive
should_match = [
'https://daserste.de/live/index.html',
'https://www.daserste.de/live/index.html',
]
should_not_match = [
'http://mediathek.daserste.de/live',
]
| true
| true
|
f70749ba117cc3eba7f524a371e92fe3e669750b
| 9,195
|
py
|
Python
|
gdc.py
|
mkol5222/ShiftLeftTraining-ImageScan
|
2ee75cd3637c6a3c0c4f6c8a7a17088b299e903d
|
[
"Apache-2.0"
] | null | null | null |
gdc.py
|
mkol5222/ShiftLeftTraining-ImageScan
|
2ee75cd3637c6a3c0c4f6c8a7a17088b299e903d
|
[
"Apache-2.0"
] | null | null | null |
gdc.py
|
mkol5222/ShiftLeftTraining-ImageScan
|
2ee75cd3637c6a3c0c4f6c8a7a17088b299e903d
|
[
"Apache-2.0"
] | 1
|
2022-03-16T08:59:42.000Z
|
2022-03-16T08:59:42.000Z
|
#!/usr/bin/python3
import sys
import json
import os
import getopt
import ipaddress
import uuid
# Default CLI state; overwritten by the flags parsed below.
ip = ''
function = ''
gdc = None
description = ''
listofips = None
# Require at least -g/-j/-f plus a value (program name + 4 tokens); otherwise
# print the usage text and exit.
if len(sys.argv) <= 4:
    print("Error - Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -d <GDC_Description>")
    print("")
    print("This simple tool will update a JSON file with ip addresses (v4 and v6) used with the Generic Data Center Objects as described in SK167210 for R81.")
    print("Examples:")
    print("Add a new IP address to a Generic Data Center to an existing JSON file")
    print("gdc.py -g GDC_LIST1 -j gdc.json -f AddIP -i 10.2.0.1")
    print("")
    print("Add a new IP addresses to a Generic Data Center to an existing JSON file from a list of ip's")
    print("gdc.py -g GDC_LIST1 -j gdc.json -f AddIP -l listofip_address.txt")
    print("")
    print("Delete an IP address to a Generic Data Center to an existing JSON file")
    print("gdc.py -g GDC_LIST1 -j gdc.json -f DelIP -i 10.2.0.1")
    print("")
    print("Add a new Generic Data Center to an existing JSON file. IP address must be included.")
    print("gdc.py -g GDC_LIST_New -j gdc.json -f AddGDC -d GDC_LIST_NEW_Description -i 10.2.0.1")
    print("")
    print("Delete a Generic Data Center in an existing JSON file. ")
    print("gdc.py -g GDC_LIST_New -j gdc.json -f DelGDC")
    print("")
    exit(1)
try:
    # Bug fix: the long-option list previously contained `'listofips' 'help'`,
    # which Python concatenates into the single bogus option 'listofipshelp',
    # breaking both --listofips and --help.  Long options that take a value
    # must also end with '='.
    opts, args = getopt.getopt(sys.argv[1:], "g:j:f:i:d:l:", ['gdc=', 'function=', 'ip=', 'desc=', 'listofips=', 'help'])
except getopt.GetoptError:
    print('Error - Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -l <list_of_ip_in_File> -d <GDC_Description>')
    sys.exit(2)
# Map each recognised flag onto the corresponding global.
for opt, arg in opts:
    if opt in ('-h', '--help'):
        print('Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -l <list_of_ip_in_File> -d <GDC_Description>')
        sys.exit()
    elif opt in ("-g", "--gdc"):
        gdc = arg
    elif opt in ("-f", "--function"):
        function = arg
    elif opt in ("-j", "--json"):
        jsonfile = arg
    elif opt in ('-i', '--ip'):
        ip = arg
    elif opt in ('-d', '--desc'):
        desc = arg
    elif opt in ('-l', '--listofips'):
        listofips = arg
### Functions
# Function to Remove Duplicates - Used to make sure IP's are uniuqe
def remove_dupe_dicts(l):
    """Return *l* with duplicate entries removed.

    Entries are compared by their canonical JSON encoding (``sort_keys=True``),
    so dicts that differ only in key order count as duplicates.  Unlike the
    previous set-based version, first-seen order is preserved, which keeps the
    output JSON file stable across runs.
    """
    # dict.fromkeys dedupes while retaining insertion order (Python 3.7+).
    seen = dict.fromkeys(json.dumps(d, sort_keys=True) for d in l)
    return [json.loads(s) for s in seen]
# Function to Check for name in json
def gdc_exist(gdc, jsondata):
    """Return True when a data-center entry named *gdc* exists in *jsondata*."""
    return any(entry["name"] == gdc for entry in jsondata)
# Function to check if JSON file exists
def fileexists(fn):
    """Exit the program with a message when *fn* cannot be opened for reading.

    Used as a pre-flight check before the JSON / IP-list files are processed.
    """
    try:
        # Probe readability only; the `with` block closes the handle immediately
        # (the previous version leaked the open file object).
        with open(fn, "r"):
            pass
    except IOError:
        print('File: %s - specified does not appear to exist' % fn)
        sys.exit()
# Function to check for valid ip address
def check_ip(checkip):
    """Validate an IP address, an IP network, or a "start-end" range.

    Prints an error and exits the program when the value is invalid;
    returns None on success.
    """
    # A dash marks a "start-end" range such as 10.0.0.1-10.0.0.9.
    if "-" in checkip:
        bounds = checkip.split("-")
        # Valid only when the start address sorts strictly below the end.
        if ipaddress.ip_address(bounds[0]) < ipaddress.ip_address(bounds[1]):
            return
        print('address/netmask is invalid: %s' % checkip)
        print('If adding a new Generic Data Center Object an IP has to be defined!')
        sys.exit()
    # Not a range: accept either a single host address or a network/CIDR.
    try:
        ipaddress.ip_address(checkip)
    except ValueError:
        try:
            ipaddress.ip_network(checkip)
        except ValueError:
            print('address/netmask is invalid: %s' % checkip)
            print('If adding a new Generic Data Center Object an IP has to be defined!')
            sys.exit()
#### Verify that GDC was passed from CLI ####
# -g is mandatory for every function; bail out early when it is missing.
if not gdc:
    print("Generic Data Center was not passed as a flag to the command. Include -g <Data_Center_Name>")
    sys.exit()
#### Add IP to Data Center ####
# AddIP: append one IP (-i) or every IP from a file (-l) to an existing
# data center entry, then rewrite the JSON file.
if function == "AddIP":
    filecheck = fileexists(jsonfile)
    obj = json.load(open(jsonfile))
    # Check and see if the name of the Data Center exists
    match = gdc_exist(gdc,obj['objects'])
    if match == False:
        print('Data Center Object : %s was not found in file : %s' % (gdc,jsonfile))
        print('No updates were made')
        sys.exit()
    # Check to see if this is a list of ips from a file
    if not listofips:
        # Add an IP to the list
        check_ip(ip)
        for item in obj['objects']:
            if item["name"] == gdc:
                item['ranges'].append(ip)
                item['ranges'] = remove_dupe_dicts(item['ranges'])
    else:
        # Read list of ip addresses from file and extend
        filecheck = fileexists(listofips)
        iplist = {}
        with open(listofips) as f:
            iplist = f.read().splitlines()
        # Every address must validate (check_ip exits on failure) before any are added.
        for checkip in iplist:
            check_ip(checkip)
        for item in obj['objects']:
            if item["name"] == gdc:
                item['ranges'].extend(iplist)
                item['ranges'] = remove_dupe_dicts(item['ranges'])
    # Output the updated file with pretty JSON
    # NOTE(review): the open() handles here and above are never closed
    # explicitly; a `with` block would be safer.
    open(jsonfile, "w").write(
        json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
    )
#### Remove IP from Data Center ####
# DelIP: remove one IP (-i) or every IP listed in a file (-l) from an
# existing data center entry, then rewrite the JSON file.
if function == "DelIP":
    filecheck = fileexists(jsonfile)
    obj = json.load(open(jsonfile))
    # Check and see if the name of the Data Center exists
    match = gdc_exist(gdc,obj['objects'])
    if match == False:
        print('Data Center Object : %s was not found in file : %s' % (gdc,jsonfile))
        print('No updates were made')
        sys.exit()
    item = obj['objects']
    if not listofips:
        check_ip(ip)
        for item in obj['objects']:
            if item["name"] == gdc:
                # Iterate over a copy so removal does not skip elements.
                for a in item['ranges'][:]:
                    if (a == ip):
                        item['ranges'].remove(a)
    else:
        # Read list of ip addresses from file and extend
        filecheck = fileexists(listofips)
        iplist = {}
        with open(listofips) as f:
            iplist = f.read().splitlines()
        for checkip in iplist:
            check_ip(checkip)
        for item in obj['objects']:
            if item["name"] == gdc:
                for t in iplist:
                    try:
                        item['ranges'].remove(t)
                    except:
                        # Missing entries are reported but not fatal.
                        print('IP address %s is not in the file %s.' % (t, listofips))
                item['ranges'] = remove_dupe_dicts(item['ranges'])
    # Output the updated file with pretty JSON
    open(jsonfile, "w").write(
        json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
    )
#### Add Data Center ####
# AddGDC: create a new data center entry (fresh UUID, description from -d)
# and seed it with one IP (-i) or a file of IPs (-l).
if function == "AddGDC":
    filecheck = fileexists(jsonfile)
    obj = json.load(open(jsonfile))
    item = obj['objects']
    # NOTE(review): this rebinds the name `uuid` from the module to a UUID
    # instance; harmless here because it only runs once per invocation.
    uuid = uuid.uuid4()
    # Make sure Description is set
    try:
        desc
    except NameError:
        print("Description was not provided as a paramater, please use -d to add the description while adding a new Data Center")
        sys.exit()
    # Check and see if the name of the Data Center already exists
    match = gdc_exist(gdc,obj['objects'])
    if match == True:
        print('Data Center Object : %s already exists in file : %s' % (gdc,jsonfile))
        print('No updates were made')
        sys.exit()
    # Add GDC data to JSON
    item = obj['objects']
    add = {"description": desc,
           "id": str(uuid),
           "name": gdc,
           "ranges": []}
    item.append(add)
    # Check to see if this is a list of ips from a file
    if not listofips:
        # Add an IP to the list
        check_ip(ip)
        for item in obj['objects']:
            if item["name"] == gdc:
                item['ranges'].append(ip)
                item['ranges'] = remove_dupe_dicts(item['ranges'])
    else:
        # Read list of ip addresses from file and extend
        filecheck = fileexists(listofips)
        iplist = {}
        with open(listofips) as f:
            iplist = f.read().splitlines()
        for checkip in iplist:
            check_ip(checkip)
        for item in obj['objects']:
            if item["name"] == gdc:
                item['ranges'].extend(iplist)
                item['ranges'] = remove_dupe_dicts(item['ranges'])
    # Output the updated file with pretty JSON
    open(jsonfile, "w").write(
        json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
    )
#### Delete Data Center ####
# DelGDC: drop the named data center entry from the JSON file entirely.
if function == "DelGDC":
    filecheck = fileexists(jsonfile)
    obj = json.load(open(jsonfile))
    # Check if Data Center exists before deletion
    match = gdc_exist(gdc,obj['objects'])
    if match == False:
        print('Data Center Object : %s does not exist in file : %s' % (gdc,jsonfile))
        print('No updates were made')
        sys.exit()
    # Remove the first (and only expected) entry with a matching name.
    for i in range(len(obj['objects'])):
        if obj['objects'][i]['name'] == gdc:
            obj['objects'].pop(i)
            break
    # Rewrite the file with pretty-printed JSON.
    open(jsonfile, "w").write(
        json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
    )
| 34.309701
| 174
| 0.588907
|
import sys
import json
import os
import getopt
import ipaddress
import uuid
ip = ''
function = ''
gdc = None
description = ''
listofips = None
if len(sys.argv) <= 4:
print("Error - Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -d <GDC_Description>")
print("")
print("This simple tool will update a JSON file with ip addresses (v4 and v6) used with the Generic Data Center Objects as described in SK167210 for R81.")
print("Examples:")
print("Add a new IP address to a Generic Data Center to an existing JSON file")
print("gdc.py -g GDC_LIST1 -j gdc.json -f AddIP -i 10.2.0.1")
print("")
print("Add a new IP addresses to a Generic Data Center to an existing JSON file from a list of ip's")
print("gdc.py -g GDC_LIST1 -j gdc.json -f AddIP -l listofip_address.txt")
print("")
print("Delete an IP address to a Generic Data Center to an existing JSON file")
print("gdc.py -g GDC_LIST1 -j gdc.json -f DelIP -i 10.2.0.1")
print("")
print("Add a new Generic Data Center to an existing JSON file. IP address must be included.")
print("gdc.py -g GDC_LIST_New -j gdc.json -f AddGDC -d GDC_LIST_NEW_Description -i 10.2.0.1")
print("")
print("Delete a Generic Data Center in an existing JSON file. ")
print("gdc.py -g GDC_LIST_New -j gdc.json -f DelGDC")
print("")
exit(1)
try:
opts, args = getopt.getopt(sys.argv[1:],"g:j:f:i:d:l:", ['gdc=','function=','ip=','desc=','listofips' 'help'])
except getopt.GetoptError:
print('Error - Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -l <list_of_ip_in_File> -d <GDC_Description>')
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
print('Format should be - gdc.py -g <Generic_Data_Center_Name> -j <Json_File> -f <AddGDC/DelGDC/AddIP/DelIP> -i <ip> -l <list_of_ip_in_File> -d <GDC_Description>')
sys.exit()
elif opt in ("-g", "--gdc"):
gdc = arg
elif opt in ("-f", "--function"):
function = arg
elif opt in ("-j", "--json"):
jsonfile = arg
elif opt in ('-i', '--ip'):
ip = arg
elif opt in ('-d', '--desc'):
desc = arg
elif opt in ('-l', '--listofips'):
listofips = arg
### Functions
# Function to Remove Duplicates - Used to make sure IP's are uniuqe
def remove_dupe_dicts(l):
list_of_strings = [
json.dumps(d, sort_keys=True)
for d in l
]
list_of_strings = set(list_of_strings)
return [
json.loads(s)
for s in list_of_strings
]
def gdc_exist(gdc,jsondata):
match = False
for dc in jsondata:
if dc["name"] == gdc:
match = True
return match
def fileexists(fn):
try:
open(fn,"r")
except IOError:
print('File: %s - specified does not appear to exist' % fn)
sys.exit()
def check_ip(checkip):
isrange = ("-" in checkip)
if isrange == True:
range = checkip.split("-")
ip = (ipaddress.ip_address(range[0]) < ipaddress.ip_address(range[1]))
if ip == True:
return
else:
print('address/netmask is invalid: %s' % checkip)
print('If adding a new Generic Data Center Object an IP has to be defined!')
sys.exit()
try:
ip = ipaddress.ip_address(checkip)
except ValueError:
try:
ip = ipaddress.ip_network(checkip)
except ValueError:
print('address/netmask is invalid: %s' % checkip)
print('If adding a new Generic Data Center Object an IP has to be defined!')
sys.exit()
tch = gdc_exist(gdc,obj['objects'])
if match == False:
print('Data Center Object : %s was not found in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
if not listofips:
check_ip(ip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].append(ip)
item['ranges'] = remove_dupe_dicts(item['ranges'])
else:
filecheck = fileexists(listofips)
iplist = {}
with open(listofips) as f:
iplist = f.read().splitlines()
for checkip in iplist:
check_ip(checkip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].extend(iplist)
item['ranges'] = remove_dupe_dicts(item['ranges'])
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
c_exist(gdc,obj['objects'])
if match == False:
print('Data Center Object : %s was not found in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
item = obj['objects']
if not listofips:
check_ip(ip)
for item in obj['objects']:
if item["name"] == gdc:
for a in item['ranges'][:]:
if (a == ip):
item['ranges'].remove(a)
else:
filecheck = fileexists(listofips)
iplist = {}
with open(listofips) as f:
iplist = f.read().splitlines()
for checkip in iplist:
check_ip(checkip)
for item in obj['objects']:
if item["name"] == gdc:
for t in iplist:
try:
item['ranges'].remove(t)
except:
print('IP address %s is not in the file %s.' % (t, listofips))
item['ranges'] = remove_dupe_dicts(item['ranges'])
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
open(jsonfile))
item = obj['objects']
uuid = uuid.uuid4()
try:
desc
except NameError:
print("Description was not provided as a paramater, please use -d to add the description while adding a new Data Center")
sys.exit()
match = gdc_exist(gdc,obj['objects'])
if match == True:
print('Data Center Object : %s already exists in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
item = obj['objects']
add = {"description": desc,
"id": str(uuid),
"name": gdc,
"ranges": []}
item.append(add)
if not listofips:
check_ip(ip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].append(ip)
item['ranges'] = remove_dupe_dicts(item['ranges'])
else:
filecheck = fileexists(listofips)
iplist = {}
with open(listofips) as f:
iplist = f.read().splitlines()
for checkip in iplist:
check_ip(checkip)
for item in obj['objects']:
if item["name"] == gdc:
item['ranges'].extend(iplist)
item['ranges'] = remove_dupe_dicts(item['ranges'])
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
jsonfile))
match = gdc_exist(gdc,obj['objects'])
if match == False:
print('Data Center Object : %s does not exist in file : %s' % (gdc,jsonfile))
print('No updates were made')
sys.exit()
for i in range(len(obj['objects'])):
if obj['objects'][i]['name'] == gdc:
obj['objects'].pop(i)
break
open(jsonfile, "w").write(
json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
)
| true
| true
|
f7074a019eb3125fa22bf1609ead420d0cc2278e
| 3,374
|
py
|
Python
|
bert/wtfml/data_loaders/nlp/classification.py
|
jphacks/C_2111
|
df87580614d7e5c225ea30746e5f2cd0576bbc98
|
[
"MIT"
] | 1
|
2021-10-19T07:10:16.000Z
|
2021-10-19T07:10:16.000Z
|
bert/wtfml/data_loaders/nlp/classification.py
|
jphacks/C_2111
|
df87580614d7e5c225ea30746e5f2cd0576bbc98
|
[
"MIT"
] | null | null | null |
bert/wtfml/data_loaders/nlp/classification.py
|
jphacks/C_2111
|
df87580614d7e5c225ea30746e5f2cd0576bbc98
|
[
"MIT"
] | null | null | null |
import pandas as pd
import torch
from transformers import BertJapaneseTokenizer
from wtfml.data_loaders.nlp.utils import clean_sentence
import transformers
class BERTSimpleDataset:
    """Text-classification dataset tokenized with the Tohoku Japanese BERT tokenizer.

    Accepts an optional cleaning function applied to each text before encoding.
    """

    def __init__(self, input_texts, target, clearning_function=clean_sentence):
        # Normalize a pandas Series into a plain list of texts.
        if isinstance(input_texts, pd.Series):
            input_texts = list(input_texts)
        self.input_texts = input_texts
        self.target = target
        self.tokenizer = BertJapaneseTokenizer.from_pretrained(
            "cl-tohoku/bert-base-japanese-whole-word-masking"
        )
        self.max_len = 144  # sized for tweet-length inputs
        self.clearning_function = clearning_function

    def __len__(self):
        return len(self.input_texts)

    def __getitem__(self, item):
        text = str(self.input_texts[item])
        # Optionally clean the raw text before tokenization.
        if self.clearning_function:
            text = self.clearning_function(text)
        encoded = self.tokenizer.encode_plus(
            text,
            None,
            add_special_tokens=True,
            max_length=self.max_len,
            padding="max_length",
            truncation=True,
        )
        # Targets are long tensors as required by cross-entropy style losses.
        return {
            "ids": torch.tensor(encoded["input_ids"], dtype=torch.long),
            "mask": torch.tensor(encoded["attention_mask"], dtype=torch.long),
            "token_type_ids": torch.tensor(encoded["token_type_ids"], dtype=torch.long),
            "targets": torch.tensor(self.target[item], dtype=torch.long),
        }
class DistilBERTDataset:
    """Text-classification dataset tokenized for DistilBERT.

    Same contract as BERTSimpleDataset, but the emitted sample dict carries no
    token_type_ids, since the encoder does not use segment embeddings.
    """

    def __init__(self, input_texts, target, clearning_function=clean_sentence):
        # Normalize a pandas Series into a plain list of texts.
        if isinstance(input_texts, pd.Series):
            input_texts = list(input_texts)
        self.input_texts = input_texts
        self.target = target
        # NOTE(review): loads a BERT vocab into a DistilBertTokenizer — confirm
        # this shared-vocab setup matches the training pipeline.
        self.tokenizer = transformers.DistilBertTokenizer.from_pretrained(
            "cl-tohoku/bert-base-japanese-whole-word-masking"
        )
        self.max_len = 144  # sized for tweet-length inputs
        self.clearning_function = clearning_function

    def __len__(self):
        return len(self.input_texts)

    def __getitem__(self, item):
        text = str(self.input_texts[item])
        # Optionally clean the raw text before tokenization.
        if self.clearning_function:
            text = self.clearning_function(text)
        encoded = self.tokenizer.encode_plus(
            text,
            None,
            add_special_tokens=True,
            max_length=self.max_len,
            padding="max_length",
            truncation=True,
        )
        # token_type_ids deliberately omitted for DistilBERT.
        return {
            "ids": torch.tensor(encoded["input_ids"], dtype=torch.long),
            "mask": torch.tensor(encoded["attention_mask"], dtype=torch.long),
            "targets": torch.tensor(self.target[item], dtype=torch.long),
        }
| 33.405941
| 80
| 0.602549
|
import pandas as pd
import torch
from transformers import BertJapaneseTokenizer
from wtfml.data_loaders.nlp.utils import clean_sentence
import transformers
class BERTSimpleDataset:
def __init__(self, input_texts, target, clearning_function=clean_sentence):
if isinstance(input_texts, pd.Series):
input_texts = list(input_texts)
self.input_texts = input_texts
self.target = target
self.tokenizer = BertJapaneseTokenizer.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking"
)
self.max_len = 144
self.clearning_function = clearning_function
def __len__(self):
return len(self.input_texts)
def __getitem__(self, item):
input_text = str(self.input_texts[item])
if self.clearning_function:
input_text = self.clearning_function(input_text)
inputs = self.tokenizer.encode_plus(
input_text,
None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
token_type_ids = inputs["token_type_ids"]
target = self.target[item]
return {
"ids": torch.tensor(ids, dtype=torch.long),
"mask": torch.tensor(mask, dtype=torch.long),
"token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
"targets": torch.tensor(target, dtype=torch.long),
}
class DistilBERTDataset:
def __init__(self, input_texts, target, clearning_function=clean_sentence):
if isinstance(input_texts, pd.Series):
input_texts = list(input_texts)
self.input_texts = input_texts
self.target = target
self.tokenizer = transformers.DistilBertTokenizer.from_pretrained(
"cl-tohoku/bert-base-japanese-whole-word-masking"
)
self.max_len = 144
self.clearning_function = clearning_function
def __len__(self):
return len(self.input_texts)
def __getitem__(self, item):
input_text = str(self.input_texts[item])
if self.clearning_function:
input_text = self.clearning_function(input_text)
inputs = self.tokenizer.encode_plus(
input_text,
None,
add_special_tokens=True,
max_length=self.max_len,
padding="max_length",
truncation=True,
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
target = self.target[item]
return {
"ids": torch.tensor(ids, dtype=torch.long),
"mask": torch.tensor(mask, dtype=torch.long),
"targets": torch.tensor(target, dtype=torch.long),
}
| true
| true
|
f7074a14daec1b2c07974cf17dcc384aa1a40a51
| 2,763
|
py
|
Python
|
src/cec2017/utils.py
|
Evelkos/CellularEvolutionaryAlgorithm
|
9633337a00e20cb0c4d8a679e72755e165113468
|
[
"MIT"
] | null | null | null |
src/cec2017/utils.py
|
Evelkos/CellularEvolutionaryAlgorithm
|
9633337a00e20cb0c4d8a679e72755e165113468
|
[
"MIT"
] | null | null | null |
src/cec2017/utils.py
|
Evelkos/CellularEvolutionaryAlgorithm
|
9633337a00e20cb0c4d8a679e72755e165113468
|
[
"MIT"
] | null | null | null |
# cec2017.utils
# Author: Duncan Tilley
# Additional functions for graphing and benchmarking
def surface_plot(function, domain=(-100, 100), points=30, dimension=2, ax=None):
    """
    Creates a surface plot of a function.

    Args:
        function (function): The objective function to be called at each point.
        domain (num, num): The inclusive (min, max) domain for each dimension.
        points (int): The number of points to collect on each dimension. A total
            of points^2 function evaluations will be performed.
        dimension (int): The dimension to pass to the function. If this is more
            than 2, the elements after the first 2 will simply be zero,
            providing a slice at x_3 = 0, ..., x_n = 0.
        ax (matplotlib axes): Optional axes to use (must have projection='3d').
            Note, if specified plt.show() will not be called.
    """
    import matplotlib.pyplot as plt
    import numpy as np
    from mpl_toolkits import mplot3d

    # Build the (x, y) grid as points^2 rows and evaluate the function on it.
    axis_values = np.linspace(domain[0], domain[1], points)
    grid = np.transpose(
        [np.tile(axis_values, len(axis_values)), np.repeat(axis_values, len(axis_values))]
    )
    heights = np.zeros(points * points)
    if dimension > 2:
        # Pad each 2-D point with zeros so the slice sits at x_3 = ... = x_n = 0.
        padding = np.zeros(dimension - 2)
        for idx in range(grid.shape[0]):
            heights[idx] = function(np.concatenate([grid[idx], padding]))
    else:
        for idx in range(grid.shape[0]):
            heights[idx] = function(grid[idx])

    # Only call plt.show() when we created the axes ourselves.
    own_axes = ax is None
    if own_axes:
        ax = plt.axes(projection="3d")
    X = grid[:, 0].reshape((points, points))
    Y = grid[:, 1].reshape((points, points))
    Z = heights.reshape((points, points))
    ax.plot_surface(X, Y, Z, cmap="gist_ncar", edgecolor="none")
    ax.set_title(function.__name__)
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    if own_axes:
        plt.show()
def time(function, domain=(-100, 100), points=30):
    """
    Return the wall-clock seconds needed for points**2 evaluations of
    ``function`` over a 2-D grid spanning ``domain`` on each axis.

    function: objective called with a length-2 array at each grid point.
    domain: inclusive (min, max) range for each dimension.
    points: grid resolution per axis (points**2 total evaluations).
    """
    from time import time as now
    import numpy as np

    # Same grid construction as surface_plot: points**2 (x, y) samples.
    axis = np.linspace(domain[0], domain[1], points)
    samples = np.transpose([np.tile(axis, len(axis)), np.repeat(axis, len(axis))])
    results = np.zeros(points * points)

    start = now()
    for i in range(samples.shape[0]):
        results[i] = function(samples[i])
    return now() - start
| 33.289157
| 80
| 0.624321
|
def surface_plot(function, domain=(-100, 100), points=30, dimension=2, ax=None):
    """Render a 3-D surface of ``function`` sampled on a points x points grid.

    The grid spans ``domain`` on both plotted axes; for dimension > 2 the
    remaining coordinates are held at zero. If ``ax`` is supplied (projection
    '3d') it is drawn on and plt.show() is skipped.
    """
    import matplotlib.pyplot as plt
    import numpy as np
    from mpl_toolkits import mplot3d  # noqa: F401 -- enables the '3d' projection

    # points**2 (x, y) samples covering the domain.
    ticks = np.linspace(domain[0], domain[1], points)
    samples = np.transpose([np.tile(ticks, len(ticks)), np.repeat(ticks, len(ticks))])
    heights = np.zeros(points * points)

    # Zero-pad the input when the objective expects more than 2 coordinates.
    zeros_tail = np.zeros(dimension - 2) if dimension > 2 else None
    for i in range(samples.shape[0]):
        arg = samples[i] if zeros_tail is None else np.concatenate([samples[i], zeros_tail])
        heights[i] = function(arg)

    created_axes = ax is None
    if created_axes:
        ax = plt.axes(projection="3d")
    side = (points, points)
    ax.plot_surface(samples[:, 0].reshape(side),
                    samples[:, 1].reshape(side),
                    heights.reshape(side),
                    cmap="gist_ncar", edgecolor="none")
    ax.set_title(function.__name__)
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    if created_axes:
        plt.show()
def time(function, domain=(-100, 100), points=30):
    """Measure wall-clock seconds for points**2 evaluations of ``function``
    over a uniform 2-D grid spanning ``domain`` on each axis."""
    from time import time as clock
    import numpy as np

    ticks = np.linspace(domain[0], domain[1], points)
    grid = np.transpose([np.tile(ticks, len(ticks)), np.repeat(ticks, len(ticks))])
    out = np.zeros(points * points)

    t0 = clock()
    for i in range(grid.shape[0]):
        out[i] = function(grid[i])
    return clock() - t0
| true
| true
|
f7074b3b1b22b3bd6da7e5ca01ce7e3d7f518bfc
| 8,049
|
py
|
Python
|
NT_UDA/demo_syn_atdoc.py
|
chamwen/NT-Benchmark
|
d5a17a07fdfa89d80d47843c35ecf3e078b94371
|
[
"MIT"
] | 1
|
2022-03-21T16:30:40.000Z
|
2022-03-21T16:30:40.000Z
|
NT_UDA/demo_syn_atdoc.py
|
chamwen/NT-Benchmark
|
d5a17a07fdfa89d80d47843c35ecf3e078b94371
|
[
"MIT"
] | null | null | null |
NT_UDA/demo_syn_atdoc.py
|
chamwen/NT-Benchmark
|
d5a17a07fdfa89d80d47843c35ecf3e078b94371
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# A Survey on Negative Transfer
# https://github.com/chamwen/NT-Benchmark
import numpy as np
import argparse
import os
import torch as tr
import torch.nn as nn
import torch.optim as optim
from utils import network, loss, utils
from utils.network import calc_coeff
from utils.dataloader import read_syn_src_tar
from utils.utils import lr_scheduler_full, fix_random_seed, add_label_noise_noimg
from utils.loss import CELabelSmooth, CDANE, Entropy, RandomLayer
import torch.utils.data as Data
def data_load(Xs, Ys, Xt, Yt, args):
    """Wrap source/target tensors in the three DataLoaders used for training.

    Returns a dict with:
      "source": shuffled source batches (drop_last=True),
      "target": shuffled target batches with per-sample indices (drop_last=True),
      "Target": full, ordered target set at 3x batch size for evaluation.
    Optionally corrupts the source labels when args.noise_rate > 0.
    """
    bs = args.batch_size
    if args.noise_rate > 0:
        Ys = add_label_noise_noimg(Ys, args.seed, args.class_num, args.noise_rate)

    # Per-sample indices let the trainer address its target memory bank.
    tar_indices = tr.from_numpy(np.arange(len(Yt))).long()

    loaders = {
        "source": Data.DataLoader(Data.TensorDataset(Xs, Ys),
                                  batch_size=bs, shuffle=True, drop_last=True),
        "target": Data.DataLoader(Data.TensorDataset(Xt, Yt, tar_indices),
                                  batch_size=bs, shuffle=True, drop_last=True),
        "Target": Data.DataLoader(Data.TensorDataset(Xt, Yt),
                                  batch_size=bs * 3, shuffle=False, drop_last=False),
    }
    return loaders
def train_target(args):
    """Train CDAN-E with ATDOC pseudo-labeling on one synthetic source/target pair.

    Loads pre-initialized backbone/classifier/discriminator weights, runs
    adversarial domain adaptation with an ATDOC neighborhood memory bank, and
    returns the last evaluated target-domain accuracy (percent).
    """
    X_src, y_src, X_tar, y_tar = read_syn_src_tar(args)
    dset_loaders = data_load(X_src, y_src, X_tar, y_tar, args)

    netF, netC = network.backbone_net(args, args.bottleneck)
    netF.load_state_dict(tr.load(args.mdl_init_dir + 'netF.pt'))
    netC.load_state_dict(tr.load(args.mdl_init_dir + 'netC.pt'))
    base_network = nn.Sequential(netF, netC)
    max_len = max(len(dset_loaders["source"]), len(dset_loaders["target"]))
    args.max_iter = args.max_epoch * max_len

    ad_net = network.AdversarialNetwork(args.bottleneck, 20).cuda()
    ad_net.load_state_dict(tr.load(args.mdl_init_dir + 'netD_full.pt'))
    random_layer = RandomLayer([args.bottleneck, args.class_num], args.bottleneck)
    random_layer.cuda()

    # Feature extractor learns at 1/10 of the classifier/discriminator rate.
    optimizer_f = optim.SGD(netF.parameters(), lr=args.lr * 0.1)
    optimizer_c = optim.SGD(netC.parameters(), lr=args.lr)
    optimizer_d = optim.SGD(ad_net.parameters(), lr=args.lr)

    max_len = max(len(dset_loaders["source"]), len(dset_loaders["target"]))
    max_iter = args.max_epoch * max_len
    interval_iter = max_iter // 10
    iter_num = 0
    base_network.train()

    # ATDOC memory bank: L2-normalized target features + soft class scores.
    class_num = args.class_num
    mem_fea = tr.rand(len(dset_loaders["target"].dataset), args.bottleneck).cuda()
    mem_fea = mem_fea / tr.norm(mem_fea, p=2, dim=1, keepdim=True)
    mem_cls = tr.ones(len(dset_loaders["target"].dataset), class_num).cuda() / class_num

    while iter_num < max_iter:
        # Cycle the loaders endlessly. NameError covers the first pass (the
        # iterator does not exist yet); StopIteration covers exhaustion.
        # Fixed: DataLoader iterators no longer expose `.next()` (removed in
        # modern PyTorch) -- use the builtin next() instead of the old bare
        # `except:` around `iter_source.next()`.
        try:
            inputs_source, labels_source = next(iter_source)
        except (NameError, StopIteration):
            iter_source = iter(dset_loaders["source"])
            inputs_source, labels_source = next(iter_source)
        try:
            inputs_target, _, idx = next(iter_target)
        except (NameError, StopIteration):
            iter_target = iter(dset_loaders["target"])
            inputs_target, _, idx = next(iter_target)

        # A batch of one breaks batch-norm statistics; skip it.
        if inputs_source.size(0) == 1:
            continue

        iter_num += 1
        lr_scheduler_full(optimizer_f, init_lr=args.lr * 0.1, iter_num=iter_num, max_iter=args.max_iter)
        lr_scheduler_full(optimizer_c, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)
        lr_scheduler_full(optimizer_d, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)

        inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = tr.cat((features_source, features_target), dim=0)

        # CDAN-E adversarial transfer loss + label-smoothed source CE loss.
        args.loss_trade_off = 1.0
        outputs = tr.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
        entropy = Entropy(softmax_out)
        transfer_loss = CDANE([features, softmax_out], ad_net, entropy, calc_coeff(iter_num), random_layer=random_layer)
        classifier_loss = CELabelSmooth(num_classes=args.class_num, epsilon=args.smooth)(outputs_source, labels_source)

        # ATDOC: the K nearest memory neighbors vote a pseudo-label per sample.
        dis = -tr.mm(features_target.detach(), mem_fea.t())
        for di in range(dis.size(0)):
            dis[di, idx[di]] = tr.max(dis)  # exclude each sample's own slot
        _, p1 = tr.sort(dis, dim=1)

        w = tr.zeros(features_target.size(0), mem_fea.size(0)).cuda()
        for wi in range(w.size(0)):
            for wj in range(args.K):
                w[wi][p1[wi, wj]] = 1 / args.K

        weight_, pred = tr.max(w.mm(mem_cls), 1)
        loss_ = nn.CrossEntropyLoss(reduction='none')(outputs_target, pred)
        classifier_loss_atdoc = tr.sum(weight_ * loss_) / (tr.sum(weight_).item())

        # Ramp the ATDOC term in linearly over the course of training.
        eff = iter_num / args.max_iter
        total_loss = args.loss_trade_off * transfer_loss + classifier_loss + args.tar_par * eff * classifier_loss_atdoc

        optimizer_f.zero_grad()
        optimizer_c.zero_grad()
        optimizer_d.zero_grad()
        total_loss.backward()
        optimizer_f.step()
        optimizer_c.step()
        optimizer_d.step()

        # Momentum update of the memory bank with the current target batch.
        # NOTE(review): train mode is only restored at evaluation intervals
        # below (base_network.train()); preserved as-is to keep behavior.
        netF.eval()
        netC.eval()
        with tr.no_grad():
            features_target, outputs_target = netC(netF(inputs_target))
            features_target = features_target / tr.norm(features_target, p=2, dim=1, keepdim=True)
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            outputs_target = softmax_out ** 2 / ((softmax_out ** 2).sum(dim=0))
        mem_fea[idx] = (1.0 - args.momentum) * mem_fea[idx] + args.momentum * features_target.clone()
        mem_cls[idx] = (1.0 - args.momentum) * mem_cls[idx] + args.momentum * outputs_target.clone()

        if iter_num % interval_iter == 0 or iter_num == max_iter:
            base_network.eval()
            acc_t_te = utils.cal_acc_base(dset_loaders["Target"], base_network)
            log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)
            print(log_str)
            base_network.train()

    return acc_t_te
if __name__ == '__main__':
    # Benchmark driver: run CDANE-ATDOC from every shifted domain back to the
    # raw synthetic dataset and report per-task and average accuracy.
    data_name = 'moon'
    if data_name == 'moon': num_class = 2
    # Suffixes identify the shifted variants of the dataset; domain_list gives
    # their human-readable names (Raw, Translate, Scale, Rotate, ...).
    base_name_list = ['0', '1', '2', '3_45', '4_15', '6', '7', '8', '9']
    domain_list = ['Raw', 'Tl', 'Sl', 'Rt', 'Sh', 'Sk', 'Ns', 'Ol', 'Sc']
    file_list = [data_name + i for i in base_name_list]
    num_domain = len(domain_list)

    # Shared hyper-parameters for all tasks.
    args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,
                              epsilon=1e-05, layer='wn', class_num=num_class, smooth=0)

    # ATDOC-specific knobs: K nearest neighbors, memory momentum, loss weight.
    args.K = 5
    args.momentum = 1.0
    args.tar_par = 0.2
    args.method = 'CDANE-ATDOC'
    args.dset = data_name
    args.backbone = 'ShallowNet'
    args.batch_size = 32
    args.max_epoch = 50
    args.input_dim = 2
    args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'
    args.noise_rate = 0
    dset_n = args.dset + '_' + str(args.noise_rate)

    # Environment: GPU selection and reproducible seeding.
    os.environ["CUDA_VISIBLE_DEVICES"] = '5'
    args.data_env = 'gpu'  # 'local'
    args.seed = 2022
    fix_random_seed(args.seed)
    tr.backends.cudnn.deterministic = True
    print(dset_n, args.method)

    args.root_path = './data_synth/'
    args.local_dir = r'/mnt/ssd2/wenz/NT-Benchmark/NT_UDA/'
    args.result_dir = 'results/target/'

    # One adaptation task per shifted source domain, always targeting Raw.
    acc_all = np.zeros((len(domain_list) - 1))
    for s in range(1, num_domain):  # source
        for t in [0]:  # target
            itr_idx = s - 1
            info_str = '\n%s: %s --> %s' % (itr_idx, domain_list[s], domain_list[t])
            print(info_str)
            args.src, args.tar = file_list[s], file_list[t]
            args.task_str = domain_list[s] + '_' + domain_list[t]
            print(args)
            acc_all[itr_idx] = train_target(args)
    print('All acc: ', np.round(acc_all, 2))
    print('Avg acc: ', np.round(np.mean(acc_all), 2))
| 39.455882
| 120
| 0.653
|
import numpy as np
import argparse
import os
import torch as tr
import torch.nn as nn
import torch.optim as optim
from utils import network, loss, utils
from utils.network import calc_coeff
from utils.dataloader import read_syn_src_tar
from utils.utils import lr_scheduler_full, fix_random_seed, add_label_noise_noimg
from utils.loss import CELabelSmooth, CDANE, Entropy, RandomLayer
import torch.utils.data as Data
def data_load(Xs, Ys, Xt, Yt, args):
    """Build the training/evaluation DataLoaders from raw tensors.

    "source"/"target" are shuffled training loaders (drop_last=True); the
    target one additionally yields each sample's index so the trainer can
    address its memory bank. "Target" is the full ordered evaluation loader
    at triple batch size. Source labels get noise when args.noise_rate > 0.
    """
    batch = args.batch_size
    if args.noise_rate > 0:
        Ys = add_label_noise_noimg(Ys, args.seed, args.class_num, args.noise_rate)

    indices = tr.from_numpy(np.arange(len(Yt))).long()
    src_set = Data.TensorDataset(Xs, Ys)
    tar_eval_set = Data.TensorDataset(Xt, Yt)
    tar_idx_set = Data.TensorDataset(Xt, Yt, indices)

    return {
        "source": Data.DataLoader(src_set, batch_size=batch,
                                  shuffle=True, drop_last=True),
        "target": Data.DataLoader(tar_idx_set, batch_size=batch,
                                  shuffle=True, drop_last=True),
        "Target": Data.DataLoader(tar_eval_set, batch_size=batch * 3,
                                  shuffle=False, drop_last=False),
    }
def train_target(args):
    """Run CDAN-E + ATDOC adaptation for one source/target pair.

    Restores pre-initialized network weights, trains with the adversarial
    CDAN-E loss plus an ATDOC nearest-neighbor pseudo-label loss, and returns
    the last evaluated target accuracy (percent).
    """
    X_src, y_src, X_tar, y_tar = read_syn_src_tar(args)
    dset_loaders = data_load(X_src, y_src, X_tar, y_tar, args)

    netF, netC = network.backbone_net(args, args.bottleneck)
    netF.load_state_dict(tr.load(args.mdl_init_dir + 'netF.pt'))
    netC.load_state_dict(tr.load(args.mdl_init_dir + 'netC.pt'))
    base_network = nn.Sequential(netF, netC)
    max_len = max(len(dset_loaders["source"]), len(dset_loaders["target"]))
    args.max_iter = args.max_epoch * max_len

    ad_net = network.AdversarialNetwork(args.bottleneck, 20).cuda()
    ad_net.load_state_dict(tr.load(args.mdl_init_dir + 'netD_full.pt'))
    random_layer = RandomLayer([args.bottleneck, args.class_num], args.bottleneck)
    random_layer.cuda()

    # Backbone at 1/10 the learning rate of classifier and discriminator.
    optimizer_f = optim.SGD(netF.parameters(), lr=args.lr * 0.1)
    optimizer_c = optim.SGD(netC.parameters(), lr=args.lr)
    optimizer_d = optim.SGD(ad_net.parameters(), lr=args.lr)

    max_len = max(len(dset_loaders["source"]), len(dset_loaders["target"]))
    max_iter = args.max_epoch * max_len
    interval_iter = max_iter // 10
    iter_num = 0
    base_network.train()

    # ATDOC memory: normalized target features and uniform soft labels.
    class_num = args.class_num
    mem_fea = tr.rand(len(dset_loaders["target"].dataset), args.bottleneck).cuda()
    mem_fea = mem_fea / tr.norm(mem_fea, p=2, dim=1, keepdim=True)
    mem_cls = tr.ones(len(dset_loaders["target"].dataset), class_num).cuda() / class_num

    while iter_num < max_iter:
        # Endless loader cycling. Fixed: modern PyTorch iterators have no
        # `.next()` method, so call the builtin next(); catch NameError for
        # the first pass and StopIteration on exhaustion instead of a bare
        # `except:` that hid the AttributeError.
        try:
            inputs_source, labels_source = next(iter_source)
        except (NameError, StopIteration):
            iter_source = iter(dset_loaders["source"])
            inputs_source, labels_source = next(iter_source)
        try:
            inputs_target, _, idx = next(iter_target)
        except (NameError, StopIteration):
            iter_target = iter(dset_loaders["target"])
            inputs_target, _, idx = next(iter_target)

        # Single-sample batches break batch statistics; skip them.
        if inputs_source.size(0) == 1:
            continue

        iter_num += 1
        lr_scheduler_full(optimizer_f, init_lr=args.lr * 0.1, iter_num=iter_num, max_iter=args.max_iter)
        lr_scheduler_full(optimizer_c, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)
        lr_scheduler_full(optimizer_d, init_lr=args.lr, iter_num=iter_num, max_iter=args.max_iter)

        inputs_source, inputs_target, labels_source = inputs_source.cuda(), inputs_target.cuda(), labels_source.cuda()
        features_source, outputs_source = base_network(inputs_source)
        features_target, outputs_target = base_network(inputs_target)
        features = tr.cat((features_source, features_target), dim=0)

        # Adversarial CDAN-E loss plus label-smoothed source classification.
        args.loss_trade_off = 1.0
        outputs = tr.cat((outputs_source, outputs_target), dim=0)
        softmax_out = nn.Softmax(dim=1)(outputs)
        entropy = Entropy(softmax_out)
        transfer_loss = CDANE([features, softmax_out], ad_net, entropy, calc_coeff(iter_num), random_layer=random_layer)
        classifier_loss = CELabelSmooth(num_classes=args.class_num, epsilon=args.smooth)(outputs_source, labels_source)

        # ATDOC pseudo-labels from the K nearest memory-bank neighbors.
        dis = -tr.mm(features_target.detach(), mem_fea.t())
        for di in range(dis.size(0)):
            dis[di, idx[di]] = tr.max(dis)  # mask each sample's own entry
        _, p1 = tr.sort(dis, dim=1)

        w = tr.zeros(features_target.size(0), mem_fea.size(0)).cuda()
        for wi in range(w.size(0)):
            for wj in range(args.K):
                w[wi][p1[wi, wj]] = 1 / args.K

        weight_, pred = tr.max(w.mm(mem_cls), 1)
        loss_ = nn.CrossEntropyLoss(reduction='none')(outputs_target, pred)
        classifier_loss_atdoc = tr.sum(weight_ * loss_) / (tr.sum(weight_).item())

        # ATDOC weight ramps linearly from 0 to args.tar_par.
        eff = iter_num / args.max_iter
        total_loss = args.loss_trade_off * transfer_loss + classifier_loss + args.tar_par * eff * classifier_loss_atdoc

        optimizer_f.zero_grad()
        optimizer_c.zero_grad()
        optimizer_d.zero_grad()
        total_loss.backward()
        optimizer_f.step()
        optimizer_c.step()
        optimizer_d.step()

        # Momentum-update the memory bank with the current target batch.
        # NOTE(review): train mode is only restored at evaluation intervals
        # below; kept as-is to preserve the original behavior.
        netF.eval()
        netC.eval()
        with tr.no_grad():
            features_target, outputs_target = netC(netF(inputs_target))
            features_target = features_target / tr.norm(features_target, p=2, dim=1, keepdim=True)
            softmax_out = nn.Softmax(dim=1)(outputs_target)
            outputs_target = softmax_out ** 2 / ((softmax_out ** 2).sum(dim=0))
        mem_fea[idx] = (1.0 - args.momentum) * mem_fea[idx] + args.momentum * features_target.clone()
        mem_cls[idx] = (1.0 - args.momentum) * mem_cls[idx] + args.momentum * outputs_target.clone()

        if iter_num % interval_iter == 0 or iter_num == max_iter:
            base_network.eval()
            acc_t_te = utils.cal_acc_base(dset_loaders["Target"], base_network)
            log_str = 'Task: {}, Iter:{}/{}; Acc = {:.2f}%'.format(args.task_str, iter_num, max_iter, acc_t_te)
            print(log_str)
            base_network.train()

    return acc_t_te
if __name__ == '__main__':
    # Driver: adapt from each shifted synthetic domain to the raw one and
    # report per-task plus average target accuracy.
    data_name = 'moon'
    if data_name == 'moon': num_class = 2
    # Dataset-file suffixes and the matching human-readable domain names.
    base_name_list = ['0', '1', '2', '3_45', '4_15', '6', '7', '8', '9']
    domain_list = ['Raw', 'Tl', 'Sl', 'Rt', 'Sh', 'Sk', 'Ns', 'Ol', 'Sc']
    file_list = [data_name + i for i in base_name_list]
    num_domain = len(domain_list)

    # Hyper-parameters shared by every task.
    args = argparse.Namespace(bottleneck=64, lr=0.01, lr_decay1=0.1, lr_decay2=1.0,
                              epsilon=1e-05, layer='wn', class_num=num_class, smooth=0)

    # ATDOC knobs: neighbor count, memory momentum, pseudo-label loss weight.
    args.K = 5
    args.momentum = 1.0
    args.tar_par = 0.2
    args.method = 'CDANE-ATDOC'
    args.dset = data_name
    args.backbone = 'ShallowNet'
    args.batch_size = 32
    args.max_epoch = 50
    args.input_dim = 2
    args.mdl_init_dir = 'outputs/mdl_init/' + args.dset + '/'
    args.noise_rate = 0
    dset_n = args.dset + '_' + str(args.noise_rate)

    # GPU selection and deterministic seeding for reproducibility.
    os.environ["CUDA_VISIBLE_DEVICES"] = '5'
    args.data_env = 'gpu'
    args.seed = 2022
    fix_random_seed(args.seed)
    tr.backends.cudnn.deterministic = True
    print(dset_n, args.method)

    args.root_path = './data_synth/'
    args.local_dir = r'/mnt/ssd2/wenz/NT-Benchmark/NT_UDA/'
    args.result_dir = 'results/target/'

    # One adaptation run per shifted source domain; target is always Raw (0).
    acc_all = np.zeros((len(domain_list) - 1))
    for s in range(1, num_domain):
        for t in [0]:
            itr_idx = s - 1
            info_str = '\n%s: %s --> %s' % (itr_idx, domain_list[s], domain_list[t])
            print(info_str)
            args.src, args.tar = file_list[s], file_list[t]
            args.task_str = domain_list[s] + '_' + domain_list[t]
            print(args)
            acc_all[itr_idx] = train_target(args)
    print('All acc: ', np.round(acc_all, 2))
    print('Avg acc: ', np.round(np.mean(acc_all), 2))
| true
| true
|
f7074e5af3f28bfa6d2637e405fe9a2e435dfad9
| 9,020
|
py
|
Python
|
plot1.py
|
yangwenbo99/UNIQUE
|
50136f3169b82f20c8677f36c1b0882905b6d809
|
[
"Apache-2.0"
] | null | null | null |
plot1.py
|
yangwenbo99/UNIQUE
|
50136f3169b82f20c8677f36c1b0882905b6d809
|
[
"Apache-2.0"
] | null | null | null |
plot1.py
|
yangwenbo99/UNIQUE
|
50136f3169b82f20c8677f36c1b0882905b6d809
|
[
"Apache-2.0"
] | null | null | null |
#!/bin/python3
'''
This file is to plot a graph with the following setting.
1. We first select an image x_0
2. We then add some perturbation to the image to get x_1 (its type shall be
configurable in the future; currently it is either random or loaded from a
file)
3. Next, we plot f(x) for all x on the segment x_0 to x_1
4. Finally, we optionally save the pertuabation for future work
Example:
python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01
python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_lip -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01
python plot1.py --train '' --network lfc --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_nom -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization
python plot1.py --train '' --network lfc_relu --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_relu_nom -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization
python plot1.py --train '' --network lfc_relu --ranking True --fidelity True --std_modeling True --std_loss '' --margin 0.025 --batch_size 128 --batch_size2 32 --image_size 384 --max_epochs 3 --lr 1e-4 --decay_interval 3 --decay_ratio 0.1 --fixvar --max_epochs2 12 --batch_size=16 --batch_size2=16 --ckpt_path=checkpoints_many/lfc_relu_nom_lip -x /data_partition/yang/fyp/adv_1/IQA_database_syn/databaserelease2/jp2k/img4.bmp --pertubation_length 0.01 --force_normalization
'''
import argparse
import TrainModel
import scipy.io as sio
import os
import torch
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms
def parse_config():
    """Build and parse the command-line configuration for the plotting script.

    Returns the parsed argparse.Namespace with an extra empty ``to_test``
    list expected by TrainModel.Trainer. Most options mirror the trainer's
    regular configuration; the first four are specific to this script.
    """
    parser = argparse.ArgumentParser()
    # Plot-specific options: base image, perturbation source, length, save path.
    parser.add_argument('-x', '--img', type=str, help='the base image')
    parser.add_argument('-p', '--pertubation', type=str, default='',
                        help='the pertubation of the image, will be randomly generated if not presented')
    parser.add_argument('--pertubation_length', type=float, default=0.01,
                        help='the length of the pertubataion, if random generation is nessesary')
    parser.add_argument('-s', '--save_pertubation', type=str, default='',
                        help='whether the pertubation should be saved')
    # Trainer/model configuration (mirrors TrainModel's expected options).
    parser.add_argument("--train", type=bool, default=True)
    parser.add_argument('--get_scores', type=bool, default=False)
    parser.add_argument("--use_cuda", type=bool, default=True)
    # parser.add_argument("--device", type=str, default="cuda")
    parser.add_argument("--resume", action='store_true')
    parser.add_argument("--seed", type=int, default=19901116)
    parser.add_argument("--backbone", type=str, default='resnet34')
    parser.add_argument("--fc", type=bool, default=True)
    parser.add_argument('--scnn_root', type=str, default='saved_weights/scnn.pkl')
    parser.add_argument("--network", type=str, default="basecnn",
                        help='basecnn or dbcnn or lfc')
    parser.add_argument("--representation", type=str, default="BCNN")
    parser.add_argument("--ranking", type=bool, default=True,
                        help='True for learning-to-rank False for regular regression')
    parser.add_argument("--fidelity", type=bool, default=True,
                        help='True for fidelity loss False for regular ranknet with CE loss')
    parser.add_argument("--std_modeling", type=bool,
                        default=True)  # True for modeling std False for not
    parser.add_argument("--std_loss", type=bool, default=True)
    parser.add_argument("--fixvar", action='store_true')  #+
    parser.add_argument("--force_normalization", action='store_true')
    parser.add_argument("--lipschitz", action='store_true')
    parser.add_argument("--margin", type=float, default=0.025)
    parser.add_argument("--split", type=int, default=1)
    # Dataset roots and per-dataset evaluation switches.
    parser.add_argument("--trainset", type=str, default="./IQA_database/")
    parser.add_argument("--live_set", type=str, default="./IQA_database/databaserelease2/")
    parser.add_argument("--csiq_set", type=str, default="./IQA_database/CSIQ/")
    parser.add_argument("--tid2013_set", type=str, default="./IQA_database/TID2013/")
    parser.add_argument("--bid_set", type=str, default="./IQA_database/BID/")
    #parser.add_argument("--cid_set", type=str, default="./IQA_database/CID2013_camera/")
    parser.add_argument("--clive_set", type=str, default="./IQA_database/ChallengeDB_release/")
    parser.add_argument("--koniq10k_set", type=str, default="./IQA_database/koniq-10k/")
    parser.add_argument("--kadid10k_set", type=str, default="./IQA_database/kadid10k/")
    parser.add_argument("--eval_live", type=bool, default=True)
    parser.add_argument("--eval_csiq", type=bool, default=True)
    parser.add_argument("--eval_tid2013", type=bool, default=False)
    parser.add_argument("--eval_kadid10k", type=bool, default=True)
    parser.add_argument("--eval_bid", type=bool, default=True)
    parser.add_argument("--eval_clive", type=bool, default=True)
    parser.add_argument("--eval_koniq10k", type=bool, default=True)
    parser.add_argument("--split_modeling", type=bool, default=False)
    # Checkpoint and training-schedule options.
    parser.add_argument('--ckpt_path', default='./checkpoint', type=str,
                        metavar='PATH', help='path to checkpoints')
    parser.add_argument('--ckpt', default=None, type=str, help='name of the checkpoint to load')
    parser.add_argument("--train_txt", type=str, default='train.txt')  # train.txt | train_synthetic.txt | train_authentic.txt | train_sub2.txt | train_score.txt
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--batch_size2", type=int, default=32)
    parser.add_argument("--image_size", type=int, default=384, help='None means random resolution')
    parser.add_argument("--max_epochs", type=int, default=3)
    parser.add_argument("--max_epochs2", type=int, default=12)
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--decay_interval", type=int, default=3)
    parser.add_argument("--decay_ratio", type=float, default=0.1)
    parser.add_argument("--epochs_per_eval", type=int, default=1)
    parser.add_argument("--epochs_per_save", type=int, default=1)
    parser.add_argument("--verbose", action='store_true')
    config = parser.parse_args()
    # Trainer expects this attribute even when no extra test sets are used.
    config.to_test = []
    return config
def main(config):
    """Plot the model score f(x0 + t*p) for t in [0, 1].

    Loads the trainer, builds (or loads) a perturbation direction p,
    optionally saves it, then plots the model's prediction at 100 evenly
    spaced points along the segment from the image to image + perturbation.
    """
    t = TrainModel.Trainer(config)

    # Compatibility checks: fixvar and an lfc backbone require std modeling.
    if config.fixvar and not config.network.startswith('lfc'):
        raise NotImplementedError()
    if str(config.backbone).startswith('lfc') and not config.std_modeling:
        raise NotImplementedError()

    pil_img = Image.open(config.img)
    img = t.test_transform(pil_img).to(t.device)

    # Load a saved perturbation, or draw a random one of the requested length.
    if config.pertubation:
        with open(config.pertubation, 'rb') as f:
            pertubation = torch.load(f)
    else:
        pertubation = torch.rand(img.shape) * config.pertubation_length
    pertubation = pertubation.to(t.device)
    img = img.unsqueeze(0)
    print(img.shape)

    if config.save_pertubation:
        with open(config.save_pertubation, 'wb') as f:
            torch.save(pertubation, f)

    # lfc networks skip ImageNet normalization unless explicitly forced.
    should_normalize = not config.network.startswith('lfc') or config.force_normalization
    if should_normalize:
        normalization_transform = \
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225))
        pertubation = normalization_transform(pertubation)

    # Sample the model along the segment and plot the curve.
    x = list(np.linspace(0, 1, 100))
    y = [t.predict_single_image(img + p * pertubation).detach().cpu().numpy() for p in x]
    plt.plot(x, y)
    plt.show()
if __name__ == "__main__":
    # Parse CLI options and run the plotting routine.
    config = parse_config()
    main(config)
| 53.690476
| 480
| 0.708093
|
import argparse
import TrainModel
import scipy.io as sio
import os
import torch
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from torchvision import transforms
def parse_config():
    """Build and parse the command-line configuration for the plotting script.

    Returns the parsed argparse.Namespace with an extra empty ``to_test``
    list expected by TrainModel.Trainer. The first four options are specific
    to this script; the rest mirror the trainer's regular configuration.
    """
    parser = argparse.ArgumentParser()
    # Plot-specific options: base image, perturbation source, length, save path.
    parser.add_argument('-x', '--img', type=str, help='the base image')
    parser.add_argument('-p', '--pertubation', type=str, default='',
                        help='the pertubation of the image, will be randomly generated if not presented')
    parser.add_argument('--pertubation_length', type=float, default=0.01,
                        help='the length of the pertubataion, if random generation is nessesary')
    parser.add_argument('-s', '--save_pertubation', type=str, default='',
                        help='whether the pertubation should be saved')
    # Trainer/model configuration (mirrors TrainModel's expected options).
    parser.add_argument("--train", type=bool, default=True)
    parser.add_argument('--get_scores', type=bool, default=False)
    parser.add_argument("--use_cuda", type=bool, default=True)
    parser.add_argument("--resume", action='store_true')
    parser.add_argument("--seed", type=int, default=19901116)
    parser.add_argument("--backbone", type=str, default='resnet34')
    parser.add_argument("--fc", type=bool, default=True)
    parser.add_argument('--scnn_root', type=str, default='saved_weights/scnn.pkl')
    parser.add_argument("--network", type=str, default="basecnn",
                        help='basecnn or dbcnn or lfc')
    parser.add_argument("--representation", type=str, default="BCNN")
    parser.add_argument("--ranking", type=bool, default=True,
                        help='True for learning-to-rank False for regular regression')
    parser.add_argument("--fidelity", type=bool, default=True,
                        help='True for fidelity loss False for regular ranknet with CE loss')
    parser.add_argument("--std_modeling", type=bool,
                        default=True)  # True enables std modeling
    parser.add_argument("--std_loss", type=bool, default=True)
    parser.add_argument("--fixvar", action='store_true')
    parser.add_argument("--force_normalization", action='store_true')
    parser.add_argument("--lipschitz", action='store_true')
    parser.add_argument("--margin", type=float, default=0.025)
    parser.add_argument("--split", type=int, default=1)
    # Dataset roots and per-dataset evaluation switches.
    parser.add_argument("--trainset", type=str, default="./IQA_database/")
    parser.add_argument("--live_set", type=str, default="./IQA_database/databaserelease2/")
    parser.add_argument("--csiq_set", type=str, default="./IQA_database/CSIQ/")
    parser.add_argument("--tid2013_set", type=str, default="./IQA_database/TID2013/")
    parser.add_argument("--bid_set", type=str, default="./IQA_database/BID/")
    parser.add_argument("--clive_set", type=str, default="./IQA_database/ChallengeDB_release/")
    parser.add_argument("--koniq10k_set", type=str, default="./IQA_database/koniq-10k/")
    parser.add_argument("--kadid10k_set", type=str, default="./IQA_database/kadid10k/")
    parser.add_argument("--eval_live", type=bool, default=True)
    parser.add_argument("--eval_csiq", type=bool, default=True)
    parser.add_argument("--eval_tid2013", type=bool, default=False)
    parser.add_argument("--eval_kadid10k", type=bool, default=True)
    parser.add_argument("--eval_bid", type=bool, default=True)
    parser.add_argument("--eval_clive", type=bool, default=True)
    parser.add_argument("--eval_koniq10k", type=bool, default=True)
    parser.add_argument("--split_modeling", type=bool, default=False)
    # Checkpoint and training-schedule options.
    parser.add_argument('--ckpt_path', default='./checkpoint', type=str,
                        metavar='PATH', help='path to checkpoints')
    parser.add_argument('--ckpt', default=None, type=str, help='name of the checkpoint to load')
    parser.add_argument("--train_txt", type=str, default='train.txt')
    parser.add_argument("--batch_size", type=int, default=128)
    parser.add_argument("--batch_size2", type=int, default=32)
    parser.add_argument("--image_size", type=int, default=384, help='None means random resolution')
    parser.add_argument("--max_epochs", type=int, default=3)
    parser.add_argument("--max_epochs2", type=int, default=12)
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--decay_interval", type=int, default=3)
    parser.add_argument("--decay_ratio", type=float, default=0.1)
    parser.add_argument("--epochs_per_eval", type=int, default=1)
    parser.add_argument("--epochs_per_save", type=int, default=1)
    parser.add_argument("--verbose", action='store_true')
    config = parser.parse_args()
    # Trainer expects this attribute even when no extra test sets are used.
    config.to_test = []
    return config
def main(config):
    """Plot the model score f(x0 + t*p) for t in [0, 1].

    Loads the trainer, builds (or loads) a perturbation direction p,
    optionally saves it, then plots the model prediction at 100 evenly
    spaced points along the segment from the image to image + perturbation.
    """
    t = TrainModel.Trainer(config)

    # Compatibility checks: fixvar and an lfc backbone require std modeling.
    if config.fixvar and not config.network.startswith('lfc'):
        raise NotImplementedError()
    if str(config.backbone).startswith('lfc') and not config.std_modeling:
        raise NotImplementedError()

    pil_img = Image.open(config.img)
    img = t.test_transform(pil_img).to(t.device)

    # Load a saved perturbation, or draw a random one of the requested length.
    if config.pertubation:
        with open(config.pertubation, 'rb') as f:
            pertubation = torch.load(f)
    else:
        pertubation = torch.rand(img.shape) * config.pertubation_length
    pertubation = pertubation.to(t.device)
    img = img.unsqueeze(0)
    print(img.shape)

    if config.save_pertubation:
        with open(config.save_pertubation, 'wb') as f:
            torch.save(pertubation, f)

    # lfc networks skip ImageNet normalization unless explicitly forced.
    should_normalize = not config.network.startswith('lfc') or config.force_normalization
    if should_normalize:
        normalization_transform = \
            transforms.Normalize(mean=(0.485, 0.456, 0.406),
                                 std=(0.229, 0.224, 0.225))
        pertubation = normalization_transform(pertubation)

    # Sample the model along the segment and plot the curve.
    x = list(np.linspace(0, 1, 100))
    y = [t.predict_single_image(img + p * pertubation).detach().cpu().numpy() for p in x]
    plt.plot(x, y)
    plt.show()
if __name__ == "__main__":
    # Parse CLI options and run the plotting routine.
    config = parse_config()
    main(config)
| true
| true
|
f7074ef9bdbaa19127201bdaec5ddd602f46976f
| 5,190
|
py
|
Python
|
apps/core/flask/errorhandler.py
|
yeayee/osroom
|
f7084843ea4b75505283f8b23da60471ba8fc9bb
|
[
"BSD-2-Clause"
] | 1
|
2019-05-12T14:54:40.000Z
|
2019-05-12T14:54:40.000Z
|
apps/core/flask/errorhandler.py
|
yeayee/osroom
|
f7084843ea4b75505283f8b23da60471ba8fc9bb
|
[
"BSD-2-Clause"
] | null | null | null |
apps/core/flask/errorhandler.py
|
yeayee/osroom
|
f7084843ea4b75505283f8b23da60471ba8fc9bb
|
[
"BSD-2-Clause"
] | null | null | null |
# -*-coding:utf-8-*-
import os
from flask import request, render_template, g
from flask_babel import gettext
from flask_wtf.csrf import CSRFError
from werkzeug.utils import redirect
from apps.configs.sys_config import DEFAULT_ADMIN_LOGIN_PAGE
from apps.core.auth.rest_token_auth import OsrTokenError, SecretTokenError, AccessTokenError
from apps.core.blueprint import api, theme_view, admin_view
from apps.core.flask.login_manager import LoginReqError
from apps.core.flask.response import response_format
from apps.core.template.template import render_absolute_path_template
from apps.core.utils.get_config import get_config
from apps.modules.global_data.process.global_data import get_global_site_data
__author__ = "Allen Woo"
class ErrorHandler:
    """
    Register Flask error handlers that shape error responses.

    HTTP errors and the project's token/CSRF/login exceptions are converted
    either to the API's JSON response format or, for page requests, to a
    redirect to the appropriate login page.
    """

    def __init__(self, app=None):
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Attach all error handlers to the given Flask app."""

        @app.errorhandler(401)
        def internal_server_error_401(e):
            return internal_server_error(e)

        @app.errorhandler(404)
        def internal_server_error_404(e):
            return internal_server_error(e)

        @app.errorhandler(500)
        def internal_server_error_500(e):
            return internal_server_error(e)

        @app.errorhandler(SecretTokenError)
        def handle_secret_token_error(e):
            # Invalid secret token -> error_id 40101.
            data = {
                "custom_status": e.code,
                "msg": e.description,
                "msg_type": "e",
                "error_id": 40101}
            return response_format(data)

        @app.errorhandler(AccessTokenError)
        def handle_access_token_error(e):
            # Invalid access token -> error_id 40102. Renamed from the
            # duplicate `handle_rest_token_error` so each handler keeps a
            # distinct name (registration behavior is unchanged).
            data = {
                "custom_status": e.code,
                "msg": e.description,
                "msg_type": "e",
                "error_id": 40102}
            return response_format(data)

        @app.errorhandler(CSRFError)
        def handle_csrf_error(e):
            # CSRF validation failure -> error_id 40103.
            data = {
                "custom_status": e.code,
                "msg": e.description,
                "msg_type": "e",
                "error_id": 40103}
            return response_format(data)

        @app.errorhandler(OsrTokenError)
        def handle_osr_token_error(e):
            # Missing OSR token headers -> error_id 40104, with usage help.
            data = {
                "custom_status": e.code,
                "msg": e.description,
                "msg_type": "e",
                "error_id": 40104,
                "help": gettext(
                    "Please add the 'OSR-RestToken' or 'X-CSRFToken' request header,"
                    " the specific use please refer to the osroom system documentation:"
                    " http://osroom.com")}
            return response_format(data)

        @app.errorhandler(LoginReqError)
        def handle_login_error(e):
            # Not logged in -> error_id 40105; JSON for API calls, redirect
            # to the configured login view for page requests.
            data = {
                "custom_status": e.code,
                "msg": gettext("Not logged in"),
                "error_msg": e.description,
                "msg_type": "e",
                "to_url": get_config(
                    "login_manager",
                    "LOGIN_VIEW"),
                "error_id": 40105}
            if request.headers.get('OSR-RestToken'):
                data["to_url"] = get_config("login_manager", "LOGIN_VIEW")
            if request.path.startswith(api.url_prefix):
                # API request: respond with JSON.
                return response_format(data)
            # Page request: redirect to the login page.
            if request.path.startswith("/osr-admin"):
                return redirect(DEFAULT_ADMIN_LOGIN_PAGE)
            else:
                return redirect(data["to_url"])
def internal_server_error(e):
    """
    Handle a server/HTTP error and build the standard error response.

    :param e: the exception (usually an HTTPException) raised by Flask.
    :return: JSON via response_format() for API paths, otherwise a
             rendered theme error page (with system fallback).
    """
    try:
        code = e.code
    except BaseException:
        # Plain exceptions carry no HTTP status code; treat as 500.
        code = 500
    msg_type = "w"
    msg = gettext("An error occurred. Please contact the administrator")
    if code == 401:
        msg = gettext("Permission denied")
    elif code == 404:
        msg = gettext("The api does not exist or has been deprecated")
    elif code == 500:
        msg = gettext("Server error")
        msg_type = "e"
    elif isinstance(code, int) and code // 500 == 1:
        # Any other 5xx: likely raised by a third-party plug-in.
        msg = gettext(
            "Server error, please check whether the third-party plug-in is normal")
        msg_type = "e"
    data = {
        "http_status": code,
        "custom_status": None,
        "request_id": g.weblog_id,
        "msg": msg,
        "msg_type": msg_type}
    if request.path.startswith(api.url_prefix):
        # API request: respond with JSON data.
        return response_format(data)
    else:
        # Page request: render the theme's error page with site globals.
        g.site_global = dict(g.site_global,
                             **get_global_site_data(req_type="view"))
        path = "{}/pages/{}.html".format(get_config("theme",
                                                    "CURRENT_THEME_NAME"), code)
        absolute_path = os.path.abspath(
            "{}/{}".format(theme_view.template_folder, path))
        if not os.path.isfile(absolute_path):
            # The theme has no page for this <e.code> (e.g. 404); fall back
            # to the system's built-in error page.
            path = "{}/module/exception/{}.html".format(
                admin_view.template_folder, code)
            return render_absolute_path_template(path, data=data), 404
        return render_template(path, data=data), code
| 32.848101
| 92
| 0.57052
|
import os
from flask import request, render_template, g
from flask_babel import gettext
from flask_wtf.csrf import CSRFError
from werkzeug.utils import redirect
from apps.configs.sys_config import DEFAULT_ADMIN_LOGIN_PAGE
from apps.core.auth.rest_token_auth import OsrTokenError, SecretTokenError, AccessTokenError
from apps.core.blueprint import api, theme_view, admin_view
from apps.core.flask.login_manager import LoginReqError
from apps.core.flask.response import response_format
from apps.core.template.template import render_absolute_path_template
from apps.core.utils.get_config import get_config
from apps.modules.global_data.process.global_data import get_global_site_data
__author__ = "Allen Woo"
class ErrorHandler:
    """
    Register Flask error handlers that normalize HTTP errors and custom
    auth/CSRF exceptions into the project's standard response format.
    """
    def __init__(self, app=None):
        # Support both direct construction and the app-factory pattern.
        if app:
            self.init_app(app)
    def init_app(self, app):
        """Attach all error handlers to the given Flask application."""
        @app.errorhandler(401)
        def internal_server_error_401(e):
            return internal_server_error(e)
        @app.errorhandler(404)
        def internal_server_error_404(e):
            return internal_server_error(e)
        @app.errorhandler(500)
        def internal_server_error_500(e):
            return internal_server_error(e)
        @app.errorhandler(SecretTokenError)
        def handle_rest_token_error(e):
            data = {
                "custom_status": e.code,
                "msg": e.description,
                "msg_type": "e",
                "error_id": 40101}
            return response_format(data)
        # NOTE(review): this function name duplicates the previous handler's
        # (flake8 F811). Registration still works because the decorator runs
        # at definition time, but consider renaming for clarity.
        @app.errorhandler(AccessTokenError)
        def handle_rest_token_error(e):
            data = {
                "custom_status": e.code,
                "msg": e.description,
                "msg_type": "e",
                "error_id": 40102}
            return response_format(data)
        @app.errorhandler(CSRFError)
        def handle_csrf_error(e):
            data = {
                "custom_status": e.code,
                "msg": e.description,
                "msg_type": "e",
                "error_id": 40103}
            return response_format(data)
        @app.errorhandler(OsrTokenError)
        def handle_osr_token_error(e):
            data = {
                "custom_status": e.code,
                "msg": e.description,
                "msg_type": "e",
                "error_id": 40104,
                "help": gettext(
                    "Please add the 'OSR-RestToken' or 'X-CSRFToken' request header,"
                    " the specific use please refer to the osroom system documentation:"
                    " http://osroom.com")}
            return response_format(data)
        @app.errorhandler(LoginReqError)
        def handle_login_error(e):
            data = {
                "custom_status": e.code,
                "msg": gettext("Not logged in"),
                "error_msg": e.description,
                "msg_type": "e",
                "to_url": get_config(
                    "login_manager",
                    "LOGIN_VIEW"),
                "error_id": 40105}
            if request.headers.get('OSR-RestToken'):
                data["to_url"] = get_config("login_manager", "LOGIN_VIEW")
            if request.path.startswith(api.url_prefix):
                # API request: respond with JSON data.
                return response_format(data)
            # Page request: redirect to the appropriate login page.
            if request.path.startswith("/osr-admin"):
                return redirect(DEFAULT_ADMIN_LOGIN_PAGE)
            else:
                return redirect(data["to_url"])
def internal_server_error(e):
    """
    Handle a server/HTTP error and build the standard error response.

    :param e: the exception (usually an HTTPException) raised by Flask.
    :return: JSON for API paths, otherwise a rendered error page.
    """
    try:
        code = e.code
    except BaseException:
        # Plain exceptions carry no HTTP status code; treat as 500.
        code = 500
    msg_type = "w"
    msg = gettext("An error occurred. Please contact the administrator")
    if code == 401:
        msg = gettext("Permission denied")
    elif code == 404:
        msg = gettext("The api does not exist or has been deprecated")
    elif code == 500:
        msg = gettext("Server error")
        msg_type = "e"
    elif isinstance(code, int) and code // 500 == 1:
        # Any other 5xx: likely raised by a third-party plug-in.
        msg = gettext(
            "Server error, please check whether the third-party plug-in is normal")
        msg_type = "e"
    data = {
        "http_status": code,
        "custom_status": None,
        "request_id": g.weblog_id,
        "msg": msg,
        "msg_type": msg_type}
    if request.path.startswith(api.url_prefix):
        return response_format(data)
    else:
        g.site_global = dict(g.site_global,
                             **get_global_site_data(req_type="view"))
        path = "{}/pages/{}.html".format(get_config("theme",
                                                    "CURRENT_THEME_NAME"), code)
        absolute_path = os.path.abspath(
            "{}/{}".format(theme_view.template_folder, path))
        if not os.path.isfile(absolute_path):
            # The theme has no page for this status code; fall back to the
            # system's built-in error page.
            path = "{}/module/exception/{}.html".format(
                admin_view.template_folder, code)
            return render_absolute_path_template(path, data=data), 404
        return render_template(path, data=data), code
| true
| true
|
f7074f86527ff43cd05a3deae7642e669561ed3a
| 352
|
py
|
Python
|
quotes/views.py
|
k4rtik/alpo
|
2afffd62ccbc5865493b28bfc6926a8bea7dcf9a
|
[
"MIT"
] | null | null | null |
quotes/views.py
|
k4rtik/alpo
|
2afffd62ccbc5865493b28bfc6926a8bea7dcf9a
|
[
"MIT"
] | null | null | null |
quotes/views.py
|
k4rtik/alpo
|
2afffd62ccbc5865493b28bfc6926a8bea7dcf9a
|
[
"MIT"
] | null | null | null |
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from quotes.models import Quote
class QuoteCreate(CreateView):
    """Generic create view for Quote objects (default model form)."""
    model = Quote
class QuoteUpdate(UpdateView):
    """Generic update view for Quote objects (default model form)."""
    model = Quote
class QuoteDelete(DeleteView):
    """Generic delete view for Quote objects; returns to the quote index."""
    model = Quote
    # reverse_lazy: the URLconf may not be loaded when this module imports.
    success_url = reverse_lazy('quotes:index')
| 25.142857
| 72
| 0.78125
|
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.core.urlresolvers import reverse_lazy
from quotes.models import Quote
class QuoteCreate(CreateView):
    """Generic create view for Quote objects (default model form)."""
    model = Quote
class QuoteUpdate(UpdateView):
    """Generic update view for Quote objects (default model form)."""
    model = Quote
class QuoteDelete(DeleteView):
    """Generic delete view for Quote objects; returns to the quote index."""
    model = Quote
    # reverse_lazy: the URLconf may not be loaded when this module imports.
    success_url = reverse_lazy('quotes:index')
| true
| true
|
f70750abed121bc2185c541e75504692681bdb2b
| 49,723
|
py
|
Python
|
django/db/models/sql/compiler.py
|
makinacorpus/django
|
3632d289dedc2e83cde1976e5a4cd00b08c799ee
|
[
"BSD-3-Clause"
] | null | null | null |
django/db/models/sql/compiler.py
|
makinacorpus/django
|
3632d289dedc2e83cde1976e5a4cd00b08c799ee
|
[
"BSD-3-Clause"
] | null | null | null |
django/db/models/sql/compiler.py
|
makinacorpus/django
|
3632d289dedc2e83cde1976e5a4cd00b08c799ee
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.util import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
    def __init__(self, query, connection, using):
        """
        :param query: the Query instance this compiler turns into SQL.
        :param connection: the database connection wrapper (provides .ops
            and .features used throughout this class).
        :param using: alias of the database being used.
        """
        self.query = query
        self.connection = connection
        self.using = using
        # Memoization cache for quote_name_unless_alias(), keyed by name.
        self.quote_cache = {}
        # When ordering a queryset with distinct on a column not part of the
        # select set, the ordering column needs to be added to the select
        # clause. This information is needed both in SQL construction and
        # masking away the ordering selects from the returned row.
        self.ordering_aliases = []
        self.ordering_params = []
    def pre_sql_setup(self):
        """
        Does any necessary class setup immediately prior to producing SQL. This
        is for things that can't necessarily be done in __init__ because we
        might not have all the pieces in place at that time.
        # TODO: after the query has been executed, the altered state should be
        # cleaned. We are not using a clone() of the query here.
        """
        if not self.query.tables:
            # Make sure at least the base table takes part in the query.
            self.query.join((None, self.query.get_meta().db_table, None))
        if (not self.query.select and self.query.default_cols and not
                self.query.included_inherited_models):
            self.query.setup_inherited_models()
        if self.query.select_related and not self.query.related_select_cols:
            # Populate related_select_cols once; see fill_related_selections().
            self.fill_related_selections()
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
    def as_sql(self, with_limits=True, with_col_aliases=False):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.

        If 'with_limits' is False, any limit/offset information is not included
        in the query.
        """
        if with_limits and self.query.low_mark == self.query.high_mark:
            # Empty slice (e.g. qs[5:5]); no query needs to run at all.
            return '', ()

        self.pre_sql_setup()
        # After executing the query, we must get rid of any joins the query
        # setup created. So, take note of alias counts before the query ran.
        # However we do not want to get rid of stuff done in pre_sql_setup(),
        # as the pre_sql_setup will modify query state in a way that forbids
        # another run of it.
        self.refcounts_before = self.query.alias_refcount.copy()
        out_cols, s_params = self.get_columns(with_col_aliases)
        ordering, o_params, ordering_group_by = self.get_ordering()

        distinct_fields = self.get_distinct()

        # This must come after 'select', 'ordering' and 'distinct' -- see
        # docstring of get_from_clause() for details.
        from_, f_params = self.get_from_clause()

        qn = self.quote_name_unless_alias

        where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
        having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
        having_group_by = self.query.having.get_cols()
        params = []
        for val in six.itervalues(self.query.extra_select):
            params.extend(val[1])

        result = ['SELECT']

        if self.query.distinct:
            result.append(self.connection.ops.distinct_sql(distinct_fields))

        params.extend(o_params)
        # ordering_aliases (set by get_ordering) go into the select list so
        # that DISTINCT queries can order by non-selected columns.
        result.append(', '.join(out_cols + self.ordering_aliases))
        params.extend(s_params)
        params.extend(self.ordering_params)

        result.append('FROM')
        result.extend(from_)
        params.extend(f_params)

        if where:
            result.append('WHERE %s' % where)
            params.extend(w_params)

        grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
        if grouping:
            if distinct_fields:
                raise NotImplementedError(
                    "annotate() + distinct(fields) not implemented.")
            if not ordering:
                ordering = self.connection.ops.force_no_ordering()
            result.append('GROUP BY %s' % ', '.join(grouping))
            params.extend(gb_params)

        if having:
            result.append('HAVING %s' % having)
            params.extend(h_params)

        if ordering:
            result.append('ORDER BY %s' % ', '.join(ordering))

        if with_limits:
            if self.query.high_mark is not None:
                result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
            if self.query.low_mark:
                if self.query.high_mark is None:
                    val = self.connection.ops.no_limit_value()
                    if val:
                        result.append('LIMIT %d' % val)
                result.append('OFFSET %d' % self.query.low_mark)

        if self.query.select_for_update and self.connection.features.has_select_for_update:
            # If we've been asked for a NOWAIT query but the backend does not support it,
            # raise a DatabaseError otherwise we could get an unexpected deadlock.
            nowait = self.query.select_for_update_nowait
            if nowait and not self.connection.features.has_select_for_update_nowait:
                raise DatabaseError('NOWAIT is not supported on this database backend.')
            result.append(self.connection.ops.for_update_sql(nowait=nowait))

        # Finally do cleanup - get rid of the joins we created above.
        self.query.reset_refcounts(self.refcounts_before)

        return ' '.join(result), tuple(params)
    def as_nested_sql(self):
        """
        Perform the same functionality as the as_sql() method, returning an
        SQL string and parameters. However, the alias prefixes are bumped
        beforehand (in a copy -- the current query isn't changed), and any
        ordering is removed if the query is unsliced.

        Used when nesting this query inside another.
        """
        obj = self.query.clone()
        if obj.low_mark == 0 and obj.high_mark is None:
            # If there is no slicing in use, then we can safely drop all ordering
            obj.clear_ordering(True)
        # Bump alias prefixes so the nested query's aliases cannot collide
        # with the outer query's.
        obj.bump_prefix()
        return obj.get_compiler(connection=self.connection).as_sql()
    def get_columns(self, with_aliases=False):
        """
        Returns the list of columns to use in the select statement, as well as
        a list any extra parameters that need to be included. If no columns
        have been specified, returns all columns relating to fields in the
        model.

        If 'with_aliases' is true, any column names that are duplicated
        (without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.

        Side effect: stores the set of select aliases on self._select_aliases
        for later use by get_ordering().
        """
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
        params = []
        aliases = set(self.query.extra_select.keys())
        if with_aliases:
            col_aliases = aliases.copy()
        else:
            col_aliases = set()
        if self.query.select:
            only_load = self.deferred_to_columns()
            for col, _ in self.query.select:
                if isinstance(col, (list, tuple)):
                    alias, column = col
                    table = self.query.alias_map[alias].table_name
                    if table in only_load and column not in only_load[table]:
                        # Deferred column: don't select it.
                        continue
                    r = '%s.%s' % (qn(alias), qn(column))
                    if with_aliases:
                        if col[1] in col_aliases:
                            # Duplicate bare column name; generate a unique alias.
                            c_alias = 'Col%d' % len(col_aliases)
                            result.append('%s AS %s' % (r, c_alias))
                            aliases.add(c_alias)
                            col_aliases.add(c_alias)
                        else:
                            result.append('%s AS %s' % (r, qn2(col[1])))
                            aliases.add(r)
                            col_aliases.add(col[1])
                    else:
                        result.append(r)
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    # Non-(alias, column) entries know how to render themselves.
                    col_sql, col_params = col.as_sql(qn, self.connection)
                    result.append(col_sql)
                    params.extend(col_params)

                    if hasattr(col, 'alias'):
                        aliases.add(col.alias)
                        col_aliases.add(col.alias)

        elif self.query.default_cols:
            cols, new_aliases = self.get_default_columns(with_aliases,
                    col_aliases)
            result.extend(cols)
            aliases.update(new_aliases)

        max_name_length = self.connection.ops.max_name_length()
        # Aggregate columns (annotate()/aggregate()).
        for alias, aggregate in self.query.aggregate_select.items():
            agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
            if alias is None:
                result.append(agg_sql)
            else:
                result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
            params.extend(agg_params)

        # Columns pulled in via select_related().
        for (table, col), _ in self.query.related_select_cols:
            r = '%s.%s' % (qn(table), qn(col))
            if with_aliases and col in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s AS %s' % (r, c_alias))
                aliases.add(c_alias)
                col_aliases.add(c_alias)
            else:
                result.append(r)
                aliases.add(r)
                col_aliases.add(col)

        self._select_aliases = aliases
        return result, params
    def get_default_columns(self, with_aliases=False, col_aliases=None,
            start_alias=None, opts=None, as_pairs=False, from_parent=None):
        """
        Computes the default columns for selecting every field in the base
        model. Will sometimes be called to pull in related models (e.g. via
        select_related), in which case "opts" and "start_alias" will be given
        to provide a starting point for the traversal.

        Returns a list of strings, quoted appropriately for use in SQL
        directly, as well as a set of aliases used in the select statement (if
        'as_pairs' is True, returns a list of (alias, col_name) pairs instead
        of strings as the first component and None as the second component).
        """
        result = []
        if opts is None:
            opts = self.query.get_meta()
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        aliases = set()
        only_load = self.deferred_to_columns()
        if not start_alias:
            start_alias = self.query.get_initial_alias()
        # The 'seen_models' is used to optimize checking the needed parent
        # alias for a given field. This also includes None -> start_alias to
        # be used by local fields.
        seen_models = {None: start_alias}

        for field, model in opts.get_concrete_fields_with_model():
            if from_parent and model is not None and issubclass(from_parent, model):
                # Avoid loading data for already loaded parents.
                continue
            alias = self.query.join_parent_model(opts, model, start_alias,
                                                 seen_models)
            table = self.query.alias_map[alias].table_name
            if table in only_load and field.column not in only_load[table]:
                # Deferred field: skip.
                continue
            if as_pairs:
                result.append((alias, field.column))
                aliases.add(alias)
                continue
            if with_aliases and field.column in col_aliases:
                c_alias = 'Col%d' % len(col_aliases)
                result.append('%s.%s AS %s' % (qn(alias),
                    qn2(field.column), c_alias))
                col_aliases.add(c_alias)
                aliases.add(c_alias)
            else:
                r = '%s.%s' % (qn(alias), qn2(field.column))
                result.append(r)
                aliases.add(r)
                if with_aliases:
                    col_aliases.add(field.column)
        return result, aliases
    def get_distinct(self):
        """
        Returns a quoted list of fields to use in DISTINCT ON part of the query.

        Note that this method can alter the tables in the query, and thus it
        must be called before get_from_clause().
        """
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        result = []
        opts = self.query.get_meta()

        for name in self.query.distinct_fields:
            parts = name.split(LOOKUP_SEP)
            field, cols, alias, _, _ = self._setup_joins(parts, opts, None)
            # Drop trailing joins that aren't needed for the final column(s).
            cols, alias = self._final_join_removal(cols, alias)
            for col in cols:
                result.append("%s.%s" % (qn(alias), qn2(col)))
        return result
    def get_ordering(self):
        """
        Returns a tuple containing a list representing the SQL elements in the
        "order by" clause, and the list of SQL elements that need to be added
        to the GROUP BY clause as a result of the ordering.

        Also sets the ordering_aliases attribute on this instance to a list of
        extra aliases needed in the select.

        Determining the ordering SQL can change the tables we need to include,
        so this should be run *before* get_from_clause().
        """
        if self.query.extra_order_by:
            ordering = self.query.extra_order_by
        elif not self.query.default_ordering:
            ordering = self.query.order_by
        else:
            # Fall back to the model's Meta.ordering when nothing explicit.
            ordering = (self.query.order_by
                        or self.query.get_meta().ordering
                        or [])
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        distinct = self.query.distinct
        select_aliases = self._select_aliases
        result = []
        group_by = []
        ordering_aliases = []
        if self.query.standard_ordering:
            asc, desc = ORDER_DIR['ASC']
        else:
            # reverse() was applied; flip the meaning of ASC/DESC.
            asc, desc = ORDER_DIR['DESC']

        # It's possible, due to model inheritance, that normal usage might try
        # to include the same field more than once in the ordering. We track
        # the table/column pairs we use and discard any after the first use.
        processed_pairs = set()

        params = []
        ordering_params = []
        for pos, field in enumerate(ordering):
            if field == '?':
                result.append(self.connection.ops.random_function_sql())
                continue
            if isinstance(field, int):
                # Ordering by select-column position (1-based, sign = dir).
                if field < 0:
                    order = desc
                    field = -field
                else:
                    order = asc
                result.append('%s %s' % (field, order))
                group_by.append((str(field), []))
                continue
            col, order = get_order_dir(field, asc)
            if col in self.query.aggregate_select:
                result.append('%s %s' % (qn(col), order))
                continue
            if '.' in field:
                # This came in through an extra(order_by=...) addition. Pass it
                # on verbatim.
                table, col = col.split('.', 1)
                if (table, col) not in processed_pairs:
                    elt = '%s.%s' % (qn(table), col)
                    processed_pairs.add((table, col))
                    if not distinct or elt in select_aliases:
                        result.append('%s %s' % (elt, order))
                        group_by.append((elt, []))
            elif get_order_dir(field)[0] not in self.query.extra:
                # 'col' is of the form 'field' or 'field1__field2' or
                # '-field1__field2__field', etc.
                for table, cols, order in self.find_ordering_name(field,
                        self.query.get_meta(), default_order=asc):
                    for col in cols:
                        if (table, col) not in processed_pairs:
                            elt = '%s.%s' % (qn(table), qn2(col))
                            processed_pairs.add((table, col))
                            if distinct and elt not in select_aliases:
                                ordering_aliases.append(elt)
                            result.append('%s %s' % (elt, order))
                            group_by.append((elt, []))
            else:
                elt = qn2(col)
                if col not in self.query.extra_select:
                    sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
                    ordering_aliases.append(sql)
                    ordering_params.extend(self.query.extra[col][1])
                else:
                    if distinct and col not in select_aliases:
                        ordering_aliases.append(elt)
                        ordering_params.extend(params)
                result.append('%s %s' % (elt, order))
                group_by.append(self.query.extra[col])
        self.ordering_aliases = ordering_aliases
        self.ordering_params = ordering_params
        return result, params, group_by
    def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
            already_seen=None):
        """
        Returns the table alias (the name might be ambiguous, the alias will
        not be) and column name for ordering by the given 'name' parameter.
        The 'name' is of the form 'field1__field2__...__fieldN'.

        The return value is a list of (alias, columns, order) triples.
        """
        name, order = get_order_dir(name, default_order)
        pieces = name.split(LOOKUP_SEP)
        field, cols, alias, joins, opts = self._setup_joins(pieces, opts, alias)

        # If we get to this point and the field is a relation to another model,
        # append the default ordering for that model.
        if field.rel and len(joins) > 1 and opts.ordering:
            # Firstly, avoid infinite loops.
            if not already_seen:
                already_seen = set()
            join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
            if join_tuple in already_seen:
                raise FieldError('Infinite loop caused by ordering.')
            already_seen.add(join_tuple)

            results = []
            for item in opts.ordering:
                # Recurse using the related model's own Meta.ordering.
                results.extend(self.find_ordering_name(item, opts, alias,
                        order, already_seen))
            return results
        cols, alias = self._final_join_removal(cols, alias)
        return [(alias, cols, order)]
    def _setup_joins(self, pieces, opts, alias):
        """
        A helper method for get_ordering and get_distinct. This method will
        call query.setup_joins, handle refcounts and then promote the joins.

        Note that get_ordering and get_distinct must produce same target
        columns on same input, as the prefixes of get_ordering and get_distinct
        must match. Executing SQL where this is not true is an error.

        Returns (field, cols, alias, joins, opts).
        """
        if not alias:
            alias = self.query.get_initial_alias()
        field, targets, opts, joins, _ = self.query.setup_joins(
            pieces, opts, alias)
        # We will later on need to promote those joins that were added to the
        # query afresh above.
        joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
        alias = joins[-1]
        cols = [target.column for target in targets]
        if not field.rel:
            # To avoid inadvertent trimming of a necessary alias, use the
            # refcount to show that we are referencing a non-relation field on
            # the model.
            self.query.ref_alias(alias)

        # Must use left outer joins for nullable fields and their relations.
        # Ordering or distinct must not affect the returned set, and INNER
        # JOINS for nullable fields could do this.
        self.query.promote_joins(joins_to_promote)
        return field, cols, alias, joins, opts
    def _final_join_removal(self, cols, alias):
        """
        A helper method for get_distinct and get_ordering. This method will
        trim extra not-needed joins from the tail of the join chain.

        This is very similar to what is done in trim_joins, but we will
        trim LEFT JOINS here. It would be a good idea to consolidate this
        method and query.trim_joins().
        """
        if alias:
            while 1:
                join = self.query.alias_map[alias]
                lhs_cols, rhs_cols = zip(*[(lhs_col, rhs_col) for lhs_col, rhs_col in join.join_cols])
                if set(cols) != set(rhs_cols):
                    # The join contributes columns beyond the join condition;
                    # it can't be removed.
                    break

                # The join is redundant: translate the columns to the LHS
                # side and walk one step up the join chain.
                cols = [lhs_cols[rhs_cols.index(col)] for col in cols]
                self.query.unref_alias(alias)
                alias = join.lhs_alias
        return cols, alias
    def get_from_clause(self):
        """
        Returns a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list any extra parameters that
        need to be included. Sub-classes, can override this to create a
        from-clause via a "select".

        This should only be called after any SQL construction methods that
        might change the tables we need. This means the select columns,
        ordering and distinct must be done first.
        """
        result = []
        qn = self.quote_name_unless_alias
        qn2 = self.connection.ops.quote_name
        first = True
        from_params = []
        for alias in self.query.tables:
            if not self.query.alias_refcount[alias]:
                # Alias no longer referenced by any part of the query.
                continue
            try:
                name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
            except KeyError:
                # Extra tables can end up in self.tables, but not in the
                # alias_map if they aren't in a join. That's OK. We skip them.
                continue
            alias_str = '' if alias == name else (' %s' % alias)
            if join_type and not first:
                extra_cond = join_field.get_extra_restriction(
                    self.query.where_class, alias, lhs)
                if extra_cond:
                    extra_sql, extra_params = extra_cond.as_sql(
                        qn, self.connection)
                    extra_sql = 'AND (%s)' % extra_sql
                    from_params.extend(extra_params)
                else:
                    extra_sql = ""
                result.append('%s %s%s ON ('
                        % (join_type, qn(name), alias_str))
                for index, (lhs_col, rhs_col) in enumerate(join_cols):
                    if index != 0:
                        result.append(' AND ')
                    result.append('%s.%s = %s.%s' %
                    (qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
                result.append('%s)' % extra_sql)
            else:
                connector = '' if first else ', '
                result.append('%s%s%s' % (connector, qn(name), alias_str))
            first = False
        for t in self.query.extra_tables:
            alias, unused = self.query.table_alias(t)
            # Only add the alias if it's not already present (the table_alias()
            # calls increments the refcount, so an alias refcount of one means
            # this is the only reference.
            if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
                connector = '' if first else ', '
                result.append('%s%s' % (connector, qn(alias)))
                first = False
        return result, from_params
    def get_grouping(self, having_group_by, ordering_group_by):
        """
        Returns a tuple representing the SQL elements in the "group by" clause.

        :param having_group_by: columns referenced by the HAVING clause.
        :param ordering_group_by: (sql, params) pairs produced by get_ordering().
        """
        qn = self.quote_name_unless_alias
        result, params = [], []
        if self.query.group_by is not None:
            select_cols = self.query.select + self.query.related_select_cols
            # Just the column, not the fields.
            select_cols = [s[0] for s in select_cols]
            if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
                    and self.connection.features.allows_group_by_pk):
                # All concrete fields selected; group by PK alone when the
                # backend permits it.
                self.query.group_by = [
                    (self.query.get_meta().db_table, self.query.get_meta().pk.column)
                ]
                select_cols = []
            seen = set()
            cols = self.query.group_by + having_group_by + select_cols
            for col in cols:
                col_params = ()
                if isinstance(col, (list, tuple)):
                    sql = '%s.%s' % (qn(col[0]), qn(col[1]))
                elif hasattr(col, 'as_sql'):
                    sql, col_params = col.as_sql(qn, self.connection)
                else:
                    sql = '(%s)' % str(col)
                if sql not in seen:
                    result.append(sql)
                    params.extend(col_params)
                    seen.add(sql)

            # Still, we need to add all stuff in ordering (except if the backend can
            # group by just by PK).
            if ordering_group_by and not self.connection.features.allows_group_by_pk:
                for order, order_params in ordering_group_by:
                    # Even if we have seen the same SQL string, it might have
                    # different params, so, we add same SQL in "has params" case.
                    if order not in seen or params:
                        result.append(order)
                        params.extend(order_params)
                        seen.add(order)

            # Unconditionally add the extra_select items.
            for extra_select, extra_params in self.query.extra_select.values():
                sql = '(%s)' % str(extra_select)
                result.append(sql)
                params.extend(extra_params)

        return result, params
    def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
            requested=None, restricted=None, nullable=None):
        """
        Fill in the information needed for a select_related query. The current
        depth is measured as the number of connections away from the root model
        (for example, cur_depth=1 means we are looking at models with direct
        connections to the root model).

        Recursively populates self.query.related_select_cols.
        """
        if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
            # We've recursed far enough; bail out.
            return

        if not opts:
            # First (non-recursive) call: start from the root model.
            opts = self.query.get_meta()
            root_alias = self.query.get_initial_alias()
            self.query.related_select_cols = []
        only_load = self.query.get_loaded_field_names()

        # Setup for the case when only particular related fields should be
        # included in the related selection.
        if requested is None:
            if isinstance(self.query.select_related, dict):
                requested = self.query.select_related
                restricted = True
            else:
                restricted = False

        for f, model in opts.get_fields_with_model():
            # The get_fields_with_model() returns None for fields that live
            # in the field's local model. So, for those fields we want to use
            # the f.model - that is the field's local model.
            field_model = model or f.model
            if not select_related_descend(f, restricted, requested,
                                          only_load.get(field_model)):
                continue
            promote = nullable or f.null
            _, _, _, joins, _ = self.query.setup_joins(
                [f.name], opts, root_alias, outer_if_first=promote)
            alias = joins[-1]
            columns, aliases = self.get_default_columns(start_alias=alias,
                    opts=f.rel.to._meta, as_pairs=True)
            self.query.related_select_cols.extend(
                SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.concrete_fields))
            if restricted:
                next = requested.get(f.name, {})
            else:
                next = False
            new_nullable = f.null or promote
            self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
                    next, restricted, new_nullable)

        if restricted:
            # Also follow reverse one-to-one relations that were explicitly
            # requested.
            related_fields = [
                (o.field, o.model)
                for o in opts.get_all_related_objects()
                if o.field.unique
            ]
            for f, model in related_fields:
                if not select_related_descend(f, restricted, requested,
                                              only_load.get(model), reverse=True):
                    continue
                _, _, _, joins, _ = self.query.setup_joins(
                    [f.related_query_name()], opts, root_alias, outer_if_first=True)
                alias = joins[-1]
                from_parent = (opts.model if issubclass(model, opts.model)
                               else None)
                columns, aliases = self.get_default_columns(start_alias=alias,
                    opts=model._meta, as_pairs=True, from_parent=from_parent)
                self.query.related_select_cols.extend(
                    SelectInfo(col, field) for col, field
                    in zip(columns, model._meta.concrete_fields))
                next = requested.get(f.related_query_name(), {})
                # Use True here because we are looking at the _reverse_ side of
                # the relation, which is always nullable.
                new_nullable = True
                table = model._meta.db_table
                self.fill_related_selections(model._meta, table, cur_depth+1,
                        next, restricted, new_nullable)
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.

        Each yielded row has backend-specific values converted via
        resolve_columns() (when the backend provides it) and aggregate
        values converted via Query.resolve_aggregate().
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        fields = None
        has_aggregate_select = bool(self.query.aggregate_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                if resolve_columns:
                    if fields is None:
                        # We only set this up here because
                        # related_select_cols isn't populated until
                        # execute_sql() has been called.
                        # We also include types of fields of related models that
                        # will be included via select_related() for the benefit
                        # of MySQL/MySQLdb when boolean fields are involved
                        # (#15040).
                        # This code duplicates the logic for the order of fields
                        # found in get_columns(). It would be nice to clean this up.
                        if self.query.select:
                            fields = [f.field for f in self.query.select]
                        else:
                            fields = self.query.get_meta().concrete_fields
                        fields = fields + [f.field for f in self.query.related_select_cols]
                        # If the field was deferred, exclude it from being passed
                        # into `resolve_columns` because it wasn't selected.
                        only_load = self.deferred_to_columns()
                        if only_load:
                            db_table = self.query.get_meta().db_table
                            fields = [f for f in fields if db_table in only_load and
                                      f.column in only_load[db_table]]
                    row = self.resolve_columns(row, fields)
                if has_aggregate_select:
                    # Aggregate columns occupy a contiguous slice of the row,
                    # after extra_select and the loaded model fields; convert
                    # only that slice and splice it back in place.
                    loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
                    aggregate_start = len(self.query.extra_select) + len(loaded_fields)
                    aggregate_end = aggregate_start + len(self.query.aggregate_select)
                    row = tuple(row[:aggregate_start]) + tuple([
                        self.query.resolve_aggregate(value, aggregate, self.connection)
                        for (alias, aggregate), value
                        in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
                    ]) + tuple(row[aggregate_end:])
                yield row
    def execute_sql(self, result_type=MULTI):
        """
        Run the query against the database and returns the result(s). The
        return value is a single data item if result_type is SINGLE, or an
        iterator over the results if the result_type is MULTI.
        result_type is either MULTI (use fetchmany() to retrieve all rows),
        SINGLE (only retrieve a single row), or None. In this last case, the
        cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery). It's possible, however, that no query
        is needed, as the filters describe an empty set. In that case, None is
        returned, to avoid any unnecessary database interaction.
        """
        try:
            sql, params = self.as_sql()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            if result_type == MULTI:
                return iter([])
            else:
                return
        cursor = self.connection.cursor()
        cursor.execute(sql, params)
        if not result_type:
            return cursor
        if result_type == SINGLE:
            if self.ordering_aliases:
                # Trim the extra ordering-only columns off the single row.
                return cursor.fetchone()[:-len(self.ordering_aliases)]
            return cursor.fetchone()
        # The MULTI case.
        if self.ordering_aliases:
            # Extra columns were appended purely for ORDER BY support;
            # order_modified_iter strips them from every row.
            result = order_modified_iter(cursor, len(self.ordering_aliases),
                    self.connection.features.empty_fetchmany_value)
        else:
            result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
                    self.connection.features.empty_fetchmany_value)
        if not self.connection.features.can_use_chunked_reads:
            # If we are using non-chunked reads, we return the same data
            # structure as normally, but ensure it is all read into memory
            # before going any further.
            return list(result)
        return result
    def as_subquery_condition(self, alias, columns, qn):
        """
        Return SQL (and params) expressing this query as a condition on the
        outer table ``alias``/``columns``: a single-column ``IN (subquery)``,
        or a correlated ``EXISTS (subquery)`` for multi-column matches.

        NOTE: the multi-column path mutates self.query.where by adding the
        correlation conditions before rendering the subquery SQL.
        """
        qn2 = self.connection.ops.quote_name
        if len(columns) == 1:
            sql, params = self.as_sql()
            return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
        for index, select_col in enumerate(self.query.select):
            lhs = '%s.%s' % (qn(select_col.col[0]), qn2(select_col.col[1]))
            rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
            self.query.where.add(
                QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
        sql, params = self.as_sql()
        return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
    """
    Compiler for INSERT statements, including bulk inserts and backends
    that can return the inserted primary key directly.
    """
    def __init__(self, *args, **kwargs):
        # When True, as_sql() builds a single-row INSERT that asks the
        # backend to return the new primary key value.
        self.return_id = False
        super(SQLInsertCompiler, self).__init__(*args, **kwargs)
    def placeholder(self, field, val):
        """
        Return the SQL placeholder to use for ``val`` in the VALUES clause.
        """
        if field is None:
            # A field value of None means the value is raw.
            return val
        elif hasattr(field, 'get_placeholder'):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
            return field.get_placeholder(val, self.connection)
        else:
            # Return the common case for the placeholder
            return '%s'
    def as_sql(self):
        """
        Build the INSERT statement(s). Returns a list of (sql, params)
        pairs - one per object unless the backend supports bulk insert.
        """
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.get_meta()
        result = ['INSERT INTO %s' % qn(opts.db_table)]
        has_fields = bool(self.query.fields)
        fields = self.query.fields if has_fields else [opts.pk]
        result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
        if has_fields:
            params = values = [
                [
                    f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
                    for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            # No fields given: insert rows of backend-specific pk defaults.
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
            not self.return_id and self.connection.features.has_bulk_insert)
        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [
                [self.placeholder(field, v) for field, v in zip(fields, val)]
                for val in values
            ]
            # Oracle Spatial needs to remove some values due to #10888
            params = self.connection.ops.modify_insert_params(placeholders, params)
        if self.return_id and self.connection.features.can_return_id_from_insert:
            params = params[0]
            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
            result.append("VALUES (%s)" % ", ".join(placeholders[0]))
            r_fmt, r_params = self.connection.ops.return_insert_id()
            # Skip empty r_fmt to allow subclasses to customize behaviour for
            # 3rd party backends. Refs #19096.
            if r_fmt:
                result.append(r_fmt % col)
                params += r_params
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple([v for val in values for v in val]))]
        else:
            return [
                (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
                for p, vals in zip(placeholders, params)
            ]
    def execute_sql(self, return_id=False):
        """
        Execute the insert(s); when return_id is True (single object only),
        return the new primary key value.
        """
        assert not (return_id and len(self.query.objs) != 1)
        self.return_id = return_id
        cursor = self.connection.cursor()
        for sql, params in self.as_sql():
            cursor.execute(sql, params)
        if not (return_id and cursor):
            return
        if self.connection.features.can_return_id_from_insert:
            return self.connection.ops.fetch_returned_insert_id(cursor)
        return self.connection.ops.last_insert_id(cursor,
                self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
    """Compiler for DELETE statements (single-table only)."""
    def as_sql(self):
        """
        Build the DELETE statement for this query. Returns the SQL string
        and a tuple of parameters.
        """
        assert len(self.query.tables) == 1, \
            "Can only delete from one table at a time."
        quote = self.quote_name_unless_alias
        sql_parts = ['DELETE FROM %s' % quote(self.query.tables[0])]
        where, params = self.query.where.as_sql(qn=quote, connection=self.connection)
        if where:
            sql_parts.append('WHERE %s' % where)
        return ' '.join(sql_parts), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
    """
    Compiler for UPDATE statements, including cascading related updates
    and backends that cannot select from the table being updated.
    """
    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        self.pre_sql_setup()
        if not self.query.values:
            return '', ()
        table = self.query.tables[0]
        qn = self.quote_name_unless_alias
        result = ['UPDATE %s' % qn(table)]
        result.append('SET')
        values, update_params = [], []
        for field, model, val in self.query.values:
            if hasattr(val, 'prepare_database_save'):
                # Model instances know how to serialize themselves for the
                # given foreign-key field.
                val = val.prepare_database_save(field)
            else:
                val = field.get_db_prep_save(val, connection=self.connection)
            # Getting the placeholder for the field.
            if hasattr(field, 'get_placeholder'):
                placeholder = field.get_placeholder(val, self.connection)
            else:
                placeholder = '%s'
            if hasattr(val, 'evaluate'):
                # Expression objects (e.g. F() arithmetic) are compiled to
                # SQL rather than bound as parameters.
                val = SQLEvaluator(val, self.query, allow_joins=False)
            name = field.column
            if hasattr(val, 'as_sql'):
                sql, params = val.as_sql(qn, self.connection)
                values.append('%s = %s' % (qn(name), sql))
                update_params.extend(params)
            elif val is not None:
                values.append('%s = %s' % (qn(name), placeholder))
                update_params.append(val)
            else:
                values.append('%s = NULL' % qn(name))
        if not values:
            return '', ()
        result.append(', '.join(values))
        where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
        if where:
            result.append('WHERE %s' % where)
        return ' '.join(result), tuple(update_params + params)
    def execute_sql(self, result_type):
        """
        Execute the specified update. Returns the number of rows affected by
        the primary update query. The "primary update query" is the first
        non-empty query that is executed. Row counts for any subsequent,
        related queries are not available.
        """
        cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
        rows = cursor.rowcount if cursor else 0
        is_empty = cursor is None
        del cursor
        for query in self.query.get_related_updates():
            aux_rows = query.get_compiler(self.using).execute_sql(result_type)
            if is_empty:
                rows = aux_rows
                is_empty = False
        return rows
    def pre_sql_setup(self):
        """
        If the update depends on results from other tables, we need to do some
        munging of the "where" conditions to match the format required for
        (portable) SQL updates. That is done here.
        Further, if we are going to be running multiple updates, we pull out
        the id values to update at this point so that they don't change as a
        result of the progressive updates.
        """
        self.query.select_related = False
        self.query.clear_ordering(True)
        super(SQLUpdateCompiler, self).pre_sql_setup()
        count = self.query.count_active_tables()
        if not self.query.related_updates and count == 1:
            return
        # We need to use a sub-select in the where clause to filter on things
        # from other tables.
        query = self.query.clone(klass=Query)
        query.bump_prefix()
        query.extra = {}
        query.select = []
        query.add_fields([query.get_meta().pk.name])
        # Recheck the count - it is possible that fiddling with the select
        # fields above removes tables from the query. Refs #18304.
        count = query.count_active_tables()
        if not self.query.related_updates and count == 1:
            return
        must_pre_select = count > 1 and not self.connection.features.update_can_self_select
        # Now we adjust the current query: reset the where clause and get rid
        # of all the tables we don't need (since they're in the sub-select).
        self.query.where = self.query.where_class()
        if self.query.related_updates or must_pre_select:
            # Either we're using the idents in multiple update queries (so
            # don't want them to change), or the db backend doesn't support
            # selecting from the updating table (e.g. MySQL).
            idents = []
            for rows in query.get_compiler(self.using).execute_sql(MULTI):
                idents.extend([r[0] for r in rows])
            self.query.add_filter(('pk__in', idents))
            self.query.related_ids = idents
        else:
            # The fast path. Filters and updates in one query.
            self.query.add_filter(('pk__in', query))
        for alias in self.query.tables[1:]:
            self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
    """Compiler that wraps an inner query in an aggregating SELECT."""
    def as_sql(self, qn=None):
        """
        Creates the SQL for this query. Returns the SQL string and the
        parameter tuple.
        """
        if qn is None:
            qn = self.quote_name_unless_alias
        fragments = []
        params = []
        for aggregate in self.query.aggregate_select.values():
            agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
            fragments.append(agg_sql)
            params.extend(agg_params)
        sql = 'SELECT %s FROM (%s) subquery' % (
            ', '.join(fragments), self.query.subquery)
        return sql, tuple(params) + self.query.sub_params
class SQLDateCompiler(SQLCompiler):
    """Compiler for QuerySet.dates() queries, yielding date objects."""
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        if resolve_columns:
            from django.db.models.fields import DateField
            fields = [DateField()]
        else:
            from django.db.backends.util import typecast_date
            # needs_string_cast is only consulted below when resolve_columns
            # is False, so binding it in this branch alone is safe.
            needs_string_cast = self.connection.features.needs_datetime_string_cast
        offset = len(self.query.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                date = row[offset]
                if resolve_columns:
                    date = self.resolve_columns(row, fields)[offset]
                elif needs_string_cast:
                    date = typecast_date(str(date))
                # Some backends return full datetimes; truncate to the date.
                if isinstance(date, datetime.datetime):
                    date = date.date()
                yield date
class SQLDateTimeCompiler(SQLCompiler):
    """Compiler for QuerySet.datetimes() queries, yielding datetimes."""
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        if resolve_columns:
            from django.db.models.fields import DateTimeField
            fields = [DateTimeField()]
        else:
            from django.db.backends.util import typecast_timestamp
            # needs_string_cast is only consulted below when resolve_columns
            # is False, so binding it in this branch alone is safe.
            needs_string_cast = self.connection.features.needs_datetime_string_cast
        offset = len(self.query.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                # Use 'dt' rather than 'datetime' so the module-level
                # ``datetime`` import is not shadowed inside this loop.
                dt = row[offset]
                if resolve_columns:
                    dt = self.resolve_columns(row, fields)[offset]
                elif needs_string_cast:
                    dt = typecast_timestamp(str(dt))
                # Datetimes are artificially returned in UTC on databases that
                # don't support time zone. Restore the zone used in the query.
                if settings.USE_TZ:
                    if dt is None:
                        raise ValueError("Database returned an invalid value "
                                         "in QuerySet.dates(). Are time zone "
                                         "definitions and pytz installed?")
                    dt = dt.replace(tzinfo=None)
                    dt = timezone.make_aware(dt, self.query.tzinfo)
                yield dt
def order_modified_iter(cursor, trim, sentinel):
    """
    Yields blocks of rows from a cursor, with the last ``trim`` columns
    sliced off every row. Used in the special case where extra output
    columns were appended only to satisfy ordering requirements: they must
    be stripped before anything else consumes the results, since they are
    only needed to make the SQL valid.
    """
    fetch_block = lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
    for rows in iter(fetch_block, sentinel):
        yield [row[:-trim] for row in rows]
| 44.395536
| 136
| 0.577761
|
import datetime
from django.conf import settings
from django.core.exceptions import FieldError
from django.db.backends.util import truncate_name
from django.db.models.constants import LOOKUP_SEP
from django.db.models.query_utils import select_related_descend, QueryWrapper
from django.db.models.sql.constants import (SINGLE, MULTI, ORDER_DIR,
GET_ITERATOR_CHUNK_SIZE, SelectInfo)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.expressions import SQLEvaluator
from django.db.models.sql.query import get_order_dir, Query
from django.db.utils import DatabaseError
from django.utils import six
from django.utils.six.moves import zip
from django.utils import timezone
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {}
self.ordering_aliases = []
self.ordering_params = []
def pre_sql_setup(self):
if not self.query.tables:
self.query.join((None, self.query.get_meta().db_table, None))
if (not self.query.select and self.query.default_cols and not
self.query.included_inherited_models):
self.query.setup_inherited_models()
if self.query.select_related and not self.query.related_select_cols:
self.fill_related_selections()
def quote_name_unless_alias(self, name):
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def as_sql(self, with_limits=True, with_col_aliases=False):
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
self.pre_sql_setup()
self.refcounts_before = self.query.alias_refcount.copy()
out_cols, s_params = self.get_columns(with_col_aliases)
ordering, o_params, ordering_group_by = self.get_ordering()
distinct_fields = self.get_distinct()
from_, f_params = self.get_from_clause()
qn = self.quote_name_unless_alias
where, w_params = self.query.where.as_sql(qn=qn, connection=self.connection)
having, h_params = self.query.having.as_sql(qn=qn, connection=self.connection)
having_group_by = self.query.having.get_cols()
params = []
for val in six.itervalues(self.query.extra_select):
params.extend(val[1])
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
params.extend(o_params)
result.append(', '.join(out_cols + self.ordering_aliases))
params.extend(s_params)
params.extend(self.ordering_params)
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping, gb_params = self.get_grouping(having_group_by, ordering_group_by)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) not implemented.")
if not ordering:
ordering = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
params.extend(gb_params)
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if ordering:
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
# raise a DatabaseError otherwise we could get an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(self.refcounts_before)
return ' '.join(result), tuple(params)
def as_nested_sql(self):
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
obj.bump_prefix()
return obj.get_compiler(connection=self.connection).as_sql()
def get_columns(self, with_aliases=False):
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (col[0], qn2(alias)) for alias, col in six.iteritems(self.query.extra_select)]
params = []
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
for col, _ in self.query.select:
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = '%s.%s' % (qn(alias), qn(column))
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
col_sql, col_params = col.as_sql(qn, self.connection)
result.append(col_sql)
params.extend(col_params)
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
for alias, aggregate in self.query.aggregate_select.items():
agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
if alias is None:
result.append(agg_sql)
else:
result.append('%s AS %s' % (agg_sql, qn(truncate_name(alias, max_name_length))))
params.extend(agg_params)
for (table, col), _ in self.query.related_select_cols:
r = '%s.%s' % (qn(table), qn(col))
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result, params
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, from_parent=None):
result = []
if opts is None:
opts = self.query.get_meta()
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
aliases = set()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field, model in opts.get_concrete_fields_with_model():
if from_parent and model is not None and issubclass(from_parent, model):
# Avoid loading data for already loaded parents.
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s.%s AS %s' % (qn(alias),
qn2(field.column), c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = '%s.%s' % (qn(alias), qn2(field.column))
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def get_distinct(self):
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
field, cols, alias, _, _ = self._setup_joins(parts, opts, None)
cols, alias = self._final_join_removal(cols, alias)
for col in cols:
result.append("%s.%s" % (qn(alias), qn2(col)))
return result
def get_ordering(self):
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by
or self.query.get_meta().ordering
or [])
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
distinct = self.query.distinct
select_aliases = self._select_aliases
result = []
group_by = []
ordering_aliases = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
# It's possible, due to model inheritance, that normal usage might try
processed_pairs = set()
params = []
ordering_params = []
for pos, field in enumerate(ordering):
if field == '?':
result.append(self.connection.ops.random_function_sql())
continue
if isinstance(field, int):
if field < 0:
order = desc
field = -field
else:
order = asc
result.append('%s %s' % (field, order))
group_by.append((str(field), []))
continue
col, order = get_order_dir(field, asc)
if col in self.query.aggregate_select:
result.append('%s %s' % (qn(col), order))
continue
if '.' in field:
table, col = col.split('.', 1)
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), col)
processed_pairs.add((table, col))
if not distinct or elt in select_aliases:
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
elif get_order_dir(field)[0] not in self.query.extra:
for table, cols, order in self.find_ordering_name(field,
self.query.get_meta(), default_order=asc):
for col in cols:
if (table, col) not in processed_pairs:
elt = '%s.%s' % (qn(table), qn2(col))
processed_pairs.add((table, col))
if distinct and elt not in select_aliases:
ordering_aliases.append(elt)
result.append('%s %s' % (elt, order))
group_by.append((elt, []))
else:
elt = qn2(col)
if col not in self.query.extra_select:
sql = "(%s) AS %s" % (self.query.extra[col][0], elt)
ordering_aliases.append(sql)
ordering_params.extend(self.query.extra[col][1])
else:
if distinct and col not in select_aliases:
ordering_aliases.append(elt)
ordering_params.extend(params)
result.append('%s %s' % (elt, order))
group_by.append(self.query.extra[col])
self.ordering_aliases = ordering_aliases
self.ordering_params = ordering_params
return result, params, group_by
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
name, order = get_order_dir(name, default_order)
pieces = name.split(LOOKUP_SEP)
field, cols, alias, joins, opts = self._setup_joins(pieces, opts, alias)
if field.rel and len(joins) > 1 and opts.ordering:
if not already_seen:
already_seen = set()
join_tuple = tuple([self.query.alias_map[j].table_name for j in joins])
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
cols, alias = self._final_join_removal(cols, alias)
return [(alias, cols, order)]
def _setup_joins(self, pieces, opts, alias):
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, _ = self.query.setup_joins(
pieces, opts, alias)
joins_to_promote = [j for j in joins if self.query.alias_refcount[j] < 2]
alias = joins[-1]
cols = [target.column for target in targets]
if not field.rel:
self.query.ref_alias(alias)
self.query.promote_joins(joins_to_promote)
return field, cols, alias, joins, opts
def _final_join_removal(self, cols, alias):
if alias:
while 1:
join = self.query.alias_map[alias]
lhs_cols, rhs_cols = zip(*[(lhs_col, rhs_col) for lhs_col, rhs_col in join.join_cols])
if set(cols) != set(rhs_cols):
break
cols = [lhs_cols[rhs_cols.index(col)] for col in cols]
self.query.unref_alias(alias)
alias = join.lhs_alias
return cols, alias
def get_from_clause(self):
result = []
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
first = True
from_params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
name, alias, join_type, lhs, join_cols, _, join_field = self.query.alias_map[alias]
except KeyError:
continue
alias_str = '' if alias == name else (' %s' % alias)
if join_type and not first:
extra_cond = join_field.get_extra_restriction(
self.query.where_class, alias, lhs)
if extra_cond:
extra_sql, extra_params = extra_cond.as_sql(
qn, self.connection)
extra_sql = 'AND (%s)' % extra_sql
from_params.extend(extra_params)
else:
extra_sql = ""
result.append('%s %s%s ON ('
% (join_type, qn(name), alias_str))
for index, (lhs_col, rhs_col) in enumerate(join_cols):
if index != 0:
result.append(' AND ')
result.append('%s.%s = %s.%s' %
(qn(lhs), qn2(lhs_col), qn(alias), qn2(rhs_col)))
result.append('%s)' % extra_sql)
else:
connector = '' if first else ', '
result.append('%s%s%s' % (connector, qn(name), alias_str))
first = False
for t in self.query.extra_tables:
alias, unused = self.query.table_alias(t)
# calls increments the refcount, so an alias refcount of one means
# this is the only reference.
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
connector = '' if first else ', '
result.append('%s%s' % (connector, qn(alias)))
first = False
return result, from_params
def get_grouping(self, having_group_by, ordering_group_by):
qn = self.quote_name_unless_alias
result, params = [], []
if self.query.group_by is not None:
select_cols = self.query.select + self.query.related_select_cols
# Just the column, not the fields.
select_cols = [s[0] for s in select_cols]
if (len(self.query.get_meta().concrete_fields) == len(self.query.select)
and self.connection.features.allows_group_by_pk):
self.query.group_by = [
(self.query.get_meta().db_table, self.query.get_meta().pk.column)
]
select_cols = []
seen = set()
cols = self.query.group_by + having_group_by + select_cols
for col in cols:
col_params = ()
if isinstance(col, (list, tuple)):
sql = '%s.%s' % (qn(col[0]), qn(col[1]))
elif hasattr(col, 'as_sql'):
sql, col_params = col.as_sql(qn, self.connection)
else:
sql = '(%s)' % str(col)
if sql not in seen:
result.append(sql)
params.extend(col_params)
seen.add(sql)
# Still, we need to add all stuff in ordering (except if the backend can
# group by just by PK).
if ordering_group_by and not self.connection.features.allows_group_by_pk:
for order, order_params in ordering_group_by:
# Even if we have seen the same SQL string, it might have
# different params, so, we add same SQL in "has params" case.
if order not in seen or params:
result.append(order)
params.extend(order_params)
seen.add(order)
# Unconditionally add the extra_select items.
for extra_select, extra_params in self.query.extra_select.values():
sql = '(%s)' % str(extra_select)
result.append(sql)
params.extend(extra_params)
return result, params
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None, nullable=None):
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
self.query.related_select_cols = []
only_load = self.query.get_loaded_field_names()
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
for f, model in opts.get_fields_with_model():
# the f.model - that is the field's local model.
field_model = model or f.model
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
promote = nullable or f.null
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias, outer_if_first=promote)
alias = joins[-1]
columns, aliases = self.get_default_columns(start_alias=alias,
opts=f.rel.to._meta, as_pairs=True)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field in zip(columns, f.rel.to._meta.concrete_fields))
if restricted:
next = requested.get(f.name, {})
else:
next = False
new_nullable = f.null or promote
self.fill_related_selections(f.rel.to._meta, alias, cur_depth + 1,
next, restricted, new_nullable)
if restricted:
related_fields = [
(o.field, o.model)
for o in opts.get_all_related_objects()
if o.field.unique
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
_, _, _, joins, _ = self.query.setup_joins(
[f.related_query_name()], opts, root_alias, outer_if_first=True)
alias = joins[-1]
from_parent = (opts.model if issubclass(model, opts.model)
else None)
columns, aliases = self.get_default_columns(start_alias=alias,
opts=model._meta, as_pairs=True, from_parent=from_parent)
self.query.related_select_cols.extend(
SelectInfo(col, field) for col, field
in zip(columns, model._meta.concrete_fields))
next = requested.get(f.related_query_name(), {})
new_nullable = True
table = model._meta.db_table
self.fill_related_selections(model._meta, table, cur_depth+1,
next, restricted, new_nullable)
def deferred_to_columns(self):
columns = {}
self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb)
return columns
def results_iter(self):
resolve_columns = hasattr(self, 'resolve_columns')
fields = None
has_aggregate_select = bool(self.query.aggregate_select)
for rows in self.execute_sql(MULTI):
for row in rows:
if resolve_columns:
if fields is None:
# execute_sql() has been called.
# We also include types of fields of related models that
# will be included via select_related() for the benefit
# of MySQL/MySQLdb when boolean fields are involved
# (#15040).
# This code duplicates the logic for the order of fields
# found in get_columns(). It would be nice to clean this up.
if self.query.select:
fields = [f.field for f in self.query.select]
else:
fields = self.query.get_meta().concrete_fields
fields = fields + [f.field for f in self.query.related_select_cols]
# If the field was deferred, exclude it from being passed
# into `resolve_columns` because it wasn't selected.
only_load = self.deferred_to_columns()
if only_load:
db_table = self.query.get_meta().db_table
fields = [f for f in fields if db_table in only_load and
f.column in only_load[db_table]]
row = self.resolve_columns(row, fields)
if has_aggregate_select:
loaded_fields = self.query.get_loaded_field_names().get(self.query.model, set()) or self.query.select
aggregate_start = len(self.query.extra_select) + len(loaded_fields)
aggregate_end = aggregate_start + len(self.query.aggregate_select)
row = tuple(row[:aggregate_start]) + tuple([
self.query.resolve_aggregate(value, aggregate, self.connection)
for (alias, aggregate), value
in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])
]) + tuple(row[aggregate_end:])
yield row
    def execute_sql(self, result_type=MULTI):
        """
        Run the query against the database and return the result(s).

        The return value depends on result_type:
        - falsy: the raw cursor (caller is responsible for it);
        - SINGLE: a single row (with any ordering-alias columns trimmed),
          or None if there are no matching results;
        - MULTI: an iterator over chunks of rows (fully materialized into a
          list when the backend can't use chunked reads).

        An EmptyResultSet from as_sql() short-circuits to an empty result
        without touching the database.
        """
        try:
            sql, params = self.as_sql()
            if not sql:
                raise EmptyResultSet
        except EmptyResultSet:
            if result_type == MULTI:
                return iter([])
            else:
                return
        cursor = self.connection.cursor()
        cursor.execute(sql, params)
        if not result_type:
            return cursor
        if result_type == SINGLE:
            if self.ordering_aliases:
                # Extra ordering columns were appended to the select; strip
                # them from the returned row.
                return cursor.fetchone()[:-len(self.ordering_aliases)]
            return cursor.fetchone()
        # The MULTI case: iterate in fetchmany()-sized chunks.
        if self.ordering_aliases:
            result = order_modified_iter(cursor, len(self.ordering_aliases),
                    self.connection.features.empty_fetchmany_value)
        else:
            result = iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
                    self.connection.features.empty_fetchmany_value)
        if not self.connection.features.can_use_chunked_reads:
            # If we are using non-chunked reads, we return the same data
            # structure as normally, but ensure it is all read into memory
            # before going any further.
            return list(result)
        return result
    def as_subquery_condition(self, alias, columns, qn):
        """
        Return (sql, params) expressing this query as a condition on the
        outer query's table `alias` over `columns`.

        A single column compiles to 'alias.col IN (subquery)'. For multiple
        columns, correlation predicates are added to this query's WHERE
        clause (note: this mutates self.query.where) and the result is
        wrapped in 'EXISTS (...)'.
        """
        qn2 = self.connection.ops.quote_name
        if len(columns) == 1:
            sql, params = self.as_sql()
            return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
        for index, select_col in enumerate(self.query.select):
            lhs = '%s.%s' % (qn(select_col.col[0]), qn2(select_col.col[1]))
            rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
            self.query.where.add(
                QueryWrapper('%s = %s' % (lhs, rhs), []), 'AND')
        sql, params = self.as_sql()
        return 'EXISTS (%s)' % sql, params
class SQLInsertCompiler(SQLCompiler):
    """Compiles INSERT statements, including multi-row (bulk) inserts."""
    def __init__(self, *args, **kwargs):
        # When True, as_sql() appends the backend's RETURNING clause so the
        # newly generated primary key can be fetched after the insert.
        self.return_id = False
        super(SQLInsertCompiler, self).__init__(*args, **kwargs)
    def placeholder(self, field, val):
        """Return the SQL placeholder string to use for one value of `field`."""
        if field is None:
            # A field value of None means the value is raw.
            return val
        elif hasattr(field, 'get_placeholder'):
            # Some fields (e.g. geo fields) need special munging before
            # they can be inserted.
            return field.get_placeholder(val, self.connection)
        else:
            # Return the common case for the placeholder.
            return '%s'
    def as_sql(self):
        """
        Build the INSERT statement(s). Returns a list of (sql, params)
        pairs: a single pair when bulk insert (or RETURNING) is used,
        otherwise one pair per object being inserted.
        """
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
        opts = self.query.get_meta()
        result = ['INSERT INTO %s' % qn(opts.db_table)]
        has_fields = bool(self.query.fields)
        fields = self.query.fields if has_fields else [opts.pk]
        result.append('(%s)' % ', '.join([qn(f.column) for f in fields]))
        if has_fields:
            params = values = [
                [
                    f.get_db_prep_save(getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True), connection=self.connection)
                    for f in fields
                ]
                for obj in self.query.objs
            ]
        else:
            # No concrete fields: insert only the pk's backend default value.
            values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
            params = [[]]
            fields = [None]
        # Bulk insert requires plain '%s' placeholders everywhere and no
        # generated-id fetch.
        can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
            not self.return_id and self.connection.features.has_bulk_insert)
        if can_bulk:
            placeholders = [["%s"] * len(fields)]
        else:
            placeholders = [
                [self.placeholder(field, v) for field, v in zip(fields, val)]
                for val in values
            ]
            # Oracle Spatial needs to remove some values due to #10888
            params = self.connection.ops.modify_insert_params(placeholders, params)
        if self.return_id and self.connection.features.can_return_id_from_insert:
            params = params[0]
            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
            result.append("VALUES (%s)" % ", ".join(placeholders[0]))
            r_fmt, r_params = self.connection.ops.return_insert_id()
            # Skip empty r_fmt to allow subclasses to customize behaviour for
            # 3rd party backends. Refs #19096.
            if r_fmt:
                result.append(r_fmt % col)
                params += r_params
            return [(" ".join(result), tuple(params))]
        if can_bulk:
            result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
            return [(" ".join(result), tuple([v for val in values for v in val]))]
        else:
            return [
                (" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
                for p, vals in zip(placeholders, params)
            ]
    def execute_sql(self, return_id=False):
        """
        Run the insert(s). When return_id is True (only valid for a single
        object), return the new row's primary key via the backend's
        preferred mechanism; otherwise return None.
        """
        assert not (return_id and len(self.query.objs) != 1)
        self.return_id = return_id
        cursor = self.connection.cursor()
        for sql, params in self.as_sql():
            cursor.execute(sql, params)
        if not (return_id and cursor):
            return
        if self.connection.features.can_return_id_from_insert:
            return self.connection.ops.fetch_returned_insert_id(cursor)
        return self.connection.ops.last_insert_id(cursor,
                self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
    """Compiles a DELETE statement against a single table."""

    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        tables = self.query.tables
        assert len(tables) == 1, \
            "Can only delete from one table at a time."
        quote = self.quote_name_unless_alias
        sql = 'DELETE FROM %s' % quote(tables[0])
        where, params = self.query.where.as_sql(qn=quote, connection=self.connection)
        if where:
            sql = '%s WHERE %s' % (sql, where)
        return sql, tuple(params)
class SQLUpdateCompiler(SQLCompiler):
    """Compiles UPDATE statements, including related-table updates."""
    def as_sql(self):
        """
        Creates the SQL for this query. Returns the SQL string and tuple of
        parameters; ('', ()) when there is nothing to update.
        """
        self.pre_sql_setup()
        if not self.query.values:
            return '', ()
        table = self.query.tables[0]
        qn = self.quote_name_unless_alias
        result = ['UPDATE %s' % qn(table)]
        result.append('SET')
        values, update_params = [], []
        for field, model, val in self.query.values:
            if hasattr(val, 'prepare_database_save'):
                # Model instances know how to prepare themselves for saving
                # against a foreign key field.
                val = val.prepare_database_save(field)
            else:
                val = field.get_db_prep_save(val, connection=self.connection)
            # Getting the placeholder for the field.
            if hasattr(field, 'get_placeholder'):
                placeholder = field.get_placeholder(val, self.connection)
            else:
                placeholder = '%s'
            if hasattr(val, 'evaluate'):
                # Expression objects (e.g. F() arithmetic) are compiled to SQL.
                val = SQLEvaluator(val, self.query, allow_joins=False)
            name = field.column
            if hasattr(val, 'as_sql'):
                sql, params = val.as_sql(qn, self.connection)
                values.append('%s = %s' % (qn(name), sql))
                update_params.extend(params)
            elif val is not None:
                values.append('%s = %s' % (qn(name), placeholder))
                update_params.append(val)
            else:
                values.append('%s = NULL' % qn(name))
        if not values:
            return '', ()
        result.append(', '.join(values))
        where, params = self.query.where.as_sql(qn=qn, connection=self.connection)
        if where:
            result.append('WHERE %s' % where)
        return ' '.join(result), tuple(update_params + params)
    def execute_sql(self, result_type):
        """
        Execute the specified update. Returns the number of rows affected by
        the primary update query (or, when the primary query is empty, by
        the first non-empty related update).
        """
        cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
        rows = cursor.rowcount if cursor else 0
        is_empty = cursor is None
        del cursor
        for query in self.query.get_related_updates():
            aux_rows = query.get_compiler(self.using).execute_sql(result_type)
            if is_empty:
                rows = aux_rows
                is_empty = False
        return rows
    def pre_sql_setup(self):
        """
        If the update depends on results from other tables, munge the query
        so that only the primary table is updated, filtering by a pk__in
        sub-select (or a pre-selected list of ids) over the other tables.
        """
        self.query.select_related = False
        self.query.clear_ordering(True)
        super(SQLUpdateCompiler, self).pre_sql_setup()
        count = self.query.count_active_tables()
        if not self.query.related_updates and count == 1:
            return
        # We need to use a sub-select in the where clause to filter on things
        # from other tables.
        query = self.query.clone(klass=Query)
        query.bump_prefix()
        query.extra = {}
        query.select = []
        query.add_fields([query.get_meta().pk.name])
        # Recheck the count - it is possible that fiddling with the select
        # fields above removes tables from the query. Refs #18304.
        count = query.count_active_tables()
        if not self.query.related_updates and count == 1:
            return
        must_pre_select = count > 1 and not self.connection.features.update_can_self_select
        # Now we adjust the current query: reset the where clause and get rid
        # of all the tables we don't need (since they're in the sub-select).
        self.query.where = self.query.where_class()
        if self.query.related_updates or must_pre_select:
            # Either we're using the idents in multiple update queries (so
            # requiring a clean list of ids), or the backend can't select
            # from the table being updated, so pre-fetch the pks here.
            idents = []
            for rows in query.get_compiler(self.using).execute_sql(MULTI):
                idents.extend([r[0] for r in rows])
            self.query.add_filter(('pk__in', idents))
            self.query.related_ids = idents
        else:
            # The fast path: filter by the sub-select directly.
            self.query.add_filter(('pk__in', query))
        for alias in self.query.tables[1:]:
            self.query.alias_refcount[alias] = 0
class SQLAggregateCompiler(SQLCompiler):
    """Compiles aggregate queries by selecting over an inner subquery."""

    def as_sql(self, qn=None):
        """
        Creates the SQL for this query. Returns the SQL string and list of
        parameters.
        """
        if qn is None:
            qn = self.quote_name_unless_alias
        fragments = []
        params = []
        for aggregate in self.query.aggregate_select.values():
            agg_sql, agg_params = aggregate.as_sql(qn, self.connection)
            fragments.append(agg_sql)
            params.extend(agg_params)
        sql = 'SELECT %s FROM (%s) subquery' % (
            ', '.join(fragments), self.query.subquery)
        return sql, tuple(params) + self.query.sub_params
class SQLDateCompiler(SQLCompiler):
    """Compiles queries produced by QuerySet.dates(), yielding date objects."""
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.

        Each yielded value is a datetime.date; datetimes returned by the
        backend are truncated to their date component.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        if resolve_columns:
            from django.db.models.fields import DateField
            fields = [DateField()]
        else:
            from django.db.backends.util import typecast_date
            needs_string_cast = self.connection.features.needs_datetime_string_cast
        offset = len(self.query.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                date = row[offset]
                if resolve_columns:
                    date = self.resolve_columns(row, fields)[offset]
                elif needs_string_cast:
                    # Some backends return dates as strings; coerce them.
                    date = typecast_date(str(date))
                if isinstance(date, datetime.datetime):
                    date = date.date()
                yield date
class SQLDateTimeCompiler(SQLCompiler):
    """Compiles queries produced by QuerySet.datetimes(), yielding datetimes."""
    def results_iter(self):
        """
        Returns an iterator over the results from executing this query.

        Under USE_TZ, naive values from the database are re-interpreted in
        self.query.tzinfo before being yielded.
        """
        resolve_columns = hasattr(self, 'resolve_columns')
        if resolve_columns:
            from django.db.models.fields import DateTimeField
            fields = [DateTimeField()]
        else:
            from django.db.backends.util import typecast_timestamp
            needs_string_cast = self.connection.features.needs_datetime_string_cast
        offset = len(self.query.extra_select)
        for rows in self.execute_sql(MULTI):
            for row in rows:
                # NOTE: the local name `datetime` shadows the datetime module
                # inside this loop (historical upstream style).
                datetime = row[offset]
                if resolve_columns:
                    datetime = self.resolve_columns(row, fields)[offset]
                elif needs_string_cast:
                    datetime = typecast_timestamp(str(datetime))
                if settings.USE_TZ:
                    if datetime is None:
                        raise ValueError("Database returned an invalid value "
                                         "in QuerySet.dates(). Are time zone "
                                         "definitions and pytz installed?")
                    # Strip the naive value's tzinfo, then localize it to the
                    # timezone requested by the query.
                    datetime = datetime.replace(tzinfo=None)
                    datetime = timezone.make_aware(datetime, self.query.tzinfo)
                yield datetime
def order_modified_iter(cursor, trim, sentinel):
    """
    Yield blocks of rows from `cursor` with the last `trim` columns removed
    from every row (those columns only exist to support ordering).

    Iteration stops when cursor.fetchmany() returns `sentinel`.
    """
    fetch = lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
    for batch in iter(fetch, sentinel):
        yield [row[:-trim] for row in batch]
| true
| true
|
f707512dbcc6d2e4322e81a713e62fad09555c31
| 283
|
py
|
Python
|
kolibri/tasks/permissions.py
|
rtibbles/kolibri
|
7efdf0497738c793f281013f9913f8ecc1a55f10
|
[
"MIT"
] | null | null | null |
kolibri/tasks/permissions.py
|
rtibbles/kolibri
|
7efdf0497738c793f281013f9913f8ecc1a55f10
|
[
"MIT"
] | 7
|
2016-06-23T16:01:02.000Z
|
2018-12-01T22:15:13.000Z
|
kolibri/tasks/permissions.py
|
MingDai/kolibri
|
e4719b7d41a40e0cc9fc4150bc137017643fea62
|
[
"MIT"
] | 1
|
2021-06-01T23:15:26.000Z
|
2021-06-01T23:15:26.000Z
|
from rest_framework.permissions import BasePermission
class IsDeviceOwnerOnly(BasePermission):
    """
    Restrict both view-level and object-level access to the device owner
    (i.e. a superuser). All other users are denied.
    """

    def has_permission(self, request, view):
        # Only the device owner may access the view at all.
        return request.user.is_superuser

    def has_object_permission(self, request, view, obj):
        # Object-level access uses exactly the same check; `obj` is ignored.
        return self.has_permission(request, view)
| 25.727273
| 56
| 0.766784
|
from rest_framework.permissions import BasePermission
class IsDeviceOwnerOnly(BasePermission):
def has_permission(self, request, view):
return request.user.is_superuser
def has_object_permission(self, request, view, obj):
return request.user.is_superuser
| true
| true
|
f707513df08df3e3ef4db3b743256ac0e7b4dd65
| 13,413
|
py
|
Python
|
lib/datasets/dist_fake.py
|
AlexErfan/Image_manipulation_detection
|
f07008b86112ae7d40a3728c715c53b6054ecc70
|
[
"MIT"
] | null | null | null |
lib/datasets/dist_fake.py
|
AlexErfan/Image_manipulation_detection
|
f07008b86112ae7d40a3728c715c53b6054ecc70
|
[
"MIT"
] | null | null | null |
lib/datasets/dist_fake.py
|
AlexErfan/Image_manipulation_detection
|
f07008b86112ae7d40a3728c715c53b6054ecc70
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Peng Zhou
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from lib.datasets.imdb import imdb
import lib.datasets.ds_utils as ds_utils
import numpy as np
import scipy.sparse
import scipy.io as sio
import lib.utils.cython_bbox
import pickle
import subprocess
import uuid
import pdb
from .voc_eval import voc_eval
from lib.config import config as cfg
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class dist_fake(imdb):
    """
    imdb subclass for a tampered/authentic image detection dataset
    (NIST NC2016-style layout).

    Each line of the image-set file encodes an image id followed by groups
    of five tokens per object: x1 y1 x2 y2 class.
    """
    def __init__(self, image_set, year, dist_path=None):
        imdb.__init__(self, image_set)
        self._year = year
        # image_set is expected to look like 'dist_<split>'.
        self._image_set = image_set.split('dist_')[1]
        self._dist_path = self._get_default_path() if dist_path is None \
            else dist_path
        self._data_path=self._dist_path
        self._classes = ('__background__', # always index 0
                         'tamper','authentic')
        # NOTE: the tuple above is immediately overridden; only this
        # two-class layout ('authentic' at index 0) is actually used.
        self._classes = ('authentic', # always index 0
                         'tamper')
        #self.classes =('authentic', # always index 0
        #'splicing','removal')
        self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
        self._image_ext = {'.png','.jpg','.tif','.bmp','.JPG'}
        self._image_index = self._load_image_set_index()
        # Default to roidb handler
        self._roidb_handler = self.gt_roidb
        assert os.path.exists(self._data_path), \
            'Path does not exist: {}'.format(self._data_path)
    def image_path_at(self, i):
        """
        Return the absolute path to image i in the image sequence.
        """
        return self.image_path_from_index(os.path.splitext(self._image_index[i].split(' ')[0])[0])
    def image_path_from_index(self, index):
        """
        Construct an image path from the image's "index" identifier.

        Tries every known extension under self._data_path, then under a
        hard-coded fallback directory; returns the first existing file.
        """
        for ext in self._image_ext:
            #image_path = os.path.join('/home-3/pengzhou@umd.edu/work/xintong/medifor/portrait/test_data',
            #index + ext)
            image_path = os.path.join(self._data_path,
                                  index + ext)
            image_path1=os.path.join('/home-3/pengzhou@umd.edu/work/pengzhou/dataset/NC2016_Test0613',
                                  index + ext)
            if os.path.isfile(image_path):
                return image_path
            elif os.path.isfile(image_path1):
                return image_path1
            else:
                continue
        # NOTE(review): reached only when no extension matched, so this
        # assert always fails here and reports the last extension tried.
        assert os.path.isfile(image_path) and os.path.isfile(image_path1), \
            'Path does not exist: {}'.format(image_path)
        return image_path
    def _load_image_set_index(self):
        """
        Load the indexes listed in this dataset's image set file.
        """
        # Example path to image set file:
        # self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
        image_set_file = os.path.join(self._data_path,
                                      self._image_set + '.txt')
        assert os.path.exists(image_set_file), \
            'Path does not exist: {}'.format(image_set_file)
        with open(image_set_file) as f:
            image_index = [x.strip() for x in f.readlines()]
        #print(image_index)
        return image_index
    def _get_default_path(self):
        """
        Return the default path where PASCAL VOC is expected to be installed.
        """
        return os.path.join(cfg.DATA_DIR, 'NC2016_Test0613')
    def gt_roidb(self):
        """
        Return the database of ground-truth regions of interest.
        This function loads/saves from/to a cache file to speed up future calls.
        """
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        if os.path.exists(cache_file):
            with open(cache_file, 'rb') as fid:
                try:
                    roidb = pickle.load(fid)
                except:
                    # Fall back for caches pickled under Python 2.
                    roidb = pickle.load(fid, encoding='bytes')
            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
            return roidb
        gt_roidb = [self.roidb_gt(index)
                    for index in self.image_index]
        with open(cache_file, 'wb') as fid:
            pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
        print('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb
    def rpn_roidb(self):
        """
        Return the gt roidb merged with RPN proposals for training splits,
        or proposals only for the 'test' split of non-2007 sets.
        """
        if int(self._year) == 2007 or self._image_set != 'test':
            gt_roidb = self.gt_roidb()
            rpn_roidb = self._load_rpn_roidb(gt_roidb)
            roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
        else:
            roidb = self._load_rpn_roidb(None)
        return roidb
    def roidb_gt(self,image_id):
        """
        Parse one image-set line (id + 5 tokens per object) into a roidb
        entry dict with boxes, classes, overlaps and areas.
        """
        num_objs = int(len(image_id.split(' ')[1:])/5)
        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        # "Seg" area for pascal is just the box area
        seg_areas = np.zeros((num_objs), dtype=np.float32)
        # Load object bounding boxes into a data frame.
        for ix in range(num_objs):
            bbox = image_id.split(' ')[ix*5+1:ix*5+5]
            # Make pixel indexes 0-based
            x1 = float(bbox[0])
            y1 = float(bbox[1])
            x2 = float(bbox[2])
            y2 = float(bbox[3])
            # Clamp negative coordinates to the image border.
            if x1<0:
                x1=0
            if y1<0:
                y1=0
            try:
                # Class token may be a name known to _class_to_ind...
                cls=self._class_to_ind[image_id.split(' ')[ix*5+5]]
            except:
                # ...or a raw integer label; 0 is remapped to 2 here.
                if int(image_id.split(' ')[ix*5+5])==0:
                    print('authentic')
                    cls=2
                else:
                    cls = int(image_id.split(' ')[ix*5+5])
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0
            seg_areas[ix] = (x2 - x1 ) * (y2 - y1)
        overlaps = scipy.sparse.csr_matrix(overlaps)
        return {'boxes': boxes,
                'gt_classes': gt_classes,
                'gt_overlaps': overlaps,
                'flipped': False,
                'JPGed':False,
                'noised':False,
                'seg_areas': seg_areas}
    def _load_rpn_roidb(self, gt_roidb):
        """Load pickled RPN proposal boxes and merge them into a roidb."""
        filename = self.config['rpn_file']
        print('loading {}'.format(filename))
        assert os.path.exists(filename), \
            'rpn data not found at: {}'.format(filename)
        with open(filename, 'rb') as f:
            box_list = pickle.load(f)
        return self.create_roidb_from_box_list(box_list, gt_roidb)
    def _load_pascal_annotation(self, index):
        """
        Load image and bounding boxes info from XML file in the PASCAL VOC
        format.

        NOTE(review): `ET` is not imported in this module's visible imports;
        this method appears unused/legacy here — confirm before calling.
        """
        filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
        tree = ET.parse(filename)
        objs = tree.findall('object')
        if not self.config['use_diff']:
            # Exclude the samples labeled as difficult
            non_diff_objs = [
                obj for obj in objs if int(obj.find('difficult').text) == 0]
            # if len(non_diff_objs) != len(objs):
            #     print 'Removed {} difficult objects'.format(
            #         len(objs) - len(non_diff_objs))
            objs = non_diff_objs
        num_objs = len(objs)
        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
        # "Seg" area for pascal is just the box area
        seg_areas = np.zeros((num_objs), dtype=np.float32)
        # Load object bounding boxes into a data frame.
        for ix, obj in enumerate(objs):
            bbox = obj.find('bndbox')
            # Make pixel indexes 0-based
            x1 = float(bbox.find('xmin').text) - 1
            y1 = float(bbox.find('ymin').text) - 1
            x2 = float(bbox.find('xmax').text) - 1
            y2 = float(bbox.find('ymax').text) - 1
            cls = self._class_to_ind[obj.find('name').text.lower().strip()]
            boxes[ix, :] = [x1, y1, x2, y2]
            gt_classes[ix] = cls
            overlaps[ix, cls] = 1.0
            seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
        overlaps = scipy.sparse.csr_matrix(overlaps)
        return {'boxes': boxes,
                'gt_classes': gt_classes,
                'gt_overlaps': overlaps,
                'flipped': False,
                'seg_areas': seg_areas}
    def _get_comp_id(self):
        """Return the competition id, optionally salted for uniqueness."""
        comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
                   else self._comp_id)
        return comp_id
    def _get_voc_results_file_template(self):
        """Return the per-class detections filename template (RGB stream)."""
        # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
        filename = 'nist_' + self._image_set + '_{:s}.txt'
        path = os.path.join(
            '.',
            filename)
        return path
    def _get_voc_noise_results_file_template(self):
        """Return the per-class detections filename template (noise stream)."""
        # VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
        filename = 'nist_' + self._image_set + '_{:s}_noise.txt'
        path = os.path.join(
            '.',
            filename)
        return path
    def _write_voc_results_file(self, all_boxes):
        """Write all_boxes[cls][image] detections to per-class text files."""
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            print('Writing {} VOC results file'.format(cls))
            filename = self._get_voc_results_file_template().format(cls)
            print(filename)
            with open(filename, 'w') as f:
                for im_ind, index in enumerate(self.image_index):
                    dets = all_boxes[cls_ind][im_ind]
                    if dets == []:
                        continue
                    # the VOCdevkit expects 1-based indices
                    for k in range(dets.shape[0]):
                        #pdb.set_trace()
                        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(index.split(' ')[0], dets[k, -1],
                                       dets[k, 0] + 1, dets[k, 1] + 1,
                                       dets[k, 2] + 1, dets[k, 3] + 1))
        #pdb.set_trace()
    def _do_python_eval(self, output_dir='output'):
        """
        Run the Python VOC-style evaluation over the written results files,
        printing per-class AP/recall/precision and saving PR curves.
        """
        annopath = os.path.join(
            self._dist_path,
            'coco_multi' ,
            'Annotations',
            '{:s}.xml')
        imagesetfile = os.path.join(
            self._dist_path,
            self._image_set + '.txt')
        cachedir = os.path.join(self._dist_path, 'annotations_cache')
        aps = []
        # The PASCAL VOC metric changed in 2010
        #use_07_metric = True if int(self._year) < 2010 else False
        use_07_metric = False
        print('dist metric? ' + ('Yes' if use_07_metric else 'No'))
        if not os.path.isdir(output_dir):
            os.mkdir(output_dir)
        for i, cls in enumerate(self._classes):
            # Skip background / the 'authentic' class at index 0.
            if cls == '__background__' or cls == self.classes[0]:
                cls_ind=0
                continue
            else:
                cls_ind=self._class_to_ind[cls]
            #elif cls=='median_filtering':
                #cls_ind=3
                #continue
            filename = self._get_voc_results_file_template().format(cls)
            filename2 = self._get_voc_noise_results_file_template().format(cls)
            print(cls_ind)
            rec, prec, ap = voc_eval(
                filename,filename2, annopath, imagesetfile, cls_ind, cachedir, ovthresh=0.5,
                use_07_metric=use_07_metric,fuse=False)
            aps += [ap]
            print(('AP for {} = {:.4f},recall = {:.4f}, precision = {:.4f}'.format(cls, ap,rec[-1],prec[-1])))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
                pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
            # Save the precision/recall curve for this class as an image.
            fig=plt.figure()
            plt.plot(rec,prec)
            fig.suptitle('PR curve for {} detection'.format(cls),fontsize=20)
            plt.xlabel('recall',fontsize=15)
            plt.xlim((0,1.0))
            plt.ylim((0,1.0))
            plt.ylabel('precision',fontsize=15)
            fig.savefig('{}.jpg'.format(cls))
        print(('Mean AP = {:.4f}'.format(np.mean(aps))))
        print('~~~~~~~~')
        print('Results:')
        for ap in aps:
            print(('{:.3f}'.format(ap)))
        print(('{:.3f}'.format(np.mean(aps))))
        print('~~~~~~~~')
        print('')
        print('--------------------------------------------------------------')
        print('Results computed with the **unofficial** Python eval code.')
        print('Results should be very close to the official MATLAB eval code.')
        print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
        print('-- Thanks, The Management')
        print('--------------------------------------------------------------')
    def _do_matlab_eval(self, output_dir='output'):
        """Shell out to the official MATLAB VOC evaluation wrapper."""
        print('-----------------------------------------------------')
        print('Computing results with the official MATLAB eval code.')
        print('-----------------------------------------------------')
        path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
                            'VOCdevkit-matlab-wrapper')
        cmd = 'cd {} && '.format(path)
        cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
        cmd += '-r "dbstop if error; '
        cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
            .format(self._devkit_path, self._get_comp_id(),
                    self._image_set, output_dir)
        print(('Running:\n{}'.format(cmd)))
        status = subprocess.call(cmd, shell=True)
    def evaluate_detections(self, all_boxes, output_dir):
        """Write results files, evaluate them, and optionally clean up."""
        self._write_voc_results_file(all_boxes)
        self._do_python_eval(output_dir)
        #if self.config['matlab_eval']:
            #self._do_matlab_eval(output_dir)
        if self.config['cleanup']:
            for cls in self._classes:
                if cls == '__background__':
                    continue
                filename = self._get_voc_results_file_template().format(cls)
                #os.remove(filename)
    def competition_mode(self, on):
        """Toggle competition mode: disables result-file salting and cleanup."""
        if on:
            self.config['use_salt'] = False
            self.config['cleanup'] = False
        else:
            self.config['use_salt'] = True
            self.config['cleanup'] = True
if __name__ == '__main__':
    # Manual smoke test: build the dataset wrapper, force roidb creation,
    # then drop into an interactive IPython shell for inspection.
    from datasets.dist_fake import dist_fake
    d = dist_fake('trainval', '2007')
    res = d.roidb
    from IPython import embed;
    embed()
| 35.672872
| 104
| 0.600239
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from lib.datasets.imdb import imdb
import lib.datasets.ds_utils as ds_utils
import numpy as np
import scipy.sparse
import scipy.io as sio
import lib.utils.cython_bbox
import pickle
import subprocess
import uuid
import pdb
from .voc_eval import voc_eval
from lib.config import config as cfg
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class dist_fake(imdb):
def __init__(self, image_set, year, dist_path=None):
imdb.__init__(self, image_set)
self._year = year
self._image_set = image_set.split('dist_')[1]
self._dist_path = self._get_default_path() if dist_path is None \
else dist_path
self._data_path=self._dist_path
self._classes = ('__background__',
'tamper','authentic')
self._classes = ('authentic',
'tamper')
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = {'.png','.jpg','.tif','.bmp','.JPG'}
self._image_index = self._load_image_set_index()
self._roidb_handler = self.gt_roidb
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
return self.image_path_from_index(os.path.splitext(self._image_index[i].split(' ')[0])[0])
def image_path_from_index(self, index):
for ext in self._image_ext:
image_path = os.path.join(self._data_path,
index + ext)
image_path1=os.path.join('/home-3/pengzhou@umd.edu/work/pengzhou/dataset/NC2016_Test0613',
index + ext)
if os.path.isfile(image_path):
return image_path
elif os.path.isfile(image_path1):
return image_path1
else:
continue
assert os.path.isfile(image_path) and os.path.isfile(image_path1), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
image_set_file = os.path.join(self._data_path,
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
return os.path.join(cfg.DATA_DIR, 'NC2016_Test0613')
def gt_roidb(self):
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
try:
roidb = pickle.load(fid)
except:
roidb = pickle.load(fid, encoding='bytes')
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self.roidb_gt(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def roidb_gt(self,image_id):
num_objs = int(len(image_id.split(' ')[1:])/5)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
seg_areas = np.zeros((num_objs), dtype=np.float32)
for ix in range(num_objs):
bbox = image_id.split(' ')[ix*5+1:ix*5+5]
x1 = float(bbox[0])
y1 = float(bbox[1])
x2 = float(bbox[2])
y2 = float(bbox[3])
if x1<0:
x1=0
if y1<0:
y1=0
try:
cls=self._class_to_ind[image_id.split(' ')[ix*5+5]]
except:
if int(image_id.split(' ')[ix*5+5])==0:
print('authentic')
cls=2
else:
cls = int(image_id.split(' ')[ix*5+5])
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 ) * (y2 - y1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'JPGed':False,
'noised':False,
'seg_areas': seg_areas}
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = pickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
seg_areas = np.zeros((num_objs), dtype=np.float32)
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
filename = 'nist_' + self._image_set + '_{:s}.txt'
path = os.path.join(
'.',
filename)
return path
def _get_voc_noise_results_file_template(self):
filename = 'nist_' + self._image_set + '_{:s}_noise.txt'
path = os.path.join(
'.',
filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
print(filename)
with open(filename, 'w') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(index.split(' ')[0], dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._dist_path,
'coco_multi' ,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._dist_path,
self._image_set + '.txt')
cachedir = os.path.join(self._dist_path, 'annotations_cache')
aps = []
use_07_metric = False
print('dist metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__' or cls == self.classes[0]:
cls_ind=0
continue
else:
cls_ind=self._class_to_ind[cls]
filename = self._get_voc_results_file_template().format(cls)
filename2 = self._get_voc_noise_results_file_template().format(cls)
print(cls_ind)
rec, prec, ap = voc_eval(
filename,filename2, annopath, imagesetfile, cls_ind, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric,fuse=False)
aps += [ap]
print(('AP for {} = {:.4f},recall = {:.4f}, precision = {:.4f}'.format(cls, ap,rec[-1],prec[-1])))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
fig=plt.figure()
plt.plot(rec,prec)
fig.suptitle('PR curve for {} detection'.format(cls),fontsize=20)
plt.xlabel('recall',fontsize=15)
plt.xlim((0,1.0))
plt.ylim((0,1.0))
plt.ylabel('precision',fontsize=15)
fig.savefig('{}.jpg'.format(cls))
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
    # Quick interactive smoke test: build the dataset, touch the roidb,
    # then drop into an IPython shell for manual inspection.
    from datasets.dist_fake import dist_fake
    d = dist_fake('trainval', '2007')
    res = d.roidb
    from IPython import embed
    embed()
| true
| true
|
f707518f375e2f34b650d7b3d43ac6c81aa6942b
| 3,263
|
py
|
Python
|
data/tools/download.py
|
leonnnop/VAR
|
59be66371e9a8a8a3e239cb5bdb1cf7ac8e86cae
|
[
"MIT"
] | 6
|
2022-03-25T20:52:23.000Z
|
2022-03-30T11:38:52.000Z
|
data/tools/download.py
|
leonnnop/VAR
|
59be66371e9a8a8a3e239cb5bdb1cf7ac8e86cae
|
[
"MIT"
] | null | null | null |
data/tools/download.py
|
leonnnop/VAR
|
59be66371e9a8a8a3e239cb5bdb1cf7ac8e86cae
|
[
"MIT"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
# This scripts is copied from
# https://github.com/activitynet/ActivityNet/blob/master/Crawler/Kinetics/download.py # noqa: E501
# The code is licensed under the MIT licence.
import argparse
import os
import ssl
import subprocess
import mmcv
from joblib import Parallel, delayed
ssl._create_default_https_context = ssl._create_unverified_context
data_file = './data'
output_dir = f'{data_file}/videos'
def download_clip(video_identifier,
                  output_filename,
                  num_attempts=5,
                  url_base='https://www.youtube.com/watch?v='):
    """Fetch a single YouTube video with youtube-dl.

    video_identifier: 11-character YouTube video id.
    output_filename: destination path for the mp4 file.
    num_attempts: retries before giving up on youtube-dl.
    url_base: prefix used to build the watch URL.

    Returns (status, message); status is True when the file exists on
    disk after the call.
    """
    # Defensive argument checking.
    assert isinstance(video_identifier, str), 'video_identifier must be string'
    assert isinstance(output_filename, str), 'output_filename must be string'
    assert len(video_identifier) == 11, 'video_identifier must have length 11'

    status = False
    if not os.path.exists(output_filename):
        command = ' '.join([
            'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',
            '-f', 'mp4', '-o',
            '"%s"' % output_filename,
            '"%s"' % (url_base + video_identifier),
        ])
        print(command)
        attempts = 0
        while True:
            try:
                subprocess.check_output(
                    command, shell=True, stderr=subprocess.STDOUT)
            except subprocess.CalledProcessError:
                attempts += 1
                if attempts == num_attempts:
                    return status, 'Fail'
            else:
                break

    # Check if the video was successfully saved.
    status = os.path.exists(output_filename)
    return status, 'Downloaded'
def download_clip_wrapper(youtube_id, output_dir):
    """Download one clip into *output_dir*; helper for parallel runs.

    Returns a (youtube_id, success, message) tuple.
    """
    # File names are the raw ids so they align with the annotations.
    target = os.path.join(output_dir, youtube_id + '.mp4')
    if os.path.exists(target):
        return (youtube_id, True, 'Exists')
    ok, message = download_clip(youtube_id, target)
    return (youtube_id, ok, message)
def main(youtube_ids, output_dir, num_jobs=24):
    """Download every id in *youtube_ids* into *output_dir*.

    num_jobs == 1 runs serially; otherwise joblib fans the work out.
    A per-video status report is written to download_report.json.
    """
    # Create the destination folder up front.
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Download all clips, serially or in parallel.
    if num_jobs == 1:
        status_list = [download_clip_wrapper(vid, output_dir)
                       for vid in youtube_ids]
    else:
        status_list = Parallel(n_jobs=num_jobs)(
            delayed(download_clip_wrapper)(vid, output_dir)
            for vid in youtube_ids)

    # Persist the download report.
    mmcv.dump(status_list, 'download_report.json')
if __name__ == '__main__':
    # Bug fix: the id file was opened and never closed; use a context
    # manager so the handle is released deterministically.
    with open(f'{data_file}/tools/var_videos.txt', 'r') as id_file:
        youtube_ids = [line.strip() for line in id_file.readlines()]
    main(youtube_ids, output_dir, 24)
| 32.959596
| 99
| 0.644192
|
parse
import os
import ssl
import subprocess
import mmcv
from joblib import Parallel, delayed
ssl._create_default_https_context = ssl._create_unverified_context
data_file = './data'
output_dir = f'{data_file}/videos'
def download_clip(video_identifier,
output_filename,
num_attempts=5,
url_base='https://www.youtube.com/watch?v='):
assert isinstance(video_identifier, str), 'video_identifier must be string'
assert isinstance(output_filename, str), 'output_filename must be string'
assert len(video_identifier) == 11, 'video_identifier must have length 11'
status = False
if not os.path.exists(output_filename):
command = [
'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',
'-f', 'mp4', '-o',
'"%s"' % output_filename,
'"%s"' % (url_base + video_identifier)
]
command = ' '.join(command)
print(command)
attempts = 0
while True:
try:
subprocess.check_output(
command, shell=True, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
attempts += 1
if attempts == num_attempts:
return status, 'Fail'
else:
break
status = os.path.exists(output_filename)
return status, 'Downloaded'
def download_clip_wrapper(youtube_id, output_dir):
output_filename = os.path.join(output_dir, youtube_id + '.mp4')
if os.path.exists(output_filename):
status = tuple([youtube_id, True, 'Exists'])
return status
downloaded, log = download_clip(youtube_id, output_filename)
status = tuple([youtube_id, downloaded, log])
return status
def main(youtube_ids, output_dir, num_jobs=24):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
if num_jobs == 1:
status_list = []
for index in youtube_ids:
status_list.append(download_clip_wrapper(index, output_dir))
else:
status_list = Parallel(n_jobs=num_jobs)(
delayed(download_clip_wrapper)(index, output_dir)
for index in youtube_ids)
mmcv.dump(status_list, 'download_report.json')
if __name__ == '__main__':
f = open(f'{data_file}/tools/var_videos.txt', 'r')
youtube_ids = [s.strip() for s in list(f.readlines())]
main(youtube_ids, output_dir, 24)
| true
| true
|
f70752ebca45f492453c7c567e68c3544ba9c320
| 4,130
|
py
|
Python
|
plugins/modules/oci_container_engine_cluster_migrate_to_native_vcn_status_facts.py
|
sagar2938/oci-ansible-collection
|
5b8ce583a0d5d0aabf14494d61aea4649e18d1e6
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_container_engine_cluster_migrate_to_native_vcn_status_facts.py
|
sagar2938/oci-ansible-collection
|
5b8ce583a0d5d0aabf14494d61aea4649e18d1e6
|
[
"Apache-2.0"
] | null | null | null |
plugins/modules/oci_container_engine_cluster_migrate_to_native_vcn_status_facts.py
|
sagar2938/oci-ansible-collection
|
5b8ce583a0d5d0aabf14494d61aea4649e18d1e6
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright (c) 2020, 2021 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_container_engine_cluster_migrate_to_native_vcn_status_facts
short_description: Fetches details about a ClusterMigrateToNativeVcnStatus resource in Oracle Cloud Infrastructure
description:
- Fetches details about a ClusterMigrateToNativeVcnStatus resource in Oracle Cloud Infrastructure
- Get details on a cluster's migration to native VCN.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
cluster_id:
description:
- The OCID of the cluster.
type: str
aliases: ["id"]
required: true
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific cluster_migrate_to_native_vcn_status
oci_container_engine_cluster_migrate_to_native_vcn_status_facts:
# required
cluster_id: "ocid1.cluster.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
cluster_migrate_to_native_vcn_status:
description:
- ClusterMigrateToNativeVcnStatus resource
returned: on success
type: complex
contains:
time_decommission_scheduled:
description:
- The date and time the non-native VCN is due to be decommissioned.
returned: on success
type: str
sample: "2017-07-21T16:11:29Z"
state:
description:
- The current migration status of the cluster.
returned: on success
type: str
sample: IN_PROGRESS
sample: {
"time_decommission_scheduled": "2017-07-21T16:11:29Z",
"state": "IN_PROGRESS"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
# Optional dependency: record whether the OCI Python SDK is importable so
# main() can fail with a clear message instead of an ImportError traceback.
try:
    from oci.container_engine import ContainerEngineClient

    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class ClusterMigrateToNativeVcnStatusFactsHelperGen(OCIResourceFactsHelperBase):
    """Supported operations: get"""

    def get_required_params_for_get(self):
        # Module parameters that must be present for a get() call.
        return [
            "cluster_id",
        ]

    def get_resource(self):
        # Fetch the migration status; call_with_backoff retries on
        # transient service errors.
        return oci_common_utils.call_with_backoff(
            self.client.get_cluster_migrate_to_native_vcn_status,
            cluster_id=self.module.params.get("cluster_id"),
        )
# Downstream customization hook: get_custom_class returns a user-registered
# subclass when one exists, otherwise a no-op placeholder class.
ClusterMigrateToNativeVcnStatusFactsHelperCustom = get_custom_class(
    "ClusterMigrateToNativeVcnStatusFactsHelperCustom"
)


class ResourceFactsHelper(
    ClusterMigrateToNativeVcnStatusFactsHelperCustom,
    ClusterMigrateToNativeVcnStatusFactsHelperGen,
):
    # Custom overrides (first in the MRO) take precedence over generated code.
    pass
def main():
    """Ansible entry point: validate arguments, fetch the facts, exit."""
    module_args = oci_common_utils.get_common_arg_spec()
    module_args.update(
        dict(cluster_id=dict(aliases=["id"], type="str", required=True),)
    )

    module = AnsibleModule(argument_spec=module_args)

    # Fail early with a clear message when the OCI SDK is absent.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_facts_helper = ResourceFactsHelper(
        module=module,
        resource_type="cluster_migrate_to_native_vcn_status",
        service_client_class=ContainerEngineClient,
        namespace="container_engine",
    )

    result = []

    # This module only supports the get operation.
    if resource_facts_helper.is_get():
        result = resource_facts_helper.get()
    else:
        resource_facts_helper.fail()

    module.exit_json(cluster_migrate_to_native_vcn_status=result)


if __name__ == "__main__":
    main()
| 28.680556
| 114
| 0.715012
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_container_engine_cluster_migrate_to_native_vcn_status_facts
short_description: Fetches details about a ClusterMigrateToNativeVcnStatus resource in Oracle Cloud Infrastructure
description:
- Fetches details about a ClusterMigrateToNativeVcnStatus resource in Oracle Cloud Infrastructure
- Get details on a cluster's migration to native VCN.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
cluster_id:
description:
- The OCID of the cluster.
type: str
aliases: ["id"]
required: true
extends_documentation_fragment: [ oracle.oci.oracle ]
"""
EXAMPLES = """
- name: Get a specific cluster_migrate_to_native_vcn_status
oci_container_engine_cluster_migrate_to_native_vcn_status_facts:
# required
cluster_id: "ocid1.cluster.oc1..xxxxxxEXAMPLExxxxxx"
"""
RETURN = """
cluster_migrate_to_native_vcn_status:
description:
- ClusterMigrateToNativeVcnStatus resource
returned: on success
type: complex
contains:
time_decommission_scheduled:
description:
- The date and time the non-native VCN is due to be decommissioned.
returned: on success
type: str
sample: "2017-07-21T16:11:29Z"
state:
description:
- The current migration status of the cluster.
returned: on success
type: str
sample: IN_PROGRESS
sample: {
"time_decommission_scheduled": "2017-07-21T16:11:29Z",
"state": "IN_PROGRESS"
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import oci_common_utils
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceFactsHelperBase,
get_custom_class,
)
try:
from oci.container_engine import ContainerEngineClient
HAS_OCI_PY_SDK = True
except ImportError:
HAS_OCI_PY_SDK = False
class ClusterMigrateToNativeVcnStatusFactsHelperGen(OCIResourceFactsHelperBase):
def get_required_params_for_get(self):
return [
"cluster_id",
]
def get_resource(self):
return oci_common_utils.call_with_backoff(
self.client.get_cluster_migrate_to_native_vcn_status,
cluster_id=self.module.params.get("cluster_id"),
)
ClusterMigrateToNativeVcnStatusFactsHelperCustom = get_custom_class(
"ClusterMigrateToNativeVcnStatusFactsHelperCustom"
)
class ResourceFactsHelper(
ClusterMigrateToNativeVcnStatusFactsHelperCustom,
ClusterMigrateToNativeVcnStatusFactsHelperGen,
):
pass
def main():
module_args = oci_common_utils.get_common_arg_spec()
module_args.update(
dict(cluster_id=dict(aliases=["id"], type="str", required=True),)
)
module = AnsibleModule(argument_spec=module_args)
if not HAS_OCI_PY_SDK:
module.fail_json(msg="oci python sdk required for this module.")
resource_facts_helper = ResourceFactsHelper(
module=module,
resource_type="cluster_migrate_to_native_vcn_status",
service_client_class=ContainerEngineClient,
namespace="container_engine",
)
result = []
if resource_facts_helper.is_get():
result = resource_facts_helper.get()
else:
resource_facts_helper.fail()
module.exit_json(cluster_migrate_to_native_vcn_status=result)
if __name__ == "__main__":
main()
| true
| true
|
f707533305b80bf36cdd6ae41ab2b2aee1f24d73
| 216
|
py
|
Python
|
Domains/Python/04 - Sets/Introduction to Sets/solution.py
|
abhinavgunwant/hackerrank-solutions
|
e016366cb6a9fac562a754d2b230fef907080733
|
[
"MIT"
] | 1
|
2019-06-09T00:04:56.000Z
|
2019-06-09T00:04:56.000Z
|
Domains/Python/04 - Sets/Introduction to Sets/solution.py
|
abhinavgunwant/hackerrank-solutions
|
e016366cb6a9fac562a754d2b230fef907080733
|
[
"MIT"
] | 19
|
2019-06-09T14:45:52.000Z
|
2019-06-17T18:52:53.000Z
|
Domains/Python/04 - Sets/Introduction to Sets/solution.py
|
abhinavgunwant/hackerrank-solutions
|
e016366cb6a9fac562a754d2b230fef907080733
|
[
"MIT"
] | null | null | null |
def average(array):
    """Return the arithmetic mean of the distinct values in *array*."""
    distinct = set(array)
    return sum(distinct) / len(distinct)
if __name__ == '__main__':
    # HackerRank harness: first input line is the element count, the
    # second holds the space-separated values.
    n = int(input())
    heights = [int(token) for token in input().split()]
    result = average(heights)
    print(result)
| 24
| 41
| 0.606481
|
def average(array):
array = list(set(array))
return sum(array)/len(array)
if __name__ == '__main__':
n = int(input())
arr = list(map(int, input().split()))
result = average(arr)
print(result)
| true
| true
|
f70753e6a5e249323b5a318751b32afa0841e720
| 2,588
|
py
|
Python
|
app.py
|
irvingpop/mode_wle_signer
|
8d0789620d847155388f3713c2eaf4a39b6d35a5
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
irvingpop/mode_wle_signer
|
8d0789620d847155388f3713c2eaf4a39b6d35a5
|
[
"Apache-2.0"
] | null | null | null |
app.py
|
irvingpop/mode_wle_signer
|
8d0789620d847155388f3713c2eaf4a39b6d35a5
|
[
"Apache-2.0"
] | null | null | null |
#!env python3
from flask import Flask, request, redirect
from hashlib import sha256
import hmac
import base64
import time
import urllib
# allow for relative importing if run directly
if __name__ == "__main__":
from config import secrets, reports, listen_port
else:
from .config import secrets, reports, listen_port
app = Flask(__name__)
@app.route('/report/<report>')
def sign_report_url(report):
    """Build and sign a Mode embed URL for *report*, then redirect to it
    (or render it inline when the ``iframe`` query parameter is set).

    Query parameters:
      token      -- must match secrets['access_token'] (required)
      account_id -- overrides the report's default parameter value
      iframe     -- any value switches the response to an inline iframe
    """
    # check for a valid token
    provided_token = request.args.get('token') or 'missing'
    if provided_token != secrets.get('access_token'):
        return "Missing or incorrect token provided"

    # Guard clause: unknown report names fail fast.
    if report not in reports:
        return f"Missing report {report}"

    # Generating the embed URL from the configured report values.
    this_report = reports.get(report)
    mode_report_id = this_report.get('mode_report')
    param_name = this_report.get('param_name')
    param_value = request.args.get(
        'account_id') or this_report.get('param_default_value')
    do_iframe = request.args.get('iframe') or False
    timestamp = str(int(time.time()))  # current time in unix time
    url = make_url('https://app.mode.com', secrets.get('mode_team'), 'reports',
                   mode_report_id, 'embed', access_key=secrets.get('mode_access_key'),
                   max_age=3600, **{param_name: param_value}, run='now', timestamp=timestamp)

    request_type = 'GET'
    content_type = ''
    # the MD5 digest of an empty content body, always the same, :shrug:
    content_digest = '1B2M2Y8AsgTpgAmY7PhCfg=='

    # signature fodder
    request_string = ','.join(
        [request_type, content_type, str(content_digest), url, timestamp])
    signature = hmac.new(bytes(secrets.get('mode_access_secret'), 'utf-8'),
                         bytes(request_string, 'utf-8'), digestmod=sha256).hexdigest()
    signed_url = '%s&signature=%s' % (url, signature)

    if do_iframe is not False:
        # Bug fix: the opening <iframe> tag was never closed with '>',
        # producing malformed HTML.
        return f"""
        <iframe src='{signed_url}' width='100%' height='100%' frameborder='0'></iframe>
        """
    else:
        # return the signed URL as a redirect
        return redirect(signed_url, code=302)
def make_url(base_url, *res, **params):
    """Join path segments onto *base_url* and append an encoded query string."""
    segments = [base_url] + ['{}'.format(part) for part in res]
    url = '/'.join(segments)
    if params:
        url = '{}?{}'.format(url, urllib.parse.urlencode(params))
    return url
@app.route('/status')
def status():
    """Liveness-probe endpoint."""
    return 'Success'
if __name__ == "__main__":
    # Development server: bind on all interfaces; port comes from config.
    app.run(host='0.0.0.0', port=listen_port)
| 31.950617
| 97
| 0.645672
|
from flask import Flask, request, redirect
from hashlib import sha256
import hmac
import base64
import time
import urllib
if __name__ == "__main__":
from config import secrets, reports, listen_port
else:
from .config import secrets, reports, listen_port
app = Flask(__name__)
@app.route('/report/<report>')
def sign_report_url(report):
provided_token = request.args.get('token') or 'missing'
if provided_token != secrets.get('access_token'):
return "Missing or incorrect token provided"
if report in reports:
this_report = reports.get(report)
mode_report_id = this_report.get('mode_report')
param_name = this_report.get('param_name')
param_value = request.args.get(
'account_id') or this_report.get('param_default_value')
do_iframe = request.args.get('iframe') or False
timestamp = str(int(time.time()))
url = make_url('https://app.mode.com', secrets.get('mode_team'), 'reports',
mode_report_id, 'embed', access_key=secrets.get('mode_access_key'),
max_age=3600, **{param_name: param_value}, run='now', timestamp=timestamp)
else:
return f"Missing report {report}"
request_type = 'GET'
content_type = ''
content_digest = '1B2M2Y8AsgTpgAmY7PhCfg=='
request_string = ','.join(
[request_type, content_type, str(content_digest), url, timestamp])
signature = hmac.new(bytes(secrets.get('mode_access_secret'), 'utf-8'),
bytes(request_string, 'utf-8'), digestmod=sha256).hexdigest()
signed_url = '%s&signature=%s' % (url, signature)
if do_iframe is not False:
return f"""
<iframe src='{signed_url}' width='100%' height='100%' frameborder='0' </iframe>
"""
else:
return redirect(signed_url, code=302)
def make_url(base_url, *res, **params):
url = base_url
for r in res:
url = '{}/{}'.format(url, r)
if params:
url = '{}?{}'.format(url, urllib.parse.urlencode(params))
return url
@app.route('/status')
def status():
    """Health-check endpoint: always returns the literal string 'Success'."""
    return 'Success'
if __name__ == "__main__":
    # Development entry point: serve on all interfaces at the configured port.
    app.run(host='0.0.0.0', port=listen_port)
| true
| true
|
f7075484bbedf4f8ffddb00ba857491fd45e88a6
| 52,253
|
py
|
Python
|
pysnmp/CISCO-DYNAMIC-TEMPLATE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/CISCO-DYNAMIC-TEMPLATE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/CISCO-DYNAMIC-TEMPLATE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CISCO-DYNAMIC-TEMPLATE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-DYNAMIC-TEMPLATE-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:39:09 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
CbpElementName, = mibBuilder.importSymbols("CISCO-CBP-TC-MIB", "CbpElementName")
DynamicTemplateName, DynamicTemplateType, DynamicTemplateTargetId, DynamicTemplateTargetType = mibBuilder.importSymbols("CISCO-DYNAMIC-TEMPLATE-TC-MIB", "DynamicTemplateName", "DynamicTemplateType", "DynamicTemplateTargetId", "DynamicTemplateTargetType")
UnicastRpfOptions, UnicastRpfType = mibBuilder.importSymbols("CISCO-IP-URPF-MIB", "UnicastRpfOptions", "UnicastRpfType")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
CiscoVrfName, = mibBuilder.importSymbols("CISCO-TC", "CiscoVrfName")
InterfaceIndexOrZero, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
InetAddressIPv4, InetAddressIPv6, InetAddressPrefixLength = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressIPv4", "InetAddressIPv6", "InetAddressPrefixLength")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
MibIdentifier, Unsigned32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, ModuleIdentity, Counter64, ObjectIdentity, TimeTicks, Bits, Counter32, Integer32, IpAddress, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Unsigned32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "ModuleIdentity", "Counter64", "ObjectIdentity", "TimeTicks", "Bits", "Counter32", "Integer32", "IpAddress", "iso")
RowStatus, TruthValue, TextualConvention, StorageType, MacAddress, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TruthValue", "TextualConvention", "StorageType", "MacAddress", "DisplayString")
# pysmi-generated module identity: enterprises.cisco.ciscoMgmt.784.
ciscoDynamicTemplateMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 784))
ciscoDynamicTemplateMIB.setRevisions(('2007-09-06 00:00',))
if mibBuilder.loadTexts: ciscoDynamicTemplateMIB.setLastUpdated('200709060000Z')
if mibBuilder.loadTexts: ciscoDynamicTemplateMIB.setOrganization('Cisco Systems, Inc.')
# Conventional top-level sub-trees: .0 notifications, .1 objects, .2 conformance.
ciscoDynamicTemplateMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 0))
ciscoDynamicTemplateMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1))
ciscoDynamicTemplateMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 2))
# Object sub-trees grouping the template tables that follow (base, per-interface,
# PPP, Ethernet, IP-subscriber, service, and subscriber-group templates).
cdtBase = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1))
cdtCommonIf = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2))
cdtPpp = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3))
cdtEthernet = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4))
cdtIpSubscriber = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 5))
cdtService = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6))
cdtSubscriberGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 7))
cdtTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1), )
if mibBuilder.loadTexts: cdtTemplateTable.setStatus('current')
cdtTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtTemplateEntry.setStatus('current')
cdtTemplateName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 1), DynamicTemplateName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateName.setStatus('current')
cdtTemplateStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateStatus.setStatus('current')
cdtTemplateStorage = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateStorage.setStatus('current')
cdtTemplateType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 4), DynamicTemplateType().clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateType.setStatus('current')
cdtTemplateSrc = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("derived", 2), ("local", 3), ("aaaUserProfile", 4), ("aaaServiceProfile", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateSrc.setStatus('current')
cdtTemplateUsageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateUsageCount.setStatus('current')
cdtTemplateTargetTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2), )
if mibBuilder.loadTexts: cdtTemplateTargetTable.setStatus('current')
cdtTemplateTargetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetType"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetId"))
if mibBuilder.loadTexts: cdtTemplateTargetEntry.setStatus('current')
cdtTemplateTargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 1), DynamicTemplateTargetType())
if mibBuilder.loadTexts: cdtTemplateTargetType.setStatus('current')
cdtTemplateTargetId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 2), DynamicTemplateTargetId())
if mibBuilder.loadTexts: cdtTemplateTargetId.setStatus('current')
cdtTemplateTargetStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateTargetStatus.setStatus('current')
cdtTemplateTargetStorage = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 4), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateTargetStorage.setStatus('current')
cdtTemplateAssociationTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3), )
if mibBuilder.loadTexts: cdtTemplateAssociationTable.setStatus('current')
cdtTemplateAssociationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetType"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetId"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateAssociationName"))
if mibBuilder.loadTexts: cdtTemplateAssociationEntry.setStatus('current')
cdtTemplateAssociationName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3, 1, 1), DynamicTemplateName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateAssociationName.setStatus('current')
cdtTemplateAssociationPrecedence = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateAssociationPrecedence.setStatus('current')
cdtTemplateUsageTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4), )
if mibBuilder.loadTexts: cdtTemplateUsageTable.setStatus('current')
cdtTemplateUsageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetType"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetId"))
if mibBuilder.loadTexts: cdtTemplateUsageEntry.setStatus('current')
cdtTemplateUsageTargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4, 1, 1), DynamicTemplateTargetType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateUsageTargetType.setStatus('current')
cdtTemplateUsageTargetId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4, 1, 2), DynamicTemplateTargetId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateUsageTargetId.setStatus('current')
cdtTemplateCommonTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5), )
if mibBuilder.loadTexts: cdtTemplateCommonTable.setStatus('current')
cdtTemplateCommonEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtTemplateCommonEntry.setStatus('current')
cdtCommonValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 1), Bits().clone(namedValues=NamedValues(("descr", 0), ("keepalive", 1), ("vrf", 2), ("addrPool", 3), ("ipv4AccessGroup", 4), ("ipv4Unreachables", 5), ("ipv6AccessGroup", 6), ("ipv6Unreachables", 7), ("srvSubControl", 8), ("srvRedirect", 9), ("srvAcct", 10), ("srvQos", 11), ("srvNetflow", 12)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonValid.setStatus('current')
cdtCommonDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonDescr.setStatus('current')
cdtCommonKeepaliveInt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(10)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonKeepaliveInt.setStatus('current')
cdtCommonKeepaliveRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(5)).setUnits('retries').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonKeepaliveRetries.setStatus('current')
cdtCommonVrf = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 5), CiscoVrfName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonVrf.setStatus('current')
cdtCommonAddrPool = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 6), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonAddrPool.setStatus('current')
cdtCommonIpv4AccessGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 7), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv4AccessGroup.setStatus('current')
cdtCommonIpv4Unreachables = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 8), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv4Unreachables.setStatus('current')
cdtCommonIpv6AccessGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv6AccessGroup.setStatus('current')
cdtCommonIpv6Unreachables = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 10), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv6Unreachables.setStatus('current')
cdtCommonSrvSubControl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 11), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvSubControl.setStatus('current')
cdtCommonSrvRedirect = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 12), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvRedirect.setStatus('current')
cdtCommonSrvAcct = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 13), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvAcct.setStatus('current')
cdtCommonSrvQos = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 14), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvQos.setStatus('current')
cdtCommonSrvNetflow = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 15), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvNetflow.setStatus('current')
cdtIfTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1), )
if mibBuilder.loadTexts: cdtIfTemplateTable.setStatus('current')
cdtIfTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtIfTemplateEntry.setStatus('current')
cdtIfValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 1), Bits().clone(namedValues=NamedValues(("mtu", 0), ("cdpEnable", 1), ("flowMonitor", 2), ("ipv4Unnumbered", 3), ("ipv4SubEnable", 4), ("ipv4Mtu", 5), ("ipv4TcpMssAdjust", 6), ("ipv4VerifyUniRpf", 7), ("ipv4VerifyUniRpfAcl", 8), ("ipv4VerifyUniRpfOpts", 9), ("ipv6Enable", 10), ("ipv6SubEnable", 11), ("ipv6TcpMssAdjust", 12), ("ipv6VerifyUniRpf", 13), ("ipv6VerifyUniRpfAcl", 14), ("ipv6VerifyUniRpfOpts", 15), ("ipv6NdPrefix", 16), ("ipv6NdValidLife", 17), ("ipv6NdPreferredLife", 18), ("ipv6NdOpts", 19), ("ipv6NdDadAttempts", 20), ("ipv6NdNsInterval", 21), ("ipv6NdReachableTime", 22), ("ipv6NdRaIntervalMax", 23), ("ipv6NdRaIntervalMin", 24), ("ipv6NdRaLife", 25), ("ipv6NdRaRouterPreference", 26)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfValid.setStatus('current')
cdtIfMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(64, 65535), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfMtu.setStatus('current')
cdtIfCdpEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfCdpEnable.setStatus('current')
cdtIfFlowMonitor = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfFlowMonitor.setStatus('current')
cdtIfIpv4Unnumbered = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 5), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4Unnumbered.setStatus('current')
cdtIfIpv4SubEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4SubEnable.setStatus('current')
cdtIfIpv4Mtu = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(128, 65535), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4Mtu.setStatus('current')
cdtIfIpv4TcpMssAdjust = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 8), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(500, 1460), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4TcpMssAdjust.setStatus('current')
cdtIfIpv4VerifyUniRpf = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 9), UnicastRpfType().clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4VerifyUniRpf.setStatus('current')
cdtIfIpv4VerifyUniRpfAcl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 10), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4VerifyUniRpfAcl.setStatus('current')
cdtIfIpv4VerifyUniRpfOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 11), UnicastRpfOptions()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4VerifyUniRpfOpts.setStatus('current')
cdtIfIpv6Enable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 12), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6Enable.setStatus('current')
cdtIfIpv6SubEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 13), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6SubEnable.setStatus('current')
cdtIfIpv6TcpMssAdjust = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 14), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(500, 1460), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6TcpMssAdjust.setStatus('current')
cdtIfIpv6VerifyUniRpf = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 15), UnicastRpfType().clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6VerifyUniRpf.setStatus('current')
cdtIfIpv6VerifyUniRpfAcl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 16), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6VerifyUniRpfAcl.setStatus('current')
cdtIfIpv6VerifyUniRpfOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 17), UnicastRpfOptions()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6VerifyUniRpfOpts.setStatus('current')
cdtIfIpv6NdPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 18), InetAddressIPv6().clone(hexValue="00000000000000000000000000000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdPrefix.setStatus('current')
cdtIfIpv6NdPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 19), InetAddressPrefixLength()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdPrefixLength.setStatus('current')
cdtIfIpv6NdValidLife = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 20), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(2592000)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdValidLife.setStatus('current')
cdtIfIpv6NdPreferredLife = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 21), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(604800)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdPreferredLife.setStatus('current')
cdtIfIpv6NdOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 22), Bits().clone(namedValues=NamedValues(("advertise", 0), ("onlink", 1), ("router", 2), ("autoConfig", 3), ("advertisementInterval", 4), ("managedConfigFlag", 5), ("otherConfigFlag", 6), ("framedIpv6Prefix", 7), ("raSuppress", 8))).clone(namedValues=NamedValues(("advertise", 0), ("onlink", 1), ("router", 2), ("autoConfig", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdOpts.setStatus('current')
cdtIfIpv6NdDadAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 23), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 600)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdDadAttempts.setStatus('current')
cdtIfIpv6NdNsInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 24), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1000, 3600000)).clone(1000)).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdNsInterval.setStatus('current')
cdtIfIpv6NdReachableTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 25), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdReachableTime.setStatus('current')
cdtIfIpv6NdRaIntervalUnits = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("seconds", 1), ("milliseconds", 2))).clone('seconds')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaIntervalUnits.setStatus('current')
cdtIfIpv6NdRaIntervalMax = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 27), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaIntervalMax.setStatus('current')
cdtIfIpv6NdRaIntervalMin = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 28), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaIntervalMin.setStatus('current')
cdtIfIpv6NdRaLife = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 29), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295)).clone(1800)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaLife.setStatus('current')
cdtIfIpv6NdRouterPreference = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("high", 1), ("medium", 2), ("low", 3))).clone('medium')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRouterPreference.setStatus('current')
cdtPppTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1), )
if mibBuilder.loadTexts: cdtPppTemplateTable.setStatus('current')
cdtPppTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtPppTemplateEntry.setStatus('current')
cdtPppValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 1), Bits().clone(namedValues=NamedValues(("valid", 0), ("accounting", 1), ("authentication", 2), ("autthenticationMethods", 3), ("authorization", 4), ("loopbackIgnore", 5), ("maxBadAuth", 6), ("maxConfigure", 7), ("maxFailure", 8), ("maxTerminate", 9), ("timeoutAuthentication", 10), ("timeoutRetry", 11), ("chapOpts", 12), ("chapHostname", 13), ("chapPassword", 14), ("msChapV1Opts", 15), ("msChapV1Hostname", 16), ("msChapV1Password", 17), ("msChapV2Opts", 18), ("msChapV2Hostname", 19), ("msChapV2Password", 20), ("papOpts", 21), ("papUsername", 22), ("papPassword", 23), ("eapOpts", 24), ("eapIdentity", 25), ("eapPassword", 26), ("ipcpAddrOption", 27), ("ipcpDnsOption", 28), ("ipcpDnsPrimary", 29), ("ipcpDnsSecondary", 30), ("ipcpWinsOption", 31), ("ipcpWinsPrimary", 32), ("ipcpWinsSecondary", 33), ("ipcpMaskOption", 34), ("ipcpMask", 35), ("peerDefIpAddrOpts", 36), ("peerDefIpAddrSrc", 37), ("peerDefIpAddr", 38)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppValid.setStatus('current')
cdtPppAccounting = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 2), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAccounting.setStatus('current')
cdtPppAuthentication = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 3), Bits().clone(namedValues=NamedValues(("chap", 0), ("msChap", 1), ("msChapV2", 2), ("pap", 3), ("eap", 4), ("optional", 5), ("callin", 6), ("oneTime", 7)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAuthentication.setStatus('current')
cdtPppAuthenticationMethods = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAuthenticationMethods.setStatus('current')
cdtPppAuthorization = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 5), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAuthorization.setStatus('current')
cdtPppLoopbackIgnore = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppLoopbackIgnore.setStatus('current')
cdtPppMaxBadAuth = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxBadAuth.setStatus('current')
cdtPppMaxConfigure = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(10)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxConfigure.setStatus('current')
cdtPppMaxFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(5)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxFailure.setStatus('current')
cdtPppMaxTerminate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxTerminate.setStatus('current')
cdtPppTimeoutAuthentication = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(10)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppTimeoutAuthentication.setStatus('current')
cdtPppTimeoutRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(3)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppTimeoutRetry.setStatus('current')
cdtPppChapOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 13), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("encrypted", 3))).clone(namedValues=NamedValues(("wait", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppChapOpts.setStatus('current')
cdtPppChapHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 14), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppChapHostname.setStatus('current')
cdtPppChapPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 15), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppChapPassword.setStatus('current')
cdtPppMsChapV1Opts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 16), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("encrypted", 3))).clone(namedValues=NamedValues(("wait", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV1Opts.setStatus('current')
cdtPppMsChapV1Hostname = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 17), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV1Hostname.setStatus('current')
cdtPppMsChapV1Password = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 18), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV1Password.setStatus('current')
cdtPppMsChapV2Opts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 19), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("encrypted", 3))).clone(namedValues=NamedValues(("wait", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV2Opts.setStatus('current')
cdtPppMsChapV2Hostname = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 20), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV2Hostname.setStatus('current')
cdtPppMsChapV2Password = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 21), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV2Password.setStatus('current')
cdtPppPapOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 22), Bits().clone(namedValues=NamedValues(("refuse", 0), ("encrypted", 1)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPapOpts.setStatus('current')
cdtPppPapUsername = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 23), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPapUsername.setStatus('current')
cdtPppPapPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 24), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPapPassword.setStatus('current')
cdtPppEapOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 25), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("local", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppEapOpts.setStatus('current')
cdtPppEapIdentity = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 26), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppEapIdentity.setStatus('current')
cdtPppEapPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 27), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppEapPassword.setStatus('current')
cdtPppIpcpAddrOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("accept", 2), ("required", 3), ("unique", 4))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpAddrOption.setStatus('current')
cdtPppIpcpDnsOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("accept", 2), ("request", 3), ("reject", 4))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpDnsOption.setStatus('current')
cdtPppIpcpDnsPrimary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 30), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpDnsPrimary.setStatus('current')
cdtPppIpcpDnsSecondary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 31), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpDnsSecondary.setStatus('current')
cdtPppIpcpWinsOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("accept", 2), ("request", 3), ("reject", 4))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpWinsOption.setStatus('current')
cdtPppIpcpWinsPrimary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 33), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpWinsPrimary.setStatus('current')
cdtPppIpcpWinsSecondary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 34), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpWinsSecondary.setStatus('current')
cdtPppIpcpMaskOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 35), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("request", 2), ("reject", 3))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpMaskOption.setStatus('current')
cdtPppIpcpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 36), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpMask.setStatus('current')
cdtPppPeerDefIpAddrOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 37), Bits().clone(namedValues=NamedValues(("ipAddrForced", 0), ("matchAaaPools", 1), ("staticPool", 2))).clone(namedValues=NamedValues(("ipAddrForced", 0)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerDefIpAddrOpts.setStatus('current')
cdtPppPeerDefIpAddrSrc = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 38), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("static", 1), ("pool", 2), ("dhcp", 3))).clone('pool')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerDefIpAddrSrc.setStatus('current')
cdtPppPeerDefIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 39), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerDefIpAddr.setStatus('current')
cdtPppPeerIpAddrPoolTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2), )
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolTable.setStatus('current')
cdtPppPeerIpAddrPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolPriority"))
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolEntry.setStatus('current')
cdtPppPeerIpAddrPoolPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolPriority.setStatus('current')
cdtPppPeerIpAddrPoolStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolStatus.setStatus('current')
cdtPppPeerIpAddrPoolStorage = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolStorage.setStatus('current')
cdtPppPeerIpAddrPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolName.setStatus('current')
# --- Ethernet template table (...784.1.4.1), indexed by cdtTemplateName ---
# cdtEthernetValid is a BITS mask saying which of the sibling columns carry valid data.
cdtEthernetTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1), )
if mibBuilder.loadTexts: cdtEthernetTemplateTable.setStatus('current')
cdtEthernetTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtEthernetTemplateEntry.setStatus('current')
cdtEthernetValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 1), Bits().clone(namedValues=NamedValues(("bridgeDomain", 0), ("pppoeEnable", 1), ("ipv4PointToPoint", 2), ("macAddr", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetValid.setStatus('current')
cdtEthernetBridgeDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetBridgeDomain.setStatus('current')
# Boolean flags default to 'false'; MAC address defaults to 00:00:00:00:00:00.
cdtEthernetPppoeEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetPppoeEnable.setStatus('current')
cdtEthernetIpv4PointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 4), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetIpv4PointToPoint.setStatus('current')
cdtEthernetMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 5), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetMacAddr.setStatus('current')
# --- Service template table (...784.1.6.1), indexed by cdtTemplateName ---
# Note the OID arc jumps from ...1.4 (ethernet) to ...1.6 (service); arc ...1.5
# (cdtIpSubscriber) has no columns in this rendition.
cdtSrvTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1), )
if mibBuilder.loadTexts: cdtSrvTemplateTable.setStatus('current')
cdtSrvTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtSrvTemplateEntry.setStatus('current')
# BITS mask of which service columns are valid for the row.
cdtSrvValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 1), Bits().clone(namedValues=NamedValues(("networkSrv", 0), ("vpdnGroup", 1), ("sgSrvGroup", 2), ("sgSrvType", 3), ("multicast", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvValid.setStatus('current')
# Network service type: other(1)/none(2)/local(3)/vpdn(4), default 'none'.
cdtSrvNetworkSrv = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("none", 2), ("local", 3), ("vpdn", 4))).clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvNetworkSrv.setStatus('current')
cdtSrvVpdnGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvVpdnGroup.setStatus('current')
cdtSrvSgSrvGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvSgSrvGroup.setStatus('current')
# Service-group role: primary(1)/secondary(2), default 'secondary'.
cdtSrvSgSrvType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("primary", 1), ("secondary", 2))).clone('secondary')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvSgSrvType.setStatus('current')
cdtSrvMulticast = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvMulticast.setStatus('current')
# --- Conformance section (...784.2): compliance statement and object groups ---
# The `getattr(mibBuilder, 'version', ...) > (4, 4, 0)` guards keep this file importable
# under older pysnmp releases where ModuleCompliance/ObjectGroup lacked setStatus().
ciscoDynamicTemplateMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 1))
ciscoDynamicTemplateMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2))
ciscoDynamicTemplateR1Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 1, 1)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtBaseGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoDynamicTemplateR1Compliance = ciscoDynamicTemplateR1Compliance.setStatus('current')
# Object group covering the base template / target / association / usage tables.
cdtBaseGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 1)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateStatus"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateStorage"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateType"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateSrc"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageCount"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetStatus"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetStorage"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateAssociationPrecedence"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetType"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtBaseGroup = cdtBaseGroup.setStatus('current')
# Object group for the common-attributes table (cdtTemplateCommonTable columns).
cdtCommonGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 2)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonDescr"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonKeepaliveInt"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonKeepaliveRetries"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonVrf"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonAddrPool"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv4AccessGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv4Unreachables"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv6AccessGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv6Unreachables"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvSubControl"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvRedirect"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvAcct"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvQos"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvNetflow"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtCommonGroup = cdtCommonGroup.setStatus('current')
# Object group for the interface-template columns (cdtIfTemplateTable).
cdtIfGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 3)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfMtu"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfCdpEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfFlowMonitor"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4Unnumbered"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4SubEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4Mtu"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4TcpMssAdjust"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4VerifyUniRpf"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4VerifyUniRpfAcl"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4VerifyUniRpfOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6Enable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6SubEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6TcpMssAdjust"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6VerifyUniRpf"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6VerifyUniRpfAcl"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6VerifyUniRpfOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdPrefix"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdPrefixLength"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdValidLife"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdPreferredLife"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdDadAttempts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdNsInterval"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdReachableTime"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaIntervalUnits"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaIntervalMax"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaIntervalMin"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaLife"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRouterPreference"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtIfGroup = cdtIfGroup.setStatus('current')
# Object group for the PPP template columns (one setObjects call split over two lines).
cdtPppGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 4)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAccounting"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAuthentication"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAuthenticationMethods"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAuthorization"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppLoopbackIgnore"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxBadAuth"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxConfigure"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxFailure"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxTerminate"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppTimeoutAuthentication"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppTimeoutRetry"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppChapOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppChapHostname"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppChapPassword"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV1Opts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV1Hostname"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV1Password"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV2Opts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV2Hostname"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV2Password"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPapOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPapUsername"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPapPassword"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppEapOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppEapIdentity"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppEapPassword"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpAddrOption"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpDnsOption"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpDnsPrimary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpDnsSecondary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpWinsOption"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpWinsPrimary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpWinsSecondary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpMaskOption"), 
("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpMask"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerDefIpAddrOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerDefIpAddrSrc"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerDefIpAddr"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolStatus"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolStorage"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtPppGroup = cdtPppGroup.setStatus('current')
# Object groups for the Ethernet and service template columns.
cdtEthernetGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 5)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetBridgeDomain"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetPppoeEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetIpv4PointToPoint"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetMacAddr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtEthernetGroup = cdtEthernetGroup.setStatus('current')
cdtSrvGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 6)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvNetworkSrv"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvVpdnGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvSgSrvGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvSgSrvType"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvMulticast"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtSrvGroup = cdtSrvGroup.setStatus('current')
# Export every defined symbol under the module name so other compiled MIBs can
# importSymbols() them; PYSNMP_MODULE_ID identifies the module's ModuleIdentity node.
# Single call split across four physical lines by the generator.
mibBuilder.exportSymbols("CISCO-DYNAMIC-TEMPLATE-MIB", cdtTemplateUsageEntry=cdtTemplateUsageEntry, cdtPppMaxTerminate=cdtPppMaxTerminate, cdtTemplateTargetId=cdtTemplateTargetId, cdtPppMsChapV1Hostname=cdtPppMsChapV1Hostname, cdtService=cdtService, cdtPppGroup=cdtPppGroup, cdtIfIpv6NdRouterPreference=cdtIfIpv6NdRouterPreference, cdtSrvSgSrvGroup=cdtSrvSgSrvGroup, cdtTemplateTargetEntry=cdtTemplateTargetEntry, cdtTemplateUsageTargetId=cdtTemplateUsageTargetId, ciscoDynamicTemplateMIBObjects=ciscoDynamicTemplateMIBObjects, cdtPppMaxBadAuth=cdtPppMaxBadAuth, cdtPppIpcpDnsPrimary=cdtPppIpcpDnsPrimary, cdtIfIpv6NdValidLife=cdtIfIpv6NdValidLife, cdtPppPeerIpAddrPoolStorage=cdtPppPeerIpAddrPoolStorage, cdtBaseGroup=cdtBaseGroup, cdtSrvTemplateTable=cdtSrvTemplateTable, cdtTemplateStatus=cdtTemplateStatus, cdtIfIpv4VerifyUniRpfAcl=cdtIfIpv4VerifyUniRpfAcl, cdtPppEapOpts=cdtPppEapOpts, cdtEthernetTemplateEntry=cdtEthernetTemplateEntry, cdtEthernetMacAddr=cdtEthernetMacAddr, cdtCommonKeepaliveInt=cdtCommonKeepaliveInt, cdtPppChapHostname=cdtPppChapHostname, cdtPppMsChapV2Password=cdtPppMsChapV2Password, cdtPppEapPassword=cdtPppEapPassword, cdtCommonIpv4Unreachables=cdtCommonIpv4Unreachables, cdtCommonIf=cdtCommonIf, cdtTemplateStorage=cdtTemplateStorage, cdtCommonKeepaliveRetries=cdtCommonKeepaliveRetries, cdtIfIpv6NdRaLife=cdtIfIpv6NdRaLife, cdtCommonSrvRedirect=cdtCommonSrvRedirect, cdtEthernetTemplateTable=cdtEthernetTemplateTable, cdtTemplateSrc=cdtTemplateSrc, cdtPppIpcpDnsSecondary=cdtPppIpcpDnsSecondary, cdtPppPeerDefIpAddr=cdtPppPeerDefIpAddr, cdtIfIpv6VerifyUniRpfAcl=cdtIfIpv6VerifyUniRpfAcl, cdtIfIpv6NdOpts=cdtIfIpv6NdOpts, cdtSrvMulticast=cdtSrvMulticast, cdtPppPeerIpAddrPoolTable=cdtPppPeerIpAddrPoolTable, ciscoDynamicTemplateR1Compliance=ciscoDynamicTemplateR1Compliance, cdtPppIpcpDnsOption=cdtPppIpcpDnsOption, cdtPppLoopbackIgnore=cdtPppLoopbackIgnore, cdtPppTimeoutAuthentication=cdtPppTimeoutAuthentication, cdtTemplateCommonTable=cdtTemplateCommonTable, 
cdtIfIpv6Enable=cdtIfIpv6Enable, PYSNMP_MODULE_ID=ciscoDynamicTemplateMIB, cdtTemplateEntry=cdtTemplateEntry, cdtPppMsChapV2Opts=cdtPppMsChapV2Opts, cdtIfIpv6TcpMssAdjust=cdtIfIpv6TcpMssAdjust, cdtEthernetIpv4PointToPoint=cdtEthernetIpv4PointToPoint, cdtCommonSrvNetflow=cdtCommonSrvNetflow, cdtTemplateAssociationPrecedence=cdtTemplateAssociationPrecedence, cdtIfIpv6NdDadAttempts=cdtIfIpv6NdDadAttempts, cdtIfTemplateEntry=cdtIfTemplateEntry, cdtIfIpv6VerifyUniRpf=cdtIfIpv6VerifyUniRpf, cdtIpSubscriber=cdtIpSubscriber, ciscoDynamicTemplateMIBGroups=ciscoDynamicTemplateMIBGroups, cdtPppPeerIpAddrPoolName=cdtPppPeerIpAddrPoolName, cdtIfFlowMonitor=cdtIfFlowMonitor, cdtTemplateUsageCount=cdtTemplateUsageCount, cdtPppEapIdentity=cdtPppEapIdentity, ciscoDynamicTemplateMIBNotifs=ciscoDynamicTemplateMIBNotifs, cdtCommonIpv4AccessGroup=cdtCommonIpv4AccessGroup, cdtSrvTemplateEntry=cdtSrvTemplateEntry, cdtPppTimeoutRetry=cdtPppTimeoutRetry, cdtCommonSrvAcct=cdtCommonSrvAcct, cdtIfIpv6NdPrefixLength=cdtIfIpv6NdPrefixLength, cdtPppTemplateTable=cdtPppTemplateTable, cdtPppAuthorization=cdtPppAuthorization, cdtPppIpcpAddrOption=cdtPppIpcpAddrOption, cdtPppMaxFailure=cdtPppMaxFailure, cdtPppValid=cdtPppValid, cdtTemplateTargetStorage=cdtTemplateTargetStorage, ciscoDynamicTemplateMIBConform=ciscoDynamicTemplateMIBConform, cdtTemplateAssociationTable=cdtTemplateAssociationTable, cdtIfIpv6NdReachableTime=cdtIfIpv6NdReachableTime, cdtIfGroup=cdtIfGroup, cdtSrvValid=cdtSrvValid, cdtPpp=cdtPpp, cdtPppTemplateEntry=cdtPppTemplateEntry, cdtSrvGroup=cdtSrvGroup, cdtIfIpv4Mtu=cdtIfIpv4Mtu, cdtCommonDescr=cdtCommonDescr, cdtTemplateUsageTable=cdtTemplateUsageTable, cdtIfIpv4TcpMssAdjust=cdtIfIpv4TcpMssAdjust, cdtIfIpv6VerifyUniRpfOpts=cdtIfIpv6VerifyUniRpfOpts, cdtSrvNetworkSrv=cdtSrvNetworkSrv, cdtPppAuthenticationMethods=cdtPppAuthenticationMethods, cdtPppChapOpts=cdtPppChapOpts, cdtCommonValid=cdtCommonValid, cdtCommonSrvQos=cdtCommonSrvQos, 
cdtIfIpv6NdRaIntervalMin=cdtIfIpv6NdRaIntervalMin, cdtEthernetGroup=cdtEthernetGroup, cdtTemplateTargetType=cdtTemplateTargetType, cdtTemplateName=cdtTemplateName, cdtCommonIpv6AccessGroup=cdtCommonIpv6AccessGroup, cdtPppMaxConfigure=cdtPppMaxConfigure, cdtIfIpv4VerifyUniRpf=cdtIfIpv4VerifyUniRpf, cdtPppIpcpMask=cdtPppIpcpMask, cdtIfIpv6SubEnable=cdtIfIpv6SubEnable, cdtIfIpv6NdPrefix=cdtIfIpv6NdPrefix, cdtIfValid=cdtIfValid, ciscoDynamicTemplateMIB=ciscoDynamicTemplateMIB, cdtEthernetBridgeDomain=cdtEthernetBridgeDomain, cdtPppIpcpWinsSecondary=cdtPppIpcpWinsSecondary, cdtCommonAddrPool=cdtCommonAddrPool, cdtPppMsChapV2Hostname=cdtPppMsChapV2Hostname, cdtIfIpv4SubEnable=cdtIfIpv4SubEnable, cdtPppMsChapV1Password=cdtPppMsChapV1Password, cdtTemplateAssociationName=cdtTemplateAssociationName, ciscoDynamicTemplateMIBCompliances=ciscoDynamicTemplateMIBCompliances, cdtTemplateCommonEntry=cdtTemplateCommonEntry, cdtPppPapOpts=cdtPppPapOpts, cdtPppMsChapV1Opts=cdtPppMsChapV1Opts, cdtTemplateType=cdtTemplateType, cdtIfTemplateTable=cdtIfTemplateTable, cdtPppIpcpMaskOption=cdtPppIpcpMaskOption, cdtSrvSgSrvType=cdtSrvSgSrvType, cdtPppPapUsername=cdtPppPapUsername, cdtBase=cdtBase, cdtIfIpv6NdRaIntervalUnits=cdtIfIpv6NdRaIntervalUnits, cdtTemplateTargetTable=cdtTemplateTargetTable, cdtTemplateTargetStatus=cdtTemplateTargetStatus, cdtPppPapPassword=cdtPppPapPassword, cdtPppAccounting=cdtPppAccounting, cdtIfIpv4Unnumbered=cdtIfIpv4Unnumbered, cdtCommonIpv6Unreachables=cdtCommonIpv6Unreachables, cdtPppChapPassword=cdtPppChapPassword, cdtSrvVpdnGroup=cdtSrvVpdnGroup, cdtSubscriberGroup=cdtSubscriberGroup, cdtTemplateAssociationEntry=cdtTemplateAssociationEntry, cdtPppPeerDefIpAddrOpts=cdtPppPeerDefIpAddrOpts, cdtEthernetValid=cdtEthernetValid, cdtIfCdpEnable=cdtIfCdpEnable, cdtIfIpv6NdRaIntervalMax=cdtIfIpv6NdRaIntervalMax, cdtPppIpcpWinsOption=cdtPppIpcpWinsOption, cdtPppPeerIpAddrPoolEntry=cdtPppPeerIpAddrPoolEntry, cdtEthernet=cdtEthernet, 
cdtPppPeerIpAddrPoolStatus=cdtPppPeerIpAddrPoolStatus, cdtCommonSrvSubControl=cdtCommonSrvSubControl, cdtIfMtu=cdtIfMtu, cdtPppPeerIpAddrPoolPriority=cdtPppPeerIpAddrPoolPriority, cdtPppAuthentication=cdtPppAuthentication, cdtCommonGroup=cdtCommonGroup, cdtTemplateUsageTargetType=cdtTemplateUsageTargetType, cdtEthernetPppoeEnable=cdtEthernetPppoeEnable, cdtIfIpv6NdPreferredLife=cdtIfIpv6NdPreferredLife, cdtPppPeerDefIpAddrSrc=cdtPppPeerDefIpAddrSrc, cdtCommonVrf=cdtCommonVrf, cdtTemplateTable=cdtTemplateTable, cdtIfIpv6NdNsInterval=cdtIfIpv6NdNsInterval, cdtIfIpv4VerifyUniRpfOpts=cdtIfIpv4VerifyUniRpfOpts, cdtPppIpcpWinsPrimary=cdtPppIpcpWinsPrimary)
# NOTE(review): the four rows below were dataset-join metadata (average line length,
# maximum line length, alphanumeric fraction) accidentally spliced into the source;
# they are not Python and broke parsing. Preserved here as comments:
#   | 159.795107 | 6565 | 0.756952 |
# A second rendition of the same CISCO-DYNAMIC-TEMPLATE-MIB module follows below.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint")
CbpElementName, = mibBuilder.importSymbols("CISCO-CBP-TC-MIB", "CbpElementName")
DynamicTemplateName, DynamicTemplateType, DynamicTemplateTargetId, DynamicTemplateTargetType = mibBuilder.importSymbols("CISCO-DYNAMIC-TEMPLATE-TC-MIB", "DynamicTemplateName", "DynamicTemplateType", "DynamicTemplateTargetId", "DynamicTemplateTargetType")
UnicastRpfOptions, UnicastRpfType = mibBuilder.importSymbols("CISCO-IP-URPF-MIB", "UnicastRpfOptions", "UnicastRpfType")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
CiscoVrfName, = mibBuilder.importSymbols("CISCO-TC", "CiscoVrfName")
InterfaceIndexOrZero, = mibBuilder.importSymbols("IF-MIB", "InterfaceIndexOrZero")
InetAddressIPv4, InetAddressIPv6, InetAddressPrefixLength = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressIPv4", "InetAddressIPv6", "InetAddressPrefixLength")
SnmpAdminString, = mibBuilder.importSymbols("SNMP-FRAMEWORK-MIB", "SnmpAdminString")
ModuleCompliance, NotificationGroup, ObjectGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup", "ObjectGroup")
MibIdentifier, Unsigned32, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, NotificationType, ModuleIdentity, Counter64, ObjectIdentity, TimeTicks, Bits, Counter32, Integer32, IpAddress, iso = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Unsigned32", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "NotificationType", "ModuleIdentity", "Counter64", "ObjectIdentity", "TimeTicks", "Bits", "Counter32", "Integer32", "IpAddress", "iso")
RowStatus, TruthValue, TextualConvention, StorageType, MacAddress, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TruthValue", "TextualConvention", "StorageType", "MacAddress", "DisplayString")
# --- Module identity and top-level subtree identifiers (second rendition of the module) ---
# Root OID 1.3.6.1.4.1.9.9.784 = ciscoMgmt.784; last updated 2007-09-06 per the MIB.
ciscoDynamicTemplateMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 784))
ciscoDynamicTemplateMIB.setRevisions(('2007-09-06 00:00',))
if mibBuilder.loadTexts: ciscoDynamicTemplateMIB.setLastUpdated('200709060000Z')
if mibBuilder.loadTexts: ciscoDynamicTemplateMIB.setOrganization('Cisco Systems, Inc.')
# Standard three-way split: .0 notifications, .1 objects, .2 conformance.
ciscoDynamicTemplateMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 0))
ciscoDynamicTemplateMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1))
ciscoDynamicTemplateMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 2))
# Object sub-trees, one per template flavor.
cdtBase = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1))
cdtCommonIf = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2))
cdtPpp = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3))
cdtEthernet = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4))
cdtIpSubscriber = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 5))
cdtService = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6))
cdtSubscriberGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 7))
# --- Base template table (...784.1.1.1): one row per dynamic template, keyed by name ---
cdtTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1), )
if mibBuilder.loadTexts: cdtTemplateTable.setStatus('current')
cdtTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtTemplateEntry.setStatus('current')
# Index column; read-only in this rendition.
cdtTemplateName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 1), DynamicTemplateName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateName.setStatus('current')
# Row management (RowStatus) and persistence (StorageType, default 'volatile').
cdtTemplateStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateStatus.setStatus('current')
cdtTemplateStorage = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateStorage.setStatus('current')
cdtTemplateType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 4), DynamicTemplateType().clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateType.setStatus('current')
# Where the template came from: other/derived/local/aaaUserProfile/aaaServiceProfile.
cdtTemplateSrc = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("other", 1), ("derived", 2), ("local", 3), ("aaaUserProfile", 4), ("aaaServiceProfile", 5)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateSrc.setStatus('current')
cdtTemplateUsageCount = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 1, 1, 6), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateUsageCount.setStatus('current')
# --- Template target table (...784.1.1.2), indexed by (targetType, targetId) ---
cdtTemplateTargetTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2), )
if mibBuilder.loadTexts: cdtTemplateTargetTable.setStatus('current')
cdtTemplateTargetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetType"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetId"))
if mibBuilder.loadTexts: cdtTemplateTargetEntry.setStatus('current')
# Index columns (no max-access set by the generator).
cdtTemplateTargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 1), DynamicTemplateTargetType())
if mibBuilder.loadTexts: cdtTemplateTargetType.setStatus('current')
cdtTemplateTargetId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 2), DynamicTemplateTargetId())
if mibBuilder.loadTexts: cdtTemplateTargetId.setStatus('current')
cdtTemplateTargetStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateTargetStatus.setStatus('current')
cdtTemplateTargetStorage = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 2, 1, 4), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtTemplateTargetStorage.setStatus('current')
# --- Target-to-template association table (...784.1.1.3) ---
# Indexed by (targetType, targetId, associationName); read-only rows.
cdtTemplateAssociationTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3), )
if mibBuilder.loadTexts: cdtTemplateAssociationTable.setStatus('current')
cdtTemplateAssociationEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetType"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetId"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateAssociationName"))
if mibBuilder.loadTexts: cdtTemplateAssociationEntry.setStatus('current')
cdtTemplateAssociationName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3, 1, 1), DynamicTemplateName()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateAssociationName.setStatus('current')
cdtTemplateAssociationPrecedence = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 3, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateAssociationPrecedence.setStatus('current')
# --- Template usage table (...784.1.1.4) ---
# Indexed by (templateName, usageTargetType, usageTargetId); read-only columns.
cdtTemplateUsageTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4), )
if mibBuilder.loadTexts: cdtTemplateUsageTable.setStatus('current')
cdtTemplateUsageEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetType"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetId"))
if mibBuilder.loadTexts: cdtTemplateUsageEntry.setStatus('current')
cdtTemplateUsageTargetType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4, 1, 1), DynamicTemplateTargetType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateUsageTargetType.setStatus('current')
cdtTemplateUsageTargetId = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 4, 1, 2), DynamicTemplateTargetId()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cdtTemplateUsageTargetId.setStatus('current')
# --- Common attributes table (...784.1.1.5), indexed by cdtTemplateName ---
# cdtCommonValid is a 13-bit BITS mask naming which of the following columns apply.
cdtTemplateCommonTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5), )
if mibBuilder.loadTexts: cdtTemplateCommonTable.setStatus('current')
cdtTemplateCommonEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtTemplateCommonEntry.setStatus('current')
cdtCommonValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 1), Bits().clone(namedValues=NamedValues(("descr", 0), ("keepalive", 1), ("vrf", 2), ("addrPool", 3), ("ipv4AccessGroup", 4), ("ipv4Unreachables", 5), ("ipv6AccessGroup", 6), ("ipv6Unreachables", 7), ("srvSubControl", 8), ("srvRedirect", 9), ("srvAcct", 10), ("srvQos", 11), ("srvNetflow", 12)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonValid.setStatus('current')
cdtCommonDescr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonDescr.setStatus('current')
# Keepalive: interval in seconds (default 10) and retry count (default 5).
cdtCommonKeepaliveInt = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 3), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(10)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonKeepaliveInt.setStatus('current')
cdtCommonKeepaliveRetries = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 4), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(5)).setUnits('retries').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonKeepaliveRetries.setStatus('current')
cdtCommonVrf = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 5), CiscoVrfName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonVrf.setStatus('current')
cdtCommonAddrPool = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 6), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonAddrPool.setStatus('current')
# IPv4/IPv6 access-group names and ICMP-unreachables flags (unreachables default 'true').
cdtCommonIpv4AccessGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 7), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv4AccessGroup.setStatus('current')
cdtCommonIpv4Unreachables = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 8), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv4Unreachables.setStatus('current')
cdtCommonIpv6AccessGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 9), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv6AccessGroup.setStatus('current')
cdtCommonIpv6Unreachables = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 10), TruthValue().clone('true')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonIpv6Unreachables.setStatus('current')
# Service policy-element names (CbpElementName from CISCO-CBP-TC-MIB).
cdtCommonSrvSubControl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 11), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvSubControl.setStatus('current')
cdtCommonSrvRedirect = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 12), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvRedirect.setStatus('current')
cdtCommonSrvAcct = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 13), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvAcct.setStatus('current')
cdtCommonSrvQos = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 14), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvQos.setStatus('current')
cdtCommonSrvNetflow = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 1, 5, 1, 15), CbpElementName()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtCommonSrvNetflow.setStatus('current')
# --- cdtIfTemplateTable (OID ...784.1.2.1) ---
# Interface-type dynamic template attributes; one row per template,
# indexed by cdtTemplateName (defined earlier in this module).
cdtIfTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1), )
if mibBuilder.loadTexts: cdtIfTemplateTable.setStatus('current')
cdtIfTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtIfTemplateEntry.setStatus('current')
# Bitmask indicating which of the following columns hold valid
# (explicitly configured) values for the row.
cdtIfValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 1), Bits().clone(namedValues=NamedValues(("mtu", 0), ("cdpEnable", 1), ("flowMonitor", 2), ("ipv4Unnumbered", 3), ("ipv4SubEnable", 4), ("ipv4Mtu", 5), ("ipv4TcpMssAdjust", 6), ("ipv4VerifyUniRpf", 7), ("ipv4VerifyUniRpfAcl", 8), ("ipv4VerifyUniRpfOpts", 9), ("ipv6Enable", 10), ("ipv6SubEnable", 11), ("ipv6TcpMssAdjust", 12), ("ipv6VerifyUniRpf", 13), ("ipv6VerifyUniRpfAcl", 14), ("ipv6VerifyUniRpfOpts", 15), ("ipv6NdPrefix", 16), ("ipv6NdValidLife", 17), ("ipv6NdPreferredLife", 18), ("ipv6NdOpts", 19), ("ipv6NdDadAttempts", 20), ("ipv6NdNsInterval", 21), ("ipv6NdReachableTime", 22), ("ipv6NdRaIntervalMax", 23), ("ipv6NdRaIntervalMin", 24), ("ipv6NdRaLife", 25), ("ipv6NdRaRouterPreference", 26)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfValid.setStatus('current')
# Interface MTU in octets: 0 = unspecified, otherwise 64..65535.
cdtIfMtu = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 2), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(64, 65535), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfMtu.setStatus('current')
# CDP on/off; default false.
cdtIfCdpEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfCdpEnable.setStatus('current')
# Name of a flow monitor attached to the interface.
cdtIfFlowMonitor = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfFlowMonitor.setStatus('current')
# ifIndex of the interface whose IPv4 address is borrowed (0 = none).
cdtIfIpv4Unnumbered = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 5), InterfaceIndexOrZero()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4Unnumbered.setStatus('current')
cdtIfIpv4SubEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4SubEnable.setStatus('current')
# IPv4 MTU: 0 = unspecified, otherwise 128..65535 octets.
cdtIfIpv4Mtu = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(128, 65535), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4Mtu.setStatus('current')
# TCP MSS adjust: 0 = unspecified, otherwise 500..1460 octets.
cdtIfIpv4TcpMssAdjust = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 8), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(500, 1460), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4TcpMssAdjust.setStatus('current')
# Unicast RPF verification mode/ACL/options for IPv4.
cdtIfIpv4VerifyUniRpf = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 9), UnicastRpfType().clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4VerifyUniRpf.setStatus('current')
cdtIfIpv4VerifyUniRpfAcl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 10), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4VerifyUniRpfAcl.setStatus('current')
cdtIfIpv4VerifyUniRpfOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 11), UnicastRpfOptions()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv4VerifyUniRpfOpts.setStatus('current')
# IPv6 enablement and per-protocol options; defaults false.
cdtIfIpv6Enable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 12), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6Enable.setStatus('current')
cdtIfIpv6SubEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 13), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6SubEnable.setStatus('current')
cdtIfIpv6TcpMssAdjust = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 14), Unsigned32().subtype(subtypeSpec=ConstraintsUnion(ValueRangeConstraint(0, 0), ValueRangeConstraint(500, 1460), ))).setUnits('octets').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6TcpMssAdjust.setStatus('current')
cdtIfIpv6VerifyUniRpf = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 15), UnicastRpfType().clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6VerifyUniRpf.setStatus('current')
cdtIfIpv6VerifyUniRpfAcl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 16), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6VerifyUniRpfAcl.setStatus('current')
cdtIfIpv6VerifyUniRpfOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 17), UnicastRpfOptions()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6VerifyUniRpfOpts.setStatus('current')
# IPv6 Neighbor Discovery (ND) parameters: advertised prefix, lifetimes,
# DAD attempts, NS interval, reachable time, and RA interval/lifetime.
cdtIfIpv6NdPrefix = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 18), InetAddressIPv6().clone(hexValue="00000000000000000000000000000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdPrefix.setStatus('current')
cdtIfIpv6NdPrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 19), InetAddressPrefixLength()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdPrefixLength.setStatus('current')
# Valid lifetime default 2592000 s (30 days); preferred 604800 s (7 days).
cdtIfIpv6NdValidLife = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 20), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(2592000)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdValidLife.setStatus('current')
cdtIfIpv6NdPreferredLife = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 21), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(604800)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdPreferredLife.setStatus('current')
# ND option flags; default set is advertise|onlink|router|autoConfig.
cdtIfIpv6NdOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 22), Bits().clone(namedValues=NamedValues(("advertise", 0), ("onlink", 1), ("router", 2), ("autoConfig", 3), ("advertisementInterval", 4), ("managedConfigFlag", 5), ("otherConfigFlag", 6), ("framedIpv6Prefix", 7), ("raSuppress", 8))).clone(namedValues=NamedValues(("advertise", 0), ("onlink", 1), ("router", 2), ("autoConfig", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdOpts.setStatus('current')
cdtIfIpv6NdDadAttempts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 23), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 600)).clone(1)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdDadAttempts.setStatus('current')
cdtIfIpv6NdNsInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 24), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1000, 3600000)).clone(1000)).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdNsInterval.setStatus('current')
cdtIfIpv6NdReachableTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 25), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setUnits('milliseconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdReachableTime.setStatus('current')
# Units (seconds/milliseconds) interpreting the two RA interval columns below.
cdtIfIpv6NdRaIntervalUnits = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 26), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("seconds", 1), ("milliseconds", 2))).clone('seconds')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaIntervalUnits.setStatus('current')
cdtIfIpv6NdRaIntervalMax = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 27), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaIntervalMax.setStatus('current')
cdtIfIpv6NdRaIntervalMin = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 28), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaIntervalMin.setStatus('current')
cdtIfIpv6NdRaLife = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 29), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295)).clone(1800)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRaLife.setStatus('current')
# Default router preference advertised in RAs; default 'medium'.
cdtIfIpv6NdRouterPreference = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 2, 1, 1, 30), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("high", 1), ("medium", 2), ("low", 3))).clone('medium')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtIfIpv6NdRouterPreference.setStatus('current')
# --- cdtPppTemplateTable (OID ...784.1.3.1) ---
# PPP-type dynamic template attributes (authentication, per-protocol
# negotiation options, IPCP address/DNS/WINS/mask handling), one row per
# template, indexed by cdtTemplateName.
cdtPppTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1), )
if mibBuilder.loadTexts: cdtPppTemplateTable.setStatus('current')
cdtPppTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtPppTemplateEntry.setStatus('current')
# Bitmask indicating which of the following columns are valid for the row.
# NOTE(review): bit 3 is spelled "autthenticationMethods" in the compiled
# MIB source — the typo is preserved here because the name is protocol data.
cdtPppValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 1), Bits().clone(namedValues=NamedValues(("valid", 0), ("accounting", 1), ("authentication", 2), ("autthenticationMethods", 3), ("authorization", 4), ("loopbackIgnore", 5), ("maxBadAuth", 6), ("maxConfigure", 7), ("maxFailure", 8), ("maxTerminate", 9), ("timeoutAuthentication", 10), ("timeoutRetry", 11), ("chapOpts", 12), ("chapHostname", 13), ("chapPassword", 14), ("msChapV1Opts", 15), ("msChapV1Hostname", 16), ("msChapV1Password", 17), ("msChapV2Opts", 18), ("msChapV2Hostname", 19), ("msChapV2Password", 20), ("papOpts", 21), ("papUsername", 22), ("papPassword", 23), ("eapOpts", 24), ("eapIdentity", 25), ("eapPassword", 26), ("ipcpAddrOption", 27), ("ipcpDnsOption", 28), ("ipcpDnsPrimary", 29), ("ipcpDnsSecondary", 30), ("ipcpWinsOption", 31), ("ipcpWinsPrimary", 32), ("ipcpWinsSecondary", 33), ("ipcpMaskOption", 34), ("ipcpMask", 35), ("peerDefIpAddrOpts", 36), ("peerDefIpAddrSrc", 37), ("peerDefIpAddr", 38)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppValid.setStatus('current')
# PPP AAA switches; defaults false.
cdtPppAccounting = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 2), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAccounting.setStatus('current')
# Bitmask of permitted authentication protocols/modifiers.
cdtPppAuthentication = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 3), Bits().clone(namedValues=NamedValues(("chap", 0), ("msChap", 1), ("msChapV2", 2), ("pap", 3), ("eap", 4), ("optional", 5), ("callin", 6), ("oneTime", 7)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAuthentication.setStatus('current')
cdtPppAuthenticationMethods = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAuthenticationMethods.setStatus('current')
cdtPppAuthorization = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 5), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppAuthorization.setStatus('current')
cdtPppLoopbackIgnore = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppLoopbackIgnore.setStatus('current')
# LCP negotiation limits; defaults per the MIB (max-configure 10,
# max-failure 5, max-terminate 2, auth timeout 10 s, retry timeout 3 s).
cdtPppMaxBadAuth = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 7), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(0, 4294967295))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxBadAuth.setStatus('current')
cdtPppMaxConfigure = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 8), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(10)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxConfigure.setStatus('current')
cdtPppMaxFailure = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 9), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(5)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxFailure.setStatus('current')
cdtPppMaxTerminate = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 10), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)).clone(2)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMaxTerminate.setStatus('current')
cdtPppTimeoutAuthentication = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 11), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(10)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppTimeoutAuthentication.setStatus('current')
cdtPppTimeoutRetry = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 12), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 255)).clone(3)).setUnits('seconds').setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppTimeoutRetry.setStatus('current')
# CHAP / MS-CHAPv1 / MS-CHAPv2 option bitmasks (default 'wait' only)
# plus per-protocol hostname and password strings.
cdtPppChapOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 13), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("encrypted", 3))).clone(namedValues=NamedValues(("wait", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppChapOpts.setStatus('current')
cdtPppChapHostname = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 14), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppChapHostname.setStatus('current')
cdtPppChapPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 15), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppChapPassword.setStatus('current')
cdtPppMsChapV1Opts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 16), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("encrypted", 3))).clone(namedValues=NamedValues(("wait", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV1Opts.setStatus('current')
cdtPppMsChapV1Hostname = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 17), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV1Hostname.setStatus('current')
cdtPppMsChapV1Password = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 18), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV1Password.setStatus('current')
cdtPppMsChapV2Opts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 19), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("encrypted", 3))).clone(namedValues=NamedValues(("wait", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV2Opts.setStatus('current')
cdtPppMsChapV2Hostname = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 20), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV2Hostname.setStatus('current')
cdtPppMsChapV2Password = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 21), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppMsChapV2Password.setStatus('current')
# PAP and EAP options and credentials.
cdtPppPapOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 22), Bits().clone(namedValues=NamedValues(("refuse", 0), ("encrypted", 1)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPapOpts.setStatus('current')
cdtPppPapUsername = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 23), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPapUsername.setStatus('current')
cdtPppPapPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 24), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPapPassword.setStatus('current')
cdtPppEapOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 25), Bits().clone(namedValues=NamedValues(("refuse", 0), ("callin", 1), ("wait", 2), ("local", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppEapOpts.setStatus('current')
cdtPppEapIdentity = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 26), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppEapIdentity.setStatus('current')
cdtPppEapPassword = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 27), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppEapPassword.setStatus('current')
# IPCP negotiation behavior for addresses, DNS, WINS and subnet mask.
cdtPppIpcpAddrOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 28), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("accept", 2), ("required", 3), ("unique", 4))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpAddrOption.setStatus('current')
cdtPppIpcpDnsOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 29), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("accept", 2), ("request", 3), ("reject", 4))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpDnsOption.setStatus('current')
cdtPppIpcpDnsPrimary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 30), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpDnsPrimary.setStatus('current')
cdtPppIpcpDnsSecondary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 31), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpDnsSecondary.setStatus('current')
cdtPppIpcpWinsOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 32), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("accept", 2), ("request", 3), ("reject", 4))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpWinsOption.setStatus('current')
cdtPppIpcpWinsPrimary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 33), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpWinsPrimary.setStatus('current')
cdtPppIpcpWinsSecondary = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 34), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpWinsSecondary.setStatus('current')
cdtPppIpcpMaskOption = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 35), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("other", 1), ("request", 2), ("reject", 3))).clone('other')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpMaskOption.setStatus('current')
cdtPppIpcpMask = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 36), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppIpcpMask.setStatus('current')
# How the peer's default IPv4 address is derived; default source 'pool'.
cdtPppPeerDefIpAddrOpts = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 37), Bits().clone(namedValues=NamedValues(("ipAddrForced", 0), ("matchAaaPools", 1), ("staticPool", 2))).clone(namedValues=NamedValues(("ipAddrForced", 0)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerDefIpAddrOpts.setStatus('current')
cdtPppPeerDefIpAddrSrc = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 38), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("static", 1), ("pool", 2), ("dhcp", 3))).clone('pool')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerDefIpAddrSrc.setStatus('current')
cdtPppPeerDefIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 1, 1, 39), InetAddressIPv4().clone(hexValue="00000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerDefIpAddr.setStatus('current')
# --- cdtPppPeerIpAddrPoolTable (OID ...784.1.3.2) ---
# Ordered list of named IP address pools per PPP template; rows are
# indexed by (cdtTemplateName, cdtPppPeerIpAddrPoolPriority) and managed
# via RowStatus.
cdtPppPeerIpAddrPoolTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2), )
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolTable.setStatus('current')
cdtPppPeerIpAddrPoolEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"), (0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolPriority"))
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolEntry.setStatus('current')
# Index column: relative priority of this pool (not-accessible in SMI;
# no max-access set here, matching the compiler's output for indexes).
cdtPppPeerIpAddrPoolPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 1), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 4294967295)))
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolPriority.setStatus('current')
# Row lifecycle, persistence (default 'volatile'), and pool name.
cdtPppPeerIpAddrPoolStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolStatus.setStatus('current')
cdtPppPeerIpAddrPoolStorage = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 3), StorageType().clone('volatile')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolStorage.setStatus('current')
cdtPppPeerIpAddrPoolName = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 3, 2, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtPppPeerIpAddrPoolName.setStatus('current')
# --- cdtEthernetTemplateTable (OID ...784.1.4.1) ---
# Ethernet-type dynamic template attributes (bridge domain, PPPoE,
# point-to-point IPv4, MAC address); indexed by cdtTemplateName.
cdtEthernetTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1), )
if mibBuilder.loadTexts: cdtEthernetTemplateTable.setStatus('current')
cdtEthernetTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtEthernetTemplateEntry.setStatus('current')
# Bitmask indicating which of the following columns are valid for the row.
cdtEthernetValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 1), Bits().clone(namedValues=NamedValues(("bridgeDomain", 0), ("pppoeEnable", 1), ("ipv4PointToPoint", 2), ("macAddr", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetValid.setStatus('current')
cdtEthernetBridgeDomain = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 2), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetBridgeDomain.setStatus('current')
cdtEthernetPppoeEnable = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 3), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetPppoeEnable.setStatus('current')
cdtEthernetIpv4PointToPoint = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 4), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetIpv4PointToPoint.setStatus('current')
# MAC address; default all-zero (unset).
cdtEthernetMacAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 4, 1, 1, 5), MacAddress().clone(hexValue="000000000000")).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtEthernetMacAddr.setStatus('current')
# --- cdtSrvTemplateTable (OID ...784.1.6.1) ---
# Service-type dynamic template attributes (network service, VPDN group,
# service-group membership, multicast); indexed by cdtTemplateName.
cdtSrvTemplateTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1), )
if mibBuilder.loadTexts: cdtSrvTemplateTable.setStatus('current')
cdtSrvTemplateEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1), ).setIndexNames((0, "CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateName"))
if mibBuilder.loadTexts: cdtSrvTemplateEntry.setStatus('current')
# Bitmask indicating which of the following columns are valid for the row.
cdtSrvValid = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 1), Bits().clone(namedValues=NamedValues(("networkSrv", 0), ("vpdnGroup", 1), ("sgSrvGroup", 2), ("sgSrvType", 3), ("multicast", 4)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvValid.setStatus('current')
# Network service type; default 'none'.
cdtSrvNetworkSrv = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("other", 1), ("none", 2), ("local", 3), ("vpdn", 4))).clone('none')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvNetworkSrv.setStatus('current')
cdtSrvVpdnGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 3), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvVpdnGroup.setStatus('current')
cdtSrvSgSrvGroup = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 4), SnmpAdminString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvSgSrvGroup.setStatus('current')
# Role within the service group; default 'secondary'.
cdtSrvSgSrvType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("primary", 1), ("secondary", 2))).clone('secondary')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvSgSrvType.setStatus('current')
cdtSrvMulticast = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 784, 1, 6, 1, 1, 6), TruthValue().clone('false')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cdtSrvMulticast.setStatus('current')
# --- Conformance section (OID ...784.2) ---
# Module compliance and the object groups it references. The repeated
# `getattr(mibBuilder, 'version', ...) > (4, 4, 0)` guard is emitted by the
# MIB compiler: on newer pysmi/pysnmp builders setStatus() is chainable
# (returns the object), so the rebind is only done there — presumably to
# stay compatible with older builders; do not hand-edit.
ciscoDynamicTemplateMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 1))
ciscoDynamicTemplateMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2))
# Compliance statement listing every group an implementation must support.
ciscoDynamicTemplateR1Compliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 1, 1)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtBaseGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    ciscoDynamicTemplateR1Compliance = ciscoDynamicTemplateR1Compliance.setStatus('current')
# Base template objects (status, storage, type, source, usage, targets).
cdtBaseGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 1)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateStatus"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateStorage"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateType"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateSrc"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageCount"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetStatus"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateTargetStorage"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateAssociationPrecedence"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetType"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtTemplateUsageTargetId"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtBaseGroup = cdtBaseGroup.setStatus('current')
# Objects common to all template types (cdtCommon* columns above).
cdtCommonGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 2)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonDescr"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonKeepaliveInt"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonKeepaliveRetries"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonVrf"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonAddrPool"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv4AccessGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv4Unreachables"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv6AccessGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonIpv6Unreachables"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvSubControl"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvRedirect"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvAcct"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvQos"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtCommonSrvNetflow"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtCommonGroup = cdtCommonGroup.setStatus('current')
# Interface-template objects (cdtIf* columns above).
cdtIfGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 3)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfMtu"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfCdpEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfFlowMonitor"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4Unnumbered"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4SubEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4Mtu"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4TcpMssAdjust"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4VerifyUniRpf"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4VerifyUniRpfAcl"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv4VerifyUniRpfOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6Enable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6SubEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6TcpMssAdjust"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6VerifyUniRpf"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6VerifyUniRpfAcl"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6VerifyUniRpfOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdPrefix"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdPrefixLength"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdValidLife"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdPreferredLife"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdDadAttempts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdNsInterval"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdReachableTime"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaIntervalUnits"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaIntervalMax"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaIntervalMin"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRaLife"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtIfIpv6NdRouterPreference"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtIfGroup = cdtIfGroup.setStatus('current')
# PPP-template objects (cdtPpp* columns above); the .setObjects() call is
# split across two physical lines by the compiler.
cdtPppGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 4)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAccounting"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAuthentication"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAuthenticationMethods"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppAuthorization"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppLoopbackIgnore"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxBadAuth"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxConfigure"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxFailure"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMaxTerminate"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppTimeoutAuthentication"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppTimeoutRetry"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppChapOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppChapHostname"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppChapPassword"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV1Opts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV1Hostname"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV1Password"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV2Opts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV2Hostname"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppMsChapV2Password"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPapOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPapUsername"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPapPassword"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppEapOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppEapIdentity"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppEapPassword"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpAddrOption"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpDnsOption"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpDnsPrimary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpDnsSecondary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpWinsOption"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpWinsPrimary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpWinsSecondary"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpMaskOption"), 
("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppIpcpMask"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerDefIpAddrOpts"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerDefIpAddrSrc"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerDefIpAddr"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolStatus"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolStorage"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtPppPeerIpAddrPoolName"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtPppGroup = cdtPppGroup.setStatus('current')
# Ethernet-template objects (cdtEthernet* columns above).
cdtEthernetGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 5)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetBridgeDomain"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetPppoeEnable"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetIpv4PointToPoint"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtEthernetMacAddr"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtEthernetGroup = cdtEthernetGroup.setStatus('current')
# Service-template objects (cdtSrv* columns above).
cdtSrvGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 784, 2, 2, 6)).setObjects(("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvValid"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvNetworkSrv"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvVpdnGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvSgSrvGroup"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvSgSrvType"), ("CISCO-DYNAMIC-TEMPLATE-MIB", "cdtSrvMulticast"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    cdtSrvGroup = cdtSrvGroup.setStatus('current')
mibBuilder.exportSymbols("CISCO-DYNAMIC-TEMPLATE-MIB", cdtTemplateUsageEntry=cdtTemplateUsageEntry, cdtPppMaxTerminate=cdtPppMaxTerminate, cdtTemplateTargetId=cdtTemplateTargetId, cdtPppMsChapV1Hostname=cdtPppMsChapV1Hostname, cdtService=cdtService, cdtPppGroup=cdtPppGroup, cdtIfIpv6NdRouterPreference=cdtIfIpv6NdRouterPreference, cdtSrvSgSrvGroup=cdtSrvSgSrvGroup, cdtTemplateTargetEntry=cdtTemplateTargetEntry, cdtTemplateUsageTargetId=cdtTemplateUsageTargetId, ciscoDynamicTemplateMIBObjects=ciscoDynamicTemplateMIBObjects, cdtPppMaxBadAuth=cdtPppMaxBadAuth, cdtPppIpcpDnsPrimary=cdtPppIpcpDnsPrimary, cdtIfIpv6NdValidLife=cdtIfIpv6NdValidLife, cdtPppPeerIpAddrPoolStorage=cdtPppPeerIpAddrPoolStorage, cdtBaseGroup=cdtBaseGroup, cdtSrvTemplateTable=cdtSrvTemplateTable, cdtTemplateStatus=cdtTemplateStatus, cdtIfIpv4VerifyUniRpfAcl=cdtIfIpv4VerifyUniRpfAcl, cdtPppEapOpts=cdtPppEapOpts, cdtEthernetTemplateEntry=cdtEthernetTemplateEntry, cdtEthernetMacAddr=cdtEthernetMacAddr, cdtCommonKeepaliveInt=cdtCommonKeepaliveInt, cdtPppChapHostname=cdtPppChapHostname, cdtPppMsChapV2Password=cdtPppMsChapV2Password, cdtPppEapPassword=cdtPppEapPassword, cdtCommonIpv4Unreachables=cdtCommonIpv4Unreachables, cdtCommonIf=cdtCommonIf, cdtTemplateStorage=cdtTemplateStorage, cdtCommonKeepaliveRetries=cdtCommonKeepaliveRetries, cdtIfIpv6NdRaLife=cdtIfIpv6NdRaLife, cdtCommonSrvRedirect=cdtCommonSrvRedirect, cdtEthernetTemplateTable=cdtEthernetTemplateTable, cdtTemplateSrc=cdtTemplateSrc, cdtPppIpcpDnsSecondary=cdtPppIpcpDnsSecondary, cdtPppPeerDefIpAddr=cdtPppPeerDefIpAddr, cdtIfIpv6VerifyUniRpfAcl=cdtIfIpv6VerifyUniRpfAcl, cdtIfIpv6NdOpts=cdtIfIpv6NdOpts, cdtSrvMulticast=cdtSrvMulticast, cdtPppPeerIpAddrPoolTable=cdtPppPeerIpAddrPoolTable, ciscoDynamicTemplateR1Compliance=ciscoDynamicTemplateR1Compliance, cdtPppIpcpDnsOption=cdtPppIpcpDnsOption, cdtPppLoopbackIgnore=cdtPppLoopbackIgnore, cdtPppTimeoutAuthentication=cdtPppTimeoutAuthentication, cdtTemplateCommonTable=cdtTemplateCommonTable, 
cdtIfIpv6Enable=cdtIfIpv6Enable, PYSNMP_MODULE_ID=ciscoDynamicTemplateMIB, cdtTemplateEntry=cdtTemplateEntry, cdtPppMsChapV2Opts=cdtPppMsChapV2Opts, cdtIfIpv6TcpMssAdjust=cdtIfIpv6TcpMssAdjust, cdtEthernetIpv4PointToPoint=cdtEthernetIpv4PointToPoint, cdtCommonSrvNetflow=cdtCommonSrvNetflow, cdtTemplateAssociationPrecedence=cdtTemplateAssociationPrecedence, cdtIfIpv6NdDadAttempts=cdtIfIpv6NdDadAttempts, cdtIfTemplateEntry=cdtIfTemplateEntry, cdtIfIpv6VerifyUniRpf=cdtIfIpv6VerifyUniRpf, cdtIpSubscriber=cdtIpSubscriber, ciscoDynamicTemplateMIBGroups=ciscoDynamicTemplateMIBGroups, cdtPppPeerIpAddrPoolName=cdtPppPeerIpAddrPoolName, cdtIfFlowMonitor=cdtIfFlowMonitor, cdtTemplateUsageCount=cdtTemplateUsageCount, cdtPppEapIdentity=cdtPppEapIdentity, ciscoDynamicTemplateMIBNotifs=ciscoDynamicTemplateMIBNotifs, cdtCommonIpv4AccessGroup=cdtCommonIpv4AccessGroup, cdtSrvTemplateEntry=cdtSrvTemplateEntry, cdtPppTimeoutRetry=cdtPppTimeoutRetry, cdtCommonSrvAcct=cdtCommonSrvAcct, cdtIfIpv6NdPrefixLength=cdtIfIpv6NdPrefixLength, cdtPppTemplateTable=cdtPppTemplateTable, cdtPppAuthorization=cdtPppAuthorization, cdtPppIpcpAddrOption=cdtPppIpcpAddrOption, cdtPppMaxFailure=cdtPppMaxFailure, cdtPppValid=cdtPppValid, cdtTemplateTargetStorage=cdtTemplateTargetStorage, ciscoDynamicTemplateMIBConform=ciscoDynamicTemplateMIBConform, cdtTemplateAssociationTable=cdtTemplateAssociationTable, cdtIfIpv6NdReachableTime=cdtIfIpv6NdReachableTime, cdtIfGroup=cdtIfGroup, cdtSrvValid=cdtSrvValid, cdtPpp=cdtPpp, cdtPppTemplateEntry=cdtPppTemplateEntry, cdtSrvGroup=cdtSrvGroup, cdtIfIpv4Mtu=cdtIfIpv4Mtu, cdtCommonDescr=cdtCommonDescr, cdtTemplateUsageTable=cdtTemplateUsageTable, cdtIfIpv4TcpMssAdjust=cdtIfIpv4TcpMssAdjust, cdtIfIpv6VerifyUniRpfOpts=cdtIfIpv6VerifyUniRpfOpts, cdtSrvNetworkSrv=cdtSrvNetworkSrv, cdtPppAuthenticationMethods=cdtPppAuthenticationMethods, cdtPppChapOpts=cdtPppChapOpts, cdtCommonValid=cdtCommonValid, cdtCommonSrvQos=cdtCommonSrvQos, 
cdtIfIpv6NdRaIntervalMin=cdtIfIpv6NdRaIntervalMin, cdtEthernetGroup=cdtEthernetGroup, cdtTemplateTargetType=cdtTemplateTargetType, cdtTemplateName=cdtTemplateName, cdtCommonIpv6AccessGroup=cdtCommonIpv6AccessGroup, cdtPppMaxConfigure=cdtPppMaxConfigure, cdtIfIpv4VerifyUniRpf=cdtIfIpv4VerifyUniRpf, cdtPppIpcpMask=cdtPppIpcpMask, cdtIfIpv6SubEnable=cdtIfIpv6SubEnable, cdtIfIpv6NdPrefix=cdtIfIpv6NdPrefix, cdtIfValid=cdtIfValid, ciscoDynamicTemplateMIB=ciscoDynamicTemplateMIB, cdtEthernetBridgeDomain=cdtEthernetBridgeDomain, cdtPppIpcpWinsSecondary=cdtPppIpcpWinsSecondary, cdtCommonAddrPool=cdtCommonAddrPool, cdtPppMsChapV2Hostname=cdtPppMsChapV2Hostname, cdtIfIpv4SubEnable=cdtIfIpv4SubEnable, cdtPppMsChapV1Password=cdtPppMsChapV1Password, cdtTemplateAssociationName=cdtTemplateAssociationName, ciscoDynamicTemplateMIBCompliances=ciscoDynamicTemplateMIBCompliances, cdtTemplateCommonEntry=cdtTemplateCommonEntry, cdtPppPapOpts=cdtPppPapOpts, cdtPppMsChapV1Opts=cdtPppMsChapV1Opts, cdtTemplateType=cdtTemplateType, cdtIfTemplateTable=cdtIfTemplateTable, cdtPppIpcpMaskOption=cdtPppIpcpMaskOption, cdtSrvSgSrvType=cdtSrvSgSrvType, cdtPppPapUsername=cdtPppPapUsername, cdtBase=cdtBase, cdtIfIpv6NdRaIntervalUnits=cdtIfIpv6NdRaIntervalUnits, cdtTemplateTargetTable=cdtTemplateTargetTable, cdtTemplateTargetStatus=cdtTemplateTargetStatus, cdtPppPapPassword=cdtPppPapPassword, cdtPppAccounting=cdtPppAccounting, cdtIfIpv4Unnumbered=cdtIfIpv4Unnumbered, cdtCommonIpv6Unreachables=cdtCommonIpv6Unreachables, cdtPppChapPassword=cdtPppChapPassword, cdtSrvVpdnGroup=cdtSrvVpdnGroup, cdtSubscriberGroup=cdtSubscriberGroup, cdtTemplateAssociationEntry=cdtTemplateAssociationEntry, cdtPppPeerDefIpAddrOpts=cdtPppPeerDefIpAddrOpts, cdtEthernetValid=cdtEthernetValid, cdtIfCdpEnable=cdtIfCdpEnable, cdtIfIpv6NdRaIntervalMax=cdtIfIpv6NdRaIntervalMax, cdtPppIpcpWinsOption=cdtPppIpcpWinsOption, cdtPppPeerIpAddrPoolEntry=cdtPppPeerIpAddrPoolEntry, cdtEthernet=cdtEthernet, 
cdtPppPeerIpAddrPoolStatus=cdtPppPeerIpAddrPoolStatus, cdtCommonSrvSubControl=cdtCommonSrvSubControl, cdtIfMtu=cdtIfMtu, cdtPppPeerIpAddrPoolPriority=cdtPppPeerIpAddrPoolPriority, cdtPppAuthentication=cdtPppAuthentication, cdtCommonGroup=cdtCommonGroup, cdtTemplateUsageTargetType=cdtTemplateUsageTargetType, cdtEthernetPppoeEnable=cdtEthernetPppoeEnable, cdtIfIpv6NdPreferredLife=cdtIfIpv6NdPreferredLife, cdtPppPeerDefIpAddrSrc=cdtPppPeerDefIpAddrSrc, cdtCommonVrf=cdtCommonVrf, cdtTemplateTable=cdtTemplateTable, cdtIfIpv6NdNsInterval=cdtIfIpv6NdNsInterval, cdtIfIpv4VerifyUniRpfOpts=cdtIfIpv4VerifyUniRpfOpts, cdtPppIpcpWinsPrimary=cdtPppIpcpWinsPrimary)
| true
| true
|
f70754ec00e8abe2a03b841fcbe112a86736ab17
| 16,591
|
py
|
Python
|
examples/demo_optim_data_preproc..py
|
andrerubeis/AIF360
|
c0ce6f2e3eff9cab0ccce0bc0a05b681a5df7e44
|
[
"Apache-2.0"
] | null | null | null |
examples/demo_optim_data_preproc..py
|
andrerubeis/AIF360
|
c0ce6f2e3eff9cab0ccce0bc0a05b681a5df7e44
|
[
"Apache-2.0"
] | null | null | null |
examples/demo_optim_data_preproc..py
|
andrerubeis/AIF360
|
c0ce6f2e3eff9cab0ccce0bc0a05b681a5df7e44
|
[
"Apache-2.0"
] | null | null | null |
# %% md
#### This notebook demonstrates the use of an optimized data pre-processing algorithm for bias mitigation
# - The debiasing function used is implemented in the `OptimPreproc` class.
# - Define parameters for optimized pre-processing specific to the dataset.
# - Divide the dataset into training, validation, and testing partitions.
# - Learn the optimized pre-processing transformation from the training data.
# - Train classifier on original training data.
# - Estimate the optimal classification threshold, that maximizes balanced
#   accuracy without fairness constraints (from the original validation set).
# - Determine the prediction scores for original testing data. Using the
#   estimated optimal classification threshold, compute accuracy and
#   fairness metrics.
# - Transform the testing set using the learned probabilistic transformation.
# - Determine the prediction scores for transformed testing data. Using the
#   estimated optimal classification threshold, compute accuracy and
#   fairness metrics.
# %%
# Load all necessary packages
import sys
sys.path.append("../")
import numpy as np
from tqdm import tqdm
from aif360.datasets import BinaryLabelDataset
from aif360.datasets import AdultDataset, GermanDataset, CompasDataset
from aif360.metrics import BinaryLabelDatasetMetric
from aif360.metrics import ClassificationMetric
from aif360.metrics.utils import compute_boolean_conditioning_vector
from aif360.algorithms.preprocessing.optim_preproc import OptimPreproc
from aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions \
import load_preproc_data_adult, load_preproc_data_german, load_preproc_data_compas
from aif360.algorithms.preprocessing.optim_preproc_helpers.distortion_functions \
import get_distortion_adult, get_distortion_german, get_distortion_compas
from aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools import OptTools
from common_utils import compute_metrics
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from IPython.display import Markdown, display
import matplotlib.pyplot as plt
# %% md
#### Load dataset and specify options
# %%
# import dataset
dataset_used = "adult" # "adult", "german", "compas"
protected_attribute_used = 1 # 1, 2
if dataset_used == "adult":
if protected_attribute_used == 1:
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_adult(['sex'])
else:
privileged_groups = [{'race': 1}]
unprivileged_groups = [{'race': 0}]
dataset_orig = load_preproc_data_adult(['race'])
optim_options = {
"distortion_fun": get_distortion_adult,
"epsilon": 0.05,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
elif dataset_used == "german":
if protected_attribute_used == 1:
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_german(['sex'])
optim_options = {
"distortion_fun": get_distortion_german,
"epsilon": 0.05,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
else:
privileged_groups = [{'age': 1}]
unprivileged_groups = [{'age': 0}]
dataset_orig = load_preproc_data_german(['age'])
optim_options = {
"distortion_fun": get_distortion_german,
"epsilon": 0.1,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
elif dataset_used == "compas":
if protected_attribute_used == 1:
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_compas(['sex'])
else:
privileged_groups = [{'race': 1}]
unprivileged_groups = [{'race': 0}]
dataset_orig = load_preproc_data_compas(['race'])
optim_options = {
"distortion_fun": get_distortion_compas,
"epsilon": 0.05,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
# random seed
np.random.seed(1)
# Split into train, validation, and test
dataset_orig_train, dataset_orig_vt = dataset_orig.split([0.7], shuffle=True)
dataset_orig_valid, dataset_orig_test = dataset_orig_vt.split([0.5], shuffle=True)
# %% md
#### Display dataset attributes
# %%
# print out some labels, names, etc.
display(Markdown("#### Training Dataset shape"))
print(dataset_orig_train.features.shape)
display(Markdown("#### Favorable and unfavorable labels"))
print(dataset_orig_train.favorable_label, dataset_orig_train.unfavorable_label)
display(Markdown("#### Protected attribute names"))
print(dataset_orig_train.protected_attribute_names)
display(Markdown("#### Privileged and unprivileged protected attribute values"))
print(dataset_orig_train.privileged_protected_attributes,
dataset_orig_train.unprivileged_protected_attributes)
display(Markdown("#### Dataset feature names"))
print(dataset_orig_train.feature_names)
# %% md
#### Metric for original training data
# %%
# Metric for the original dataset
metric_orig_train = BinaryLabelDatasetMetric(dataset_orig_train,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Original training dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_train.mean_difference())
# %% md
#### Train with and transform the original training data
# %%
OP = OptimPreproc(OptTools, optim_options,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
OP = OP.fit(dataset_orig_train)
# Transform training data and align features
dataset_transf_train = OP.transform(dataset_orig_train, transform_Y=True)
dataset_transf_train = dataset_orig_train.align_datasets(dataset_transf_train)
# %% md
#### Metric with the transformed training data
# %%
metric_transf_train = BinaryLabelDatasetMetric(dataset_transf_train,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Transformed training dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_train.mean_difference())
# %% md
# Optimized
# preprocessing
# has
# reduced
# the
# disparity in favorable
# outcomes
# between
# the
# privileged and unprivileged
# groups(training
# data).
# %%
### Testing
assert np.abs(metric_transf_train.mean_difference()) < np.abs(metric_orig_train.mean_difference())
# %% md
#### Load, clean up original test data and compute metric
# %%
dataset_orig_test = dataset_transf_train.align_datasets(dataset_orig_test)
display(Markdown("#### Testing Dataset shape"))
print(dataset_orig_test.features.shape)
metric_orig_test = BinaryLabelDatasetMetric(dataset_orig_test,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Original test dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_test.mean_difference())
# %% md
#### Transform test data and compute metric
# %%
dataset_transf_test = OP.transform(dataset_orig_test, transform_Y=True)
dataset_transf_test = dataset_orig_test.align_datasets(dataset_transf_test)
metric_transf_test = BinaryLabelDatasetMetric(dataset_transf_test,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Transformed test dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_test.mean_difference())
# %% md
# Optimized
# preprocessing
# has
# reduced
# the
# disparity in favorable
# outcomes
# between
# the
# privileged and unprivileged
# groups(test
# data).
# %%
### Testing
assert np.abs(metric_transf_test.mean_difference()) < np.abs(metric_orig_test.mean_difference())
# %% md
### Train classifier on original data
# %%
# Logistic regression classifier and predictions
scale_orig = StandardScaler()
X_train = scale_orig.fit_transform(dataset_orig_train.features)
y_train = dataset_orig_train.labels.ravel()
lmod = LogisticRegression()
lmod.fit(X_train, y_train)
y_train_pred = lmod.predict(X_train)
# positive class index
pos_ind = np.where(lmod.classes_ == dataset_orig_train.favorable_label)[0][0]
dataset_orig_train_pred = dataset_orig_train.copy()
dataset_orig_train_pred.labels = y_train_pred
# %% md
#### Obtain scores original test set
# %%
dataset_orig_valid_pred = dataset_orig_valid.copy(deepcopy=True)
X_valid = scale_orig.transform(dataset_orig_valid_pred.features)
y_valid = dataset_orig_valid_pred.labels
dataset_orig_valid_pred.scores = lmod.predict_proba(X_valid)[:, pos_ind].reshape(-1, 1)
dataset_orig_test_pred = dataset_orig_test.copy(deepcopy=True)
X_test = scale_orig.transform(dataset_orig_test_pred.features)
y_test = dataset_orig_test_pred.labels
dataset_orig_test_pred.scores = lmod.predict_proba(X_test)[:, pos_ind].reshape(-1, 1)
# %% md
### Find the optimal classification threshold from the validation set
# %%
num_thresh = 100
ba_arr = np.zeros(num_thresh)
class_thresh_arr = np.linspace(0.01, 0.99, num_thresh)
for idx, class_thresh in enumerate(class_thresh_arr):
fav_inds = dataset_orig_valid_pred.scores > class_thresh
dataset_orig_valid_pred.labels[fav_inds] = dataset_orig_valid_pred.favorable_label
dataset_orig_valid_pred.labels[~fav_inds] = dataset_orig_valid_pred.unfavorable_label
classified_metric_orig_valid = ClassificationMetric(dataset_orig_valid,
dataset_orig_valid_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
ba_arr[idx] = 0.5 * (classified_metric_orig_valid.true_positive_rate() \
+ classified_metric_orig_valid.true_negative_rate())
best_ind = np.where(ba_arr == np.max(ba_arr))[0][0]
best_class_thresh = class_thresh_arr[best_ind]
print("Best balanced accuracy (no fairness constraints) = %.4f" % np.max(ba_arr))
print("Optimal classification threshold (no fairness constraints) = %.4f" % best_class_thresh)
# %% md
### Predictions and fairness metrics from original test set
# %%
display(Markdown("#### Predictions from original testing data"))
bal_acc_arr_orig = []
disp_imp_arr_orig = []
avg_odds_diff_arr_orig = []
display(Markdown("#### Testing set"))
display(Markdown("##### Raw predictions - No fairness constraints"))
for thresh in tqdm(class_thresh_arr):
fav_inds = dataset_orig_test_pred.scores > thresh
dataset_orig_test_pred.labels[fav_inds] = dataset_orig_test_pred.favorable_label
dataset_orig_test_pred.labels[~fav_inds] = dataset_orig_test_pred.unfavorable_label
if (thresh == best_class_thresh):
disp = True
else:
disp = False
metric_test_bef = compute_metrics(dataset_orig_test, dataset_orig_test_pred,
unprivileged_groups, privileged_groups, disp=disp)
bal_acc_arr_orig.append(metric_test_bef["Balanced accuracy"])
avg_odds_diff_arr_orig.append(metric_test_bef["Average odds difference"])
disp_imp_arr_orig.append(metric_test_bef["Disparate impact"])
# %%
fig, ax1 = plt.subplots(figsize=(10, 7))
ax1.plot(class_thresh_arr, bal_acc_arr_orig)
ax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')
ax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')
ax1.xaxis.set_tick_params(labelsize=14)
ax1.yaxis.set_tick_params(labelsize=14)
ax2 = ax1.twinx()
ax2.plot(class_thresh_arr, np.abs(1.0 - np.array(disp_imp_arr_orig)), color='r')
ax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')
ax2.axvline(np.array(class_thresh_arr)[best_ind],
color='k', linestyle=':')
ax2.yaxis.set_tick_params(labelsize=14)
ax2.grid(True)
disp_imp_at_best_bal_acc_orig = np.abs(1.0 - np.array(disp_imp_arr_orig))[best_ind]
# %% md
# ```abs(1 - disparate
# impact)``` must
# be
# close
# to
# zero
# for classifier predictions to be fair.
# %% md
### Train classifier on transformed data and obtain predictions with its fairness metrics
# %%
scale_transf = StandardScaler()
X_train = scale_transf.fit_transform(dataset_transf_train.features)
y_train = dataset_transf_train.labels.ravel()
lmod = LogisticRegression()
lmod.fit(X_train, y_train)
y_train_pred = lmod.predict(X_train)
dataset_transf_train_pred = dataset_transf_train.copy()
dataset_transf_train_pred.labels = y_train_pred
# %% md
### Predictions and fairness metrics from transformed test set
# %%
dataset_transf_test_pred = dataset_transf_test.copy(deepcopy=True)
X_test = scale_transf.transform(dataset_transf_test_pred.features)
y_test = dataset_transf_test_pred.labels
dataset_transf_test_pred.scores = lmod.predict_proba(X_test)[:, pos_ind].reshape(-1, 1)
# %%
display(Markdown("#### Predictions from transformed testing data"))
bal_acc_arr_transf = []
disp_imp_arr_transf = []
avg_odds_diff_arr_transf = []
display(Markdown("#### Testing set"))
display(Markdown("##### Transformed predictions - No fairness constraints"))
for thresh in tqdm(class_thresh_arr):
fav_inds = dataset_transf_test_pred.scores > thresh
dataset_transf_test_pred.labels[fav_inds] = dataset_transf_test_pred.favorable_label
dataset_transf_test_pred.labels[~fav_inds] = dataset_transf_test_pred.unfavorable_label
if (thresh == best_class_thresh):
disp = True
else:
disp = False
metric_test_bef = compute_metrics(dataset_transf_test, dataset_transf_test_pred,
unprivileged_groups, privileged_groups, disp=disp)
bal_acc_arr_transf.append(metric_test_bef["Balanced accuracy"])
avg_odds_diff_arr_transf.append(metric_test_bef["Average odds difference"])
disp_imp_arr_transf.append(metric_test_bef["Disparate impact"])
# %%
fig, ax1 = plt.subplots(figsize=(10, 7))
ax1.plot(class_thresh_arr, bal_acc_arr_transf)
ax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')
ax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')
ax1.xaxis.set_tick_params(labelsize=14)
ax1.yaxis.set_tick_params(labelsize=14)
ax2 = ax1.twinx()
ax2.plot(class_thresh_arr, np.abs(1.0 - np.array(disp_imp_arr_transf)), color='r')
ax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')
ax2.axvline(np.array(class_thresh_arr)[best_ind],
color='k', linestyle=':')
ax2.yaxis.set_tick_params(labelsize=14)
ax2.grid(True)
disp_imp_at_best_bal_acc_transf = np.abs(1.0 - np.array(disp_imp_arr_transf))[best_ind]
# %% md
# ```abs(1 - disparate
# impact)``` must
# be
# close
# to
# zero
# for classifier predictions to be fair.This measure has improved using classifier trained using the transformed data compared to the original data.
# %%
### testing
assert disp_imp_at_best_bal_acc_transf < disp_imp_at_best_bal_acc_orig
# %% md
# # Summary of Results
# We show the optimal classification thresholds, and the fairness and
# accuracy metrics.
# %% md
### Classification Thresholds
# | Dataset | Classification threshold |
# | - | - |
# | Adult | 0.2674 |
# | German | 0.6732 |
# | Compas | 0.5148 |
# %% md
### Fairness Metric: Disparate impact, Accuracy Metric: Balanced accuracy
#### Performance
# | Dataset | Sex (Acc-Bef) | Sex (Acc-Aft) | Sex (Fair-Bef) | Sex (Fair-Aft) | Race/Age (Acc-Bef) | Race/Age (Acc-Aft) | Race/Age (Fair-Bef) | Race/Age (Fair-Aft) |
# | - | - | - | - | - | - | - | - | - |
# | Adult (Test) | 0.7417 | 0.7021 | 0.2774 | 0.7729 | 0.7417 | 0.7408 | 0.4423 | 0.7645 |
# | German (Test) | 0.6524 | 0.5698 | 0.9948 | 1.0664 | 0.6524 | 0.6067 | 0.3824 | 0.8228 |
# | Compas (Test) | 0.6774 | 0.6606 | 0.6631 | 0.8085 | 0.6774 | 0.6790 | 0.6600 | 0.8430 |
# %%
| 29.056042
| 148
| 0.710807
|
atasetMetric
from aif360.metrics import ClassificationMetric
from aif360.metrics.utils import compute_boolean_conditioning_vector
from aif360.algorithms.preprocessing.optim_preproc import OptimPreproc
from aif360.algorithms.preprocessing.optim_preproc_helpers.data_preproc_functions \
import load_preproc_data_adult, load_preproc_data_german, load_preproc_data_compas
from aif360.algorithms.preprocessing.optim_preproc_helpers.distortion_functions \
import get_distortion_adult, get_distortion_german, get_distortion_compas
from aif360.algorithms.preprocessing.optim_preproc_helpers.opt_tools import OptTools
from common_utils import compute_metrics
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score
from IPython.display import Markdown, display
import matplotlib.pyplot as plt
d_attribute_used == 1:
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_adult(['sex'])
else:
privileged_groups = [{'race': 1}]
unprivileged_groups = [{'race': 0}]
dataset_orig = load_preproc_data_adult(['race'])
optim_options = {
"distortion_fun": get_distortion_adult,
"epsilon": 0.05,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
elif dataset_used == "german":
if protected_attribute_used == 1:
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_german(['sex'])
optim_options = {
"distortion_fun": get_distortion_german,
"epsilon": 0.05,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
else:
privileged_groups = [{'age': 1}]
unprivileged_groups = [{'age': 0}]
dataset_orig = load_preproc_data_german(['age'])
optim_options = {
"distortion_fun": get_distortion_german,
"epsilon": 0.1,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
elif dataset_used == "compas":
if protected_attribute_used == 1:
privileged_groups = [{'sex': 1}]
unprivileged_groups = [{'sex': 0}]
dataset_orig = load_preproc_data_compas(['sex'])
else:
privileged_groups = [{'race': 1}]
unprivileged_groups = [{'race': 0}]
dataset_orig = load_preproc_data_compas(['race'])
optim_options = {
"distortion_fun": get_distortion_compas,
"epsilon": 0.05,
"clist": [0.99, 1.99, 2.99],
"dlist": [.1, 0.05, 0]
}
np.random.seed(1)
dataset_orig_train, dataset_orig_vt = dataset_orig.split([0.7], shuffle=True)
dataset_orig_valid, dataset_orig_test = dataset_orig_vt.split([0.5], shuffle=True)
.shape)
display(Markdown("#### Favorable and unfavorable labels"))
print(dataset_orig_train.favorable_label, dataset_orig_train.unfavorable_label)
display(Markdown("#### Protected attribute names"))
print(dataset_orig_train.protected_attribute_names)
display(Markdown("#### Privileged and unprivileged protected attribute values"))
print(dataset_orig_train.privileged_protected_attributes,
dataset_orig_train.unprivileged_protected_attributes)
display(Markdown("#### Dataset feature names"))
print(dataset_orig_train.feature_names)
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Original training dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_train.mean_difference())
OP = OP.fit(dataset_orig_train)
dataset_transf_train = OP.transform(dataset_orig_train, transform_Y=True)
dataset_transf_train = dataset_orig_train.align_datasets(dataset_transf_train)
_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Transformed training dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_train.mean_difference())
ic_transf_train.mean_difference()) < np.abs(metric_orig_train.mean_difference())
)
metric_orig_test = BinaryLabelDatasetMetric(dataset_orig_test,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Original test dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_orig_test.mean_difference())
atasets(dataset_transf_test)
metric_transf_test = BinaryLabelDatasetMetric(dataset_transf_test,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
display(Markdown("#### Transformed test dataset"))
print(
"Difference in mean outcomes between unprivileged and privileged groups = %f" % metric_transf_test.mean_difference())
ic_transf_test.mean_difference()) < np.abs(metric_orig_test.mean_difference())
ataset_orig_train.features)
y_train = dataset_orig_train.labels.ravel()
lmod = LogisticRegression()
lmod.fit(X_train, y_train)
y_train_pred = lmod.predict(X_train)
pos_ind = np.where(lmod.classes_ == dataset_orig_train.favorable_label)[0][0]
dataset_orig_train_pred = dataset_orig_train.copy()
dataset_orig_train_pred.labels = y_train_pred
taset_orig_valid_pred.features)
y_valid = dataset_orig_valid_pred.labels
dataset_orig_valid_pred.scores = lmod.predict_proba(X_valid)[:, pos_ind].reshape(-1, 1)
dataset_orig_test_pred = dataset_orig_test.copy(deepcopy=True)
X_test = scale_orig.transform(dataset_orig_test_pred.features)
y_test = dataset_orig_test_pred.labels
dataset_orig_test_pred.scores = lmod.predict_proba(X_test)[:, pos_ind].reshape(-1, 1)
erate(class_thresh_arr):
fav_inds = dataset_orig_valid_pred.scores > class_thresh
dataset_orig_valid_pred.labels[fav_inds] = dataset_orig_valid_pred.favorable_label
dataset_orig_valid_pred.labels[~fav_inds] = dataset_orig_valid_pred.unfavorable_label
classified_metric_orig_valid = ClassificationMetric(dataset_orig_valid,
dataset_orig_valid_pred,
unprivileged_groups=unprivileged_groups,
privileged_groups=privileged_groups)
ba_arr[idx] = 0.5 * (classified_metric_orig_valid.true_positive_rate() \
+ classified_metric_orig_valid.true_negative_rate())
best_ind = np.where(ba_arr == np.max(ba_arr))[0][0]
best_class_thresh = class_thresh_arr[best_ind]
print("Best balanced accuracy (no fairness constraints) = %.4f" % np.max(ba_arr))
print("Optimal classification threshold (no fairness constraints) = %.4f" % best_class_thresh)
avg_odds_diff_arr_orig = []
display(Markdown("#### Testing set"))
display(Markdown("##### Raw predictions - No fairness constraints"))
for thresh in tqdm(class_thresh_arr):
fav_inds = dataset_orig_test_pred.scores > thresh
dataset_orig_test_pred.labels[fav_inds] = dataset_orig_test_pred.favorable_label
dataset_orig_test_pred.labels[~fav_inds] = dataset_orig_test_pred.unfavorable_label
if (thresh == best_class_thresh):
disp = True
else:
disp = False
metric_test_bef = compute_metrics(dataset_orig_test, dataset_orig_test_pred,
unprivileged_groups, privileged_groups, disp=disp)
bal_acc_arr_orig.append(metric_test_bef["Balanced accuracy"])
avg_odds_diff_arr_orig.append(metric_test_bef["Average odds difference"])
disp_imp_arr_orig.append(metric_test_bef["Disparate impact"])
fig, ax1 = plt.subplots(figsize=(10, 7))
ax1.plot(class_thresh_arr, bal_acc_arr_orig)
ax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')
ax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')
ax1.xaxis.set_tick_params(labelsize=14)
ax1.yaxis.set_tick_params(labelsize=14)
ax2 = ax1.twinx()
ax2.plot(class_thresh_arr, np.abs(1.0 - np.array(disp_imp_arr_orig)), color='r')
ax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')
ax2.axvline(np.array(class_thresh_arr)[best_ind],
color='k', linestyle=':')
ax2.yaxis.set_tick_params(labelsize=14)
ax2.grid(True)
disp_imp_at_best_bal_acc_orig = np.abs(1.0 - np.array(disp_imp_arr_orig))[best_ind]
n()
lmod.fit(X_train, y_train)
y_train_pred = lmod.predict(X_train)
dataset_transf_train_pred = dataset_transf_train.copy()
dataset_transf_train_pred.labels = y_train_pred
t_pred.features)
y_test = dataset_transf_test_pred.labels
dataset_transf_test_pred.scores = lmod.predict_proba(X_test)[:, pos_ind].reshape(-1, 1)
display(Markdown("#### Predictions from transformed testing data"))
bal_acc_arr_transf = []
disp_imp_arr_transf = []
avg_odds_diff_arr_transf = []
display(Markdown("#### Testing set"))
display(Markdown("##### Transformed predictions - No fairness constraints"))
for thresh in tqdm(class_thresh_arr):
fav_inds = dataset_transf_test_pred.scores > thresh
dataset_transf_test_pred.labels[fav_inds] = dataset_transf_test_pred.favorable_label
dataset_transf_test_pred.labels[~fav_inds] = dataset_transf_test_pred.unfavorable_label
if (thresh == best_class_thresh):
disp = True
else:
disp = False
metric_test_bef = compute_metrics(dataset_transf_test, dataset_transf_test_pred,
unprivileged_groups, privileged_groups, disp=disp)
bal_acc_arr_transf.append(metric_test_bef["Balanced accuracy"])
avg_odds_diff_arr_transf.append(metric_test_bef["Average odds difference"])
disp_imp_arr_transf.append(metric_test_bef["Disparate impact"])
fig, ax1 = plt.subplots(figsize=(10, 7))
ax1.plot(class_thresh_arr, bal_acc_arr_transf)
ax1.set_xlabel('Classification Thresholds', fontsize=16, fontweight='bold')
ax1.set_ylabel('Balanced Accuracy', color='b', fontsize=16, fontweight='bold')
ax1.xaxis.set_tick_params(labelsize=14)
ax1.yaxis.set_tick_params(labelsize=14)
ax2 = ax1.twinx()
ax2.plot(class_thresh_arr, np.abs(1.0 - np.array(disp_imp_arr_transf)), color='r')
ax2.set_ylabel('abs(1-disparate impact)', color='r', fontsize=16, fontweight='bold')
ax2.axvline(np.array(class_thresh_arr)[best_ind],
color='k', linestyle=':')
ax2.yaxis.set_tick_params(labelsize=14)
ax2.grid(True)
disp_imp_at_best_bal_acc_transf = np.abs(1.0 - np.array(disp_imp_arr_transf))[best_ind]
_best_bal_acc_transf < disp_imp_at_best_bal_acc_orig
| true
| true
|
f70754f9689e79c241fc94176e86570974ac06ea
| 257
|
py
|
Python
|
alarm_client/contracts/payment_lib.py
|
StephenCleary/ethereum-alarm-clock
|
73547223b2021638b85e9f5ac2aaff18179598f7
|
[
"MIT"
] | 1
|
2018-01-26T08:00:19.000Z
|
2018-01-26T08:00:19.000Z
|
alarm_client/contracts/payment_lib.py
|
ethereum-alarm-clock/alarm_client
|
021a8562e4ea2e8cfdb6467a8fd6647dbc37ad70
|
[
"MIT"
] | null | null | null |
alarm_client/contracts/payment_lib.py
|
ethereum-alarm-clock/alarm_client
|
021a8562e4ea2e8cfdb6467a8fd6647dbc37ad70
|
[
"MIT"
] | 1
|
2020-06-15T00:57:09.000Z
|
2020-06-15T00:57:09.000Z
|
from web3.contract import Contract
class PaymentLibFactory(Contract):
    """Marker ``Contract`` subclass for PaymentLib contract objects.

    Adds no behaviour of its own; it exists so instances built by
    :func:`get_payment_lib` are distinguishable by type.
    """
    pass
def get_payment_lib(web3, address, abi):
    """Return the PaymentLib contract object deployed at *address*.

    The object is constructed through ``web3.eth.contract`` from the
    supplied *abi*, using ``PaymentLibFactory`` as the factory base
    class.
    """
    contract_kwargs = {
        "abi": abi,
        "address": address,
        "base_contract_factory_class": PaymentLibFactory,
    }
    return web3.eth.contract(**contract_kwargs)
| 18.357143
| 54
| 0.70428
|
from web3.contract import Contract
class PaymentLibFactory(Contract):
    """Marker ``Contract`` subclass used as the PaymentLib factory type."""
    pass
def get_payment_lib(web3, address, abi):
    """Build and return the PaymentLib contract instance at *address*."""
    factory_cls = PaymentLibFactory
    return web3.eth.contract(
        address=address,
        abi=abi,
        base_contract_factory_class=factory_cls,
    )
| true
| true
|
f7075980c08b471b1e04c09cbf6cc53a6847bdb7
| 8,771
|
py
|
Python
|
modules/extractors/pdfUtils.py
|
rotsee/protokollen
|
a001a1db86df57adcf5c53c95c4c2fae426340f1
|
[
"MIT",
"Apache-2.0",
"CC0-1.0",
"Unlicense"
] | 4
|
2015-03-22T20:23:36.000Z
|
2015-12-09T14:31:34.000Z
|
modules/extractors/pdfUtils.py
|
rotsee/protokollen
|
a001a1db86df57adcf5c53c95c4c2fae426340f1
|
[
"MIT",
"Apache-2.0",
"CC0-1.0",
"Unlicense"
] | 4
|
2015-03-24T10:42:00.000Z
|
2016-06-21T08:44:01.000Z
|
modules/extractors/pdfUtils.py
|
rotsee/protokollen
|
a001a1db86df57adcf5c53c95c4c2fae426340f1
|
[
"MIT",
"Apache-2.0",
"CC0-1.0",
"Unlicense"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Various helper methods for PDF extraction.
"""
# This file contains mostly unused leftovers from pdf.py.
class Stream(object):
    """Thin wrapper around a PdfMiner stream object.

    Attribute values are normalized on access: stringified, stripped of
    leading/trailing ``/`` and ``_`` characters, and lower-cased.
    """

    def __init__(self, stream):
        # Underlying PdfMiner stream; any mapping-like object works.
        self.stream = stream

    def get(self, attribute):
        """Return the cleaned-up value of *attribute*, or ``None``.

        A missing attribute or any conversion failure yields ``None``
        (best-effort lookup).
        """
        try:
            raw = self.stream[attribute]
            return str(raw).strip("/_").lower()
        except Exception:
            return None
"""
from pdfminer.pdftypes import resolve1, PDFObjRef
from binascii import b2a_hex
import zlib
from pdfminer.ccitt import ccittfaxdecode
hexadecimal = {'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6,
'7': 7, '8': 8, '9': 9, 'a': 10, 'b': 11, 'c': 12,
'd': 13, 'e': 14, 'f': 15}
base85m4 = long(pow(85, 4))
base85m3 = long(pow(85, 3))
base85m2 = long(pow(85, 2))
def get_colormode(color_space, bits=None):
color_mode = None
if isinstance(color_space, list):
color_space_family = _clean_up_stream_attribute(color_space[0])
else:
color_space_family = _clean_up_stream_attribute(color_space)
if color_space_family == "indexed":
color_schema = color_space[1]
if isinstance(color_schema, PDFObjRef):
color_schema = color_schema.resolve()
if isinstance(color_schema, list):
color_schema = color_schema[0]
color_schema = _clean_up_stream_attribute(color_schema)
bits = color_space[2] or bits
if isinstance(bits, PDFObjRef):
bits = bits.resolve()
if color_schema == "devicegray" and bits == 1:
color_mode = "1"
elif color_schema == "devicegray" and bits == 8:
color_mode = "L"
elif color_schema == "iccbased":
# FIXME This just happens to work often enough. We should
# let PDFMiner take care of all this work, though, rather
# than implementníng all the logic (this is complex!) ourselves
color_mode = "L"
elif color_space_family == "pattern":
pass
elif color_space_family == "separation":
pass
elif color_space_family == "devicen":
pass
elif color_space_family == "calgray":
pass
elif color_space_family == "calrgb":
pass
elif color_space_family == "lab":
pass
elif color_space_family == "iccbased":
color_mode = "L"
elif color_space_family == "devicegray":
if bits == 8:
color_mode = "L"
else:
color_mode = "1"
elif color_space_family == "devicergb":
color_mode = "RGB"
elif color_space_family == "devicecmyk":
pass
return color_mode
def _clean_up_stream_attribute(self, attribute):
try:
return str(attribute).strip("/_").lower()
except Exception:
return None
def _decompress(self):
Decompress the image raw data in this image
if self._filter == 'asciihexdecode':
self._raw_data = self._asciihexdecode(self._raw_data)
elif self._filter == 'ascii85decode':
self._raw_data = self._ascii85decode(self._raw_data)
elif self._filter == 'flatedecode':
self._raw_data = zlib.decompress(self._raw_data)
elif self._filter == "ccittfaxdecode":
self._raw_data = ccittfaxdecode(self._raw_data, self._filter_params)
return None
def _determine_image_type(self, stream_first_4_bytes):
Find out the image file type based on the magic number
file_type = None
bytes_as_hex = b2a_hex(stream_first_4_bytes)
if bytes_as_hex.startswith('ffd8'):
file_type = 'jpeg'
elif bytes_as_hex == '89504e47':
file_type = 'png'
elif bytes_as_hex == '47494638':
file_type = 'gif'
elif bytes_as_hex.startswith('424d'):
file_type = 'bmp'
return file_type
def _clean_hexadecimal(self, a):
Read the string, converting the pairs of digits to
characters
b = ''
shift = 4
value = 0
try:
for i in a:
value = value | (hexadecimal[i] << shift)
shift = 4 - shift
if shift == 4:
b = b + chr(value)
value = 0
except ValueError:
raise PDFError("Problem with hexadecimal string %s" % a)
return b
def _asciihexdecode(self, text):
at = text.find('>')
return self._clean_hexadecimal(text[:at].lower())
def _ascii85decode(self, text):
end = text.find('~>')
new = []
i = 0
ch = 0
value = 0
while i < end:
if text[i] == 'z':
if ch != 0:
raise PDFError('Badly encoded ASCII85 format.')
new.append('\000\000\000\000')
ch = 0
value = 0
else:
v = ord(text[i])
if v >= 33 and v <= 117:
if ch == 0:
value = ((v - 33) * base85m4)
elif ch == 1:
value = value + ((v - 33) * base85m3)
elif ch == 2:
value = value + ((v - 33) * base85m2)
elif ch == 3:
value = value + ((v - 33) * 85)
elif ch == 4:
value = value + (v - 33)
c1 = int(value >> 24)
c2 = int((value >> 16) & 255)
c3 = int((value >> 8) & 255)
c4 = int(value & 255)
new.append(chr(c1) + chr(c2) + chr(c3) + chr(c4))
ch = (ch + 1) % 5
i = i + 1
if ch != 0:
c = chr(value >> 24) + chr((value >> 16) & 255) + \
chr((value >> 8) & 255) + chr(value & 255)
new.append(c[:ch - 1])
return "".join(new)
def _get_image(self):
Return an image from this image data.
temp_image = None
image_data = self._stream.get_data()
print "len(image_data)",
print len(image_data)
try:
# Assume war image data
# temp_image = Image.frombuffer(self.color_mode,
# (self.width, self.height),
# self._raw_data, "raw",
# self.color_mode, 0, 1)
temp_image = Image.frombuffer(self.color_mode,
(self.width, self.height),
image_data, "raw",
self.color_mode, 0, 1)
except Exception:
# Not raw image data.
# Can we make sense of this stream some other way?
try:
import StringIO
# temp_image = Image.open(StringIO.StringIO(self._raw_data))
temp_image = Image.open(StringIO.StringIO(image_data))
except Exception:
# PIL failed us. Try to print data to a file, and open it
# file_ext = self._determine_image_type(self._raw_data[0:4])
file_ext = self._determine_image_type(image_data[0:4])
if file_ext:
# TODO use tempfile
file_name = os_sep.join(["header", file_ext])
with open("temp/" + file_name, "w") as image_file:
# image_file.write(self._raw_data)
image_file.write(image_data)
temp_image = Image.open(image_file)
return temp_image or None
"""
"""
if "F" in image_obj.stream:
self._filter = self._clean_up_stream_attribute(image_obj.stream["F"])
else:
self._filter = self._clean_up_stream_attribute(image_obj.stream["Filter"])
if "DP" in image_obj.stream:
self._filter_params = image_obj.stream["DP"]
elif "DecodeParms" in image_obj.stream:
self._filter_params = image_obj.stream["DecodeParms"]
elif "FDecodeParms" in image_obj.stream:
self._filter_params = image_obj.stream["FDecodeParms"]
self._bits = image_obj.stream["BitsPerComponent"]
self._raw_data = image_obj.stream.get_rawdata()
if self._filter is not None:
self._decompress()
if "CS" in image_obj.stream:
self.colorspace = image_obj.stream["CS"]
elif "ColorSpace" in image_obj.stream:
self.colorspace = image_obj.stream["ColorSpace"]
else:
self.colorspace = "DeviceGray"
if isinstance(self.colorspace, PDFObjRef):
self.colorspace = self.colorspace.resolve()
self.color_mode = self.get_colormode(self.colorspace,
bits=self._bits)
if self.color_mode is None:
print self.colorspace
raise Exception("No method for handling colorspace")
"""
| 34.128405
| 86
| 0.555581
|
class Stream(object):
    # Lightweight adapter over a PdfMiner stream mapping.

    def __init__(self, stream):
        self.stream = stream

    def get(self, attribute):
        # Normalize the attribute value: str(), strip '/'/'_', lower-case.
        # Any lookup or conversion failure maps to None (best-effort).
        try:
            return str(self.stream[attribute]).strip("/_").lower()
        except Exception:
            return None
| true
| true
|
f7075a14af26e3ded7e37c9d092ce1308362cd4b
| 129
|
py
|
Python
|
yourName2.py
|
pazyko/python_home
|
1e13a4d540ea56dd710d583aeea061a984d3fd7a
|
[
"Apache-2.0"
] | null | null | null |
yourName2.py
|
pazyko/python_home
|
1e13a4d540ea56dd710d583aeea061a984d3fd7a
|
[
"Apache-2.0"
] | null | null | null |
yourName2.py
|
pazyko/python_home
|
1e13a4d540ea56dd710d583aeea061a984d3fd7a
|
[
"Apache-2.0"
] | null | null | null |
# Simple gatekeeper loop: keep prompting until the user types the
# literal string 'your name' (case-sensitive exact match).
while True:
    print('Please type your name')  # fixed typo: was 'Plese'
    name = input()
    if name == 'your name':
        break
print('Thank you!')
| 18.428571
| 34
| 0.55814
|
# Prompt repeatedly until the literal string 'your name' is entered.
while True:
    print('Plese type your name')  # NOTE(review): 'Plese' is a typo for 'Please'
    name=input()
    if name=='your name':
        # Exact (case-sensitive) match ends the loop.
        break
print('Thank you!')
| true
| true
|
f7075d8698b9baf537da574e190efb77bafe24e9
| 18,834
|
py
|
Python
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/lsp_bit/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 64
|
2016-10-20T15:47:18.000Z
|
2021-11-11T11:57:32.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/lsp_bit/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 126
|
2016-10-05T10:36:14.000Z
|
2019-05-15T08:43:23.000Z
|
napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/isis/global_/lsp_bit/__init__.py
|
ckishimo/napalm-yang
|
8f2bd907bd3afcde3c2f8e985192de74748baf6c
|
[
"Apache-2.0"
] | 63
|
2016-11-07T15:23:08.000Z
|
2021-09-22T14:41:16.000Z
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import overload_bit
from . import attached_bit
class lsp_bit(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    YANG Description: This container defines ISIS LSP Operational Bits.
    """
    # NOTE(review): machine-generated pyangbind code -- regenerate from the
    # YANG model rather than editing by hand.
    __slots__ = ("_path_helper", "_extmethods", "__overload_bit", "__attached_bit")
    _yang_name = "lsp-bit"
    _pybind_generated_by = "container"
    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Both children are YANG containers wrapped in YANGDynClass so that
        # changes are tracked and instance paths can be registered.
        self.__overload_bit = YANGDynClass(
            base=overload_bit.overload_bit,
            is_container="container",
            yang_name="overload-bit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__attached_bit = YANGDynClass(
            base=attached_bit.attached_bit,
            is_container="container",
            yang_name="attached-bit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        load = kwargs.pop("load", None)
        # A single positional argument may supply an object whose elements
        # are copied into this container (duck-typed: must expose every
        # element in _pyangbind_elements).
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    # Only copy elements that differ from their defaults.
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)
    def _path(self):
        # Instance path when attached to a parent; otherwise the static
        # schema path of this container.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "global",
                "lsp-bit",
            ]
    def _get_overload_bit(self):
        """
        Getter method for overload_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit (container)
        YANG Description: This container defines Overload Bit configuration.
        """
        return self.__overload_bit
    def _set_overload_bit(self, v, load=False):
        """
        Setter method for overload_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_overload_bit is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_overload_bit() directly.
        YANG Description: This container defines Overload Bit configuration.
        """
        if hasattr(v, "_utype"):
            # Unwrap a previously-wrapped YANGDynClass value before re-wrapping.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=overload_bit.overload_bit,
                is_container="container",
                yang_name="overload-bit",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """overload_bit must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=overload_bit.overload_bit, is_container='container', yang_name="overload-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__overload_bit = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_overload_bit(self):
        # Restore the child container to a fresh default instance.
        self.__overload_bit = YANGDynClass(
            base=overload_bit.overload_bit,
            is_container="container",
            yang_name="overload-bit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
    def _get_attached_bit(self):
        """
        Getter method for attached_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/attached_bit (container)
        YANG Description: This container defines Attached Bit.
        """
        return self.__attached_bit
    def _set_attached_bit(self, v, load=False):
        """
        Setter method for attached_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/attached_bit (container)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_attached_bit is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_attached_bit() directly.
        YANG Description: This container defines Attached Bit.
        """
        if hasattr(v, "_utype"):
            # Unwrap a previously-wrapped YANGDynClass value before re-wrapping.
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=attached_bit.attached_bit,
                is_container="container",
                yang_name="attached-bit",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """attached_bit must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=attached_bit.attached_bit, is_container='container', yang_name="attached-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__attached_bit = t
        if hasattr(self, "_set"):
            self._set()
    def _unset_attached_bit(self):
        # Restore the child container to a fresh default instance.
        self.__attached_bit = YANGDynClass(
            base=attached_bit.attached_bit,
            is_container="container",
            yang_name="attached-bit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
    # Public properties exposing the generated getter/setter pairs.
    overload_bit = __builtin__.property(_get_overload_bit, _set_overload_bit)
    attached_bit = __builtin__.property(_get_attached_bit, _set_attached_bit)
    _pyangbind_elements = OrderedDict(
        [("overload_bit", overload_bit), ("attached_bit", attached_bit)]
    )
from . import overload_bit
from . import attached_bit
class lsp_bit(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/global/lsp-bit. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container defines ISIS LSP Operational Bits.
"""
__slots__ = ("_path_helper", "_extmethods", "__overload_bit", "__attached_bit")
_yang_name = "lsp-bit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__overload_bit = YANGDynClass(
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__attached_bit = YANGDynClass(
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"lsp-bit",
]
def _get_overload_bit(self):
"""
Getter method for overload_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit (container)
YANG Description: This container defines Overload Bit configuration.
"""
return self.__overload_bit
def _set_overload_bit(self, v, load=False):
"""
Setter method for overload_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/overload_bit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_overload_bit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_overload_bit() directly.
YANG Description: This container defines Overload Bit configuration.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """overload_bit must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=overload_bit.overload_bit, is_container='container', yang_name="overload-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__overload_bit = t
if hasattr(self, "_set"):
self._set()
def _unset_overload_bit(self):
self.__overload_bit = YANGDynClass(
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_attached_bit(self):
"""
Getter method for attached_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/attached_bit (container)
YANG Description: This container defines Attached Bit.
"""
return self.__attached_bit
def _set_attached_bit(self, v, load=False):
"""
Setter method for attached_bit, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/global/lsp_bit/attached_bit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_attached_bit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_attached_bit() directly.
YANG Description: This container defines Attached Bit.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """attached_bit must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=attached_bit.attached_bit, is_container='container', yang_name="attached-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__attached_bit = t
if hasattr(self, "_set"):
self._set()
def _unset_attached_bit(self):
self.__attached_bit = YANGDynClass(
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
overload_bit = __builtin__.property(_get_overload_bit, _set_overload_bit)
attached_bit = __builtin__.property(_get_attached_bit, _set_attached_bit)
_pyangbind_elements = OrderedDict(
[("overload_bit", overload_bit), ("attached_bit", attached_bit)]
)
| 39.734177
| 395
| 0.613359
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import overload_bit
from . import attached_bit
class lsp_bit(PybindBase):
__slots__ = ("_path_helper", "_extmethods", "__overload_bit", "__attached_bit")
_yang_name = "lsp-bit"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__overload_bit = YANGDynClass(
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__attached_bit = YANGDynClass(
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"global",
"lsp-bit",
]
def _get_overload_bit(self):
return self.__overload_bit
def _set_overload_bit(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """overload_bit must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=overload_bit.overload_bit, is_container='container', yang_name="overload-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__overload_bit = t
if hasattr(self, "_set"):
self._set()
def _unset_overload_bit(self):
self.__overload_bit = YANGDynClass(
base=overload_bit.overload_bit,
is_container="container",
yang_name="overload-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_attached_bit(self):
return self.__attached_bit
def _set_attached_bit(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """attached_bit must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=attached_bit.attached_bit, is_container='container', yang_name="attached-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__attached_bit = t
if hasattr(self, "_set"):
self._set()
def _unset_attached_bit(self):
self.__attached_bit = YANGDynClass(
base=attached_bit.attached_bit,
is_container="container",
yang_name="attached-bit",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
overload_bit = __builtin__.property(_get_overload_bit, _set_overload_bit)
attached_bit = __builtin__.property(_get_attached_bit, _set_attached_bit)
_pyangbind_elements = OrderedDict(
[("overload_bit", overload_bit), ("attached_bit", attached_bit)]
)
from . import overload_bit
from . import attached_bit
class lsp_bit(PybindBase):
    """Auto-generated pyangbind container for the ISIS global ``lsp-bit`` node.

    Holds two child containers, ``overload-bit`` and ``attached-bit``, from
    the openconfig-network-instance YANG module. Do not hand-edit: this class
    follows the pyangbind generated-code layout (getter/setter/unset triple
    plus a property per child).
    """

    __slots__ = ("_path_helper", "_extmethods", "__overload_bit", "__attached_bit")

    _yang_name = "lsp-bit"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Child containers are wrapped in YANGDynClass so that assignments
        # are validated against the generated YANG bindings.
        self.__overload_bit = YANGDynClass(
            base=overload_bit.overload_bit,
            is_container="container",
            yang_name="overload-bit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )
        self.__attached_bit = YANGDynClass(
            base=attached_bit.attached_bit,
            is_container="container",
            yang_name="attached-bit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

        # Optional copy-constructor: a single positional argument must carry
        # all of this container's elements; changed elements are copied over.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this node in the YANG tree; falls back to the static
        # location when the container is not attached to a parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "isis",
                "global",
                "lsp-bit",
            ]

    def _get_overload_bit(self):
        # Getter for the overload-bit child container.
        return self.__overload_bit

    def _set_overload_bit(self, v, load=False):
        # Setter: re-wraps the supplied value in YANGDynClass so invalid
        # values raise a descriptive ValueError.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=overload_bit.overload_bit,
                is_container="container",
                yang_name="overload-bit",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """overload_bit must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=overload_bit.overload_bit, is_container='container', yang_name="overload-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__overload_bit = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_overload_bit(self):
        # Reset overload-bit to a fresh default container.
        self.__overload_bit = YANGDynClass(
            base=overload_bit.overload_bit,
            is_container="container",
            yang_name="overload-bit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    def _get_attached_bit(self):
        # Getter for the attached-bit child container.
        return self.__attached_bit

    def _set_attached_bit(self, v, load=False):
        # Setter: mirrors _set_overload_bit for the attached-bit child.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=attached_bit.attached_bit,
                is_container="container",
                yang_name="attached-bit",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                extensions=None,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="container",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """attached_bit must be of a type compatible with container""",
                    "defined-type": "container",
                    "generated-type": """YANGDynClass(base=attached_bit.attached_bit, is_container='container', yang_name="attached-bit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
                }
            )
        self.__attached_bit = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_attached_bit(self):
        # Reset attached-bit to a fresh default container.
        self.__attached_bit = YANGDynClass(
            base=attached_bit.attached_bit,
            is_container="container",
            yang_name="attached-bit",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions=None,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="container",
            is_config=True,
        )

    overload_bit = __builtin__.property(_get_overload_bit, _set_overload_bit)
    attached_bit = __builtin__.property(_get_attached_bit, _set_attached_bit)

    _pyangbind_elements = OrderedDict(
        [("overload_bit", overload_bit), ("attached_bit", attached_bit)]
    )
| true
| true
|
f7075e0814a6b52830ec7abfbaab90ad9441a256
| 732
|
py
|
Python
|
load_pkl_model.py
|
Rayaction/ECO-paddle
|
28c9adf0f6626dd8d262848fd6a2d7147e76048e
|
[
"MIT"
] | null | null | null |
load_pkl_model.py
|
Rayaction/ECO-paddle
|
28c9adf0f6626dd8d262848fd6a2d7147e76048e
|
[
"MIT"
] | null | null | null |
load_pkl_model.py
|
Rayaction/ECO-paddle
|
28c9adf0f6626dd8d262848fd6a2d7147e76048e
|
[
"MIT"
] | null | null | null |
import pickle
import sys
sys.path.append("..")
from model import ECO
import paddle.fluid as fluid

# The pretrained model exceeds the 150 MB upload threshold, so it was split
# into two pickles (seg0/seg1) that are merged here before re-saving.
# Fix: the original opened both files without ever closing them and walked
# the dicts with unused enumerate() indices; use context managers and
# dict.update() instead.
model_out = dict()
with open('seg0.pkl', 'rb') as f0:
    model_out.update(pickle.load(f0))
with open('seg1.pkl', 'rb') as f1:
    model_out.update(pickle.load(f1))

with fluid.dygraph.guard():
    # Rebuild the network, load the merged state dict, and save it in
    # Paddle's native dygraph checkpoint format.
    paddle_model = ECO.ECO(num_classes=101, num_segments=24)
    paddle_model.load_dict(model_out)
    fluid.dygraph.save_dygraph(paddle_model.state_dict(), 'ECO_FULL_RGB__seg16')
    print('finished')
| 31.826087
| 123
| 0.714481
|
import pickle
import sys
sys.path.append("..")
from model import ECO
import paddle.fluid as fluid
f0 = open('seg0.pkl', 'rb')
f1 = open('seg1.pkl', 'rb')
model_out = dict()
model_0 = pickle.load(f0)
model_1 = pickle.load(f1)
for i,key in enumerate(model_0):
model_out[key]=model_0[key]
for i,key in enumerate(model_1):
model_out[key]=model_1[key]
with fluid.dygraph.guard():
paddle_model = ECO.ECO(num_classes=101, num_segments=24)
paddle_model.load_dict(model_out)
fluid.dygraph.save_dygraph(paddle_model.state_dict(), 'ECO_FULL_RGB__seg16')
print('finished')
| true
| true
|
f7075e0f367ebcead4beaff14fec4bae9eb78f51
| 2,959
|
py
|
Python
|
patron/tests/unit/cert/test_rpcapi.py
|
casbin/openstack-patron
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
[
"Apache-2.0"
] | null | null | null |
patron/tests/unit/cert/test_rpcapi.py
|
casbin/openstack-patron
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
[
"Apache-2.0"
] | null | null | null |
patron/tests/unit/cert/test_rpcapi.py
|
casbin/openstack-patron
|
b41b1262f3a52c8cc9f6b6bdf87be5a1abcf6d25
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit Tests for patron.cert.rpcapi
"""
import contextlib
import mock
from oslo_config import cfg
from patron.cert import rpcapi as cert_rpcapi
from patron import context
from patron import test
CONF = cfg.CONF
class CertRpcAPITestCase(test.NoDBTestCase):
    """Tests that CertAPI methods issue the expected RPC casts/calls.

    Each test drives one public CertAPI method through a mocked RPC client
    and asserts the method name and kwargs forwarded over RPC.
    """

    def _test_cert_api(self, method, **kwargs):
        # Shared driver: invoke `method` on a CertAPI whose RPC client is
        # mocked, then verify the RPC layer saw exactly one prepare() and
        # one call() with the original method name and kwargs.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        rpcapi = cert_rpcapi.CertAPI()
        self.assertIsNotNone(rpcapi.client)
        self.assertEqual(rpcapi.client.target.topic, CONF.cert_topic)

        orig_prepare = rpcapi.client.prepare

        # NOTE(review): contextlib.nested is Python 2 only -- this module
        # predates Python 3 support; confirm before porting.
        with contextlib.nested(
            mock.patch.object(rpcapi.client, 'call'),
            mock.patch.object(rpcapi.client, 'prepare'),
            mock.patch.object(rpcapi.client, 'can_send_version'),
        ) as (
            rpc_mock, prepare_mock, csv_mock
        ):
            prepare_mock.return_value = rpcapi.client
            rpc_mock.return_value = 'foo'
            # Delegate version checks to the real (unpatched) client.
            csv_mock.side_effect = (
                lambda v: orig_prepare().can_send_version())

            retval = getattr(rpcapi, method)(ctxt, **kwargs)
            self.assertEqual(retval, rpc_mock.return_value)
            prepare_mock.assert_called_once_with()
            rpc_mock.assert_called_once_with(ctxt, method, **kwargs)

    def test_revoke_certs_by_user(self):
        self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')

    def test_revoke_certs_by_project(self):
        self._test_cert_api('revoke_certs_by_project',
                            project_id='fake_project_id')

    def test_revoke_certs_by_user_and_project(self):
        self._test_cert_api('revoke_certs_by_user_and_project',
                            user_id='fake_user_id',
                            project_id='fake_project_id')

    def test_generate_x509_cert(self):
        self._test_cert_api('generate_x509_cert',
                            user_id='fake_user_id',
                            project_id='fake_project_id')

    def test_fetch_ca(self):
        self._test_cert_api('fetch_ca', project_id='fake_project_id')

    def test_fetch_crl(self):
        self._test_cert_api('fetch_crl', project_id='fake_project_id')

    def test_decrypt_text(self):
        self._test_cert_api('decrypt_text',
                            project_id='fake_project_id', text='blah')
| 34.811765
| 78
| 0.6634
|
import contextlib
import mock
from oslo_config import cfg
from patron.cert import rpcapi as cert_rpcapi
from patron import context
from patron import test
CONF = cfg.CONF
class CertRpcAPITestCase(test.NoDBTestCase):
def _test_cert_api(self, method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
rpcapi = cert_rpcapi.CertAPI()
self.assertIsNotNone(rpcapi.client)
self.assertEqual(rpcapi.client.target.topic, CONF.cert_topic)
orig_prepare = rpcapi.client.prepare
with contextlib.nested(
mock.patch.object(rpcapi.client, 'call'),
mock.patch.object(rpcapi.client, 'prepare'),
mock.patch.object(rpcapi.client, 'can_send_version'),
) as (
rpc_mock, prepare_mock, csv_mock
):
prepare_mock.return_value = rpcapi.client
rpc_mock.return_value = 'foo'
csv_mock.side_effect = (
lambda v: orig_prepare().can_send_version())
retval = getattr(rpcapi, method)(ctxt, **kwargs)
self.assertEqual(retval, rpc_mock.return_value)
prepare_mock.assert_called_once_with()
rpc_mock.assert_called_once_with(ctxt, method, **kwargs)
def test_revoke_certs_by_user(self):
self._test_cert_api('revoke_certs_by_user', user_id='fake_user_id')
def test_revoke_certs_by_project(self):
self._test_cert_api('revoke_certs_by_project',
project_id='fake_project_id')
def test_revoke_certs_by_user_and_project(self):
self._test_cert_api('revoke_certs_by_user_and_project',
user_id='fake_user_id',
project_id='fake_project_id')
def test_generate_x509_cert(self):
self._test_cert_api('generate_x509_cert',
user_id='fake_user_id',
project_id='fake_project_id')
def test_fetch_ca(self):
self._test_cert_api('fetch_ca', project_id='fake_project_id')
def test_fetch_crl(self):
self._test_cert_api('fetch_crl', project_id='fake_project_id')
def test_decrypt_text(self):
self._test_cert_api('decrypt_text',
project_id='fake_project_id', text='blah')
| true
| true
|
f7075e944abb59672f7df904d36157a6ba3e5bfa
| 1,750
|
py
|
Python
|
otp/level/LevelMgr.py
|
LittleNed/toontown-stride
|
1252a8f9a8816c1810106006d09c8bdfe6ad1e57
|
[
"Apache-2.0"
] | 3
|
2020-01-02T08:43:36.000Z
|
2020-07-05T08:59:02.000Z
|
otp/level/LevelMgr.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | null | null | null |
otp/level/LevelMgr.py
|
NoraTT/Historical-Commits-Project-Altis-Source
|
fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179
|
[
"Apache-2.0"
] | 4
|
2019-06-20T23:45:23.000Z
|
2020-10-14T20:30:15.000Z
|
from toontown.toonbase.ToonPythonUtil import Functor
from otp.level import LevelMgrBase
class LevelMgr(LevelMgrBase.LevelMgrBase):
    """Tracks the zone entities of a level and keeps the bidirectional
    zoneNum <-> zoneId lookup tables on the level in sync as zones are
    created and destroyed."""

    def __init__(self, level, entId):
        LevelMgrBase.LevelMgrBase.__init__(self, level, entId)
        self.geom = loader.loadModel(self.modelFilename)
        if not self.geom:
            # NOTE(review): dropping into the debugger is the only failure
            # handling when the model fails to load -- looks like leftover
            # debug code; consider raising instead.
            import pdb
            pdb.set_trace()
        self.zoneNums = []
        # Bidirectional maps between zone entity ids and network zoneIds.
        self.level.zoneNum2zoneId = {}
        self.level.zoneId2zoneNum = {}
        self.accept(self.level.getEntityOfTypeCreateEvent('zone'), self.handleZoneCreated)

    def destroy(self):
        # Tear down in reverse: drop the lookup tables, free the geometry,
        # then let the base class clean up.
        del self.level.zoneIds
        del self.level.zoneId2zoneNum
        del self.level.zoneNum2zoneId
        self.geom.removeNode()
        del self.geom
        LevelMgrBase.LevelMgrBase.destroy(self)

    def handleZoneCreated(self, entId):
        # Register the new zone and reassign ids so numbering stays stable.
        zoneEnt = self.level.getEntity(entId)
        self.zoneNums.append(zoneEnt.entId)
        self.privAssignZoneIds()
        self.accept(self.level.getEntityDestroyEvent(entId), Functor(self.handleZoneDestroy, entId))

    def handleZoneDestroy(self, entId):
        # Remove the zone from both lookup tables and renumber the rest.
        zoneEnt = self.level.getEntity(entId)
        del self.level.zoneId2zoneNum[self.level.zoneNum2zoneId[zoneEnt.entId]]
        del self.level.zoneNum2zoneId[zoneEnt.entId]
        self.zoneNums.remove(zoneEnt.entId)
        self.privAssignZoneIds()

    def privAssignZoneIds(self):
        # Deterministically map sorted zone nums onto the level's zoneIds.
        self.zoneNums.sort()
        for i in xrange(len(self.zoneNums)):
            zoneNum = self.zoneNums[i]
            zoneEnt = self.level.getEntity(zoneNum)
            zoneId = self.level.zoneIds[i]
            zoneEnt.setZoneId(zoneId)
            self.level.zoneNum2zoneId[zoneNum] = zoneId
            self.level.zoneId2zoneNum[zoneId] = zoneNum
| 37.234043
| 100
| 0.668
|
from toontown.toonbase.ToonPythonUtil import Functor
from otp.level import LevelMgrBase
class LevelMgr(LevelMgrBase.LevelMgrBase):
def __init__(self, level, entId):
LevelMgrBase.LevelMgrBase.__init__(self, level, entId)
self.geom = loader.loadModel(self.modelFilename)
if not self.geom:
import pdb
pdb.set_trace()
self.zoneNums = []
self.level.zoneNum2zoneId = {}
self.level.zoneId2zoneNum = {}
self.accept(self.level.getEntityOfTypeCreateEvent('zone'), self.handleZoneCreated)
def destroy(self):
del self.level.zoneIds
del self.level.zoneId2zoneNum
del self.level.zoneNum2zoneId
self.geom.removeNode()
del self.geom
LevelMgrBase.LevelMgrBase.destroy(self)
def handleZoneCreated(self, entId):
zoneEnt = self.level.getEntity(entId)
self.zoneNums.append(zoneEnt.entId)
self.privAssignZoneIds()
self.accept(self.level.getEntityDestroyEvent(entId), Functor(self.handleZoneDestroy, entId))
def handleZoneDestroy(self, entId):
zoneEnt = self.level.getEntity(entId)
del self.level.zoneId2zoneNum[self.level.zoneNum2zoneId[zoneEnt.entId]]
del self.level.zoneNum2zoneId[zoneEnt.entId]
self.zoneNums.remove(zoneEnt.entId)
self.privAssignZoneIds()
def privAssignZoneIds(self):
self.zoneNums.sort()
for i in xrange(len(self.zoneNums)):
zoneNum = self.zoneNums[i]
zoneEnt = self.level.getEntity(zoneNum)
zoneId = self.level.zoneIds[i]
zoneEnt.setZoneId(zoneId)
self.level.zoneNum2zoneId[zoneNum] = zoneId
self.level.zoneId2zoneNum[zoneId] = zoneNum
| true
| true
|
f7075fe574053a5b93e97ee37753e0601d4ae576
| 1,708
|
py
|
Python
|
src/pwmio.py
|
domdfcoding/Adafruit_Blinka
|
8a25ee484e7aed8c58e4366b3fca78083bc61422
|
[
"MIT"
] | 294
|
2018-06-30T19:08:27.000Z
|
2022-03-26T21:08:47.000Z
|
src/pwmio.py
|
domdfcoding/Adafruit_Blinka
|
8a25ee484e7aed8c58e4366b3fca78083bc61422
|
[
"MIT"
] | 421
|
2018-06-30T20:54:46.000Z
|
2022-03-31T15:08:37.000Z
|
src/pwmio.py
|
domdfcoding/Adafruit_Blinka
|
8a25ee484e7aed8c58e4366b3fca78083bc61422
|
[
"MIT"
] | 234
|
2018-07-23T18:49:16.000Z
|
2022-03-28T16:59:48.000Z
|
"""
`pwmio` - Support for PWM based protocols
===========================================================
See `CircuitPython:pwmio` in CircuitPython for more details.
Not supported by all boards.
* Author(s): Melissa LeBlanc-Williams
"""
import sys
from adafruit_blinka.agnostic import detector
# pylint: disable=unused-import
if detector.board.any_raspberry_pi:
from adafruit_blinka.microcontroller.bcm283x.pulseio.PWMOut import PWMOut
elif detector.board.any_coral_board:
from adafruit_blinka.microcontroller.generic_linux.sysfs_pwmout import PWMOut
elif detector.board.any_giant_board:
from adafruit_blinka.microcontroller.generic_linux.sysfs_pwmout import PWMOut
elif detector.board.any_beaglebone:
from adafruit_blinka.microcontroller.am335x.sysfs_pwmout import PWMOut
elif detector.board.any_rock_pi_board:
from adafruit_blinka.microcontroller.rockchip.PWMOut import PWMOut
elif detector.board.binho_nova:
from adafruit_blinka.microcontroller.nova.pwmout import PWMOut
elif detector.board.greatfet_one:
from adafruit_blinka.microcontroller.nxp_lpc4330.pwmout import PWMOut
elif detector.board.any_lubancat:
from adafruit_blinka.microcontroller.generic_linux.sysfs_pwmout import PWMOut
elif detector.board.pico_u2if:
from adafruit_blinka.microcontroller.rp2040_u2if.pwmio import PWMOut
elif (
detector.board.feather_u2if
or detector.board.qtpy_u2if
or detector.board.itsybitsy_u2if
or detector.board.macropad_u2if
or detector.board.qt2040_trinkey_u2if
):
from adafruit_blinka.microcontroller.rp2040_u2if.pwmio import PWMOut
elif "sphinx" in sys.modules:
pass
else:
raise NotImplementedError("pwmio not supported for this board.")
| 37.130435
| 81
| 0.792155
|
import sys
from adafruit_blinka.agnostic import detector
if detector.board.any_raspberry_pi:
from adafruit_blinka.microcontroller.bcm283x.pulseio.PWMOut import PWMOut
elif detector.board.any_coral_board:
from adafruit_blinka.microcontroller.generic_linux.sysfs_pwmout import PWMOut
elif detector.board.any_giant_board:
from adafruit_blinka.microcontroller.generic_linux.sysfs_pwmout import PWMOut
elif detector.board.any_beaglebone:
from adafruit_blinka.microcontroller.am335x.sysfs_pwmout import PWMOut
elif detector.board.any_rock_pi_board:
from adafruit_blinka.microcontroller.rockchip.PWMOut import PWMOut
elif detector.board.binho_nova:
from adafruit_blinka.microcontroller.nova.pwmout import PWMOut
elif detector.board.greatfet_one:
from adafruit_blinka.microcontroller.nxp_lpc4330.pwmout import PWMOut
elif detector.board.any_lubancat:
from adafruit_blinka.microcontroller.generic_linux.sysfs_pwmout import PWMOut
elif detector.board.pico_u2if:
from adafruit_blinka.microcontroller.rp2040_u2if.pwmio import PWMOut
elif (
detector.board.feather_u2if
or detector.board.qtpy_u2if
or detector.board.itsybitsy_u2if
or detector.board.macropad_u2if
or detector.board.qt2040_trinkey_u2if
):
from adafruit_blinka.microcontroller.rp2040_u2if.pwmio import PWMOut
elif "sphinx" in sys.modules:
pass
else:
raise NotImplementedError("pwmio not supported for this board.")
| true
| true
|
f7075ff2b3abe58c51e44cf54aa559f3bfa6ae2f
| 1,295
|
py
|
Python
|
CV/migrations/0005_auto_20180903_1348.py
|
bdribault/Perso
|
cdf115f5901a845adc6222eddfbda9536847dad0
|
[
"MIT"
] | null | null | null |
CV/migrations/0005_auto_20180903_1348.py
|
bdribault/Perso
|
cdf115f5901a845adc6222eddfbda9536847dad0
|
[
"MIT"
] | null | null | null |
CV/migrations/0005_auto_20180903_1348.py
|
bdribault/Perso
|
cdf115f5901a845adc6222eddfbda9536847dad0
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.1 on 2018-09-03 13:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Rework CV availability: drop the free-text field in favour of a typed
    offset (number + day/month unit) and tighten the choice fields."""

    dependencies = [
        ('CV', '0004_auto_20180903_1229'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='cv',
            name='availability_string',
        ),
        migrations.AddField(
            model_name='cv',
            name='availability_offset_number',
            field=models.IntegerField(blank=True, help_text="if 'Availability type' is 'offset'", null=True),
        ),
        migrations.AddField(
            model_name='cv',
            name='availability_offset_quantity',
            field=models.CharField(choices=[('DAY', 'day'), ('MONTH', 'month')], default='MONTH', help_text="if 'Availability type' is 'offset'", max_length=64),
        ),
        migrations.AlterField(
            model_name='cv',
            name='availability_date',
            field=models.DateField(blank=True, help_text="if 'Availability type' is 'at'", null=True),
        ),
        migrations.AlterField(
            model_name='cv',
            name='availability_type',
            field=models.CharField(choices=[('NOW', 'now'), ('OFFSET', 'offset'), ('AT', 'at')], default='NOW', max_length=64),
        ),
    ]
| 34.078947
| 161
| 0.577606
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('CV', '0004_auto_20180903_1229'),
]
operations = [
migrations.RemoveField(
model_name='cv',
name='availability_string',
),
migrations.AddField(
model_name='cv',
name='availability_offset_number',
field=models.IntegerField(blank=True, help_text="if 'Availability type' is 'offset'", null=True),
),
migrations.AddField(
model_name='cv',
name='availability_offset_quantity',
field=models.CharField(choices=[('DAY', 'day'), ('MONTH', 'month')], default='MONTH', help_text="if 'Availability type' is 'offset'", max_length=64),
),
migrations.AlterField(
model_name='cv',
name='availability_date',
field=models.DateField(blank=True, help_text="if 'Availability type' is 'at'", null=True),
),
migrations.AlterField(
model_name='cv',
name='availability_type',
field=models.CharField(choices=[('NOW', 'now'), ('OFFSET', 'offset'), ('AT', 'at')], default='NOW', max_length=64),
),
]
| true
| true
|
f70760e9efe6724016ed206729de4193cdbba1e9
| 482
|
py
|
Python
|
Examples/Session08/properties_example.py
|
Sharmila8/intropython2016
|
a69aa6f6d0cd28c6a29d0b8adb9ef1ff9e2e8479
|
[
"Unlicense"
] | null | null | null |
Examples/Session08/properties_example.py
|
Sharmila8/intropython2016
|
a69aa6f6d0cd28c6a29d0b8adb9ef1ff9e2e8479
|
[
"Unlicense"
] | null | null | null |
Examples/Session08/properties_example.py
|
Sharmila8/intropython2016
|
a69aa6f6d0cd28c6a29d0b8adb9ef1ff9e2e8479
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
"""
Example code for properties
NOTE: if your getters and setters are this simple: don't do this!
"""
class C:
    """Example of a managed attribute built with property().

    NOTE: if your getters and setters are this simple, don't do this --
    expose the attribute directly.
    """

    def __init__(self):
        self._x = None  # backing storage for the managed attribute

    def _get_x(self):
        print("in getter")
        return self._x

    def _set_x(self, value):
        print("in setter", value)
        self._x = value

    def _del_x(self):
        del self._x

    # Equivalent to stacking @property / @x.setter / @x.deleter.
    x = property(_get_x, _set_x, _del_x)
if __name__ == "__main__":
c = C()
c.x = 5
print(c.x)
| 15.548387
| 65
| 0.549793
|
class C:
def __init__(self):
self._x = None
@property
def x(self):
print("in getter")
return self._x
@x.setter
def x(self, value):
print("in setter", value)
self._x = value
@x.deleter
def x(self):
del self._x
if __name__ == "__main__":
c = C()
c.x = 5
print(c.x)
| true
| true
|
f707625de59843ab74eaddf4cd84de3a68dd5c75
| 933
|
py
|
Python
|
chocs/middleware/application_middleware.py
|
danballance/chocs
|
8a64ce47d98ec327ce709b46c6389c5c627f4157
|
[
"MIT"
] | null | null | null |
chocs/middleware/application_middleware.py
|
danballance/chocs
|
8a64ce47d98ec327ce709b46c6389c5c627f4157
|
[
"MIT"
] | null | null | null |
chocs/middleware/application_middleware.py
|
danballance/chocs
|
8a64ce47d98ec327ce709b46c6389c5c627f4157
|
[
"MIT"
] | null | null | null |
from chocs.http_error import HttpError
from chocs.http_request import HttpRequest
from chocs.http_response import HttpResponse
from chocs.routing import Router
from chocs.serverless.serverless import ServerlessFunction
from .middleware import Middleware, MiddlewareHandler
class ApplicationMiddleware(Middleware):
    """Terminal middleware: invokes the route handler attached to the request.

    `next` is intentionally unused -- this middleware ends the chain by
    calling the resolved handler directly.
    """

    def __init__(self, router: Router):
        # Router kept for interface parity; not consulted in handle().
        self.router = router

    def handle(self, request: HttpRequest, next: MiddlewareHandler) -> HttpResponse:
        try:
            # Routing middleware is expected to have stored the handler here.
            handler = request.attributes["__handler__"]
            response: HttpResponse
            if isinstance(handler, ServerlessFunction):
                # Serverless handlers wrap the callable in .function.
                response = handler.function(request)
            else:
                response = handler(request)
            return response
        except HttpError as error:
            # Translate domain HTTP errors into plain error responses.
            return HttpResponse(status=error.status_code, body=error.http_message)
__all__ = ["ApplicationMiddleware"]
| 31.1
| 84
| 0.706324
|
from chocs.http_error import HttpError
from chocs.http_request import HttpRequest
from chocs.http_response import HttpResponse
from chocs.routing import Router
from chocs.serverless.serverless import ServerlessFunction
from .middleware import Middleware, MiddlewareHandler
class ApplicationMiddleware(Middleware):
def __init__(self, router: Router):
self.router = router
def handle(self, request: HttpRequest, next: MiddlewareHandler) -> HttpResponse:
try:
handler = request.attributes["__handler__"]
response: HttpResponse
if isinstance(handler, ServerlessFunction):
response = handler.function(request)
else:
response = handler(request)
return response
except HttpError as error:
return HttpResponse(status=error.status_code, body=error.http_message)
__all__ = ["ApplicationMiddleware"]
| true
| true
|
f7076503a185e2d56681d0575d10cc00c4ccc108
| 4,397
|
py
|
Python
|
hw4/libs/new_isomap.py
|
ardihikaru/mlsp
|
db38972bcceac7b95808132457c4de9170546c9d
|
[
"Apache-2.0"
] | null | null | null |
hw4/libs/new_isomap.py
|
ardihikaru/mlsp
|
db38972bcceac7b95808132457c4de9170546c9d
|
[
"Apache-2.0"
] | null | null | null |
hw4/libs/new_isomap.py
|
ardihikaru/mlsp
|
db38972bcceac7b95808132457c4de9170546c9d
|
[
"Apache-2.0"
] | 1
|
2020-01-07T14:25:54.000Z
|
2020-01-07T14:25:54.000Z
|
# Source: https://github.com/tracy-talent/curriculum/blob/ecaf850cb7932f23b5d7c0323e80a9f9a408bef6/Machine%20Learning/Dimension%20Reduction/src/ISOMAP.py
from numpy import *
from hw4.libs.metrics import _1NN
from queue import PriorityQueue
from os import path
import time
def loadData(filename):
    """Load a CSV-style dataset file.

    Each line is comma-separated float features followed by one integer
    label. Returns (data, tag): numpy matrices of shape (n, d) and (n, 1).

    Fix: the original left the file handle open; use a context manager.
    """
    with open(filename) as f:
        content = f.readlines()
    # Split data and labels: all columns but the last are features,
    # the last column is the class tag.
    data = [list(map(float32, line.strip().split(",")[:-1])) for line in content]
    tag = [list(map(int, line.strip().split(",")[-1:])) for line in content]
    return mat(data), mat(tag)
def calc_distance(dataMat):
    """Pairwise Euclidean distance matrix for the rows of dataMat.

    Returns a (n, n) float32 array. The metric is symmetric, so only the
    upper triangle is computed and then mirrored (the original computed
    every pair twice, including the zero diagonal).
    """
    dataSize = len(dataMat)
    Euc_distanceMat = zeros([dataSize, dataSize], float32)
    for i in range(dataSize):
        for j in range(i + 1, dataSize):
            d = linalg.norm(dataMat[i] - dataMat[j])
            Euc_distanceMat[i][j] = d
            Euc_distanceMat[j][i] = d
    return Euc_distanceMat
# Adjacency-table entry.
class edge(object):
    """An outgoing graph edge: weight `cost` into vertex `to`."""

    def __init__(self, cost, to):
        self.cost = cost  # edge weight
        self.to = to      # destination vertex index

    def __lt__(self, other):
        # Order edges by weight so a PriorityQueue pops the cheapest first.
        is_cheaper = self.cost < other.cost
        return is_cheaper
# dijkstra (Shortest path algorithm)
# @param{dist: Distance matrix, graph: Adjacency, src: Source}
def dijkstra(dist, graph, src):
    """Relax dist[src][*] in place to single-source shortest-path lengths.

    Expects dist[src][src] == 0 and the remaining entries pre-filled with
    the current best-known distances (inf when unknown). `graph` is an
    adjacency list of `edge` objects.
    """
    que = PriorityQueue()
    que.put(edge(0, src))
    while not que.empty():
        p = que.get()
        v = p.to
        # Stale queue entry: a shorter path to v was already settled.
        if dist[src][v] < p.cost:
            continue
        for i in range(len(graph[v])):
            if dist[src][graph[v][i].to] > dist[src][v] + graph[v][i].cost:
                dist[src][graph[v][i].to] = dist[src][v] + graph[v][i].cost
                que.put(edge(dist[src][graph[v][i].to], graph[v][i].to))
# @param{dist: Distance matrix, dims: Number of components}
# return: the dimension-reduced matrix
def mds(dist, dims):
    """Classical multidimensional scaling of a pairwise distance matrix.

    Returns an (n, dims) embedding whose pairwise Euclidean distances
    approximate `dist`, or None when dims exceeds the matrix size.

    Fix: the too-many-dims error branch used to call dist.shape() (a tuple,
    so TypeError) with a mismatched %d argument; it now reports the sizes
    without crashing.
    """
    dataSize = len(dist)
    if dims > dataSize:
        print('Dimension reduction dimension %d is greater than the dimension of the matrix to be reduced %d' % (dims, dataSize))
        return
    # Row means, column means, and grand mean of squared distances
    # (the double-centering terms).
    dist_i_dot_2 = zeros([dataSize], float32)
    dist_dot_j_2 = zeros([dataSize], float32)
    dist_dot_dot_2 = 0.0
    bMat = zeros([dataSize, dataSize], float32)
    for i in range(dataSize):
        for j in range(dataSize):
            dist_i_j_2 = square(dist[i][j])
            dist_i_dot_2[i] += dist_i_j_2
            dist_dot_j_2[j] += dist_i_j_2 / dataSize
            dist_dot_dot_2 += dist_i_j_2
        dist_i_dot_2[i] /= dataSize
    dist_dot_dot_2 /= square(dataSize)
    # Gram matrix via double centering:
    # b_ij = -0.5 * (d_ij^2 - d_i.^2 - d_.j^2 + d_..^2)
    for i in range(dataSize):
        for j in range(dataSize):
            dist_i_j_2 = square(dist[i][j])
            bMat[i][j] = -0.5 * (dist_i_j_2 - dist_i_dot_2[i] - dist_dot_j_2[j] + dist_dot_dot_2)
    # Eigen-decompose and keep the `dims` largest eigenvalues.
    eigVals, eigVecs = linalg.eig(bMat)
    eigVals_Idx = argpartition(eigVals, -dims)[:-(dims + 1):-1]
    # Clamp to non-negative before the square root (numerical noise can
    # make small eigenvalues slightly negative).
    eigVals_Diag = diag(maximum(eigVals[eigVals_Idx], 0.0))
    return matmul(eigVecs[:, eigVals_Idx], sqrt(eigVals_Diag))
# param{dataMat: Image Dataset (n_data, n_dimension),dims: Number of Components,KNN_K: Number of Neighbours}
# return:Dimensionality-reduced matrix
def isomap(dataMat, dims, KNN_K):
set_printoptions(threshold=None)
inf = float('inf')
dataSize = len(dataMat)
if KNN_K >= dataSize:
# raise ValueError('KNN_K的值最大为数据个数 - 1:%d' % dataSize - 1)
raise ValueError("The maximum value is the number of data = ", (dataSize-1))
Euc_distanceMat = calc_distance(dataMat)
# Setup KNN Connection diagram
knn_distanceMat = ones([dataSize, dataSize], float32) * inf
for i in range(dataSize):
knn_disIdx = argpartition(Euc_distanceMat[i], KNN_K)[:KNN_K + 1]
knn_distanceMat[i][knn_disIdx] = Euc_distanceMat[i][knn_disIdx]
for j in knn_disIdx:
knn_distanceMat[j][i] = knn_distanceMat[i][j]
# Build adjacency list
adjacencyTable = []
for i in range(dataSize):
edgelist = []
for j in range(dataSize):
if knn_distanceMat[i][j] != inf:
edgelist.append(edge(knn_distanceMat[i][j], j))
adjacencyTable.append(edgelist)
# dijkstra: Find the shortest
# dist: Store the shortest distance between any two points
dist = ones([dataSize, dataSize], float32) * inf
for i in range(dataSize):
dist[i][i] = 0.0
dijkstra(dist, adjacencyTable, i)
return mds(dist, dims)
| 36.641667
| 153
| 0.643848
|
from numpy import *
from hw4.libs.metrics import _1NN
from queue import PriorityQueue
from os import path
import time
def loadData(filename):
content = open(filename).readlines()
data = [list(map(float32, line.strip().split(",")[:-1])) for line in content]
tag = [list(map(int, line.strip().split(",")[-1:])) for line in content]
return mat(data), mat(tag)
def calc_distance(dataMat):
dataSize = len(dataMat)
Euc_distanceMat = zeros([dataSize, dataSize], float32)
for i in range(dataSize):
for j in range(dataSize):
Euc_distanceMat[i][j] = linalg.norm(dataMat[i] - dataMat[j])
return Euc_distanceMat
class edge(object):
def __init__(self, cost, to):
self.cost = cost
self.to = to
def __lt__(self, other):
return self.cost < other.cost
def dijkstra(dist, graph, src):
que = PriorityQueue()
que.put(edge(0, src))
while not que.empty():
p = que.get()
v = p.to
if dist[src][v] < p.cost:
continue
for i in range(len(graph[v])):
if dist[src][graph[v][i].to] > dist[src][v] + graph[v][i].cost:
dist[src][graph[v][i].to] = dist[src][v] + graph[v][i].cost
que.put(edge(dist[src][graph[v][i].to], graph[v][i].to))
def mds(dist, dims):
dataSize = len(dist)
if dims > dataSize:
print('Dimension reduction dimension %d is greater than the dimension of the matrix to be reduced %d' % (dims, dist.shape()))
return
dist_i_dot_2 = zeros([dataSize], float32)
dist_dot_j_2 = zeros([dataSize], float32)
dist_dot_dot_2 = 0.0
bMat = zeros([dataSize, dataSize], float32)
for i in range(dataSize):
for j in range(dataSize):
dist_i_j_2 = square(dist[i][j])
dist_i_dot_2[i] += dist_i_j_2
dist_dot_j_2[j] += dist_i_j_2 / dataSize
dist_dot_dot_2 += dist_i_j_2
dist_i_dot_2[i] /= dataSize
dist_dot_dot_2 /= square(dataSize)
for i in range(dataSize):
for j in range(dataSize):
dist_i_j_2 = square(dist[i][j])
bMat[i][j] = -0.5 * (dist_i_j_2 - dist_i_dot_2[i] - dist_dot_j_2[j] + dist_dot_dot_2)
eigVals, eigVecs = linalg.eig(bMat)
eigVals_Idx = argpartition(eigVals, -dims)[:-(dims + 1):-1]
eigVals_Diag = diag(maximum(eigVals[eigVals_Idx], 0.0))
return matmul(eigVecs[:, eigVals_Idx], sqrt(eigVals_Diag))
def isomap(dataMat, dims, KNN_K):
set_printoptions(threshold=None)
inf = float('inf')
dataSize = len(dataMat)
if KNN_K >= dataSize:
raise ValueError("The maximum value is the number of data = ", (dataSize-1))
Euc_distanceMat = calc_distance(dataMat)
knn_distanceMat = ones([dataSize, dataSize], float32) * inf
for i in range(dataSize):
knn_disIdx = argpartition(Euc_distanceMat[i], KNN_K)[:KNN_K + 1]
knn_distanceMat[i][knn_disIdx] = Euc_distanceMat[i][knn_disIdx]
for j in knn_disIdx:
knn_distanceMat[j][i] = knn_distanceMat[i][j]
adjacencyTable = []
for i in range(dataSize):
edgelist = []
for j in range(dataSize):
if knn_distanceMat[i][j] != inf:
edgelist.append(edge(knn_distanceMat[i][j], j))
adjacencyTable.append(edgelist)
dist = ones([dataSize, dataSize], float32) * inf
for i in range(dataSize):
dist[i][i] = 0.0
dijkstra(dist, adjacencyTable, i)
return mds(dist, dims)
| true
| true
|
f7076571e1f2a78d4b8409e2ded824ce37f2f625
| 956
|
gyp
|
Python
|
binding.gyp
|
Calyhre/node-simconnect
|
076e5725e735a3f81989412c0a9024685daf440c
|
[
"MIT"
] | 65
|
2017-07-31T09:46:31.000Z
|
2022-02-24T07:00:17.000Z
|
binding.gyp
|
Calyhre/node-simconnect
|
076e5725e735a3f81989412c0a9024685daf440c
|
[
"MIT"
] | 29
|
2017-07-31T14:39:26.000Z
|
2022-02-21T16:41:17.000Z
|
binding.gyp
|
Calyhre/node-simconnect
|
076e5725e735a3f81989412c0a9024685daf440c
|
[
"MIT"
] | 26
|
2018-01-04T15:11:26.000Z
|
2022-03-18T02:21:03.000Z
|
{
"targets": [
{
"target_name": "node-simconnect",
"sources": [ "src/addon.cc" ],
"include_dirs": [
"SimConnect/Inc",
"<!(node -e \"require('nan')\")"
],
"link_settings": {
"libraries": [
"../SimConnect/lib/SimConnect"
]
},
'configurations': {
'Debug': {
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': '3' # /MDd
}
}
},
'Release': {
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': '2' # /MD
}
}
}
}
}
]
}
| 26.555556
| 64
| 0.264644
|
{
"targets": [
{
"target_name": "node-simconnect",
"sources": [ "src/addon.cc" ],
"include_dirs": [
"SimConnect/Inc",
"<!(node -e \"require('nan')\")"
],
"link_settings": {
"libraries": [
"../SimConnect/lib/SimConnect"
]
},
'configurations': {
'Debug': {
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': '3'
}
}
},
'Release': {
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': '2'
}
}
}
}
}
]
}
| true
| true
|
f70765eebcf4a1048f40d5dbf0d0748b1d2c2301
| 1,654
|
py
|
Python
|
flavio/physics/quarkonium/test_Vllgamma.py
|
micha-a-schmidt/flavio
|
fb89a11cdf45e536f2d72de8a4a2657130c4e09f
|
[
"MIT"
] | null | null | null |
flavio/physics/quarkonium/test_Vllgamma.py
|
micha-a-schmidt/flavio
|
fb89a11cdf45e536f2d72de8a4a2657130c4e09f
|
[
"MIT"
] | null | null | null |
flavio/physics/quarkonium/test_Vllgamma.py
|
micha-a-schmidt/flavio
|
fb89a11cdf45e536f2d72de8a4a2657130c4e09f
|
[
"MIT"
] | 1
|
2017-11-09T01:40:01.000Z
|
2017-11-09T01:40:01.000Z
|
import unittest
import flavio
from wilson import Wilson
from .Vllgamma import *
### implement test
class TestVllgamma(unittest.TestCase):
def test_np(self):
wc,br=Wilson({'CVRR_muecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),8.3949e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->muegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->muegamma)',wc),flavio.np_prediction('BR(J/psi->muegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CSRR_muecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),6.2935e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->muegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->muegamma)',wc),flavio.np_prediction('BR(J/psi->muegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CVRR_tauecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),1.2887e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->tauegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->tauegamma)',wc),flavio.np_prediction('BR(J/psi->tauegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CSRR_tauecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),9.1097e-7
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->tauegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->tauegamma)',wc),flavio.np_prediction('BR(J/psi->tauegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
| 66.16
| 182
| 0.688029
|
import unittest
import flavio
from wilson import Wilson
from .Vllgamma import *
Case):
def test_np(self):
wc,br=Wilson({'CVRR_muecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),8.3949e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->muegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->muegamma)',wc),flavio.np_prediction('BR(J/psi->muegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CSRR_muecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),6.2935e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->muegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->muegamma)',wc),flavio.np_prediction('BR(J/psi->muegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CVRR_tauecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),1.2887e-6
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->tauegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->tauegamma)',wc),flavio.np_prediction('BR(J/psi->tauegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
wc,br=Wilson({'CSRR_tauecc' : 1e-2},scale=2.,eft='WET',basis='flavio'),9.1097e-7
self.assertAlmostEqual(flavio.np_prediction('BR(J/psi->tauegamma)',wc), br,delta=0.01*br)
self.assertAlmostEqual(flavio.np_prediction('R(J/psi->tauegamma)',wc),flavio.np_prediction('BR(J/psi->tauegamma)',wc)/flavio.np_prediction('BR(J/psi->ee)',wc),delta=0.001*br)
| true
| true
|
f7076853ea2f7368c6eda5ddd044c690d7cb536a
| 4,748
|
py
|
Python
|
seqgra/seqgras.py
|
gifford-lab/seqgra
|
3c7547878ecda4c00572746b8a07e0d614c9dbef
|
[
"MIT"
] | null | null | null |
seqgra/seqgras.py
|
gifford-lab/seqgra
|
3c7547878ecda4c00572746b8a07e0d614c9dbef
|
[
"MIT"
] | null | null | null |
seqgra/seqgras.py
|
gifford-lab/seqgra
|
3c7547878ecda4c00572746b8a07e0d614c9dbef
|
[
"MIT"
] | 2
|
2021-06-14T20:27:40.000Z
|
2021-06-14T20:29:29.000Z
|
#!/usr/bin/env python
"""MIT - CSAIL - Gifford Lab - seqgra
seqgra complete pipeline:
1. generate data based on data definition (once), see run_simulator.py
2. train model on data (once), see run_learner.py
3. evaluate model performance with SIS, see run_sis.py
@author: Konstantin Krismer
"""
import argparse
import logging
import os
from typing import List, Optional
import seqgra
import seqgra.constants as c
from seqgra import MiscHelper
from seqgra.comparator import Comparator
from seqgra.idresolver import IdResolver
def get_all_grammar_ids(output_dir: str) -> List[str]:
folder = output_dir + "evaluation/"
return [o for o in os.listdir(folder)
if os.path.isdir(os.path.join(folder, o))]
def get_all_model_ids(output_dir: str, grammar_ids: List[str]) -> List[str]:
model_ids: List[str] = []
for grammar_id in grammar_ids:
folder = output_dir + "evaluation/" + grammar_id + "/"
model_ids += [o for o in os.listdir(folder)
if os.path.isdir(os.path.join(folder, o))]
return list(set(model_ids))
def run_seqgra_summary(analysis_id: str,
comparator_ids: List[str],
output_dir: str,
grammar_ids: Optional[List[str]] = None,
model_ids: Optional[List[str]] = None,
set_names: Optional[List[str]] = None,
model_labels: Optional[List[str]] = None) -> None:
analysis_id = MiscHelper.sanitize_id(analysis_id)
output_dir = MiscHelper.format_output_dir(output_dir.strip())
if comparator_ids:
for comparator_id in comparator_ids:
comparator: Comparator = IdResolver.get_comparator(analysis_id,
comparator_id,
output_dir,
model_labels)
if not grammar_ids:
grammar_ids = get_all_grammar_ids(output_dir)
if not model_ids:
model_ids = get_all_model_ids(output_dir, grammar_ids)
comparator.compare_models(grammar_ids, model_ids, set_names)
def create_parser():
parser = argparse.ArgumentParser(
prog="seqgras",
description="seqgra summary: Gather metrics across grammars, models, "
"evaluators")
parser.add_argument(
"-v",
"--version",
action="version",
version="%(prog)s " + seqgra.__version__)
parser.add_argument(
"-a",
"--analysis-id",
type=str,
required=True,
help="analysis id (folder name for output)"
)
parser.add_argument(
"-c",
"--comparators",
type=str,
required=True,
nargs="+",
help="comparator ID or IDs: IDs of "
"comparators include " +
", ".join(sorted(c.ComparatorID.ALL_COMPARATOR_IDS))
)
parser.add_argument(
"-o",
"--output-dir",
type=str,
required=True,
help="output directory, subdirectories are created for generated "
"data, trained model, and model evaluation"
)
parser.add_argument(
"-g",
"--grammar-ids",
type=str,
default=None,
nargs="+",
help="one or more grammar IDs; defaults to all grammar IDs in "
"output dir"
)
parser.add_argument(
"-m",
"--model-ids",
type=str,
default=None,
nargs="+",
help="one or more model IDs; defaults to all model IDs for specified "
"grammars in output dir"
)
parser.add_argument(
"-s",
"--sets",
type=str,
default=["test"],
nargs="+",
help="one or more of the following: training, validation, or test"
)
parser.add_argument(
"-l",
"--model-labels",
type=str,
default=None,
nargs="+",
help="labels for models, must be same length as model_ids"
)
return parser
def main():
logging.basicConfig(level=logging.INFO)
parser = create_parser()
args = parser.parse_args()
for comparator in args.comparators:
if comparator not in c.ComparatorID.ALL_COMPARATOR_IDS:
raise ValueError(
"invalid comparator ID {s!r}".format(s=comparator))
run_seqgra_summary(args.analysis_id,
args.comparators,
args.output_dir,
args.grammar_ids,
args.model_ids,
args.sets,
args.model_labels)
if __name__ == "__main__":
main()
| 29.861635
| 78
| 0.566344
|
import argparse
import logging
import os
from typing import List, Optional
import seqgra
import seqgra.constants as c
from seqgra import MiscHelper
from seqgra.comparator import Comparator
from seqgra.idresolver import IdResolver
def get_all_grammar_ids(output_dir: str) -> List[str]:
folder = output_dir + "evaluation/"
return [o for o in os.listdir(folder)
if os.path.isdir(os.path.join(folder, o))]
def get_all_model_ids(output_dir: str, grammar_ids: List[str]) -> List[str]:
model_ids: List[str] = []
for grammar_id in grammar_ids:
folder = output_dir + "evaluation/" + grammar_id + "/"
model_ids += [o for o in os.listdir(folder)
if os.path.isdir(os.path.join(folder, o))]
return list(set(model_ids))
def run_seqgra_summary(analysis_id: str,
comparator_ids: List[str],
output_dir: str,
grammar_ids: Optional[List[str]] = None,
model_ids: Optional[List[str]] = None,
set_names: Optional[List[str]] = None,
model_labels: Optional[List[str]] = None) -> None:
analysis_id = MiscHelper.sanitize_id(analysis_id)
output_dir = MiscHelper.format_output_dir(output_dir.strip())
if comparator_ids:
for comparator_id in comparator_ids:
comparator: Comparator = IdResolver.get_comparator(analysis_id,
comparator_id,
output_dir,
model_labels)
if not grammar_ids:
grammar_ids = get_all_grammar_ids(output_dir)
if not model_ids:
model_ids = get_all_model_ids(output_dir, grammar_ids)
comparator.compare_models(grammar_ids, model_ids, set_names)
def create_parser():
parser = argparse.ArgumentParser(
prog="seqgras",
description="seqgra summary: Gather metrics across grammars, models, "
"evaluators")
parser.add_argument(
"-v",
"--version",
action="version",
version="%(prog)s " + seqgra.__version__)
parser.add_argument(
"-a",
"--analysis-id",
type=str,
required=True,
help="analysis id (folder name for output)"
)
parser.add_argument(
"-c",
"--comparators",
type=str,
required=True,
nargs="+",
help="comparator ID or IDs: IDs of "
"comparators include " +
", ".join(sorted(c.ComparatorID.ALL_COMPARATOR_IDS))
)
parser.add_argument(
"-o",
"--output-dir",
type=str,
required=True,
help="output directory, subdirectories are created for generated "
"data, trained model, and model evaluation"
)
parser.add_argument(
"-g",
"--grammar-ids",
type=str,
default=None,
nargs="+",
help="one or more grammar IDs; defaults to all grammar IDs in "
"output dir"
)
parser.add_argument(
"-m",
"--model-ids",
type=str,
default=None,
nargs="+",
help="one or more model IDs; defaults to all model IDs for specified "
"grammars in output dir"
)
parser.add_argument(
"-s",
"--sets",
type=str,
default=["test"],
nargs="+",
help="one or more of the following: training, validation, or test"
)
parser.add_argument(
"-l",
"--model-labels",
type=str,
default=None,
nargs="+",
help="labels for models, must be same length as model_ids"
)
return parser
def main():
logging.basicConfig(level=logging.INFO)
parser = create_parser()
args = parser.parse_args()
for comparator in args.comparators:
if comparator not in c.ComparatorID.ALL_COMPARATOR_IDS:
raise ValueError(
"invalid comparator ID {s!r}".format(s=comparator))
run_seqgra_summary(args.analysis_id,
args.comparators,
args.output_dir,
args.grammar_ids,
args.model_ids,
args.sets,
args.model_labels)
if __name__ == "__main__":
main()
| true
| true
|
f70769608dcb19db04a4c79f52e6922891d42c29
| 9,743
|
py
|
Python
|
geometry/shapes.py
|
kasmith/geometry
|
805b525ae8ffebb6bb1d84c094f76533d88dbb7a
|
[
"MIT"
] | null | null | null |
geometry/shapes.py
|
kasmith/geometry
|
805b525ae8ffebb6bb1d84c094f76533d88dbb7a
|
[
"MIT"
] | null | null | null |
geometry/shapes.py
|
kasmith/geometry
|
805b525ae8ffebb6bb1d84c094f76533d88dbb7a
|
[
"MIT"
] | null | null | null |
"""Functions that work on collections of shapes
"""
from __future__ import division, print_function
import numpy as np
from .convex import convex_area, convex_centroid
__all__ = ['recenter_polygon', 'centroid_for_shapes',
'centroid_for_uncomputed_shapes', 'recenter_system',
'rescale_and_recenter_system', 'rotate_polygon',
'rotate_system', 'mirror_polygon', 'mirror_system',
'find_concave_outline']
def recenter_polygon(vertices):
"""Returns a new convex polygon with centroid at (0,0)
Args:
vertices (list): list of (x,y) vertices of convex polygon
Returns:
A list just like the input with the recentered vertices (but possibly
transformed into numpy arrays)
"""
centroid = convex_centroid(vertices)
new_verts = []
for v in vertices:
v = np.array(v)
new_verts.append(v - centroid)
return new_verts
def centroid_for_shapes(centroids, areas = None):
"""Calculates the centroid for a set of shapes
Requires pre-computed centroids and areas
Args:
centroids (list): list of (x,y) centroids for each shape
areas (list): list of areas (floats) for each shape (if not given,
assumes they are all equal)
Returns:
The (x,y) position of the weighted centroid (as np.array)
"""
gc = np.zeros(2)
area = 0
if areas is None:
areas = np.ones(len(centroids))
for pc, a in zip(centroids, areas):
gc += np.array(pc)*a
area += a
gc /= area
return np.array(gc)
def centroid_for_uncomputed_shapes(shape_list):
"""Like centroid_for_shapes but calculates centroids & areas
Args:
shape_list (list): a list of list of vertices (one for each shape)
Returns:
The (x,y) position of the weighted centroid (as np.array)
"""
centroids = []
areas = []
for s in shape_list:
centroids.append(convex_centroid(s))
areas.append(convex_area(s))
return centroid_for_shapes(centroids, areas)
def recenter_system(shape_list):
"""Recenters a set of shapes around the centroid of all of them
Args:
shape_list (list): a list of list of vertices (one for each shape)
Returns:
List of two items:
* Similar format as input, but transformed so that calculating the
centroid_for_uncomputed_shapes() on that list returns (0,0)
* The grand centroid for the system in original coordinates
"""
centroids = []
areas = []
new_shapes = []
# Decompose each of the individual shapes
for s in shape_list:
c = convex_centroid(s)
a = convex_area(s)
new_s = []
for v in s:
new_s.append(np.array(v) - c)
centroids.append(c)
areas.append(a)
new_shapes.append(new_s)
# Find the grand centroid & new centers of each shape
center = centroid_for_shapes(centroids, areas)
re_centroids = [c - center for c in centroids]
# Go back and change the vertices of each shape
final_shapes = []
for ns,c in zip(new_shapes, re_centroids):
final_shapes.append([s+c for s in ns])
return final_shapes, center
def rescale_and_recenter_system(shape_list, total_area):
"""Recenters a set of shapes and resizes them to have a total fixed area
Args:
shape_list (list): a list of list of vertices (one for each shape)
total_area (float): the area to fix the shapes to
Returns:
List of two items:
* Similar format as input, but transformed so that calculating the
`centroid_for_uncomputed_shapes()` on that list returns (0,0) and summing
the areas gets to `total_area`
* The grand centroid for the system in original coordinates
"""
centroids = []
areas = []
new_shapes = []
# Decompose each of the individual shapes
for s in shape_list:
c = convex_centroid(s)
a = convex_area(s)
new_s = []
for v in s:
new_s.append(np.array(v) - c)
centroids.append(c)
areas.append(a)
new_shapes.append(new_s)
# Find the grand centroid & new centers of each shape
center = centroid_for_shapes(centroids, areas)
re_centroids = [c - center for c in centroids]
# Find rescaling factor
tot_a = sum(areas)
dim_scale = np.sqrt(total_area / tot_a)
# Go back and change the vertices of each shape
final_shapes = []
for ns,c in zip(new_shapes, re_centroids):
final_shapes.append([(s+c)*dim_scale for s in ns])
return final_shapes, center
def rotate_polygon(vertices, angle, center_point = [0., 0.]):
"""Rotates a shape around a given point (the origin)
Args:
vertices (list): A list of (x,y) vertices
angle (float): Angle in radians to rotate counterclockwise
center_point ([float, float]): (x,y) point to rotate around
Returns:
A list of vertices rotated around the center point
"""
np_o = np.array(center_point)
np_vs = [np.array(v) - np_o for v in vertices]
rot_mat = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
return [np.dot(rot_mat, v)+np_o for v in np_vs]
def rotate_system(shape_list, angle, center_point = None):
"""Rotates a set of shapes around a given point
If no center point is given, assume the center of mass of the shape
Args:
shape_list (list): A list of list of (x,y) vertices
angle (float): Angle in radians to rotate counterclockwise
center_point ([float, float]): (x,y) point to rotate around
Returns:
A new shape list with rotated vertices
"""
if center_point is None:
center_point = centroid_for_uncomputed_shapes(shape_list)
return [rotate_polygon(s, angle, center_point) for s in shape_list]
def mirror_polygon(vertices, axes=(False, True), center_point=None):
"""Mirrors a polygon around an x or y line
If center_point is None, mirror around the center of the shape
Args:
vertices (list): A list of (x,y) vertices
axes ([bool, bool]): Whether to mirror around the (x,y) axes
center_point ([float, float]): (x,y) point to mirror around
Returns:
A new polygon with rotated vertices
"""
if center_point is None:
center_point = convex_centroid(vertices)
xm = -1 if axes[0] else 1
ym = -1 if axes[1] else 1
return [np.array([xm*(v[0]-center_point[0])+center_point[0],
ym*(v[1]-center_point[1])+center_point[1]]) for v
in vertices]
def mirror_system(shape_list, axes=(False, True), center_point=None):
"""Mirrors a polygon around an x or y line
Mirrors around the center of the system if center_point is None
Args:
shape_list (list): A list of list of (x,y) vertices
axes ([bool, bool]): Whether to mirror around the (x,y) axes
center_point ([float, float]): (x,y) point to mirror around
Returns:
A new shape list with rotated vertices
"""
if center_point is None:
center_point = centroid_for_uncomputed_shapes(shape_list)
return [mirror_polygon(s, axes, center_point) for s in shape_list]
def _point_equal(p1, p2):
return p1[0]==p2[0] and p1[1] == p2[1]
def _arr_eq(a1, a2):
return all(_point_equal(p1,p2) for p1, p2 in zip(a1, a2))
def find_concave_outline(shape_list):
"""Find the outline of a set of shapes
Assuming all shapes have edges in common with other shapes where they touch,
provides a set of vertices for drawing the outline
Args:
shape_list (list): A list of list of (x,y) vertices
Returns:
A list of ordered (x,y) vertices for drawing an outline
"""
# Find the most lower-right point
current_shape = shape_list[0]
current_pt = current_shape[0]
test_idx = 1
next_test_dir = 1
for s in shape_list:
for i in range(len(s)):
p = s[i]
if ((p[0] < current_pt[0]) or
(p[0] == current_pt[0] and p[1] < current_pt[1])):
# Replace
current_pt = p
current_shape = s
test_idx = (i+1) % len(s)
next_test_dir = 1
vertex_list = [current_pt]
# Keep going until you reach back to the first point
while not _point_equal(current_shape[test_idx], vertex_list[0]):
# Iterate through all the shapes to try to find a matching edge
checking = True
for s in (s for s in shape_list if not _arr_eq(s, current_shape)):
if checking: # Way to break out if match found
for i in range(len(s)):
spt = s[i]
if _point_equal(current_pt, spt):
spt_after = s[(i+1) % len(s)]
spt_before = s[(i-1) % len(s)]
test_pt = current_shape[test_idx]
if _point_equal(test_pt, spt_after):
test_idx = (i-1) % len(s)
next_test_dir = -1
current_shape = s
checking = False
elif _point_equal(test_pt, spt_before):
test_idx = (i+1) % len(s)
next_test_dir = 1
current_shape = s
checking = False
# Have you exhausted all shapes?
if checking:
current_pt = current_shape[test_idx]
vertex_list.append(current_pt)
test_idx += next_test_dir
test_idx %= len(current_shape)
return vertex_list
| 34.549645
| 81
| 0.615621
|
from __future__ import division, print_function
import numpy as np
from .convex import convex_area, convex_centroid
__all__ = ['recenter_polygon', 'centroid_for_shapes',
'centroid_for_uncomputed_shapes', 'recenter_system',
'rescale_and_recenter_system', 'rotate_polygon',
'rotate_system', 'mirror_polygon', 'mirror_system',
'find_concave_outline']
def recenter_polygon(vertices):
centroid = convex_centroid(vertices)
new_verts = []
for v in vertices:
v = np.array(v)
new_verts.append(v - centroid)
return new_verts
def centroid_for_shapes(centroids, areas = None):
gc = np.zeros(2)
area = 0
if areas is None:
areas = np.ones(len(centroids))
for pc, a in zip(centroids, areas):
gc += np.array(pc)*a
area += a
gc /= area
return np.array(gc)
def centroid_for_uncomputed_shapes(shape_list):
centroids = []
areas = []
for s in shape_list:
centroids.append(convex_centroid(s))
areas.append(convex_area(s))
return centroid_for_shapes(centroids, areas)
def recenter_system(shape_list):
centroids = []
areas = []
new_shapes = []
for s in shape_list:
c = convex_centroid(s)
a = convex_area(s)
new_s = []
for v in s:
new_s.append(np.array(v) - c)
centroids.append(c)
areas.append(a)
new_shapes.append(new_s)
center = centroid_for_shapes(centroids, areas)
re_centroids = [c - center for c in centroids]
final_shapes = []
for ns,c in zip(new_shapes, re_centroids):
final_shapes.append([s+c for s in ns])
return final_shapes, center
def rescale_and_recenter_system(shape_list, total_area):
centroids = []
areas = []
new_shapes = []
for s in shape_list:
c = convex_centroid(s)
a = convex_area(s)
new_s = []
for v in s:
new_s.append(np.array(v) - c)
centroids.append(c)
areas.append(a)
new_shapes.append(new_s)
center = centroid_for_shapes(centroids, areas)
re_centroids = [c - center for c in centroids]
tot_a = sum(areas)
dim_scale = np.sqrt(total_area / tot_a)
final_shapes = []
for ns,c in zip(new_shapes, re_centroids):
final_shapes.append([(s+c)*dim_scale for s in ns])
return final_shapes, center
def rotate_polygon(vertices, angle, center_point = [0., 0.]):
np_o = np.array(center_point)
np_vs = [np.array(v) - np_o for v in vertices]
rot_mat = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
return [np.dot(rot_mat, v)+np_o for v in np_vs]
def rotate_system(shape_list, angle, center_point = None):
if center_point is None:
center_point = centroid_for_uncomputed_shapes(shape_list)
return [rotate_polygon(s, angle, center_point) for s in shape_list]
def mirror_polygon(vertices, axes=(False, True), center_point=None):
if center_point is None:
center_point = convex_centroid(vertices)
xm = -1 if axes[0] else 1
ym = -1 if axes[1] else 1
return [np.array([xm*(v[0]-center_point[0])+center_point[0],
ym*(v[1]-center_point[1])+center_point[1]]) for v
in vertices]
def mirror_system(shape_list, axes=(False, True), center_point=None):
if center_point is None:
center_point = centroid_for_uncomputed_shapes(shape_list)
return [mirror_polygon(s, axes, center_point) for s in shape_list]
def _point_equal(p1, p2):
return p1[0]==p2[0] and p1[1] == p2[1]
def _arr_eq(a1, a2):
return all(_point_equal(p1,p2) for p1, p2 in zip(a1, a2))
def find_concave_outline(shape_list):
current_shape = shape_list[0]
current_pt = current_shape[0]
test_idx = 1
next_test_dir = 1
for s in shape_list:
for i in range(len(s)):
p = s[i]
if ((p[0] < current_pt[0]) or
(p[0] == current_pt[0] and p[1] < current_pt[1])):
current_pt = p
current_shape = s
test_idx = (i+1) % len(s)
next_test_dir = 1
vertex_list = [current_pt]
while not _point_equal(current_shape[test_idx], vertex_list[0]):
checking = True
for s in (s for s in shape_list if not _arr_eq(s, current_shape)):
if checking:
for i in range(len(s)):
spt = s[i]
if _point_equal(current_pt, spt):
spt_after = s[(i+1) % len(s)]
spt_before = s[(i-1) % len(s)]
test_pt = current_shape[test_idx]
if _point_equal(test_pt, spt_after):
test_idx = (i-1) % len(s)
next_test_dir = -1
current_shape = s
checking = False
elif _point_equal(test_pt, spt_before):
test_idx = (i+1) % len(s)
next_test_dir = 1
current_shape = s
checking = False
if checking:
current_pt = current_shape[test_idx]
vertex_list.append(current_pt)
test_idx += next_test_dir
test_idx %= len(current_shape)
return vertex_list
| true
| true
|
f7076a1be7f95b94829ffd29a534b75219784e7e
| 1,610
|
py
|
Python
|
tests/test_messages.py
|
felixcheruiyot/twilio-python
|
bbef708f0aa2879c582bd705f74b67f7b7c3787a
|
[
"MIT"
] | null | null | null |
tests/test_messages.py
|
felixcheruiyot/twilio-python
|
bbef708f0aa2879c582bd705f74b67f7b7c3787a
|
[
"MIT"
] | null | null | null |
tests/test_messages.py
|
felixcheruiyot/twilio-python
|
bbef708f0aa2879c582bd705f74b67f7b7c3787a
|
[
"MIT"
] | null | null | null |
import six
if six.PY3:
import unittest
else:
import unittest2 as unittest
from datetime import date
from mock import Mock
from six import u
from twilio.rest.resources import Messages
DEFAULT = {
'From': None,
'DateSent<': None,
'DateSent>': None,
'DateSent': None,
}
class MessageTest(unittest.TestCase):
def setUp(self):
self.resource = Messages("foo", ("sid", "token"))
self.params = DEFAULT.copy()
def test_list_on(self):
self.resource.get_instances = Mock()
self.resource.list(date_sent=date(2011, 1, 1))
self.params['DateSent'] = "2011-01-01"
self.resource.get_instances.assert_called_with(self.params)
def test_list_after(self):
self.resource.get_instances = Mock()
self.resource.list(after=date(2011, 1, 1))
self.params['DateSent>'] = "2011-01-01"
self.resource.get_instances.assert_called_with(self.params)
def test_list_before(self):
self.resource.get_instances = Mock()
self.resource.list(before=date(2011, 1, 1))
self.params['DateSent<'] = "2011-01-01"
self.resource.get_instances.assert_called_with(self.params)
def test_create(self):
self.resource.create_instance = Mock()
self.resource.create(
from_='+14155551234',
to='+14155556789',
body=u('ahoy hoy'),
)
self.resource.create_instance.assert_called_with(
{
'from': '+14155551234',
'to': '+14155556789',
'body': u('ahoy hoy'),
},
)
| 27.758621
| 67
| 0.606832
|
import six
if six.PY3:
import unittest
else:
import unittest2 as unittest
from datetime import date
from mock import Mock
from six import u
from twilio.rest.resources import Messages
DEFAULT = {
'From': None,
'DateSent<': None,
'DateSent>': None,
'DateSent': None,
}
class MessageTest(unittest.TestCase):
def setUp(self):
self.resource = Messages("foo", ("sid", "token"))
self.params = DEFAULT.copy()
def test_list_on(self):
self.resource.get_instances = Mock()
self.resource.list(date_sent=date(2011, 1, 1))
self.params['DateSent'] = "2011-01-01"
self.resource.get_instances.assert_called_with(self.params)
def test_list_after(self):
self.resource.get_instances = Mock()
self.resource.list(after=date(2011, 1, 1))
self.params['DateSent>'] = "2011-01-01"
self.resource.get_instances.assert_called_with(self.params)
def test_list_before(self):
self.resource.get_instances = Mock()
self.resource.list(before=date(2011, 1, 1))
self.params['DateSent<'] = "2011-01-01"
self.resource.get_instances.assert_called_with(self.params)
def test_create(self):
self.resource.create_instance = Mock()
self.resource.create(
from_='+14155551234',
to='+14155556789',
body=u('ahoy hoy'),
)
self.resource.create_instance.assert_called_with(
{
'from': '+14155551234',
'to': '+14155556789',
'body': u('ahoy hoy'),
},
)
| true
| true
|
f7076adaa536e7eadf0ced46b2116f6273d1415a
| 1,336
|
py
|
Python
|
app/core/tests/test_admin.py
|
Saurav-Shrivastav/recipe-app-api
|
bbb024828784ca91c34b81cdf1aacfd5a3021f30
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
Saurav-Shrivastav/recipe-app-api
|
bbb024828784ca91c34b81cdf1aacfd5a3021f30
|
[
"MIT"
] | null | null | null |
app/core/tests/test_admin.py
|
Saurav-Shrivastav/recipe-app-api
|
bbb024828784ca91c34b81cdf1aacfd5a3021f30
|
[
"MIT"
] | null | null | null |
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@gmail.com',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@gmail.com',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
"""Test that users are listed on user page"""
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
"""User edit page works"""
url = reverse('admin:core_user_change', args=[self.user.id])
# admin/user/change/<id>
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
def test_create_user_page(self):
"""Test that the create userpage works"""
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
| 31.809524
| 68
| 0.636228
|
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from django.urls import reverse
class AdminSiteTests(TestCase):
def setUp(self):
self.client = Client()
self.admin_user = get_user_model().objects.create_superuser(
email='admin@gmail.com',
password='password123'
)
self.client.force_login(self.admin_user)
self.user = get_user_model().objects.create_user(
email='test@gmail.com',
password='password123',
name='Test user full name'
)
def test_users_listed(self):
url = reverse('admin:core_user_changelist')
res = self.client.get(url)
self.assertContains(res, self.user.name)
self.assertContains(res, self.user.email)
def test_user_change_page(self):
url = reverse('admin:core_user_change', args=[self.user.id])
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
def test_create_user_page(self):
url = reverse('admin:core_user_add')
res = self.client.get(url)
self.assertEquals(res.status_code, 200)
| true
| true
|
f7076ae73f0a6e979238dc4f62e5f4a973930177
| 17,240
|
py
|
Python
|
python/tvm/relax/transform/transform.py
|
sunggg/relax
|
fad6e9f6c9f4a519ebdba0c604106b1674c5d1ff
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relax/transform/transform.py
|
sunggg/relax
|
fad6e9f6c9f4a519ebdba0c604106b1674c5d1ff
|
[
"Apache-2.0"
] | null | null | null |
python/tvm/relax/transform/transform.py
|
sunggg/relax
|
fad6e9f6c9f4a519ebdba0c604106b1674c5d1ff
|
[
"Apache-2.0"
] | 1
|
2021-11-16T01:01:52.000Z
|
2021-11-16T01:01:52.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Relax transformation passes."""
import functools
import inspect
import types
from typing import Callable, Dict, Union
import tvm.ir
from tvm.target import Target
from tvm.meta_schedule.database import PyDatabase
from . import _ffi_api
@tvm._ffi.register_object("relax.FunctionPass")
class FunctionPass(tvm.ir.transform.Pass):
"""A pass that works on each tvm.relax.Function in a module. A function
pass class should be created through `function_pass`.
"""
@tvm._ffi.register_object("relax.DataflowBlockPass")
class DataflowBlockPass(tvm.ir.transform.Pass):
"""A pass that works on each tvm.relax.DataflowBlock in a module."""
def FailTestRewrite() -> tvm.ir.transform.Pass:
"""Incorrectly transform the dataflow structure as fail testcases.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.FailTestRewrite()
def RewriteFMA() -> tvm.ir.transform.Pass:
"""Perform fused multiply add rewriting in dataflow blocks.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.RewriteFMA()
def FuseFMA() -> tvm.ir.transform.Pass:
"""Perform fused multiply add rewriting, generate a subgraph(sub function),
and call into the sub function in the main function.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.FuseFMA()
def ToNonDataflow() -> tvm.ir.transform.Pass:
"""Transform all dataflow structure to non-dataflow version.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.ToNonDataflow()
def CallTIRRewrite() -> tvm.ir.transform.Pass:
"""Perform explicit tensor allocation for call_tir.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.CallTIRRewrite()
def VMMemoryLower() -> tvm.ir.transform.Pass:
"""Perform memory lowering. Lowers the relax.builtin.alloc_tensor intrinsic to VM intrinsics.
Returns
-------
ret: tvm.ir.transform.Pass
"""
return _ffi_api.VMMemoryLower()
def VMShapeLower() -> tvm.ir.transform.Pass:
    """Build a pass lowering relax shape expressions into VM shape-heap
    manipulations, generating the TIR functions that carry out the shape
    calculations.

    Returns
    -------
    ret: tvm.ir.transform.Pass
    """
    return _ffi_api.VMShapeLower()
def Normalize() -> tvm.ir.transform.Pass:
    """Build a pass bringing Relax IR into normal form: expressions are
    un-nested (ANF) and every expression carries checked_type_ and
    shape_ information.

    Returns
    -------
    ret: tvm.ir.transform.Pass
    """
    return _ffi_api.Normalize()
def ResolveGlobals() -> tvm.ir.transform.Pass:
    """Build a pass resolving global variables by string equality, so
    every GlobalVar in the IR refers to the correct GlobalVar of the
    input IRModule. Unresolvable GlobalVars are reported as errors.

    Returns
    -------
    ret: tvm.ir.transform.Pass
    """
    return _ffi_api.ResolveGlobals()
def MetaScheduleApplyHistoryBest(
    database: PyDatabase,
    target: Target,
) -> tvm.ir.transform.Pass:
    """Build a pass applying the best schedules found in a MetaSchedule
    tuning database.

    Parameters
    ----------
    database : PyDatabase
        The metaschedule tuning database to query.
    target : Target
        The compilation target the records were tuned for.

    Returns
    -------
    ret: tvm.ir.transform.Pass
    """
    return _ffi_api.MetaScheduleApplyHistoryBest(database, target)
def BindParams(func_name: str, params: Dict[str, tvm.runtime.NDArray]) -> tvm.ir.transform.Pass:
    """Build a pass that binds named parameters of a module function to
    constant tensors.

    Parameters
    ----------
    func_name : str
        Name of the function whose parameters are bound.
    params : Dict[str, tvm.runtime.NDArray]
        Mapping from parameter name to the constant tensor to bind.

    Returns
    -------
    ret: tvm.ir.transform.Pass
    """
    return _ffi_api.BindParams(func_name, params)
def FoldConstant() -> tvm.ir.transform.Pass:
    """Build a constant-folding pass.

    Returns
    -------
    ret: tvm.ir.transform.Pass
    """
    return _ffi_api.FoldConstant()
def AnnotateTIROpPattern() -> tvm.ir.transform.Pass:
    """Build a pass annotating TIR functions with their op pattern kind.

    Returns
    -------
    ret: tvm.ir.transform.Pass
    """
    return _ffi_api.AnnotateTIROpPattern()
def FuseOps(fuse_opt_level=-1) -> tvm.ir.transform.Pass:
    """Build the operator-fusion pass.

    Bindings in a dataflow block are grouped according to the fusion
    algorithm in the pass implementation; each group becomes a new Relax
    function, and the original bindings are replaced with calls to the
    grouped functions. The follow-up "FuseTIR" pass turns each grouped
    function into a TIR PrimFunc.

    Parameters
    ----------
    fuse_opt_level : int
        Fuse optimization level; -1 means the level is taken from the
        pass context.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass for operator fusion.
    """
    return _ffi_api.FuseOps(fuse_opt_level)
def FuseTIR() -> tvm.ir.transform.Pass:
    """Build the pass fusing primitive relax functions into larger TIR
    functions where possible.

    Returns
    -------
    ret : tvm.transform.Pass
        The registered pass for TIR fusion.
    """
    return _ffi_api.FuseTIR()
def _wrap_class_function_pass(pass_cls, pass_info):
    """Adapt a user-defined Python class into a FunctionPass subclass."""

    class PyFunctionPass(FunctionPass):
        """Internal adapter delegating to an instance of `pass_cls`."""

        def __init__(self, *args, **kwargs):
            # Pre-set the handle so teardown is safe even when pass_cls
            # construction raises.
            self.handle = None
            wrapped = pass_cls(*args, **kwargs)

            # Deliberately a closure over `wrapped`, not a bound method of
            # `self`, to avoid a cyclic reference through the FFI object.
            def _pass_func(func, mod, ctx):
                return wrapped.transform_function(func, mod, ctx)

            self.__init_handle_by_constructor__(
                _ffi_api.MakeFunctionPass, _pass_func, pass_info
            )
            self._inst = wrapped

        def __getattr__(self, name):
            # Unknown attributes fall through to the wrapped instance.
            return self._inst.__getattribute__(name)

    functools.update_wrapper(PyFunctionPass.__init__, pass_cls.__init__)
    PyFunctionPass.__name__ = pass_cls.__name__
    PyFunctionPass.__doc__ = pass_cls.__doc__
    PyFunctionPass.__module__ = pass_cls.__module__
    return PyFunctionPass
def function_pass(
    pass_func=None,
    opt_level=None,
    name=None,
    required=None,
    traceable=False,
) -> Union[Callable, FunctionPass]:
    """Decorate a function pass.

    When `pass_func` is supplied, the decorated pass is returned directly.
    Otherwise a decorator is returned that can be applied either to a
    transform function ``(Function, Module, PassContext) -> Function`` or
    to a class implementing ``transform_function``; decorating a function
    yields a FunctionPass, decorating a class yields a FunctionPass
    subclass.

    Parameters
    ----------
    pass_func : Optional[Callable[(Function, Module, PassContext) -> Function]]
        The transformation function or class.
    opt_level : int
        The optimization level of this function pass. Must be provided.
    name : Optional[str]
        Pass name; defaults to the name of the decorated object.
    required : Optional[List[str]]
        Passes that this function pass depends on.
    traceable : bool
        Whether the function pass is traceable.

    Returns
    -------
    create_function_pass : Union[Callable, FunctionPass]
        The decorator when `pass_func` is None, otherwise the decorated
        result.
    """
    if opt_level is None:
        raise ValueError("Please provide opt_level for the function pass.")
    required = required or []
    if not isinstance(required, (list, tuple)):
        raise TypeError("Required is expected to be the type of " + "list/tuple.")

    def create_function_pass(pass_arg):
        """Build the concrete pass object from a function or a class."""
        pass_name = name if name else pass_arg.__name__
        pass_info = tvm.transform.PassInfo(opt_level, pass_name, required, traceable)
        if inspect.isclass(pass_arg):
            return _wrap_class_function_pass(pass_arg, pass_info)
        if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
            raise TypeError("pass_func must be a callable for Function pass")
        return _ffi_api.MakeFunctionPass(pass_arg, pass_info)

    return create_function_pass(pass_func) if pass_func else create_function_pass
def _wrap_class_dataflowblock_pass(pass_cls, pass_info):
    """Adapt a user-defined Python class into a DataflowBlockPass subclass."""

    class PyDataflowBlockPass(DataflowBlockPass):
        """Internal adapter delegating to an instance of `pass_cls`."""

        def __init__(self, *args, **kwargs):
            # Pre-set the handle so teardown is safe even when pass_cls
            # construction raises.
            self.handle = None
            wrapped = pass_cls(*args, **kwargs)

            # Deliberately a closure over `wrapped`, not a bound method of
            # `self`, to avoid a cyclic reference through the FFI object.
            def _pass_func(func, mod, ctx):
                return wrapped.transform_dataflowblock(func, mod, ctx)

            self.__init_handle_by_constructor__(
                _ffi_api.MakeDataflowBlockPass, _pass_func, pass_info
            )
            self._inst = wrapped

        def __getattr__(self, name):
            # Unknown attributes fall through to the wrapped instance.
            return self._inst.__getattribute__(name)

    functools.update_wrapper(PyDataflowBlockPass.__init__, pass_cls.__init__)
    PyDataflowBlockPass.__name__ = pass_cls.__name__
    PyDataflowBlockPass.__doc__ = pass_cls.__doc__
    PyDataflowBlockPass.__module__ = pass_cls.__module__
    return PyDataflowBlockPass
def dataflowblock_pass(
    pass_func=None, opt_level=None, name=None, required=None, traceable=False
) -> Union[Callable, DataflowBlockPass]:
    """Decorate a dataflowblock pass.

    When `pass_func` is supplied, the decorated pass is returned directly.
    Otherwise a decorator is returned that can be applied either to a
    transform function ``(DataflowBlock, Module, PassContext) ->
    DataflowBlock`` or to a class implementing
    ``transform_dataflowblock``; decorating a function yields a
    DataflowBlockPass, decorating a class yields a DataflowBlockPass
    subclass.

    Parameters
    ----------
    pass_func : Optional[Callable[(DataflowBlock, Module, PassContext) -> DataflowBlock]]
        The transformation function or class.
    opt_level : int
        The optimization level of this dataflowblock pass. Must be
        provided.
    name : Optional[str]
        Pass name; defaults to the name of the decorated object.
    required : Optional[List[str]]
        Passes that this dataflowblock pass depends on.
    traceable : bool
        Whether the dataflowblock pass is traceable.

    Returns
    -------
    create_dataflowblock_pass : Union[Callable, DataflowBlockPass]
        The decorator when `pass_func` is None, otherwise the decorated
        result.
    """
    if opt_level is None:
        raise ValueError("Please provide opt_level for the dataflowblock pass.")
    required = required or []
    if not isinstance(required, (list, tuple)):
        raise TypeError("Required is expected to be the type of " + "list/tuple.")

    def create_dataflowblock_pass(pass_arg):
        """Build the concrete pass object from a function or a class."""
        pass_name = name if name else pass_arg.__name__
        pass_info = tvm.transform.PassInfo(opt_level, pass_name, required, traceable)
        if inspect.isclass(pass_arg):
            return _wrap_class_dataflowblock_pass(pass_arg, pass_info)
        if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
            raise TypeError("pass_func must be a callable for DataflowBlock pass")
        return _ffi_api.MakeDataflowBlockPass(pass_arg, pass_info)

    return create_dataflowblock_pass(pass_func) if pass_func else create_dataflowblock_pass
| 32.838095
| 99
| 0.669432
|
import functools
import inspect
import types
from typing import Callable, Dict, Union
import tvm.ir
from tvm.target import Target
from tvm.meta_schedule.database import PyDatabase
from . import _ffi_api
@tvm._ffi.register_object("relax.FunctionPass")
class FunctionPass(tvm.ir.transform.Pass):
@tvm._ffi.register_object("relax.DataflowBlockPass")
class DataflowBlockPass(tvm.ir.transform.Pass):
def FailTestRewrite() -> tvm.ir.transform.Pass:
return _ffi_api.FailTestRewrite()
def RewriteFMA() -> tvm.ir.transform.Pass:
return _ffi_api.RewriteFMA()
def FuseFMA() -> tvm.ir.transform.Pass:
return _ffi_api.FuseFMA()
def ToNonDataflow() -> tvm.ir.transform.Pass:
return _ffi_api.ToNonDataflow()
def CallTIRRewrite() -> tvm.ir.transform.Pass:
return _ffi_api.CallTIRRewrite()
def VMMemoryLower() -> tvm.ir.transform.Pass:
return _ffi_api.VMMemoryLower()
def VMShapeLower() -> tvm.ir.transform.Pass:
return _ffi_api.VMShapeLower()
def Normalize() -> tvm.ir.transform.Pass:
return _ffi_api.Normalize()
def ResolveGlobals() -> tvm.ir.transform.Pass:
return _ffi_api.ResolveGlobals()
def MetaScheduleApplyHistoryBest(
database: PyDatabase,
target: Target,
) -> tvm.ir.transform.Pass:
return _ffi_api.MetaScheduleApplyHistoryBest(database, target)
def BindParams(func_name: str, params: Dict[str, tvm.runtime.NDArray]) -> tvm.ir.transform.Pass:
return _ffi_api.BindParams(func_name, params)
def FoldConstant() -> tvm.ir.transform.Pass:
return _ffi_api.FoldConstant()
def AnnotateTIROpPattern() -> tvm.ir.transform.Pass:
return _ffi_api.AnnotateTIROpPattern()
def FuseOps(fuse_opt_level=-1) -> tvm.ir.transform.Pass:
return _ffi_api.FuseOps(fuse_opt_level)
def FuseTIR() -> tvm.ir.transform.Pass:
return _ffi_api.FuseTIR()
def _wrap_class_function_pass(pass_cls, pass_info):
class PyFunctionPass(FunctionPass):
def __init__(self, *args, **kwargs):
self.handle = None
inst = pass_cls(*args, **kwargs)
def _pass_func(func, mod, ctx):
return inst.transform_function(func, mod, ctx)
self.__init_handle_by_constructor__(_ffi_api.MakeFunctionPass, _pass_func, pass_info)
self._inst = inst
def __getattr__(self, name):
return self._inst.__getattribute__(name)
functools.update_wrapper(PyFunctionPass.__init__, pass_cls.__init__)
PyFunctionPass.__name__ = pass_cls.__name__
PyFunctionPass.__doc__ = pass_cls.__doc__
PyFunctionPass.__module__ = pass_cls.__module__
return PyFunctionPass
def function_pass(
pass_func=None,
opt_level=None,
name=None,
required=None,
traceable=False,
) -> Union[Callable, FunctionPass]:
if opt_level is None:
raise ValueError("Please provide opt_level for the function pass.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of " + "list/tuple.")
def create_function_pass(pass_arg):
fname = name if name else pass_arg.__name__
info = tvm.transform.PassInfo(opt_level, fname, required, traceable)
if inspect.isclass(pass_arg):
return _wrap_class_function_pass(pass_arg, info)
if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
raise TypeError("pass_func must be a callable for Function pass")
return _ffi_api.MakeFunctionPass(pass_arg, info)
if pass_func:
return create_function_pass(pass_func)
return create_function_pass
def _wrap_class_dataflowblock_pass(pass_cls, pass_info):
class PyDataflowBlockPass(DataflowBlockPass):
def __init__(self, *args, **kwargs):
self.handle = None
inst = pass_cls(*args, **kwargs)
def _pass_func(func, mod, ctx):
return inst.transform_dataflowblock(func, mod, ctx)
self.__init_handle_by_constructor__(
_ffi_api.MakeDataflowBlockPass, _pass_func, pass_info
)
self._inst = inst
def __getattr__(self, name):
return self._inst.__getattribute__(name)
functools.update_wrapper(PyDataflowBlockPass.__init__, pass_cls.__init__)
PyDataflowBlockPass.__name__ = pass_cls.__name__
PyDataflowBlockPass.__doc__ = pass_cls.__doc__
PyDataflowBlockPass.__module__ = pass_cls.__module__
return PyDataflowBlockPass
def dataflowblock_pass(
pass_func=None, opt_level=None, name=None, required=None, traceable=False
) -> Union[Callable, DataflowBlockPass]:
if opt_level is None:
raise ValueError("Please provide opt_level for the dataflowblock pass.")
required = required if required else []
if not isinstance(required, (list, tuple)):
raise TypeError("Required is expected to be the type of " + "list/tuple.")
def create_dataflowblock_pass(pass_arg):
fname = name if name else pass_arg.__name__
info = tvm.transform.PassInfo(opt_level, fname, required, traceable)
if inspect.isclass(pass_arg):
return _wrap_class_dataflowblock_pass(pass_arg, info)
if not isinstance(pass_arg, (types.FunctionType, types.LambdaType)):
raise TypeError("pass_func must be a callable for DataflowBlock pass")
return _ffi_api.MakeDataflowBlockPass(pass_arg, info)
if pass_func:
return create_dataflowblock_pass(pass_func)
return create_dataflowblock_pass
| true
| true
|
f7076c0d99ab4811272b1a459a9679fad5e7f7dd
| 4,300
|
py
|
Python
|
virtual/lib/python3.6/site-packages/PIL/WmfImagePlugin.py
|
GabrielSpear/PersonalGallery
|
e863339af357469598ed3380d1449f579ce2a3b2
|
[
"MIT"
] | 1
|
2021-02-08T07:49:35.000Z
|
2021-02-08T07:49:35.000Z
|
virtual/lib/python3.6/site-packages/PIL/WmfImagePlugin.py
|
GabrielSpear/PersonalGallery
|
e863339af357469598ed3380d1449f579ce2a3b2
|
[
"MIT"
] | 2
|
2017-05-07T06:30:37.000Z
|
2017-05-08T06:58:23.000Z
|
virtual/lib/python3.6/site-packages/PIL/WmfImagePlugin.py
|
GabrielSpear/PersonalGallery
|
e863339af357469598ed3380d1449f579ce2a3b2
|
[
"MIT"
] | 2
|
2019-05-19T08:12:45.000Z
|
2021-08-28T07:16:42.000Z
|
#
# The Python Imaging Library
# $Id$
#
# WMF stub codec
#
# history:
# 1996-12-14 fl Created
# 2004-02-22 fl Turned into a stub driver
# 2004-02-23 fl Added EMF support
#
# Copyright (c) Secret Labs AB 1997-2004. All rights reserved.
# Copyright (c) Fredrik Lundh 1996.
#
# See the README file for information on usage and redistribution.
#
# WMF/EMF reference documentation:
# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/[MS-WMF].pdf
# http://wvware.sourceforge.net/caolan/index.html
# http://wvware.sourceforge.net/caolan/ora-wmf.html
from __future__ import print_function
from . import Image, ImageFile
from ._binary import i16le as word, si16le as short, i32le as dword, si32le as _long
__version__ = "0.2"

# Currently installed WMF render handler (set via register_handler);
# None until an application or the Windows default below installs one.
_handler = None

# Python 3 compatibility: alias the removed builtin `long` to int.
if str != bytes:
    long = int
def register_handler(handler):
    """
    Install an application-specific WMF image handler.

    The handler is stored in the module-level ``_handler`` slot and used
    by the stub plugin to render/save metafiles.

    :param handler: Handler object.
    """
    global _handler
    _handler = handler
if hasattr(Image.core, "drawwmf"):
    # Install the default render handler (available on Windows builds only).

    class WmfHandler(object):
        def open(self, im):
            im.mode = "RGB"
            self.bbox = im.info["wmf_bbox"]

        def load(self, im):
            im.fp.seek(0)  # rewind to the start of the metafile
            raw = im.fp.read()
            # BGR rows, padded to 4-byte boundaries, bottom-up (-1).
            stride = (im.size[0] * 3 + 3) & -4
            return Image.frombytes(
                "RGB",
                im.size,
                Image.core.drawwmf(raw, im.size, self.bbox),
                "raw",
                "BGR",
                stride,
                -1,
            )

    register_handler(WmfHandler())
#
# --------------------------------------------------------------------
# Read WMF file
def _accept(prefix):
return (
prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or
prefix[:4] == b"\x01\x00\x00\x00"
)
##
# Image plugin for Windows metafiles.
class WmfStubImageFile(ImageFile.StubImageFile):
    """Stub image plugin for Windows metafiles (placeable WMF and EMF).

    Only the header is parsed here (size, bounding box, dpi); actual
    rendering is delegated to the handler installed via register_handler.
    """

    format = "WMF"
    format_description = "Windows Metafile"

    def _open(self):
        # 80 bytes cover both the placeable WMF header and the EMF header.
        s = self.fp.read(80)

        if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00":
            # placeable windows metafile

            # get units per inch
            inch = word(s, 14)

            # get bounding box (metafile units)
            x0 = short(s, 6)
            y0 = short(s, 8)
            x1 = short(s, 10)
            y1 = short(s, 12)

            # normalize size to 72 dots per inch
            size = (x1 - x0) * 72 // inch, (y1 - y0) * 72 // inch

            self.info["wmf_bbox"] = x0, y0, x1, y1
            self.info["dpi"] = 72

            # sanity check (standard metafile header must follow)
            if s[22:26] != b"\x01\x00\t\x00":
                raise SyntaxError("Unsupported WMF file format")

        elif dword(s) == 1 and s[40:44] == b" EMF":
            # enhanced metafile

            # get bounding box (device units)
            x0 = _long(s, 8)
            y0 = _long(s, 12)
            x1 = _long(s, 16)
            y1 = _long(s, 20)

            # get frame (in 0.01 millimeter units)
            frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)

            size = x1 - x0, y1 - y0

            # calculate dots per inch from bbox and frame
            # BUG FIX: the horizontal dpi must use the x extent (x1 - x0);
            # the original code used (x1 - y0), mixing x and y coordinates.
            xdpi = 2540 * (x1 - x0) // (frame[2] - frame[0])
            ydpi = 2540 * (y1 - y0) // (frame[3] - frame[1])

            self.info["wmf_bbox"] = x0, y0, x1, y1

            if xdpi == ydpi:
                self.info["dpi"] = xdpi
            else:
                self.info["dpi"] = xdpi, ydpi

        else:
            raise SyntaxError("Unsupported file format")

        self.mode = "RGB"
        self.size = size

        loader = self._load()
        if loader:
            loader.open(self)

    def _load(self):
        # Return whatever handler the application registered (may be None).
        return _handler
def _save(im, fp, filename):
    """Delegate saving to the installed WMF handler.

    Raises IOError when no handler providing a ``save`` method has been
    installed via register_handler.
    """
    # BUG FIX: the original called hasattr("_handler", "save"), which tests
    # the string literal "_handler" (a str has no "save" attribute), so the
    # check never inspected the actual handler object.
    if _handler is None or not hasattr(_handler, "save"):
        raise IOError("WMF save handler not installed")
    _handler.save(im, fp, filename)
#
# --------------------------------------------------------------------
# Registry stuff

# Register the stub plugin so Image.open() recognizes WMF/EMF data by
# magic bytes (_accept) and associates the .wmf/.emf extensions.
Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)
Image.register_save(WmfStubImageFile.format, _save)
Image.register_extension(WmfStubImageFile.format, ".wmf")
Image.register_extension(WmfStubImageFile.format, ".emf")
| 25.443787
| 92
| 0.550233
|
from __future__ import print_function
from . import Image, ImageFile
from ._binary import i16le as word, si16le as short, i32le as dword, si32le as _long
__version__ = "0.2"
_handler = None
if str != bytes:
long = int
def register_handler(handler):
global _handler
_handler = handler
if hasattr(Image.core, "drawwmf"):
class WmfHandler(object):
def open(self, im):
im.mode = "RGB"
self.bbox = im.info["wmf_bbox"]
def load(self, im):
im.fp.seek(0)
return Image.frombytes(
"RGB", im.size,
Image.core.drawwmf(im.fp.read(), im.size, self.bbox),
"raw", "BGR", (im.size[0]*3 + 3) & -4, -1
)
register_handler(WmfHandler())
def _accept(prefix):
return (
prefix[:6] == b"\xd7\xcd\xc6\x9a\x00\x00" or
prefix[:4] == b"\x01\x00\x00\x00"
)
class WmfStubImageFile(ImageFile.StubImageFile):
format = "WMF"
format_description = "Windows Metafile"
def _open(self):
s = self.fp.read(80)
if s[:6] == b"\xd7\xcd\xc6\x9a\x00\x00":
inch = word(s, 14)
x0 = short(s, 6)
y0 = short(s, 8)
x1 = short(s, 10)
y1 = short(s, 12)
size = (x1 - x0) * 72 // inch, (y1 - y0) * 72 // inch
self.info["wmf_bbox"] = x0, y0, x1, y1
self.info["dpi"] = 72
if s[22:26] != b"\x01\x00\t\x00":
raise SyntaxError("Unsupported WMF file format")
elif dword(s) == 1 and s[40:44] == b" EMF":
x0 = _long(s, 8)
y0 = _long(s, 12)
x1 = _long(s, 16)
y1 = _long(s, 20)
frame = _long(s, 24), _long(s, 28), _long(s, 32), _long(s, 36)
size = x1 - x0, y1 - y0
xdpi = 2540 * (x1 - y0) // (frame[2] - frame[0])
ydpi = 2540 * (y1 - y0) // (frame[3] - frame[1])
self.info["wmf_bbox"] = x0, y0, x1, y1
if xdpi == ydpi:
self.info["dpi"] = xdpi
else:
self.info["dpi"] = xdpi, ydpi
else:
raise SyntaxError("Unsupported file format")
self.mode = "RGB"
self.size = size
loader = self._load()
if loader:
loader.open(self)
def _load(self):
return _handler
def _save(im, fp, filename):
if _handler is None or not hasattr("_handler", "save"):
raise IOError("WMF save handler not installed")
_handler.save(im, fp, filename)
Image.register_open(WmfStubImageFile.format, WmfStubImageFile, _accept)
Image.register_save(WmfStubImageFile.format, _save)
Image.register_extension(WmfStubImageFile.format, ".wmf")
Image.register_extension(WmfStubImageFile.format, ".emf")
| true
| true
|
f7076cd82473a73c1260bf024f4491e7a0bec4c1
| 1,069
|
py
|
Python
|
dev/circuitpython/examples/rockblock_send_data.py
|
scripsi/picodeebee
|
0ec77e92f09fa8711705623482e57a5e0b702696
|
[
"MIT"
] | 7
|
2021-03-15T10:06:20.000Z
|
2022-03-23T02:53:15.000Z
|
Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/rockblock_send_data.py
|
IanSMoyes/SpiderPi
|
cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1
|
[
"Apache-2.0"
] | 5
|
2021-04-27T18:21:11.000Z
|
2021-05-02T14:17:14.000Z
|
Lights/adafruit-circuitpython-bundle-6.x-mpy-20210310/examples/rockblock_send_data.py
|
IanSMoyes/SpiderPi
|
cc3469980ae87b92d0dc43c05dbd579f0fa8c4b1
|
[
"Apache-2.0"
] | null | null | null |
# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries
# SPDX-License-Identifier: MIT
# pylint: disable=wrong-import-position
import time
import struct

# CircuitPython / Blinka
import board

uart = board.UART()
uart.baudrate = 19200

# via USB cable
# import serial
# uart = serial.Serial("/dev/ttyUSB0", 19200)

from adafruit_rockblock import RockBlock

rb = RockBlock(uart)

# create some data: an int, a float, and a length-prefixed string
some_int = 2112
some_float = 42.123456789
some_text = "hello world"
text_len = len(some_text)

# create binary data
data = struct.pack("i", some_int)
data += struct.pack("f", some_float)
# CONSISTENCY FIX: reuse the precomputed text_len instead of calling
# len(some_text) a second time (same value, one source of truth).
data += struct.pack("i", text_len)
data += struct.pack("{}s".format(text_len), some_text.encode())

# put data in outbound buffer
rb.data_out = data

# try a satellite Short Burst Data transfer; a leading status code above 8
# means the transfer is not finished, so wait and retry.
print("Talking to satellite...")
status = rb.satellite_transfer()

retry = 0
while status[0] > 8:
    time.sleep(10)
    status = rb.satellite_transfer()
    print(retry, status)
    retry += 1
print("\nDONE.")
| 21.816327
| 64
| 0.6913
|
import time
import struct
import board
uart = board.UART()
uart.baudrate = 19200
from adafruit_rockblock import RockBlock
rb = RockBlock(uart)
some_int = 2112
some_float = 42.123456789
some_text = "hello world"
text_len = len(some_text)
data = struct.pack("i", some_int)
data += struct.pack("f", some_float)
data += struct.pack("i", len(some_text))
data += struct.pack("{}s".format(text_len), some_text.encode())
rb.data_out = data
print("Talking to satellite...")
status = rb.satellite_transfer()
retry = 0
while status[0] > 8:
time.sleep(10)
status = rb.satellite_transfer()
print(retry, status)
retry += 1
print("\nDONE.")
| true
| true
|
f7076cece7ffd30ca69295da749759a13db9576d
| 3,203
|
py
|
Python
|
code-sdc/models/chauffeur.py
|
mosin26/selforacle
|
c99478bf65fd137014f3b7947ed83d105b9f038a
|
[
"MIT"
] | 17
|
2020-02-29T08:12:01.000Z
|
2022-01-31T16:17:43.000Z
|
code-sdc/models/chauffeur.py
|
mosin26/selforacle
|
c99478bf65fd137014f3b7947ed83d105b9f038a
|
[
"MIT"
] | 6
|
2020-03-02T10:06:55.000Z
|
2022-02-22T15:13:04.000Z
|
code-sdc/models/chauffeur.py
|
mosin26/selforacle
|
c99478bf65fd137014f3b7947ed83d105b9f038a
|
[
"MIT"
] | 12
|
2020-02-28T14:22:48.000Z
|
2022-02-22T15:13:20.000Z
|
import logging
from keras import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dropout, Dense, SpatialDropout2D, K
from keras.optimizers import SGD
from keras.regularizers import l2
from utils_train_self_driving_car import INPUT_SHAPE, rmse
from models.abstract_model_provider import AbstractModelProvider
# Module-level logger and the provider name used by the model registry.
# NOTE(review): logging.Logger("Chauffeur") constructs a logger detached
# from the logging hierarchy; logging.getLogger(__name__) is the
# conventional choice — confirm before changing behavior.
logger = logging.Logger("Chauffeur")
NAME = "chauffeur"
# Note: For chauffeur you still have to change the following in the drive method
# (not yet done automatically and im not working on it as it does not look like we're going to use chauffeur')
# def rmse(y_true, y_pred):
#     '''Calculates RMSE
#     '''
#     return K.sqrt(K.mean(K.square(y_pred - y_true)))
#
#
# model = load_model(filepath=args.model, custom_objects={"rmse": rmse}, compile=True)
class Chauffeur(AbstractModelProvider):
    """Model provider building the Chauffeur steering CNN (Keras 1.x API)."""

    def get_name(self) -> str:
        """Return the registry name of this model provider."""
        return NAME

    def get_model(self, args):
        """Build and compile the Chauffeur convolutional network.

        Six conv/pooling stages with spatial dropout, followed by a single
        L2-regularized dense output (steering value), trained with MSE and
        monitored with RMSE. `args` is currently ignored (see warning).
        """
        logger.warning("We are currently still ignoring the args settings (e.g. args.learning_rate) in chauffeur")
        # Taken from https://github.com/udacity/self-driving-car/blob/master/steering-models/community-models/chauffeur/models.py
        use_adadelta = True
        learning_rate=0.01
        W_l2=0.0001
        input_shape = INPUT_SHAPE # Original Chauffeur uses input_shape=(120, 320, 3)
        # NOTE(review): Convolution2D(filters, rows, cols, init=..., border_mode=...)
        # is the Keras 1.x signature; this code will not run on Keras 2+ unchanged.
        model = Sequential()
        model.add(Convolution2D(16, 5, 5,
                                input_shape=input_shape,
                                init= "he_normal",
                                activation='relu',
                                border_mode='same'))
        model.add(SpatialDropout2D(0.1))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Convolution2D(20, 5, 5,
                                init= "he_normal",
                                activation='relu',
                                border_mode='same'))
        model.add(SpatialDropout2D(0.1))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Convolution2D(40, 3, 3,
                                init= "he_normal",
                                activation='relu',
                                border_mode='same'))
        model.add(SpatialDropout2D(0.1))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Convolution2D(60, 3, 3,
                                init= "he_normal",
                                activation='relu',
                                border_mode='same'))
        model.add(SpatialDropout2D(0.1))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Convolution2D(80, 2, 2,
                                init= "he_normal",
                                activation='relu',
                                border_mode='same'))
        model.add(SpatialDropout2D(0.1))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Convolution2D(128, 2, 2,
                                init= "he_normal",
                                activation='relu',
                                border_mode='same'))
        # Flatten into a single regression head.
        model.add(Flatten())
        model.add(Dropout(0.5))
        model.add(Dense(
            output_dim=1,
            init='he_normal',
            W_regularizer=l2(W_l2)))
        optimizer = ('adadelta' if use_adadelta
                     else SGD(lr=learning_rate, momentum=0.9))
        model.compile(
            loss='mean_squared_error',
            optimizer=optimizer,
            metrics=[rmse])
        return model
| 34.815217
| 129
| 0.603185
|
import logging
from keras import Sequential
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Dropout, Dense, SpatialDropout2D, K
from keras.optimizers import SGD
from keras.regularizers import l2
from utils_train_self_driving_car import INPUT_SHAPE, rmse
from models.abstract_model_provider import AbstractModelProvider
logger = logging.Logger("Chauffeur")
NAME = "chauffeur"
# '''
class Chauffeur(AbstractModelProvider):
def get_name(self) -> str:
return NAME
def get_model(self, args):
logger.warning("We are currently still ignoring the args settings (e.g. args.learning_rate) in chauffeur")
use_adadelta = True
learning_rate=0.01
W_l2=0.0001
input_shape = INPUT_SHAPE
model = Sequential()
model.add(Convolution2D(16, 5, 5,
input_shape=input_shape,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(SpatialDropout2D(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(20, 5, 5,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(SpatialDropout2D(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(40, 3, 3,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(SpatialDropout2D(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(60, 3, 3,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(SpatialDropout2D(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(80, 2, 2,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(SpatialDropout2D(0.1))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Convolution2D(128, 2, 2,
init= "he_normal",
activation='relu',
border_mode='same'))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(
output_dim=1,
init='he_normal',
W_regularizer=l2(W_l2)))
optimizer = ('adadelta' if use_adadelta
else SGD(lr=learning_rate, momentum=0.9))
model.compile(
loss='mean_squared_error',
optimizer=optimizer,
metrics=[rmse])
return model
| true
| true
|
f7076d9c53440bb973967a498a80e0ceda5bc56e
| 552
|
py
|
Python
|
dogAccountantProject/manage.py
|
cs-fullstack-2019-fall/django-models-cw-tdude0175
|
71dc308508f8eb6d9480509d6288aaa2fd47ff1d
|
[
"Apache-2.0"
] | null | null | null |
dogAccountantProject/manage.py
|
cs-fullstack-2019-fall/django-models-cw-tdude0175
|
71dc308508f8eb6d9480509d6288aaa2fd47ff1d
|
[
"Apache-2.0"
] | null | null | null |
dogAccountantProject/manage.py
|
cs-fullstack-2019-fall/django-models-cw-tdude0175
|
71dc308508f8eb6d9480509d6288aaa2fd47ff1d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless already set in the env.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dogAccountantProject.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint about the usual causes (missing install /
        # inactive virtualenv), chaining the original ImportError.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the management command given on the command line.
    execute_from_command_line(sys.argv)
| 34.5
| 84
| 0.693841
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dogAccountantProject.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true
| true
|
f7076eecf0603a14c3c760df4b5a73836e595690
| 686
|
py
|
Python
|
app/core/migrations/0003_ingredient.py
|
alextfos/receipe-app-api
|
3bcb2e5eba232615d8fc4292e9335e4d55685d5e
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_ingredient.py
|
alextfos/receipe-app-api
|
3bcb2e5eba232615d8fc4292e9335e4d55685d5e
|
[
"MIT"
] | null | null | null |
app/core/migrations/0003_ingredient.py
|
alextfos/receipe-app-api
|
3bcb2e5eba232615d8fc4292e9335e4d55685d5e
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1.15 on 2021-08-12 11:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: create the ``Ingredient`` model.

    Each Ingredient has a name and a FK to the configured auth user
    model; deleting the user cascades to their ingredients.
    """

    dependencies = [
        ('core', '0002_tag'),
    ]
    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.583333
| 118
| 0.618076
|
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0002_tag'),
]
operations = [
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true
| true
|
f7076ef79bd6c5f48bb073783a0a1f332d581a9d
| 6,322
|
py
|
Python
|
sdk/eventhub/azure-eventhubs/samples/sync_samples/sample_code_eventhub.py
|
quxiaozha/azure-sdk-for-python
|
60af58e4e356714383ced0a2cb0b1433237a0ee1
|
[
"MIT"
] | null | null | null |
sdk/eventhub/azure-eventhubs/samples/sync_samples/sample_code_eventhub.py
|
quxiaozha/azure-sdk-for-python
|
60af58e4e356714383ced0a2cb0b1433237a0ee1
|
[
"MIT"
] | null | null | null |
sdk/eventhub/azure-eventhubs/samples/sync_samples/sample_code_eventhub.py
|
quxiaozha/azure-sdk-for-python
|
60af58e4e356714383ced0a2cb0b1433237a0ee1
|
[
"MIT"
] | null | null | null |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
import time
import logging
def create_eventhub_producer_client():
    """Doc-sample: build an EventHubProducerClient two ways.

    The [START]/[END] comment markers delimit snippets extracted into the
    published docs — keep them intact.  The second client (shared-key
    credential) overwrites the first and is the one returned.  Reads the
    EVENT_HUB_* environment variables.
    """
    # [START create_eventhub_producer_client_from_conn_str_sync]
    import os
    from azure.eventhub import EventHubProducerClient
    event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
    eventhub_name = os.environ['EVENT_HUB_NAME']
    producer = EventHubProducerClient.from_connection_string(
        conn_str=event_hub_connection_str,
        eventhub_name=eventhub_name
    )
    # [END create_eventhub_producer_client_from_conn_str_sync]
    # [START create_eventhub_producer_client_sync]
    import os
    from azure.eventhub import EventHubProducerClient, EventHubSharedKeyCredential
    fully_qualified_namespace = os.environ['EVENT_HUB_HOSTNAME']
    eventhub_name = os.environ['EVENT_HUB_NAME']
    shared_access_policy = os.environ['EVENT_HUB_SAS_POLICY']
    shared_access_key = os.environ['EVENT_HUB_SAS_KEY']
    credential = EventHubSharedKeyCredential(shared_access_policy, shared_access_key)
    producer = EventHubProducerClient(
        fully_qualified_namespace=fully_qualified_namespace,
        eventhub_name=eventhub_name,
        credential=credential
    )
    # [END create_eventhub_producer_client_sync]
    return producer
def create_eventhub_consumer_client():
    """Doc-sample: build an EventHubConsumerClient two ways.

    The [START]/[END] markers delimit doc snippets — keep them intact.
    The second client (shared-key credential) overwrites the first and is
    the one returned.  Reads the EVENT_HUB_* environment variables.
    """
    # [START create_eventhub_consumer_client_from_conn_str_sync]
    import os
    from azure.eventhub import EventHubConsumerClient
    event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
    eventhub_name = os.environ['EVENT_HUB_NAME']
    consumer = EventHubConsumerClient.from_connection_string(
        conn_str=event_hub_connection_str,
        eventhub_name=eventhub_name
    )
    # [END create_eventhub_consumer_client_from_conn_str_sync]
    # [START create_eventhub_consumer_client_sync]
    import os
    from azure.eventhub import EventHubConsumerClient, EventHubSharedKeyCredential
    fully_qualified_namespace = os.environ['EVENT_HUB_HOSTNAME']
    eventhub_name = os.environ['EVENT_HUB_NAME']
    shared_access_policy = os.environ['EVENT_HUB_SAS_POLICY']
    shared_access_key = os.environ['EVENT_HUB_SAS_KEY']
    credential = EventHubSharedKeyCredential(shared_access_policy, shared_access_key)
    consumer = EventHubConsumerClient(
        fully_qualified_namespace=fully_qualified_namespace,
        eventhub_name=eventhub_name,
        credential=credential)
    # [END create_eventhub_consumer_client_sync]
    return consumer
def example_eventhub_sync_send_and_receive():
    """Doc-sample: create events, batch them, send one, then receive.

    Note: ``consumer.receive`` blocks indefinitely, so this sample does
    not return on its own.  The [START]/[END] markers are doc snippets.
    """
    producer = create_eventhub_producer_client()
    consumer = create_eventhub_consumer_client()
    try:
        logger = logging.getLogger("azure.eventhub")
        # [START create_event_data]
        from azure.eventhub import EventData
        event_data = EventData("String data")
        event_data = EventData(b"Bytes data")
        # [END create_event_data]
        # [START eventhub_producer_client_create_batch_sync]
        event_data_batch = producer.create_batch(max_size=10000)
        while True:
            try:
                event_data_batch.try_add(EventData('Message inside EventBatchData'))
            except ValueError:
                # The EventDataBatch object reaches its max_size.
                # You can send the full EventDataBatch object and create a new one here.
                break
        # [END eventhub_producer_client_create_batch_sync]
        # [START eventhub_producer_client_send_sync]
        with producer:
            event_data = EventData(b"A single event")
            producer.send(event_data)
        # [END eventhub_producer_client_send_sync]
        time.sleep(1)
        # [START eventhub_consumer_client_receive_sync]
        logger = logging.getLogger("azure.eventhub")
        def on_event(partition_context, event):
            logger.info("Received event from partition: {}".format(partition_context.partition_id))
            # Do ops on received events
        with consumer:
            consumer.receive(on_event=on_event, consumer_group='$Default')
        # [END eventhub_consumer_client_receive_sync]
    finally:
        pass
def example_eventhub_producer_ops():
    """Doc-sample: send a single event and explicitly close the producer."""
    # [START eventhub_producer_client_close_sync]
    import os
    from azure.eventhub import EventHubProducerClient, EventData
    event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
    eventhub_name = os.environ['EVENT_HUB_NAME']
    producer = EventHubProducerClient.from_connection_string(
        conn_str=event_hub_connection_str,
        eventhub_name=eventhub_name
    )
    try:
        producer.send(EventData(b"A single event"))
    finally:
        # Close down the producer handler.
        producer.close()
    # [END eventhub_producer_client_close_sync]
def example_eventhub_consumer_ops():
    """Doc-sample: receive on a worker thread for ~10s, then close.

    ``receive`` blocks, so it runs in a thread and the main thread stops
    it by calling ``consumer.close()``.
    """
    # [START eventhub_consumer_client_close_sync]
    import os
    import threading
    event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
    eventhub_name = os.environ['EVENT_HUB_NAME']
    from azure.eventhub import EventHubConsumerClient
    consumer = EventHubConsumerClient.from_connection_string(
        conn_str=event_hub_connection_str,
        eventhub_name=eventhub_name
    )
    logger = logging.getLogger("azure.eventhub")
    def on_event(partition_context, event):
        logger.info("Received event from partition: {}".format(partition_context.partition_id))
        # Do ops on received events
    # The receive method is blocking call, so execute it in a thread to
    # better demonstrate how to stop the receiving by calling he close method.
    worker = threading.Thread(
        target=consumer.receive,
        kwargs={"on_event": on_event, "consumer_group": "$Default"}
    )
    worker.start()
    time.sleep(10)  # Keep receiving for 10s then close.
    # Close down the consumer handler explicitly.
    consumer.close()
    # [END eventhub_consumer_client_close_sync]
if __name__ == '__main__':
    # Run the self-terminating samples; the send/receive sample is left
    # disabled because its receive loop blocks forever.
    example_eventhub_producer_ops()
    example_eventhub_consumer_ops()
    # example_eventhub_sync_send_and_receive()
| 35.920455
| 99
| 0.715913
|
import time
import logging
def create_eventhub_producer_client():
import os
from azure.eventhub import EventHubProducerClient
event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
eventhub_name = os.environ['EVENT_HUB_NAME']
producer = EventHubProducerClient.from_connection_string(
conn_str=event_hub_connection_str,
eventhub_name=eventhub_name
)
import os
from azure.eventhub import EventHubProducerClient, EventHubSharedKeyCredential
fully_qualified_namespace = os.environ['EVENT_HUB_HOSTNAME']
eventhub_name = os.environ['EVENT_HUB_NAME']
shared_access_policy = os.environ['EVENT_HUB_SAS_POLICY']
shared_access_key = os.environ['EVENT_HUB_SAS_KEY']
credential = EventHubSharedKeyCredential(shared_access_policy, shared_access_key)
producer = EventHubProducerClient(
fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
credential=credential
)
return producer
def create_eventhub_consumer_client():
import os
from azure.eventhub import EventHubConsumerClient
event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
eventhub_name = os.environ['EVENT_HUB_NAME']
consumer = EventHubConsumerClient.from_connection_string(
conn_str=event_hub_connection_str,
eventhub_name=eventhub_name
)
import os
from azure.eventhub import EventHubConsumerClient, EventHubSharedKeyCredential
fully_qualified_namespace = os.environ['EVENT_HUB_HOSTNAME']
eventhub_name = os.environ['EVENT_HUB_NAME']
shared_access_policy = os.environ['EVENT_HUB_SAS_POLICY']
shared_access_key = os.environ['EVENT_HUB_SAS_KEY']
credential = EventHubSharedKeyCredential(shared_access_policy, shared_access_key)
consumer = EventHubConsumerClient(
fully_qualified_namespace=fully_qualified_namespace,
eventhub_name=eventhub_name,
credential=credential)
return consumer
def example_eventhub_sync_send_and_receive():
producer = create_eventhub_producer_client()
consumer = create_eventhub_consumer_client()
try:
logger = logging.getLogger("azure.eventhub")
from azure.eventhub import EventData
event_data = EventData("String data")
event_data = EventData(b"Bytes data")
event_data_batch = producer.create_batch(max_size=10000)
while True:
try:
event_data_batch.try_add(EventData('Message inside EventBatchData'))
except ValueError:
break
with producer:
event_data = EventData(b"A single event")
producer.send(event_data)
time.sleep(1)
logger = logging.getLogger("azure.eventhub")
def on_event(partition_context, event):
logger.info("Received event from partition: {}".format(partition_context.partition_id))
with consumer:
consumer.receive(on_event=on_event, consumer_group='$Default')
finally:
pass
def example_eventhub_producer_ops():
import os
from azure.eventhub import EventHubProducerClient, EventData
event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
eventhub_name = os.environ['EVENT_HUB_NAME']
producer = EventHubProducerClient.from_connection_string(
conn_str=event_hub_connection_str,
eventhub_name=eventhub_name
)
try:
producer.send(EventData(b"A single event"))
finally:
producer.close()
def example_eventhub_consumer_ops():
import os
import threading
event_hub_connection_str = os.environ['EVENT_HUB_CONN_STR']
eventhub_name = os.environ['EVENT_HUB_NAME']
from azure.eventhub import EventHubConsumerClient
consumer = EventHubConsumerClient.from_connection_string(
conn_str=event_hub_connection_str,
eventhub_name=eventhub_name
)
logger = logging.getLogger("azure.eventhub")
def on_event(partition_context, event):
logger.info("Received event from partition: {}".format(partition_context.partition_id))
worker = threading.Thread(
target=consumer.receive,
kwargs={"on_event": on_event, "consumer_group": "$Default"}
)
worker.start()
time.sleep(10)
consumer.close()
if __name__ == '__main__':
example_eventhub_producer_ops()
example_eventhub_consumer_ops()
| true
| true
|
f7077019a17e6790f579a777930a023004695713
| 1,137
|
py
|
Python
|
deeple/config.py
|
luisfrvz/deeple
|
9e3a58ca69bcb7939e55f592855b642150c4e466
|
[
"MIT"
] | null | null | null |
deeple/config.py
|
luisfrvz/deeple
|
9e3a58ca69bcb7939e55f592855b642150c4e466
|
[
"MIT"
] | null | null | null |
deeple/config.py
|
luisfrvz/deeple
|
9e3a58ca69bcb7939e55f592855b642150c4e466
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Module to define CONSTANTS used across the project
"""
from os import path
# Identify the base directory of the package (parent of this module's dir).
BASE_DIR = path.dirname(path.normpath(path.dirname(__file__)))
# Training and predict (deepaas>=0.5.0) arguments as a dict of dicts
# with the following structure to feed the deepaas API parser:
# (see also get_train_args() )
# { 'arg1' : {'default': 1,        # default value
#             'help': '',          # can be an empty string
#             'required': False    # bool
#             },
#   'arg2' : {'default': 'value1',
#             'choices': ['value1', 'value2', 'value3'],
#             'help': 'multi-choice argument',
#             'required': False
#             },
#   'arg3' : {...
#             },
# ...
# }
train_args = { 'arg1': {'default': 1,
                        'help': '',
                        'required': False
                        },
}
# NOTE: deepaas>=0.5.0 calls get_test_args() to get args for 'predict'.
predict_args = { 'arg2': {'default': 1,
                        'help': '',
                        'required': False
                        },
}
| 29.153846
| 68
| 0.475814
|
from os import path
BASE_DIR = path.dirname(path.normpath(path.dirname(__file__)))
': 1,
'help': '',
'required': False
},
}
predict_args = { 'arg2': {'default': 1,
'help': '',
'required': False
},
}
| true
| true
|
f70770281b179107c384838fb40f824cdf5615f7
| 3,269
|
py
|
Python
|
configs/mixSETR/CNN_SETR_PUP_768x768_80k_cityscapes_bs_8.py
|
HaitaoWang97/SETR
|
6b8c8db2fc66bfa854c336f31ad18b700e9541ac
|
[
"MIT"
] | null | null | null |
configs/mixSETR/CNN_SETR_PUP_768x768_80k_cityscapes_bs_8.py
|
HaitaoWang97/SETR
|
6b8c8db2fc66bfa854c336f31ad18b700e9541ac
|
[
"MIT"
] | null | null | null |
configs/mixSETR/CNN_SETR_PUP_768x768_80k_cityscapes_bs_8.py
|
HaitaoWang97/SETR
|
6b8c8db2fc66bfa854c336f31ad18b700e9541ac
|
[
"MIT"
] | null | null | null |
_base_ = [
    '../_base_/datasets/cityscapes_768x768.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    # CNN backbone: ResNet-50 v1c, all four stages exposed via out_indices.
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 1, 1),
        strides=(1, 2, 2, 2),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    # Hybrid neck: transformer (VitFpn) over each backbone stage, then FPN.
    neck=[dict(
        type='VitFpn',
        img_size=[192, 96, 48, 24],
        patch_size=[4, 4, 4, 4],
        in_chans=[256, 512, 1024, 2048],
        embed_dim=[256, 512, 1024, 2048],
        depth=3,
        num_heads=8,
        num_classes=19,
        drop_rate=0.1,
        norm_cfg=norm_cfg,
        pos_embed_interp=True,
        align_corners=False),
        dict(
            type='FPN',
            in_channels=[256, 512, 1024, 2048],
            out_channels=256,
            num_outs=4,
        )],
    decode_head=dict(
        type='TransFpnHead',
        in_channels=256,
        channels=128,
        in_index=23,
        img_size=768,
        embed_dim=256,
        num_classes=19,
        norm_cfg=norm_cfg,
        num_conv=4,
        upsampling_method='bilinear',
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
# model training and testing settings
# Head gets a 10x learning-rate multiplier relative to the backbone.
optimizer = dict(lr=0.01, weight_decay=0.0,
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})
)
crop_size = (768, 768)
train_cfg = dict()
# Sliding-window inference over 768x768 crops with a 512 stride.
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(512, 512))
find_unused_parameters = True
data = dict(samples_per_gpu=2)
# model settings
# 1. resnet output [H/4, W/4, 256], [H/8, W/8, 512], [H/16, W/16, 1024], [H/32, W/32, 1024], [192, 96, 48, 24]
# 2. patch size [4, 4, 4, 4], L[48*48, 24*24, 12*12, 6*6] pyramid size
# 3. transformer [256, 512, 1024, 2048]
# 4. img_size 768*768, batch size 2 for each gpu, 8682M for each gpu
# 5. official cityscapes lable should be converted by scripts tools/convert_datasets/cityscapes.py else raise runtimeerror cuda error
# model result
# 2021-10-04 08:35:52,622 - mmseg - INFO - Iter(val) [80000]
# Class IoU Acc
# road 96.98 98.51
# sidewalk 77.47 86.84
# building 88.15 94.70
# wall 45.00 53.99
# fence 43.74 58.45
# pole 39.98 49.01
# traffic light 46.02 58.42
# traffic sign 56.44 66.34
# vegetation 88.85 94.80
# terrain 52.96 67.15
# sky 90.97 95.54
# person 66.23 81.86
# rider 33.90 43.57
# car 90.36 95.72
# truck 56.61 69.87
# bus 58.94 79.92
# train 44.69 55.91
# motorcycle 20.97 25.95
# bicycle 62.36 79.14
# Summary:
# Scope mIoU mAcc aAcc
# global 61.09 71.35 93.44
| 33.357143
| 133
| 0.531967
|
_base_ = [
'../_base_/datasets/cityscapes_768x768.py', '../_base_/default_runtime.py',
'../_base_/schedules/schedule_160k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='EncoderDecoder',
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 1),
strides=(1, 2, 2, 2),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
neck=[dict(
type='VitFpn',
img_size=[192, 96, 48, 24],
patch_size=[4, 4, 4, 4],
in_chans=[256, 512, 1024, 2048],
embed_dim=[256, 512, 1024, 2048],
depth=3,
num_heads=8,
num_classes=19,
drop_rate=0.1,
norm_cfg=norm_cfg,
pos_embed_interp=True,
align_corners=False),
dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=4,
)],
decode_head=dict(
type='TransFpnHead',
in_channels=256,
channels=128,
in_index=23,
img_size=768,
embed_dim=256,
num_classes=19,
norm_cfg=norm_cfg,
num_conv=4,
upsampling_method='bilinear',
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
optimizer = dict(lr=0.01, weight_decay=0.0,
paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)})
)
crop_size = (768, 768)
train_cfg = dict()
test_cfg = dict(mode='slide', crop_size=crop_size, stride=(512, 512))
find_unused_parameters = True
data = dict(samples_per_gpu=2)
| true
| true
|
f70770b49aa86d0bb703062d89e9f8abff723490
| 6,957
|
py
|
Python
|
Serial2MQTT_python/serial2MQTT.py
|
GanonKuppa/umouse_webapp_kuwaganon
|
f0a26a2e8a20dfaeb6730040933ea01561949268
|
[
"MIT"
] | null | null | null |
Serial2MQTT_python/serial2MQTT.py
|
GanonKuppa/umouse_webapp_kuwaganon
|
f0a26a2e8a20dfaeb6730040933ea01561949268
|
[
"MIT"
] | null | null | null |
Serial2MQTT_python/serial2MQTT.py
|
GanonKuppa/umouse_webapp_kuwaganon
|
f0a26a2e8a20dfaeb6730040933ea01561949268
|
[
"MIT"
] | null | null | null |
#coding: UTF-8
import serial
import sys
import time
import binascii
import json
import math
from collections import deque
import paho.mqtt.client as mqtt
# Configuration variables (initialized with default values);
# intended to be overridden from config.json (see load_config()).
MQTT_BROKER_IP = "localhost"
MQTT_BROKER_PORT = 1883
SERIAL_PORT = "COM3"
MESSAGE_LEN = 240
NO_MESSAGE_TIME_OUT = 5.0
NO_MESSAGE_MAX_COUNT = 30000
# Global mutable state shared with the MQTT callbacks.
# cmd_gamepad: latest 16-byte gamepad frame to forward over serial.
# cmd_queue: raw 16-byte command frames received on the "cmd" topic.
cmd_gamepad = [99,109,100,254,253,252,0,0,0,128,128,128,128,128,128,252]
cmd_queue = deque([])
# Expected gamepad payload keys; NOTE(review): on_message builds its own
# local dict from the JSON payload, so this module-level template appears
# unused at runtime — confirm before relying on it.
key_dict = {
    "cross_x":0,
    "cross_y":0,
    "L3D_x":0,
    "L3D_y":0,
    "R3D_x":0,
    "R3D_y":0,
    "RT":0,
    "LT":0,
    "A":0,
    "B":0,
    "X":0,
    "Y":0,
    "RB":0,
    "LB":0,
    "BACK":0,
    "START":0
}
def load_config():
    """Load runtime settings from ``config.json`` into the module globals.

    Bug fix: the original assigned the values to *local* names, so every
    setting read from config.json was silently discarded and the built-in
    defaults were always used.  Declaring the names ``global`` makes the
    file take effect.  Keys absent from config.json keep their current
    (default) values instead of raising KeyError.
    """
    global MQTT_BROKER_IP, MQTT_BROKER_PORT, SERIAL_PORT
    global MESSAGE_LEN, NO_MESSAGE_TIME_OUT, NO_MESSAGE_MAX_COUNT
    with open('config.json', 'r') as f:
        config = json.load(f)
    MQTT_BROKER_IP = config.get("MQTT_BROKER_IP", MQTT_BROKER_IP)
    MQTT_BROKER_PORT = config.get("MQTT_BROKER_PORT", MQTT_BROKER_PORT)
    SERIAL_PORT = config.get("SERIAL_PORT", SERIAL_PORT)
    MESSAGE_LEN = config.get("MESSAGE_LEN", MESSAGE_LEN)
    NO_MESSAGE_TIME_OUT = config.get("NO_MESSAGE_TIME_OUT", NO_MESSAGE_TIME_OUT)
    NO_MESSAGE_MAX_COUNT = config.get("NO_MESSAGE_MAX_COUNT", NO_MESSAGE_MAX_COUNT)
def on_message(client, userdata, msg):
    """MQTT callback: translate incoming messages into serial command frames.

    Topic "gamepad": JSON payload of controller state -> 16-byte frame
    stored in the global ``cmd_gamepad`` (header 99,109,100 / IDs 254,253 /
    checksum / button bits / axes / trailer 252).
    Topic "cmd": first 16 raw payload bytes are queued verbatim on
    ``cmd_queue``.
    """
    if msg.topic == "gamepad":
        global cmd_gamepad
        pad = json.loads(msg.payload.decode('utf-8'))
        # Pack the eight digital buttons into one byte, bit 0 = A .. bit 7 = START.
        buttons = (pad["A"]
                   + (pad["B"] << 1)
                   + (pad["X"] << 2)
                   + (pad["Y"] << 3)
                   + (pad["RB"] << 4)
                   + (pad["LB"] << 5)
                   + (pad["BACK"] << 6)
                   + (pad["START"] << 7))
        payload = [buttons,
                   pad["RT"], pad["LT"],
                   pad["cross_x"], pad["cross_y"],
                   pad["R3D_x"], pad["R3D_y"],
                   pad["L3D_x"], pad["L3D_y"],
                   252]
        # Frame layout: header(3) + ID0 + ID1 + checksum + payload(10).
        # Checksum is the byte-sum of everything after it, modulo 256.
        cmd_gamepad = [99, 109, 100, 254, 253, sum(payload) % 256] + payload
    if msg.topic == "cmd":
        global cmd_queue
        print(msg.payload)
        frame = [msg.payload[i] for i in range(16)]
        print(frame)
        cmd_queue.append(frame)
def create_mqtt_client():
    """Connect to the MQTT broker and subscribe to the command topics.

    Returns a client subscribed to "gamepad" and "cmd", with on_message
    installed as the incoming-message handler.
    """
    host = MQTT_BROKER_IP
    port = MQTT_BROKER_PORT
    # Specify MQTT protocol v3.1.1 when creating the client instance.
    client = mqtt.Client(protocol=mqtt.MQTTv311)
    client.connect(host, port=port, keepalive=60)
    client.publish("presence","this is " + __file__)
    client.on_message = on_message
    client.subscribe("gamepad")
    client.subscribe("cmd")
    return client
def publish_data_loop(client,ser):
    """Main pump: parse serial frames and publish them to MQTT forever.

    Scans the serial byte stream for frames that start with the header
    0xFF 0xFF 'H' 'E' 'A' 'D', validates each frame's checksum (byte-sum
    of bytes 7..MESSAGE_LEN-1 mod 256, stored at byte 6) and publishes it
    on topic "mouse".  After every parsed frame the latest gamepad frame
    (and any queued "cmd" frame) is written back to the serial port.
    Exits the whole process via sys.exit(0) when no serial data arrives
    for NO_MESSAGE_MAX_COUNT polls or NO_MESSAGE_TIME_OUT seconds.
    """
    buff = []
    s = []
    st = []
    st_bytes=b""
    length = 0
    i = 0
    start_time = time.time()
    timestamp = 0
    timestamp_pre = 0
    elapsed_time = 0
    no_message_count = 0
    ser.write(cmd_gamepad)
    print (len(buff))
    while True:
        #client.publish("TEST","While Loop")
        # Drain whatever is currently waiting on the serial port.
        s = [ele for ele in ser.read(ser.in_waiting)]
        buff.extend(s)
        if len(s) == 0:
            no_message_count = no_message_count + 1
            if no_message_count > NO_MESSAGE_MAX_COUNT:
                client.loop_stop(force=False)
                ser.close()
                print("COM" + " is busy.")
                client.publish("TEST", "Serial no message" +" error!")
                client.disconnect()
                print("exit!")
                sys.exit(0)
                return
        else:
            no_message_count = 0
        length = len(buff)
        if length < MESSAGE_LEN + 5:
            continue
        # Look for the frame header anywhere in the accumulated buffer.
        for i in range(length-MESSAGE_LEN-2):
            if (buff[i] == 0xff) and (buff[i+1] == 0xff) and \
                (buff[i+2]==0x48) and (buff[i+3]==0x45) and \
                (buff[i+4] == 0x41) and (buff[i+5]==0x44): #and \
                #(buff[i+message_len] == 0xff) and (buff[i+1+message_len] == 0xff):
                # If another header starts inside this frame, the frame is
                # truncated ("polluted"): resynchronize at the inner header.
                polution_check = False
                for j in range(i+6, i+MESSAGE_LEN - 3):
                    if (buff[j] == 0xff) and (buff[j+1] == 0xff) and (buff[j+2]==0x48):
                        buff = buff[j:]
                        polution_check = True
                        break
                if polution_check == True:
                    break
                start_time = time.time()
                timestamp_pre = timestamp
                timestamp = buff[11]
                st = buff[i:i+MESSAGE_LEN]
                st_bytes = binascii.hexlify(bytes(list(st)))
                chk_sum = 0
                for k in range(7,MESSAGE_LEN):
                    chk_sum = chk_sum + st[k]
                # Report checksum mismatches and unexpected timestamp steps
                # (consecutive frames are expected to differ by 10 mod 256).
                if chk_sum%256 != st[6] or (timestamp-timestamp_pre+256)%256 != 10:
                    client.publish("error", str(timestamp)+" "+str(timestamp_pre) + " " + str(chk_sum%256) + " " + str(st[6])+" "+ str(len(buff)))
                client.publish("mouse", bytes(list(st)))
                try:
                    ser.write(cmd_gamepad)
                    client.publish("TEST", "in msg loop")
                    if len(cmd_queue) != 0:
                        ser.write(cmd_queue.popleft())
                except:
                    ser.close()
                    client.publish("TEST", "serial write" +" error!")
                    return
                buff = buff[i+MESSAGE_LEN:]
                break
        end_time = time.time()
        elapsed_time = end_time - start_time
        client.publish("TEST","elapsed:"+str(elapsed_time))
        #print (elapsed_time,len(buff),timestamp, (timestamp-timestamp_pre+256)%256 )
        #print(cmd_list)
        if(elapsed_time > NO_MESSAGE_TIME_OUT):
            client.loop_stop(force=False)
            ser.close()
            print("serial" + " is busy.")
            client.publish("TEST", "serial" +" is busy!")
            client.disconnect()
            print("exit!")
            sys.exit(0)
            return
def main():
    """Entry point: load settings, connect to MQTT, open the serial port
    (retrying until it succeeds), then pump serial frames to MQTT."""
    load_config()
    client = create_mqtt_client()
    ser = None
    while ser is None:
        try:
            ser = serial.Serial(SERIAL_PORT, timeout=0.05, write_timeout=0.05)
        except:
            # Port not available yet; report and keep retrying.
            client.publish("TEST", "Cannot connect serial!")
            print("Cannot connect serial")
    client.publish("TEST", "Serial connected")
    print("Serial connected")
    client.publish("TEST", "data send start!")
    client.loop_start()
    publish_data_loop(client, ser)
if __name__ == "__main__":
    main()
| 28.165992
| 146
| 0.51459
|
import serial
import sys
import time
import binascii
import json
import math
from collections import deque
import paho.mqtt.client as mqtt
MQTT_BROKER_IP = "localhost"
MQTT_BROKER_PORT = 1883
SERIAL_PORT = "COM3"
MESSAGE_LEN = 240
NO_MESSAGE_TIME_OUT = 5.0
NO_MESSAGE_MAX_COUNT = 30000
cmd_gamepad = [99,109,100,254,253,252,0,0,0,128,128,128,128,128,128,252]
cmd_queue = deque([])
key_dict = {
"cross_x":0,
"cross_y":0,
"L3D_x":0,
"L3D_y":0,
"R3D_x":0,
"R3D_y":0,
"RT":0,
"LT":0,
"A":0,
"B":0,
"X":0,
"Y":0,
"RB":0,
"LB":0,
"BACK":0,
"START":0
}
def load_config():
with open('config.json', 'r') as f:
config = json.load(f)
MQTT_BROKER_IP = config["MQTT_BROKER_IP"]
MQTT_BROKER_PORT = config["MQTT_BROKER_PORT"]
MESSAGE_LEN = config["MESSAGE_LEN"]
NO_MESSAGE_TIME_OUT = config["NO_MESSAGE_TIME_OUT"]
NO_MESSAGE_MAX_COUNT = config["NO_MESSAGE_MAX_COUNT"]
def on_message(client, userdata, msg):
if msg.topic == "gamepad":
cmd_byte = [0 for x in range(16)]
key_dict = json.loads(msg.payload.decode('utf-8') )
cmd_byte[0] = 99
cmd_byte[1] = 109
cmd_byte[2] = 100
cmd_byte[3] = 254
cmd_byte[4] = 253
cmd_byte[9] = 128
cmd_byte[10] = 128
cmd_byte[11] = 128
cmd_byte[12] = 128
cmd_byte[13] = 128
cmd_byte[14] = 128
cmd_byte[6] = key_dict["A"] + \
(key_dict["B"] << 1) + \
(key_dict["X"] << 2) +\
(key_dict["Y"] << 3) +\
(key_dict["RB"] << 4) +\
(key_dict["LB"] << 5) +\
(key_dict["BACK"] << 6) +\
(key_dict["START"] << 7)
cmd_byte[7] = key_dict["RT"]
cmd_byte[8] = key_dict["LT"]
cmd_byte[9] = key_dict["cross_x"]
cmd_byte[10] = key_dict["cross_y"]
cmd_byte[11] = key_dict["R3D_x"]
cmd_byte[12] = key_dict["R3D_y"]
cmd_byte[13] = key_dict["L3D_x"]
cmd_byte[14] = key_dict["L3D_y"]
cmd_byte[15] = 252
chk_sum = 0
for i in range(6,16):
chk_sum += cmd_byte[i]
cmd_byte[5] = chk_sum % 256
global cmd_gamepad
cmd_gamepad = [cmd_byte[i] for i in range(16)]
if msg.topic == "cmd":
global cmd_queue
print(msg.payload)
cmd_byte = [ msg.payload[i] for i in range(16) ]
print(cmd_byte)
cmd_queue.append(cmd_byte)
def create_mqtt_client():
host = MQTT_BROKER_IP
port = MQTT_BROKER_PORT
client = mqtt.Client(protocol=mqtt.MQTTv311)
client.connect(host, port=port, keepalive=60)
client.publish("presence","this is " + __file__)
client.on_message = on_message
client.subscribe("gamepad")
client.subscribe("cmd")
return client
def publish_data_loop(client,ser):
buff = []
s = []
st = []
st_bytes=b""
length = 0
i = 0
start_time = time.time()
timestamp = 0
timestamp_pre = 0
elapsed_time = 0
no_message_count = 0
ser.write(cmd_gamepad)
print (len(buff))
while True:
s = [ele for ele in ser.read(ser.in_waiting)]
buff.extend(s)
if len(s) == 0:
no_message_count = no_message_count + 1
if no_message_count > NO_MESSAGE_MAX_COUNT:
client.loop_stop(force=False)
ser.close()
print("COM" + " is busy.")
client.publish("TEST", "Serial no message" +" error!")
client.disconnect()
print("exit!")
sys.exit(0)
return
else:
no_message_count = 0
length = len(buff)
if length < MESSAGE_LEN + 5:
continue
for i in range(length-MESSAGE_LEN-2):
if (buff[i] == 0xff) and (buff[i+1] == 0xff) and \
(buff[i+2]==0x48) and (buff[i+3]==0x45) and \
(buff[i+4] == 0x41) and (buff[i+5]==0x44):
polution_check = False
for j in range(i+6, i+MESSAGE_LEN - 3):
if (buff[j] == 0xff) and (buff[j+1] == 0xff) and (buff[j+2]==0x48):
buff = buff[j:]
polution_check = True
break
if polution_check == True:
break
start_time = time.time()
timestamp_pre = timestamp
timestamp = buff[11]
st = buff[i:i+MESSAGE_LEN]
st_bytes = binascii.hexlify(bytes(list(st)))
chk_sum = 0
for k in range(7,MESSAGE_LEN):
chk_sum = chk_sum + st[k]
if chk_sum%256 != st[6] or (timestamp-timestamp_pre+256)%256 != 10:
client.publish("error", str(timestamp)+" "+str(timestamp_pre) + " " + str(chk_sum%256) + " " + str(st[6])+" "+ str(len(buff)))
client.publish("mouse", bytes(list(st)))
try:
ser.write(cmd_gamepad)
client.publish("TEST", "in msg loop")
if len(cmd_queue) != 0:
ser.write(cmd_queue.popleft())
except:
ser.close()
client.publish("TEST", "serial write" +" error!")
return
buff = buff[i+MESSAGE_LEN:]
break
end_time = time.time()
elapsed_time = end_time - start_time
client.publish("TEST","elapsed:"+str(elapsed_time))
if(elapsed_time > NO_MESSAGE_TIME_OUT):
client.loop_stop(force=False)
ser.close()
print("serial" + " is busy.")
client.publish("TEST", "serial" +" is busy!")
client.disconnect()
print("exit!")
sys.exit(0)
return
def main():
load_config()
client = create_mqtt_client()
while True:
try:
ser = serial.Serial(SERIAL_PORT,timeout = 0.05, write_timeout=0.05)
client.publish("TEST", "Serial connected")
print("Serial connected")
break
except:
client.publish("TEST", "Cannot connect serial!")
print("Cannot connect serial")
client.publish("TEST", "data send start!")
client.loop_start()
publish_data_loop(client,ser)
if __name__ == "__main__":
main()
| true
| true
|
f70770e6eaced67c749412bd2f93605d8c2ffbe9
| 713
|
py
|
Python
|
spark_auto_mapper_fhir/extensions/custom/provider_search_system.py
|
imranq2/SparkAutoMapper.FHIR
|
dd23b218fb0097d1edc2f3e688e8d6d4d7278bd2
|
[
"Apache-2.0"
] | 1
|
2020-10-31T23:25:07.000Z
|
2020-10-31T23:25:07.000Z
|
spark_auto_mapper_fhir/extensions/custom/provider_search_system.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
spark_auto_mapper_fhir/extensions/custom/provider_search_system.py
|
icanbwell/SparkAutoMapper.FHIR
|
98f368e781b46523142c7cb513c670d659a93c9b
|
[
"Apache-2.0"
] | null | null | null |
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.classproperty import genericclassproperty
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
class ProviderSearchSystemExtensionItem(ExtensionBase):
    """FHIR extension item whose fixed url is ``forSystem`` and whose value
    is a URI identifying the provider-search system."""

    # noinspection PyPep8Naming
    def __init__(self, valueUri: FhirUri):
        """
        :param valueUri: URI of the system this extension refers to
        """
        super().__init__(
            url=ProviderSearchSystemExtensionItem.codeset,
            valueUri=valueUri,
        )

    # noinspection PyMethodParameters
    @genericclassproperty
    def codeset(cls) -> FhirUri:
        """
        Fixed extension url used for this item.
        :return: the literal ``forSystem``
        :rtype: FhirUri
        """
        return "forSystem"
| 25.464286
| 74
| 0.674614
|
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.classproperty import genericclassproperty
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
class ProviderSearchSystemExtensionItem(ExtensionBase):
def __init__(self, valueUri: FhirUri):
super().__init__(
url=ProviderSearchSystemExtensionItem.codeset,
valueUri=valueUri,
)
@genericclassproperty
def codeset(cls) -> FhirUri:
return "forSystem"
| true
| true
|
f707713f0caa3a96250f01703cacd23700c5d207
| 5,538
|
py
|
Python
|
util/summary.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 1
|
2021-11-16T00:58:43.000Z
|
2021-11-16T00:58:43.000Z
|
util/summary.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 44
|
2022-01-20T01:31:32.000Z
|
2022-03-31T01:50:41.000Z
|
util/summary.py
|
tdilauro/circulation-core
|
8086ca8cbedd5f4b2a0c44df97889d078ff79aac
|
[
"Apache-2.0"
] | 1
|
2021-05-12T19:11:52.000Z
|
2021-05-12T19:11:52.000Z
|
import logging
import re
from collections import Counter
from textblob import TextBlob
from textblob.exceptions import MissingCorpusError
from . import Bigrams, english_bigrams
class SummaryEvaluator(object):
    """Evaluate summaries of a book to find a usable summary.

    A usable summary will have good coverage of the popular noun
    phrases found across all summaries of the book, will have an
    approximate length of four sentences (this is customizable), and
    will not mention words that indicate it's a summary of a specific
    edition of the book.

    All else being equal, a shorter summary is better.

    A summary is penalized for apparently not being in English.
    """

    # These phrases are indicative of a description we can't use for
    # whatever reason.
    default_bad_phrases = set(
        [
            "version of",
            "retelling of",
            "abridged",
            "retelling",
            "condensed",
            "adaptation of",
            "look for",
            "new edition",
            "excerpts",
            "version",
            "edition",
            "selections",
            "complete texts",
            "in one volume",
            "contains",
            "--container",
            "--original container",
            "playaway",
            "complete novels",
            "all rights reserved",
        ]
    )
    # Regex patterns that likewise mark a description as unusable.
    bad_res = set(
        [
            re.compile("the [^ ]+ Collection"),
            re.compile("Includes"),
            re.compile("This is"),
        ]
    )
    # Flipped to False the first time TextBlob reports that the NLTK
    # corpora are missing; evaluation then degrades to a constant score.
    _nltk_installed = True
    log = logging.getLogger("Summary Evaluator")
def __init__(
self,
optimal_number_of_sentences=4,
noun_phrases_to_consider=10,
bad_phrases=None,
):
self.optimal_number_of_sentences = optimal_number_of_sentences
self.summaries = []
self.noun_phrases = Counter()
self.blobs = dict()
self.scores = dict()
self.noun_phrases_to_consider = float(noun_phrases_to_consider)
self.top_noun_phrases = None
if bad_phrases is None:
self.bad_phrases = self.default_bad_phrases
else:
self.bad_phrases = bad_phrases
def add(self, summary, parser=None):
parser_class = parser or TextBlob
if isinstance(summary, bytes):
summary = summary.decode("utf8")
if summary in self.blobs:
# We already evaluated this summary. Don't count it more than once
return
blob = parser_class(summary)
self.blobs[summary] = blob
self.summaries.append(summary)
if self._nltk_installed:
try:
for phrase in blob.noun_phrases:
self.noun_phrases[phrase] = self.noun_phrases[phrase] + 1
except MissingCorpusError as e:
self._nltk_installed = False
self.log.error("Summary cannot be evaluated: NLTK not installed %r" % e)
def ready(self):
"""We are done adding to the corpus and ready to start evaluating."""
self.top_noun_phrases = set(
[
k
for k, v in self.noun_phrases.most_common(
int(self.noun_phrases_to_consider)
)
]
)
def best_choice(self):
c = self.best_choices(1)
if c:
return c[0]
else:
return None, None
def best_choices(self, n=3):
"""Choose the best `n` choices among the current summaries."""
scores = Counter()
for summary in self.summaries:
scores[summary] = self.score(summary)
return scores.most_common(n)
def score(self, summary, apply_language_penalty=True):
"""Score a summary relative to our current view of the dataset."""
if not self._nltk_installed:
# Without NLTK, there's no need to evaluate the score.
return 1
if isinstance(summary, bytes):
summary = summary.decode("utf8")
if summary in self.scores:
return self.scores[summary]
score = 1
blob = self.blobs[summary]
top_noun_phrases_used = len(
[p for p in self.top_noun_phrases if p in blob.noun_phrases]
)
score = 1 * (top_noun_phrases_used / self.noun_phrases_to_consider)
try:
sentences = len(blob.sentences)
except Exception as e:
# Can't parse into sentences for whatever reason.
# Make a really bad guess.
sentences = summary.count(". ") + 1
off_from_optimal = abs(sentences - self.optimal_number_of_sentences)
if off_from_optimal == 1:
off_from_optimal = 1.5
if off_from_optimal:
# This summary is too long or too short.
score /= off_from_optimal ** 1.5
bad_phrases = 0
l = summary.lower()
for i in self.bad_phrases:
if i in l:
bad_phrases += 1
for i in self.bad_res:
if i.search(summary):
bad_phrases += 1
if l.count(" -- ") > 3:
bad_phrases += l.count(" -- ") - 3
score *= 0.5 ** bad_phrases
if apply_language_penalty:
language_difference = english_bigrams.difference_from(
Bigrams.from_string(summary)
)
if language_difference > 1:
score *= 0.5 ** (language_difference - 1)
return score
| 30.766667
| 88
| 0.570061
|
import logging
import re
from collections import Counter
from textblob import TextBlob
from textblob.exceptions import MissingCorpusError
from . import Bigrams, english_bigrams
class SummaryEvaluator(object):
# whatever reason.
default_bad_phrases = set(
[
"version of",
"retelling of",
"abridged",
"retelling",
"condensed",
"adaptation of",
"look for",
"new edition",
"excerpts",
"version",
"edition",
"selections",
"complete texts",
"in one volume",
"contains",
"--container",
"--original container",
"playaway",
"complete novels",
"all rights reserved",
]
)
bad_res = set(
[
re.compile("the [^ ]+ Collection"),
re.compile("Includes"),
re.compile("This is"),
]
)
_nltk_installed = True
log = logging.getLogger("Summary Evaluator")
def __init__(
self,
optimal_number_of_sentences=4,
noun_phrases_to_consider=10,
bad_phrases=None,
):
self.optimal_number_of_sentences = optimal_number_of_sentences
self.summaries = []
self.noun_phrases = Counter()
self.blobs = dict()
self.scores = dict()
self.noun_phrases_to_consider = float(noun_phrases_to_consider)
self.top_noun_phrases = None
if bad_phrases is None:
self.bad_phrases = self.default_bad_phrases
else:
self.bad_phrases = bad_phrases
def add(self, summary, parser=None):
parser_class = parser or TextBlob
if isinstance(summary, bytes):
summary = summary.decode("utf8")
if summary in self.blobs:
# We already evaluated this summary. Don't count it more than once
return
blob = parser_class(summary)
self.blobs[summary] = blob
self.summaries.append(summary)
if self._nltk_installed:
try:
for phrase in blob.noun_phrases:
self.noun_phrases[phrase] = self.noun_phrases[phrase] + 1
except MissingCorpusError as e:
self._nltk_installed = False
self.log.error("Summary cannot be evaluated: NLTK not installed %r" % e)
def ready(self):
self.top_noun_phrases = set(
[
k
for k, v in self.noun_phrases.most_common(
int(self.noun_phrases_to_consider)
)
]
)
def best_choice(self):
c = self.best_choices(1)
if c:
return c[0]
else:
return None, None
def best_choices(self, n=3):
scores = Counter()
for summary in self.summaries:
scores[summary] = self.score(summary)
return scores.most_common(n)
def score(self, summary, apply_language_penalty=True):
if not self._nltk_installed:
return 1
if isinstance(summary, bytes):
summary = summary.decode("utf8")
if summary in self.scores:
return self.scores[summary]
score = 1
blob = self.blobs[summary]
top_noun_phrases_used = len(
[p for p in self.top_noun_phrases if p in blob.noun_phrases]
)
score = 1 * (top_noun_phrases_used / self.noun_phrases_to_consider)
try:
sentences = len(blob.sentences)
except Exception as e:
# Can't parse into sentences for whatever reason.
sentences = summary.count(". ") + 1
off_from_optimal = abs(sentences - self.optimal_number_of_sentences)
if off_from_optimal == 1:
off_from_optimal = 1.5
if off_from_optimal:
score /= off_from_optimal ** 1.5
bad_phrases = 0
l = summary.lower()
for i in self.bad_phrases:
if i in l:
bad_phrases += 1
for i in self.bad_res:
if i.search(summary):
bad_phrases += 1
if l.count(" -- ") > 3:
bad_phrases += l.count(" -- ") - 3
score *= 0.5 ** bad_phrases
if apply_language_penalty:
language_difference = english_bigrams.difference_from(
Bigrams.from_string(summary)
)
if language_difference > 1:
score *= 0.5 ** (language_difference - 1)
return score
| true
| true
|
f7077143ea9e2cee482d69a3f3252c29505bc3b9
| 2,030
|
py
|
Python
|
week5/workshop5.py
|
jaycvilla/nucamppython_fundamentals
|
a53533e4459a10ff5fbc8e6b4c066412278cd7c1
|
[
"MIT"
] | null | null | null |
week5/workshop5.py
|
jaycvilla/nucamppython_fundamentals
|
a53533e4459a10ff5fbc8e6b4c066412278cd7c1
|
[
"MIT"
] | null | null | null |
week5/workshop5.py
|
jaycvilla/nucamppython_fundamentals
|
a53533e4459a10ff5fbc8e6b4c066412278cd7c1
|
[
"MIT"
] | null | null | null |
import random
def guess_random_number(tries, start, stop):
number = random.randint(start, stop)
while tries != 0:
print('Number of tries left: ', tries)
guess = int(input("Guess a number between 1 and 10: "))
tries -= 1
if guess < number:
print('Guess higher!')
if guess > number:
print('Guess lower!')
if guess == number:
break
if guess == number:
print('You guessed the correct number', str(number))
else:
print('You did not guess the number: ', str(number))
#guess_random_number(5, 0, 10)
def guess_random_num_linear(tries, start, stop):
number = random.randint(start, stop)
print('The number for the program to guess is:', number)
for x in range(0, 10) :
if tries != 0:
tries -= 1
print('The number for the program to guess is... ', x)
print('Number of tries left: ', tries)
if x == number:
print('You guessed the correct number', str(number))
return x
else:
print('You have failed to guess the correct number.')
break
#guess_random_num_linear(5, 0, 10)
def binary_search(tries, start, stop):
number = random.randint(start, stop)
lower_bound = int(start)
upper_bound = int(stop)
print("Random number to find:", number)
while tries != 0:
pivot = (lower_bound + upper_bound) // 2
pivot_value = pivot
tries -= 1
if pivot_value == number:
print('You guessed the correct number', str(pivot))
return pivot
elif pivot_value > number:
upper_bound = pivot - 1
print('Guessing Lower!')
else:
lower_bound = pivot + 1
print('Guessing Higher!')
else:
print('Your program has failed to find the number')
binary_search(5, 0, 10)
| 29.42029
| 69
| 0.539901
|
import random
def guess_random_number(tries, start, stop):
number = random.randint(start, stop)
while tries != 0:
print('Number of tries left: ', tries)
guess = int(input("Guess a number between 1 and 10: "))
tries -= 1
if guess < number:
print('Guess higher!')
if guess > number:
print('Guess lower!')
if guess == number:
break
if guess == number:
print('You guessed the correct number', str(number))
else:
print('You did not guess the number: ', str(number))
def guess_random_num_linear(tries, start, stop):
number = random.randint(start, stop)
print('The number for the program to guess is:', number)
for x in range(0, 10) :
if tries != 0:
tries -= 1
print('The number for the program to guess is... ', x)
print('Number of tries left: ', tries)
if x == number:
print('You guessed the correct number', str(number))
return x
else:
print('You have failed to guess the correct number.')
break
def binary_search(tries, start, stop):
number = random.randint(start, stop)
lower_bound = int(start)
upper_bound = int(stop)
print("Random number to find:", number)
while tries != 0:
pivot = (lower_bound + upper_bound) // 2
pivot_value = pivot
tries -= 1
if pivot_value == number:
print('You guessed the correct number', str(pivot))
return pivot
elif pivot_value > number:
upper_bound = pivot - 1
print('Guessing Lower!')
else:
lower_bound = pivot + 1
print('Guessing Higher!')
else:
print('Your program has failed to find the number')
binary_search(5, 0, 10)
| true
| true
|
f707719d8c68857afc88f397486764c6de386f1c
| 8,215
|
py
|
Python
|
deep-web-scanner.py
|
Leetcore/deepweb
|
28e93d5368f6b1496ea5675f6c6aa9e4c794da3f
|
[
"Apache-2.0"
] | null | null | null |
deep-web-scanner.py
|
Leetcore/deepweb
|
28e93d5368f6b1496ea5675f6c6aa9e4c794da3f
|
[
"Apache-2.0"
] | null | null | null |
deep-web-scanner.py
|
Leetcore/deepweb
|
28e93d5368f6b1496ea5675f6c6aa9e4c794da3f
|
[
"Apache-2.0"
] | null | null | null |
import threading
import ipaddress
import socket
import time
from typing import Optional, Union
import requests
requests.packages.urllib3.disable_warnings() # type: ignore
from concurrent.futures import ThreadPoolExecutor
import colorama
colorama.init(autoreset=True)
import os
import bs4
import argparse
folder = os.path.dirname(__file__)
output_strings: list[str] = []
ports = [80, 443, 8080, 8081, 8443, 4434]
keywords = ["cam", "rasp", " hp ", "system", "index of", "dashboard"]
output_tmp = ""
last_write = time.time()
global_lock = threading.Lock()
banner_targets: list[dict[str, Union[str, int]]] = []
def main():
print("----------------------------")
print(" Deep Web Scanner! ")
print("----------------------------\n")
print("Every active webserver url will be logged in the output file.")
print("This terminal will only show urls/metadata with the following keywords: " + ", ".join(keywords))
if indexof.lower() == "true":
print ("'Index of /' filenames will be logged!")
print("Scan will start...")
with open(input_file, "r") as myfile:
content = myfile.readlines()
for line in content:
# split ip range 2.56.20.0-2.56.23.255
if "-" in line:
ip_range_array = line.split("-")
ip_range_start = ip_range_array[0].strip()
ip_range_end = ip_range_array[1].strip()
print(f"Start scan from range: {ip_range_start} - {ip_range_end}")
current_ip = ipaddress.IPv4Address(ip_range_start)
end_ip = ipaddress.IPv4Address(ip_range_end)
with ThreadPoolExecutor(max_workers=100) as executor_portcheck:
while current_ip < end_ip:
executor_portcheck.submit(start_portcheck, current_ip.exploded)
current_ip += 1
elif "/" in line:
ip_range = ipaddress.ip_network(line.strip())
with ThreadPoolExecutor(max_workers=100) as executor_portcheck:
for ip in ip_range.hosts():
executor_portcheck.submit(start_portcheck, ip.exploded)
else:
print("No valid input file! Should be something like 2.56.20.0-2.56.23.255 per line!")
global banner_targets
print(f"{len(banner_targets)} responses")
for target in banner_targets:
start_request(target["ip"], target["port"]) # type: ignore
banner_targets.clear()
write_line("", True)
def start_portcheck(ip: str) -> None:
global banner_targets
# fast webserver port checking
for port in ports:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(3)
result = sock.connect_ex((ip, port))
if result == 0:
# queue normal browser request
banner_targets.append({"ip": ip, "port": port})
def start_request(ip: str, port: int) -> None:
# check for running websites
try:
url = "https://" + ip + ":" + str(port)
if port == 80:
url = "http://" + ip
elif port == 8080:
url = "http://" + ip + ":8080"
elif port == 8081:
url = "http://" + ip + ":8081"
site_result = request_url(url)
if not isinstance(site_result, bool) and site_result is not False:
# if the site is reachable get some information
get_banner(site_result[0], site_result[1])
except Exception as e:
print(e)
def request_url(url: str) -> Union[tuple[requests.Response, bs4.BeautifulSoup], bool]:
# request url and return the response
try:
session = requests.session()
session.headers[
"User-Agent"
] = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36"
header = session.head(url=url, timeout=20, verify=False)
# check content type
one_allowed_content_type = False
content_type_header = header.headers.get("content-type")
if content_type_header is not None:
for allowed_content_type in ["html", "plain", "xml", "text", "json"]:
if allowed_content_type in content_type_header.lower():
one_allowed_content_type = True
if not one_allowed_content_type:
return False
else:
return False
response = session.get(url=url, timeout=30, verify=False)
session.close()
soup = bs4.BeautifulSoup(response.text, "html.parser")
return (response, soup)
except Exception:
return False
def get_banner(request: requests.Response, soup: bs4.BeautifulSoup):
# get banner information, show console output and save them to file
banner_array: list[str] = []
banner_array.append(request.url)
server_header = request.headers.get("Server")
if isinstance(server_header, str):
banner_array.append(server_header)
title = soup.find("title")
if isinstance(title, bs4.Tag):
title = title.get_text().strip().replace("\n", "")
banner_array.append(title)
meta_tags: bs4.element.ResultSet[bs4.Tag] = soup.find_all("meta", attrs={"name": "generator"})
if len(meta_tags) > 0:
for meta_tag in meta_tags:
attrs = meta_tag.attr
if isinstance(attrs, bs4.Tag):
generator = attrs.get("content")
if isinstance(generator, str):
banner_array.append(generator)
# has this site a password field?
password_fields = soup.find_all(attrs={"type": "password"})
if len(password_fields) > 0:
banner_array.append("login required")
# check for "index of" websites and show root files/folders
global indexof
if indexof.lower() == "true" and "index of" in request.text.lower():
a_array: list[bs4.Tag] = soup.find_all("a")
for a in a_array:
href = a.attrs.get("href")
if isinstance(href, str):
if href.find("?") != 0:
banner_array.append(href)
banner_array.append(f"{str(len(request.content))} content size")
fullstring = ", ".join(banner_array)
if fullstring not in output_strings:
output_strings.append(fullstring)
for keyword in keywords:
if keyword in fullstring.lower():
if "login required" in fullstring:
print(colorama.Fore.RED + fullstring)
elif "Index of /" in fullstring:
print(colorama.Fore.YELLOW + fullstring)
else:
print(colorama.Fore.GREEN + fullstring)
write_line(fullstring)
def write_line(line: str, force: Optional[bool] = False):
# buffers and writes output to file
global output_tmp, last_write
output_tmp += line + "\n"
if last_write + 30 < time.time() or force:
last_write = time.time()
while global_lock.locked():
continue
global_lock.acquire()
lines_to_write = output_tmp.count("\n")
with open(output_file, "a") as output_1:
output_1.write(output_tmp)
output_tmp = ""
if lines_to_write > 1:
print(f"{lines_to_write} webservers found and written to file")
else:
print(f"{lines_to_write} webserver found and written to file")
global_lock.release()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Check if domain has an active website and grab banner."
)
parser.add_argument(
"-i", type=str, default="./asn-country-ipv4.csv", help="Path to input file"
)
parser.add_argument(
"-o", type=str, default="./deep-web.txt", help="Path to output file"
)
parser.add_argument(
"-indexof", type=str, default="no", help="Show files from index of sites"
)
args = parser.parse_args()
input_file = args.i
output_file = args.o
indexof = args.indexof
main()
| 37.340909
| 123
| 0.59574
|
import threading
import ipaddress
import socket
import time
from typing import Optional, Union
import requests
requests.packages.urllib3.disable_warnings()
from concurrent.futures import ThreadPoolExecutor
import colorama
colorama.init(autoreset=True)
import os
import bs4
import argparse
folder = os.path.dirname(__file__)
output_strings: list[str] = []
ports = [80, 443, 8080, 8081, 8443, 4434]
keywords = ["cam", "rasp", " hp ", "system", "index of", "dashboard"]
output_tmp = ""
last_write = time.time()
global_lock = threading.Lock()
banner_targets: list[dict[str, Union[str, int]]] = []
def main():
print("----------------------------")
print(" Deep Web Scanner! ")
print("----------------------------\n")
print("Every active webserver url will be logged in the output file.")
print("This terminal will only show urls/metadata with the following keywords: " + ", ".join(keywords))
if indexof.lower() == "true":
print ("'Index of /' filenames will be logged!")
print("Scan will start...")
with open(input_file, "r") as myfile:
content = myfile.readlines()
for line in content:
if "-" in line:
ip_range_array = line.split("-")
ip_range_start = ip_range_array[0].strip()
ip_range_end = ip_range_array[1].strip()
print(f"Start scan from range: {ip_range_start} - {ip_range_end}")
current_ip = ipaddress.IPv4Address(ip_range_start)
end_ip = ipaddress.IPv4Address(ip_range_end)
with ThreadPoolExecutor(max_workers=100) as executor_portcheck:
while current_ip < end_ip:
executor_portcheck.submit(start_portcheck, current_ip.exploded)
current_ip += 1
elif "/" in line:
ip_range = ipaddress.ip_network(line.strip())
with ThreadPoolExecutor(max_workers=100) as executor_portcheck:
for ip in ip_range.hosts():
executor_portcheck.submit(start_portcheck, ip.exploded)
else:
print("No valid input file! Should be something like 2.56.20.0-2.56.23.255 per line!")
global banner_targets
print(f"{len(banner_targets)} responses")
for target in banner_targets:
start_request(target["ip"], target["port"])
banner_targets.clear()
write_line("", True)
def start_portcheck(ip: str) -> None:
global banner_targets
for port in ports:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(3)
result = sock.connect_ex((ip, port))
if result == 0:
banner_targets.append({"ip": ip, "port": port})
def start_request(ip: str, port: int) -> None:
try:
url = "https://" + ip + ":" + str(port)
if port == 80:
url = "http://" + ip
elif port == 8080:
url = "http://" + ip + ":8080"
elif port == 8081:
url = "http://" + ip + ":8081"
site_result = request_url(url)
if not isinstance(site_result, bool) and site_result is not False:
get_banner(site_result[0], site_result[1])
except Exception as e:
print(e)
def request_url(url: str) -> Union[tuple[requests.Response, bs4.BeautifulSoup], bool]:
try:
session = requests.session()
session.headers[
"User-Agent"
] = "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.152 Safari/537.36"
header = session.head(url=url, timeout=20, verify=False)
one_allowed_content_type = False
content_type_header = header.headers.get("content-type")
if content_type_header is not None:
for allowed_content_type in ["html", "plain", "xml", "text", "json"]:
if allowed_content_type in content_type_header.lower():
one_allowed_content_type = True
if not one_allowed_content_type:
return False
else:
return False
response = session.get(url=url, timeout=30, verify=False)
session.close()
soup = bs4.BeautifulSoup(response.text, "html.parser")
return (response, soup)
except Exception:
return False
def get_banner(request: requests.Response, soup: bs4.BeautifulSoup):
banner_array: list[str] = []
banner_array.append(request.url)
server_header = request.headers.get("Server")
if isinstance(server_header, str):
banner_array.append(server_header)
title = soup.find("title")
if isinstance(title, bs4.Tag):
title = title.get_text().strip().replace("\n", "")
banner_array.append(title)
meta_tags: bs4.element.ResultSet[bs4.Tag] = soup.find_all("meta", attrs={"name": "generator"})
if len(meta_tags) > 0:
for meta_tag in meta_tags:
attrs = meta_tag.attr
if isinstance(attrs, bs4.Tag):
generator = attrs.get("content")
if isinstance(generator, str):
banner_array.append(generator)
password_fields = soup.find_all(attrs={"type": "password"})
if len(password_fields) > 0:
banner_array.append("login required")
global indexof
if indexof.lower() == "true" and "index of" in request.text.lower():
a_array: list[bs4.Tag] = soup.find_all("a")
for a in a_array:
href = a.attrs.get("href")
if isinstance(href, str):
if href.find("?") != 0:
banner_array.append(href)
banner_array.append(f"{str(len(request.content))} content size")
fullstring = ", ".join(banner_array)
if fullstring not in output_strings:
output_strings.append(fullstring)
for keyword in keywords:
if keyword in fullstring.lower():
if "login required" in fullstring:
print(colorama.Fore.RED + fullstring)
elif "Index of /" in fullstring:
print(colorama.Fore.YELLOW + fullstring)
else:
print(colorama.Fore.GREEN + fullstring)
write_line(fullstring)
def write_line(line: str, force: Optional[bool] = False):
global output_tmp, last_write
output_tmp += line + "\n"
if last_write + 30 < time.time() or force:
last_write = time.time()
while global_lock.locked():
continue
global_lock.acquire()
lines_to_write = output_tmp.count("\n")
with open(output_file, "a") as output_1:
output_1.write(output_tmp)
output_tmp = ""
if lines_to_write > 1:
print(f"{lines_to_write} webservers found and written to file")
else:
print(f"{lines_to_write} webserver found and written to file")
global_lock.release()
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Check if domain has an active website and grab banner."
)
parser.add_argument(
"-i", type=str, default="./asn-country-ipv4.csv", help="Path to input file"
)
parser.add_argument(
"-o", type=str, default="./deep-web.txt", help="Path to output file"
)
parser.add_argument(
"-indexof", type=str, default="no", help="Show files from index of sites"
)
args = parser.parse_args()
input_file = args.i
output_file = args.o
indexof = args.indexof
main()
| true
| true
|
f70772545aea1123adb436514a0719a0eca58607
| 567
|
py
|
Python
|
examples/example_events/service2.py
|
vladcalin/pymicroservice
|
325a49d17621b9d45ffd2b5eca6f0de284de8ba4
|
[
"MIT"
] | 2
|
2016-12-17T13:09:14.000Z
|
2016-12-31T18:38:57.000Z
|
examples/example_events/service2.py
|
vladcalin/pymicroservice
|
325a49d17621b9d45ffd2b5eca6f0de284de8ba4
|
[
"MIT"
] | 15
|
2016-11-27T13:28:25.000Z
|
2017-01-10T09:09:30.000Z
|
examples/example_events/service2.py
|
vladcalin/pymicroservice
|
325a49d17621b9d45ffd2b5eca6f0de284de8ba4
|
[
"MIT"
] | null | null | null |
from gemstone import MicroService, event_handler, exposed_method
from gemstone.event.transport import rabbitmq, redis_transport
class EventTestService2(MicroService):
name = "event.test2"
host = "127.0.0.1"
port = 8000
event_transports = [
redis_transport.RedisEventTransport("redis://127.0.0.1:6379/0")
]
@exposed_method()
def say_hello(self, name):
self.emit_event("said_hello", {"name": name})
return "Hello {}".format(name)
if __name__ == '__main__':
service = EventTestService2()
service.start()
| 24.652174
| 71
| 0.679012
|
from gemstone import MicroService, event_handler, exposed_method
from gemstone.event.transport import rabbitmq, redis_transport
class EventTestService2(MicroService):
name = "event.test2"
host = "127.0.0.1"
port = 8000
event_transports = [
redis_transport.RedisEventTransport("redis://127.0.0.1:6379/0")
]
@exposed_method()
def say_hello(self, name):
self.emit_event("said_hello", {"name": name})
return "Hello {}".format(name)
if __name__ == '__main__':
service = EventTestService2()
service.start()
| true
| true
|
f7077369b4a25c7679ffe42278eadb846b8055d9
| 1,396
|
py
|
Python
|
musicbot/pubg.py
|
gueishe/music_bot
|
cb6be80171fb599de1a933c6bd64928193af6377
|
[
"MIT"
] | null | null | null |
musicbot/pubg.py
|
gueishe/music_bot
|
cb6be80171fb599de1a933c6bd64928193af6377
|
[
"MIT"
] | null | null | null |
musicbot/pubg.py
|
gueishe/music_bot
|
cb6be80171fb599de1a933c6bd64928193af6377
|
[
"MIT"
] | null | null | null |
from datetime import datetime, timedelta
class Pubg(object):
def __init__(self):
self.top_history = []
def _last_top(self, players=None):
if not players:
if self.top_history:
return self.top_history[-1]
else:
return None
for top in self.top_history:
print(top['players'] == players)
if top['players'] == players:
return top
def get_last(self, players=None):
now = datetime.now()
if players:
top = self._last_top(players)
else:
top = self._last_top()
if top:
msg = 'Last top for team ' + ' '.join(top['players'].split(',')) + ' was ' + str(now - top['time']) + ' ago'
else:
msg = 'No last top'
if players:
msg += ' for this team'
self._remove_old()
return msg
def new_top(self, players):
top = {
'time': datetime.now(),
'players': players
}
self.top_history.append(top)
print(self.top_history)
self._remove_old()
def _remove_old(self):
now = datetime.now()
for top in self.top_history:
if top['time'] < now - timedelta(days=30):
self.top_history.remove(top)
| 28.489796
| 121
| 0.485673
|
from datetime import datetime, timedelta
class Pubg(object):
def __init__(self):
self.top_history = []
def _last_top(self, players=None):
if not players:
if self.top_history:
return self.top_history[-1]
else:
return None
for top in self.top_history:
print(top['players'] == players)
if top['players'] == players:
return top
def get_last(self, players=None):
now = datetime.now()
if players:
top = self._last_top(players)
else:
top = self._last_top()
if top:
msg = 'Last top for team ' + ' '.join(top['players'].split(',')) + ' was ' + str(now - top['time']) + ' ago'
else:
msg = 'No last top'
if players:
msg += ' for this team'
self._remove_old()
return msg
def new_top(self, players):
top = {
'time': datetime.now(),
'players': players
}
self.top_history.append(top)
print(self.top_history)
self._remove_old()
def _remove_old(self):
now = datetime.now()
for top in self.top_history:
if top['time'] < now - timedelta(days=30):
self.top_history.remove(top)
| true
| true
|
f7077379de2caec8b5306a111f10b19a8551c5f6
| 5,165
|
py
|
Python
|
src/webargs/tornadoparser.py
|
hugovk/webargs
|
f62dc822a5d7e1add1f25de7bf040685ce8f7089
|
[
"MIT"
] | null | null | null |
src/webargs/tornadoparser.py
|
hugovk/webargs
|
f62dc822a5d7e1add1f25de7bf040685ce8f7089
|
[
"MIT"
] | null | null | null |
src/webargs/tornadoparser.py
|
hugovk/webargs
|
f62dc822a5d7e1add1f25de7bf040685ce8f7089
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Tornado request argument parsing module.
Example: ::
import tornado.web
from marshmallow import fields
from webargs.tornadoparser import use_args
class HelloHandler(tornado.web.RequestHandler):
@use_args({'name': fields.Str(missing='World')})
def get(self, args):
response = {'message': 'Hello {}'.format(args['name'])}
self.write(response)
"""
import tornado.web
import tornado.concurrent
from tornado.escape import _unicode
from webargs import core
from webargs.compat import basestring
from webargs.multidictproxy import MultiDictProxy
class HTTPError(tornado.web.HTTPError):
"""`tornado.web.HTTPError` that stores validation errors."""
def __init__(self, *args, **kwargs):
self.messages = kwargs.pop("messages", {})
self.headers = kwargs.pop("headers", None)
super(HTTPError, self).__init__(*args, **kwargs)
def is_json_request(req):
content_type = req.headers.get("Content-Type")
return content_type is not None and core.is_json(content_type)
class WebArgsTornadoMultiDictProxy(MultiDictProxy):
"""
Override class for Tornado multidicts, handles argument decoding
requirements.
"""
def __getitem__(self, key):
try:
value = self.data.get(key, core.missing)
if value is core.missing:
return core.missing
elif key in self.multiple_keys:
return [_unicode(v) if isinstance(v, basestring) else v for v in value]
elif value and isinstance(value, (list, tuple)):
value = value[0]
if isinstance(value, basestring):
return _unicode(value)
else:
return value
# based on tornado.web.RequestHandler.decode_argument
except UnicodeDecodeError:
raise HTTPError(400, "Invalid unicode in %s: %r" % (key, value[:40]))
class WebArgsTornadoCookiesMultiDictProxy(MultiDictProxy):
"""
And a special override for cookies because they come back as objects with a
`value` attribute we need to extract.
Also, does not use the `_unicode` decoding step
"""
def __getitem__(self, key):
cookie = self.data.get(key, core.missing)
if cookie is core.missing:
return core.missing
elif key in self.multiple_keys:
return [cookie.value]
else:
return cookie.value
class TornadoParser(core.Parser):
"""Tornado request argument parser."""
def _raw_load_json(self, req):
"""Return a json payload from the request for the core parser's load_json
Checks the input mimetype and may return 'missing' if the mimetype is
non-json, even if the request body is parseable as json."""
if not is_json_request(req):
return core.missing
# request.body may be a concurrent.Future on streaming requests
# this would cause a TypeError if we try to parse it
if isinstance(req.body, tornado.concurrent.Future):
return core.missing
return core.parse_json(req.body)
def load_querystring(self, req, schema):
"""Return query params from the request as a MultiDictProxy."""
return WebArgsTornadoMultiDictProxy(req.query_arguments, schema)
def load_form(self, req, schema):
"""Return form values from the request as a MultiDictProxy."""
return WebArgsTornadoMultiDictProxy(req.body_arguments, schema)
def load_headers(self, req, schema):
"""Return headers from the request as a MultiDictProxy."""
return WebArgsTornadoMultiDictProxy(req.headers, schema)
def load_cookies(self, req, schema):
"""Return cookies from the request as a MultiDictProxy."""
# use the specialized subclass specifically for handling Tornado
# cookies
return WebArgsTornadoCookiesMultiDictProxy(req.cookies, schema)
def load_files(self, req, schema):
"""Return files from the request as a MultiDictProxy."""
return WebArgsTornadoMultiDictProxy(req.files, schema)
def handle_error(self, error, req, schema, error_status_code, error_headers):
"""Handles errors during parsing. Raises a `tornado.web.HTTPError`
with a 400 error.
"""
status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
if status_code == 422:
reason = "Unprocessable Entity"
else:
reason = None
raise HTTPError(
status_code,
log_message=str(error.messages),
reason=reason,
messages=error.messages,
headers=error_headers,
)
def _handle_invalid_json_error(self, error, req, *args, **kwargs):
raise HTTPError(
400,
log_message="Invalid JSON body.",
reason="Bad Request",
messages={"json": ["Invalid JSON body."]},
)
def get_request_from_view_args(self, view, args, kwargs):
return args[0].request
parser = TornadoParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
| 33.538961
| 87
| 0.651113
|
import tornado.web
import tornado.concurrent
from tornado.escape import _unicode
from webargs import core
from webargs.compat import basestring
from webargs.multidictproxy import MultiDictProxy
class HTTPError(tornado.web.HTTPError):
def __init__(self, *args, **kwargs):
self.messages = kwargs.pop("messages", {})
self.headers = kwargs.pop("headers", None)
super(HTTPError, self).__init__(*args, **kwargs)
def is_json_request(req):
content_type = req.headers.get("Content-Type")
return content_type is not None and core.is_json(content_type)
class WebArgsTornadoMultiDictProxy(MultiDictProxy):
    """MultiDictProxy that decodes tornado's byte values to unicode.

    Tornado argument dictionaries may map a key to a list of byte strings;
    lookups here unwrap single values and decode strings, leaving
    non-string values untouched.
    """

    def __getitem__(self, key):
        try:
            value = self.data.get(key, core.missing)
            if value is core.missing:
                return core.missing
            elif key in self.multiple_keys:
                # Multi-value field: decode every string element.
                return [_unicode(v) if isinstance(v, basestring) else v for v in value]
            elif value and isinstance(value, (list, tuple)):
                # Single-value field stored as a list/tuple: unwrap the head.
                value = value[0]
            if isinstance(value, basestring):
                return _unicode(value)
            else:
                return value
        except UnicodeDecodeError:
            # Truncate the offending value so the 400 message stays readable.
            raise HTTPError(400, "Invalid unicode in %s: %r" % (key, value[:40]))
class WebArgsTornadoCookiesMultiDictProxy(MultiDictProxy):
    """MultiDictProxy specialized for tornado cookies.

    Cookie entries expose their payload on ``.value``; there is no real
    multiple-value support, so multi-value keys yield a one-element list.
    """

    def __getitem__(self, key):
        cookie = self.data.get(key, core.missing)
        if cookie is core.missing:
            return core.missing
        if key in self.multiple_keys:
            return [cookie.value]
        return cookie.value
class TornadoParser(core.Parser):
    """webargs request parser for tornado request handlers."""

    def _raw_load_json(self, req):
        """Return the parsed JSON body, or ``missing`` when there is none."""
        if not is_json_request(req):
            return core.missing
        # If the body has not been materialized yet (shows up as a Future),
        # treat the JSON as absent rather than waiting on it.
        if isinstance(req.body, tornado.concurrent.Future):
            return core.missing
        return core.parse_json(req.body)

    def load_querystring(self, req, schema):
        """Return query-string arguments as a MultiDictProxy."""
        return WebArgsTornadoMultiDictProxy(req.query_arguments, schema)

    def load_form(self, req, schema):
        """Return form body arguments as a MultiDictProxy."""
        return WebArgsTornadoMultiDictProxy(req.body_arguments, schema)

    def load_headers(self, req, schema):
        """Return request headers as a MultiDictProxy."""
        return WebArgsTornadoMultiDictProxy(req.headers, schema)

    def load_cookies(self, req, schema):
        """Return cookies via the cookie-aware proxy subclass."""
        return WebArgsTornadoCookiesMultiDictProxy(req.cookies, schema)

    def load_files(self, req, schema):
        """Return uploaded files as a MultiDictProxy."""
        return WebArgsTornadoMultiDictProxy(req.files, schema)

    def handle_error(self, error, req, schema, error_status_code, error_headers):
        """Raise a `tornado.web.HTTPError` for a validation failure.

        Uses ``DEFAULT_VALIDATION_STATUS`` when no status code is supplied;
        a 422 gets its canonical reason phrase.
        """
        status_code = error_status_code or self.DEFAULT_VALIDATION_STATUS
        if status_code == 422:
            reason = "Unprocessable Entity"
        else:
            reason = None
        raise HTTPError(
            status_code,
            log_message=str(error.messages),
            reason=reason,
            messages=error.messages,
            headers=error_headers,
        )

    def _handle_invalid_json_error(self, error, req, *args, **kwargs):
        """Raise a 400 `HTTPError` when the JSON body cannot be parsed."""
        raise HTTPError(
            400,
            log_message="Invalid JSON body.",
            reason="Bad Request",
            messages={"json": ["Invalid JSON body."]},
        )

    def get_request_from_view_args(self, view, args, kwargs):
        """Return the request object from the decorated handler method's args."""
        return args[0].request
# Module-level singleton parser plus the standard webargs decorator aliases.
parser = TornadoParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
| true
| true
|
f7077401251a66e208db2a6e65da8bcf1a0f9567
| 6,791
|
py
|
Python
|
java/kotlin-extractor/build.py
|
taus-semmle/ql
|
234a36ff61e9b06476c4356f0f7ac160115d60ed
|
[
"MIT"
] | null | null | null |
java/kotlin-extractor/build.py
|
taus-semmle/ql
|
234a36ff61e9b06476c4356f0f7ac160115d60ed
|
[
"MIT"
] | null | null | null |
java/kotlin-extractor/build.py
|
taus-semmle/ql
|
234a36ff61e9b06476c4356f0f7ac160115d60ed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import argparse
import kotlin_plugin_versions
import glob
import platform
import re
import subprocess
import shutil
import os
import os.path
import sys
import shlex
def parse_args():
    """Parse the build script's command-line arguments.

    Returns:
        An `argparse.Namespace` with ``dependencies`` (path to the folder of
        Kotlin dependency jars) and ``many`` (True to build every supported
        version/kind, False to build a single one).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--dependencies', default='../../../resources/kotlin-dependencies',
                        help='Folder containing the dependencies')
    parser.add_argument('--many', action='store_true',
                        help='Build for all versions/kinds')
    parser.add_argument('--single', action='store_false',
                        dest='many', help='Build for a single version/kind')
    return parser.parse_args()
# Parsed command-line options, resolved once at import time.
args = parse_args()
def is_windows():
    """Report whether the build runs on a Windows-style platform.

    Both native Windows and Cygwin count: both need the ``.bat`` launcher
    and cmd-style argument quoting.
    """
    system_name = platform.system()
    return system_name == 'Windows' or system_name.startswith('CYGWIN')
# kotlinc needs its .bat launcher on Windows; javac resolves fine by name.
kotlinc = 'kotlinc.bat' if is_windows() else 'kotlinc'
javac = 'javac'
# Folder containing the kotlin-stdlib / kotlin-compiler dependency jars.
kotlin_dependency_folder = args.dependencies
def quote_for_batch(arg):
    """Wrap *arg* in double quotes when cmd.exe would otherwise mangle it.

    Only arguments containing ``;`` or ``=`` are quoted. An argument that
    already contains a double quote cannot be quoted safely and is rejected.

    NOTE(review): arguments containing spaces are not quoted here —
    presumably none of the generated arguments contain spaces; confirm if
    paths with spaces are possible.
    """
    needs_quoting = ';' in arg or '=' in arg
    if not needs_quoting:
        return arg
    if '"' in arg:
        raise Exception('Need to quote something containing a quote')
    return '"' + arg + '"'
def run_process(cmd, capture_output=False):
    """Run *cmd* (an argument list), echoing it first.

    On Windows the list is joined into a single command string with
    batch-style quoting. On failure, prints the working directory, the
    failing command and (when captured) its output to stderr, then re-raises.

    Returns:
        The `subprocess.CompletedProcess` of the successful run.

    Raises:
        subprocess.CalledProcessError: if the command exits non-zero.
    """
    print("Running command: " + shlex.join(cmd))
    if is_windows():
        # cmd.exe gets one string, with quoting applied per argument.
        cmd = ' '.join(map(quote_for_batch, cmd))
        print("Converted to Windows command: " + cmd)
    try:
        return subprocess.run(cmd, check=True, capture_output=capture_output)
    except subprocess.CalledProcessError as e:
        print("In: " + os.getcwd(), file=sys.stderr)
        shell_cmd = cmd if is_windows() else shlex.join(cmd)
        print("Command failed: " + shell_cmd, file=sys.stderr)
        if capture_output:
            print("stdout output:\n" + e.stdout.decode(encoding='UTF-8',
                                                       errors='replace'), file=sys.stderr)
            print("stderr output:\n" + e.stderr.decode(encoding='UTF-8',
                                                       errors='replace'), file=sys.stderr)
        raise e
def compile_to_dir(srcs, classpath, java_classpath, output):
    """Compile the given sources into class files under *output*.

    kotlinc runs over the full source list first; javac then compiles only
    the ``.java`` files, with the Kotlin output directory on its classpath.
    """
    # Use kotlinc to compile .kt files:
    run_process([kotlinc,
                 # kotlinc can default to 256M, which isn't enough when we are extracting the build
                 '-J-Xmx2G',
                 '-Xopt-in=kotlin.RequiresOptIn',
                 '-d', output,
                 '-module-name', 'codeql-kotlin-extractor',
                 '-no-reflect', '-no-stdlib',
                 '-jvm-target', '1.8',
                 '-classpath', classpath] + srcs)
    # Use javac to compile .java files, referencing the Kotlin class files:
    run_process([javac,
                 '-d', output,
                 '-source', '8', '-target', '8',
                 '-classpath', os.path.pathsep.join([output, classpath, java_classpath])] + [s for s in srcs if s.endswith(".java")])
def compile_to_jar(srcs, classpath, java_classpath, output):
    """Compile *srcs* into a fresh jar at *output*, bundling META-INF resources."""
    staging_dir = 'build/classes'
    # Always start from an empty staging directory so stale classes never
    # leak into the jar.
    if os.path.exists(staging_dir):
        shutil.rmtree(staging_dir)
    os.makedirs(staging_dir)
    compile_to_dir(srcs, classpath, java_classpath, staging_dir)
    run_process(['jar', 'cf', output,
                 '-C', staging_dir, '.',
                 '-C', 'src/main/resources', 'META-INF'])
    shutil.rmtree(staging_dir)
def find_sources(path):
    """Return every Kotlin and Java source file found recursively under *path*."""
    sources = []
    for extension in ('kt', 'java'):
        sources += glob.glob('%s/**/*.%s' % (path, extension), recursive=True)
    return sources
def get_kotlin_lib_folder():
    """Return the ``lib`` folder of the kotlinc installation on PATH.

    Scrapes the Kotlin home directory out of ``kotlinc -version -verbose``
    stderr output.

    Raises:
        Exception: if the home-directory line cannot be found.
    """
    x = run_process([kotlinc, '-version', '-verbose'], capture_output=True)
    output = x.stderr.decode(encoding='UTF-8', errors='strict')
    # NOTE(review): without re.DOTALL, '.' stops at newlines, so this match
    # only succeeds when the 'logging:' line is the second line of the
    # output -- confirm this holds for the kotlinc versions in use.
    m = re.match(
        r'.*\nlogging: using Kotlin home directory ([^\n]+)\n.*', output)
    if m is None:
        raise Exception('Cannot determine kotlinc home directory')
    kotlin_home = m.group(1)
    print("Kotlin home directory: " + kotlin_home)
    return kotlin_home + '/lib'
def get_gradle_lib_folder():
    """Return the ``lib`` folder of the gradle installation on PATH.

    Runs a ``getHomeDir`` task (presumably defined by this project's gradle
    build -- confirm) and parses the home directory from its stdout.

    Raises:
        Exception: if the task output cannot be parsed.
    """
    x = run_process(['gradle', 'getHomeDir'], capture_output=True)
    output = x.stdout.decode(encoding='UTF-8', errors='strict')
    m = re.search(r'(?m)^> Task :getHomeDir\n([^\n]+)$', output)
    if m is None:
        print("gradle getHomeDir output:\n" + output, file=sys.stderr)
        raise Exception('Cannot determine gradle home directory')
    gradle_home = m.group(1)
    print("Gradle home directory: " + gradle_home)
    return gradle_home + '/lib'
def find_jar(path, pattern):
    """Return the jar files directly under *path* whose names start with *pattern*.

    Raises:
        Exception: if no matching jar exists, so a missing dependency fails
            fast instead of surfacing as a confusing classpath error later.
    """
    matches = glob.glob('%s/%s*.jar' % (path, pattern))
    if not matches:
        raise Exception('Cannot find jar file %s under path %s' %
                        (pattern, path))
    return matches
def patterns_to_classpath(path, patterns):
    """Expand the jar-name *patterns* under *path* into a classpath string."""
    jars = []
    for jar_pattern in patterns:
        jars.extend(find_jar(path, jar_pattern))
    return os.path.pathsep.join(jars)
def transform_to_embeddable(srcs):
    """Rewrite IntelliJ imports in *srcs*, in place, to their shaded names.

    The embeddable Kotlin compiler relocates ``com.intellij`` under
    ``org.jetbrains.kotlin``, so every source file is rewritten accordingly.
    """
    old_package = 'import com.intellij'
    new_package = 'import org.jetbrains.kotlin.com.intellij'
    for src_path in srcs:
        with open(src_path, 'r') as src_file:
            text = src_file.read()
        with open(src_path, 'w') as src_file:
            src_file.write(text.replace(old_package, new_package))
def compile(jars, java_jars, dependency_folder, transform_to_embeddable, output, tmp_dir, version):
    """Build one extractor jar for a single Kotlin *version*.

    Copies ``src`` into *tmp_dir*, prunes the version-specific source folders
    of every other supported version, applies *transform_to_embeddable* to
    the copied sources, and compiles the result into *output*.

    Note: shadows the ``compile`` builtin; the builtin is not used here.
    """
    classpath = patterns_to_classpath(dependency_folder, jars)
    java_classpath = patterns_to_classpath(dependency_folder, java_jars)
    # Work on a disposable copy of the sources so pruning and import
    # rewriting never modify the checked-in tree.
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
    shutil.copytree('src', tmp_dir)
    for v in kotlin_plugin_versions.many_versions:
        if v != version:
            shutil.rmtree(
                tmp_dir + '/main/kotlin/utils/versions/v_' + v.replace('.', '_'))
    srcs = find_sources(tmp_dir)
    transform_to_embeddable(srcs)
    compile_to_jar(srcs, classpath, java_classpath, output)
    shutil.rmtree(tmp_dir)
def compile_embeddable(version):
    """Build the extractor jar targeting the embeddable compiler for *version*."""
    compile(['kotlin-stdlib-' + version, 'kotlin-compiler-embeddable-' + version],
            ['kotlin-stdlib-' + version],
            kotlin_dependency_folder,
            transform_to_embeddable,
            'codeql-extractor-kotlin-embeddable-%s.jar' % (version),
            'build/temp_src',
            version)
def compile_standalone(version):
    """Build the extractor jar against the standalone compiler for *version*."""
    compile(['kotlin-stdlib-' + version, 'kotlin-compiler-' + version],
            ['kotlin-stdlib-' + version],
            kotlin_dependency_folder,
            lambda srcs: None,  # standalone build needs no import rewriting
            'codeql-extractor-kotlin-standalone-%s.jar' % (version),
            'build/temp_src',
            version)
# Entry point: --many builds every supported Kotlin version in both
# standalone and embeddable flavours; otherwise build a single standalone
# jar (version chosen by kotlin_plugin_versions.get_single_version).
if args.many:
    for version in kotlin_plugin_versions.many_versions:
        compile_standalone(version)
        compile_embeddable(version)
else:
    compile_standalone(kotlin_plugin_versions.get_single_version())
| 32.806763
| 133
| 0.621558
|
import argparse
import kotlin_plugin_versions
import glob
import platform
import re
import subprocess
import shutil
import os
import os.path
import sys
import shlex
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--dependencies', default='../../../resources/kotlin-dependencies',
help='Folder containing the dependencies')
parser.add_argument('--many', action='store_true',
help='Build for all versions/kinds')
parser.add_argument('--single', action='store_false',
dest='many', help='Build for a single version/kind')
return parser.parse_args()
args = parse_args()
def is_windows():
if platform.system() == 'Windows':
return True
if platform.system().startswith('CYGWIN'):
return True
return False
kotlinc = 'kotlinc.bat' if is_windows() else 'kotlinc'
javac = 'javac'
kotlin_dependency_folder = args.dependencies
def quote_for_batch(arg):
if ';' in arg or '=' in arg:
if '"' in arg:
raise Exception('Need to quote something containing a quote')
return '"' + arg + '"'
else:
return arg
def run_process(cmd, capture_output=False):
print("Running command: " + shlex.join(cmd))
if is_windows():
cmd = ' '.join(map(quote_for_batch, cmd))
print("Converted to Windows command: " + cmd)
try:
return subprocess.run(cmd, check=True, capture_output=capture_output)
except subprocess.CalledProcessError as e:
print("In: " + os.getcwd(), file=sys.stderr)
shell_cmd = cmd if is_windows() else shlex.join(cmd)
print("Command failed: " + shell_cmd, file=sys.stderr)
if capture_output:
print("stdout output:\n" + e.stdout.decode(encoding='UTF-8',
errors='replace'), file=sys.stderr)
print("stderr output:\n" + e.stderr.decode(encoding='UTF-8',
errors='replace'), file=sys.stderr)
raise e
def compile_to_dir(srcs, classpath, java_classpath, output):
# Use kotlinc to compile .kt files:
run_process([kotlinc,
# kotlinc can default to 256M, which isn't enough when we are extracting the build
'-J-Xmx2G',
'-Xopt-in=kotlin.RequiresOptIn',
'-d', output,
'-module-name', 'codeql-kotlin-extractor',
'-no-reflect', '-no-stdlib',
'-jvm-target', '1.8',
'-classpath', classpath] + srcs)
# Use javac to compile .java files, referencing the Kotlin class files:
run_process([javac,
'-d', output,
'-source', '8', '-target', '8',
'-classpath', os.path.pathsep.join([output, classpath, java_classpath])] + [s for s in srcs if s.endswith(".java")])
def compile_to_jar(srcs, classpath, java_classpath, output):
builddir = 'build/classes'
if os.path.exists(builddir):
shutil.rmtree(builddir)
os.makedirs(builddir)
compile_to_dir(srcs, classpath, java_classpath, builddir)
run_process(['jar', 'cf', output,
'-C', builddir, '.',
'-C', 'src/main/resources', 'META-INF'])
shutil.rmtree(builddir)
def find_sources(path):
return glob.glob(path + '/**/*.kt', recursive=True) + glob.glob(path + '/**/*.java', recursive=True)
def get_kotlin_lib_folder():
x = run_process([kotlinc, '-version', '-verbose'], capture_output=True)
output = x.stderr.decode(encoding='UTF-8', errors='strict')
m = re.match(
r'.*\nlogging: using Kotlin home directory ([^\n]+)\n.*', output)
if m is None:
raise Exception('Cannot determine kotlinc home directory')
kotlin_home = m.group(1)
print("Kotlin home directory: " + kotlin_home)
return kotlin_home + '/lib'
def get_gradle_lib_folder():
x = run_process(['gradle', 'getHomeDir'], capture_output=True)
output = x.stdout.decode(encoding='UTF-8', errors='strict')
m = re.search(r'(?m)^> Task :getHomeDir\n([^\n]+)$', output)
if m is None:
print("gradle getHomeDir output:\n" + output, file=sys.stderr)
raise Exception('Cannot determine gradle home directory')
gradle_home = m.group(1)
print("Gradle home directory: " + gradle_home)
return gradle_home + '/lib'
def find_jar(path, pattern):
result = glob.glob(path + '/' + pattern + '*.jar')
if len(result) == 0:
raise Exception('Cannot find jar file %s under path %s' %
(pattern, path))
return result
def patterns_to_classpath(path, patterns):
result = []
for pattern in patterns:
result += find_jar(path, pattern)
return os.path.pathsep.join(result)
def transform_to_embeddable(srcs):
# replace imports in files:
for src in srcs:
with open(src, 'r') as f:
content = f.read()
content = content.replace('import com.intellij',
'import org.jetbrains.kotlin.com.intellij')
with open(src, 'w') as f:
f.write(content)
def compile(jars, java_jars, dependency_folder, transform_to_embeddable, output, tmp_dir, version):
classpath = patterns_to_classpath(dependency_folder, jars)
java_classpath = patterns_to_classpath(dependency_folder, java_jars)
if os.path.exists(tmp_dir):
shutil.rmtree(tmp_dir)
shutil.copytree('src', tmp_dir)
for v in kotlin_plugin_versions.many_versions:
if v != version:
shutil.rmtree(
tmp_dir + '/main/kotlin/utils/versions/v_' + v.replace('.', '_'))
srcs = find_sources(tmp_dir)
transform_to_embeddable(srcs)
compile_to_jar(srcs, classpath, java_classpath, output)
shutil.rmtree(tmp_dir)
def compile_embeddable(version):
compile(['kotlin-stdlib-' + version, 'kotlin-compiler-embeddable-' + version],
['kotlin-stdlib-' + version],
kotlin_dependency_folder,
transform_to_embeddable,
'codeql-extractor-kotlin-embeddable-%s.jar' % (version),
'build/temp_src',
version)
def compile_standalone(version):
compile(['kotlin-stdlib-' + version, 'kotlin-compiler-' + version],
['kotlin-stdlib-' + version],
kotlin_dependency_folder,
lambda srcs: None,
'codeql-extractor-kotlin-standalone-%s.jar' % (version),
'build/temp_src',
version)
if args.many:
for version in kotlin_plugin_versions.many_versions:
compile_standalone(version)
compile_embeddable(version)
else:
compile_standalone(kotlin_plugin_versions.get_single_version())
| true
| true
|
f707741208aa7e225a0d5059459340c1fec7afbd
| 8,602
|
py
|
Python
|
tests/strategies/test_ddp_strategy_with_comm_hook.py
|
krfricke/pytorch-lightning
|
fbd887df9d487da4c57d884e01b3401af140b1bc
|
[
"Apache-2.0"
] | 3,469
|
2019-03-31T03:09:16.000Z
|
2020-01-13T15:06:31.000Z
|
tests/strategies/test_ddp_strategy_with_comm_hook.py
|
krfricke/pytorch-lightning
|
fbd887df9d487da4c57d884e01b3401af140b1bc
|
[
"Apache-2.0"
] | 524
|
2019-04-02T12:33:39.000Z
|
2020-01-14T02:53:33.000Z
|
tests/strategies/test_ddp_strategy_with_comm_hook.py
|
krfricke/pytorch-lightning
|
fbd887df9d487da4c57d884e01b3401af140b1bc
|
[
"Apache-2.0"
] | 365
|
2019-04-02T22:14:04.000Z
|
2020-01-13T17:21:54.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.strategies import DDPSpawnStrategy, DDPStrategy
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
if torch.distributed.is_available():
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD
if _TORCH_GREATER_EQUAL_1_10:
import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD
class TestDDPStrategy(DDPStrategy):
    """DDPStrategy that verifies which DDP comm hook got attached.

    The expected hook name (callers pass the hook's ``__qualname__``) is
    captured at construction and compared in ``teardown`` against the name
    DDP recorded in its logging data.
    """

    def __init__(self, expected_ddp_comm_hook_name, *args, **kwargs):
        self.expected_ddp_comm_hook_name = expected_ddp_comm_hook_name
        super().__init__(*args, **kwargs)

    def teardown(self):
        # check here before unwrapping DistributedDataParallel in self.teardown
        attached_ddp_comm_hook_name = self.model._get_ddp_logging_data()["comm_hook"]
        assert attached_ddp_comm_hook_name == self.expected_ddp_comm_hook_name
        return super().teardown()
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_fp16_compress_comm_hook(tmpdir):
    """Test for DDP FP16 compress hook."""
    model = BoringModel()
    # TestDDPStrategy asserts during teardown that this hook ends up
    # attached to the wrapped DistributedDataParallel module.
    strategy = TestDDPStrategy(
        expected_ddp_comm_hook_name=default.fp16_compress_hook.__qualname__,
        ddp_comm_hook=default.fp16_compress_hook,
    )
    trainer = Trainer(
        max_epochs=1,
        accelerator="gpu",
        devices=2,
        strategy=strategy,
        default_root_dir=tmpdir,
        sync_batchnorm=True,
        fast_dev_run=True,
        enable_progress_bar=False,
        enable_model_summary=False,
    )
    trainer.fit(model)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_sgd_comm_hook(tmpdir):
    """Test for DDP powerSGD comm hook."""
    model = BoringModel()
    # powerSGD needs its own state object alongside the hook itself.
    strategy = TestDDPStrategy(
        expected_ddp_comm_hook_name=powerSGD.powerSGD_hook.__qualname__,
        ddp_comm_state=powerSGD.PowerSGDState(process_group=None),
        ddp_comm_hook=powerSGD.powerSGD_hook,
    )
    trainer = Trainer(
        max_epochs=1,
        accelerator="gpu",
        devices=2,
        strategy=strategy,
        default_root_dir=tmpdir,
        sync_batchnorm=True,
        fast_dev_run=True,
        enable_progress_bar=False,
        enable_model_summary=False,
    )
    trainer.fit(model)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_fp16_compress_wrap_sgd_comm_hook(tmpdir):
    """Test for DDP FP16 compress wrapper for SGD hook."""
    model = BoringModel()
    # The wrapper composes with the powerSGD hook, so the attached hook's
    # name is the wrapped callable's qualname.
    strategy = TestDDPStrategy(
        expected_ddp_comm_hook_name=default.fp16_compress_wrapper(powerSGD.powerSGD_hook).__qualname__,
        ddp_comm_state=powerSGD.PowerSGDState(process_group=None),
        ddp_comm_hook=powerSGD.powerSGD_hook,
        ddp_comm_wrapper=default.fp16_compress_wrapper,
    )
    trainer = Trainer(
        max_epochs=1,
        accelerator="gpu",
        devices=2,
        strategy=strategy,
        default_root_dir=tmpdir,
        sync_batchnorm=True,
        fast_dev_run=True,
        enable_progress_bar=False,
        enable_model_summary=False,
    )
    trainer.fit(model)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_spawn_fp16_compress_comm_hook(tmpdir):
    """Test for DDP Spawn FP16 compress hook."""
    model = BoringModel()
    # Plain DDPSpawnStrategy here (no attachment assertion in teardown);
    # the test only verifies that fitting completes with the hook set.
    strategy = DDPSpawnStrategy(ddp_comm_hook=default.fp16_compress_hook)
    trainer = Trainer(
        max_epochs=1,
        accelerator="gpu",
        devices=2,
        strategy=strategy,
        default_root_dir=tmpdir,
        sync_batchnorm=True,
        fast_dev_run=True,
        enable_progress_bar=False,
        enable_model_summary=False,
    )
    trainer.fit(model)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.10.0", skip_windows=True, standalone=True)
def test_ddp_post_local_sgd_comm_hook(tmpdir):
    """Test for DDP post-localSGD hook."""
    model = BoringModel()
    # PostLocalSGDState configures when the hook switches over
    # (start_localSGD_iter); model_averaging_period is the DDPStrategy knob.
    strategy = TestDDPStrategy(
        expected_ddp_comm_hook_name=post_localSGD.post_localSGD_hook.__qualname__,
        ddp_comm_state=post_localSGD.PostLocalSGDState(
            process_group=None,
            subgroup=None,
            start_localSGD_iter=8,
        ),
        ddp_comm_hook=post_localSGD.post_localSGD_hook,
        model_averaging_period=4,
    )
    trainer = Trainer(
        fast_dev_run=True,
        accelerator="gpu",
        devices=2,
        strategy=strategy,
        default_root_dir=tmpdir,
        sync_batchnorm=True,
        enable_progress_bar=False,
        enable_model_summary=False,
    )
    trainer.fit(model)
    assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(skip_windows=True, min_torch="1.10.0", min_cuda_gpus=2, standalone=True)
@mock.patch("torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters")
def test_post_local_sgd_model_averaging(average_parameters_mock, tmpdir):
    """Test that when using DDP with post-localSGD, model averaging is called."""
    model = BoringModel()

    # test regular ddp does not call model averaging
    trainer = Trainer(
        fast_dev_run=True,
        accelerator="gpu",
        devices=2,
        strategy="ddp",
        default_root_dir=tmpdir,
        sync_batchnorm=True,
        enable_progress_bar=False,
        enable_model_summary=False,
    )
    trainer.fit(model)
    average_parameters_mock.assert_not_called()

    # test ddp with post-localSGD does call model averaging
    ddp_strategy = DDPStrategy(
        ddp_comm_state=post_localSGD.PostLocalSGDState(
            process_group=None,
            subgroup=None,
            start_localSGD_iter=8,
        ),
        ddp_comm_hook=post_localSGD.post_localSGD_hook,
        model_averaging_period=4,
    )
    # NOTE(review): this second Trainer omits the enable_progress_bar /
    # enable_model_summary=False flags used everywhere else in this file --
    # presumably unintentional; confirm.
    trainer = Trainer(
        fast_dev_run=True,
        accelerator="gpu",
        devices=2,
        strategy=ddp_strategy,
        default_root_dir=tmpdir,
        sync_batchnorm=True,
    )
    trainer.fit(model)
    average_parameters_mock.assert_called()
@RunIf(skip_windows=True, min_torch="1.10.0", min_cuda_gpus=2, standalone=True)
@mock.patch("torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters")
def test_post_local_sgd_model_averaging_value_error(average_parameters_mock, tmpdir):
    """Test that when using DDP with post-localSGD a ValueError is thrown when the optimizer is
    ZeroRedundancyOptimizer."""
    from torch.distributed.optim import ZeroRedundancyOptimizer

    class OptimizerModel(BoringModel):
        # Distributed optimizer: incompatible with model averaging below.
        def configure_optimizers(self):
            return ZeroRedundancyOptimizer(params=self.parameters(), optimizer_class=torch.optim.Adam, lr=0.01)

    model = OptimizerModel()
    strategy = DDPStrategy(
        ddp_comm_state=post_localSGD.PostLocalSGDState(
            process_group=None,
            subgroup=None,
            start_localSGD_iter=8,
        ),
        ddp_comm_hook=post_localSGD.post_localSGD_hook,
        model_averaging_period=4,
    )
    trainer = Trainer(
        fast_dev_run=True,
        accelerator="gpu",
        devices=2,
        strategy=strategy,
        default_root_dir=tmpdir,
        sync_batchnorm=True,
        enable_progress_bar=False,
        enable_model_summary=False,
    )
    with pytest.raises(ValueError, match="Currently model averaging cannot work with a distributed optimizer"):
        trainer.fit(model)
    # Averaging must never have started before the error surfaced.
    average_parameters_mock.assert_not_called()
| 34.825911
| 111
| 0.713439
|
from unittest import mock
import pytest
import torch
from pytorch_lightning import Trainer
from pytorch_lightning.strategies import DDPSpawnStrategy, DDPStrategy
from pytorch_lightning.utilities import _TORCH_GREATER_EQUAL_1_10
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
if torch.distributed.is_available():
from torch.distributed.algorithms.ddp_comm_hooks import default_hooks as default
from torch.distributed.algorithms.ddp_comm_hooks import powerSGD_hook as powerSGD
if _TORCH_GREATER_EQUAL_1_10:
import torch.distributed.algorithms.ddp_comm_hooks.post_localSGD_hook as post_localSGD
class TestDDPStrategy(DDPStrategy):
def __init__(self, expected_ddp_comm_hook_name, *args, **kwargs):
self.expected_ddp_comm_hook_name = expected_ddp_comm_hook_name
super().__init__(*args, **kwargs)
def teardown(self):
attached_ddp_comm_hook_name = self.model._get_ddp_logging_data()["comm_hook"]
assert attached_ddp_comm_hook_name == self.expected_ddp_comm_hook_name
return super().teardown()
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_fp16_compress_comm_hook(tmpdir):
model = BoringModel()
strategy = TestDDPStrategy(
expected_ddp_comm_hook_name=default.fp16_compress_hook.__qualname__,
ddp_comm_hook=default.fp16_compress_hook,
)
trainer = Trainer(
max_epochs=1,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
fast_dev_run=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_sgd_comm_hook(tmpdir):
model = BoringModel()
strategy = TestDDPStrategy(
expected_ddp_comm_hook_name=powerSGD.powerSGD_hook.__qualname__,
ddp_comm_state=powerSGD.PowerSGDState(process_group=None),
ddp_comm_hook=powerSGD.powerSGD_hook,
)
trainer = Trainer(
max_epochs=1,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
fast_dev_run=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_fp16_compress_wrap_sgd_comm_hook(tmpdir):
model = BoringModel()
strategy = TestDDPStrategy(
expected_ddp_comm_hook_name=default.fp16_compress_wrapper(powerSGD.powerSGD_hook).__qualname__,
ddp_comm_state=powerSGD.PowerSGDState(process_group=None),
ddp_comm_hook=powerSGD.powerSGD_hook,
ddp_comm_wrapper=default.fp16_compress_wrapper,
)
trainer = Trainer(
max_epochs=1,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
fast_dev_run=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.9.0", skip_windows=True, standalone=True)
def test_ddp_spawn_fp16_compress_comm_hook(tmpdir):
model = BoringModel()
strategy = DDPSpawnStrategy(ddp_comm_hook=default.fp16_compress_hook)
trainer = Trainer(
max_epochs=1,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
fast_dev_run=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(min_cuda_gpus=2, min_torch="1.10.0", skip_windows=True, standalone=True)
def test_ddp_post_local_sgd_comm_hook(tmpdir):
model = BoringModel()
strategy = TestDDPStrategy(
expected_ddp_comm_hook_name=post_localSGD.post_localSGD_hook.__qualname__,
ddp_comm_state=post_localSGD.PostLocalSGDState(
process_group=None,
subgroup=None,
start_localSGD_iter=8,
),
ddp_comm_hook=post_localSGD.post_localSGD_hook,
model_averaging_period=4,
)
trainer = Trainer(
fast_dev_run=True,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
assert trainer.state.finished, f"Training failed with {trainer.state}"
@RunIf(skip_windows=True, min_torch="1.10.0", min_cuda_gpus=2, standalone=True)
@mock.patch("torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters")
def test_post_local_sgd_model_averaging(average_parameters_mock, tmpdir):
model = BoringModel()
trainer = Trainer(
fast_dev_run=True,
accelerator="gpu",
devices=2,
strategy="ddp",
default_root_dir=tmpdir,
sync_batchnorm=True,
enable_progress_bar=False,
enable_model_summary=False,
)
trainer.fit(model)
average_parameters_mock.assert_not_called()
ddp_strategy = DDPStrategy(
ddp_comm_state=post_localSGD.PostLocalSGDState(
process_group=None,
subgroup=None,
start_localSGD_iter=8,
),
ddp_comm_hook=post_localSGD.post_localSGD_hook,
model_averaging_period=4,
)
trainer = Trainer(
fast_dev_run=True,
accelerator="gpu",
devices=2,
strategy=ddp_strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
)
trainer.fit(model)
average_parameters_mock.assert_called()
@RunIf(skip_windows=True, min_torch="1.10.0", min_cuda_gpus=2, standalone=True)
@mock.patch("torch.distributed.algorithms.model_averaging.averagers.PeriodicModelAverager.average_parameters")
def test_post_local_sgd_model_averaging_value_error(average_parameters_mock, tmpdir):
from torch.distributed.optim import ZeroRedundancyOptimizer
class OptimizerModel(BoringModel):
def configure_optimizers(self):
return ZeroRedundancyOptimizer(params=self.parameters(), optimizer_class=torch.optim.Adam, lr=0.01)
model = OptimizerModel()
strategy = DDPStrategy(
ddp_comm_state=post_localSGD.PostLocalSGDState(
process_group=None,
subgroup=None,
start_localSGD_iter=8,
),
ddp_comm_hook=post_localSGD.post_localSGD_hook,
model_averaging_period=4,
)
trainer = Trainer(
fast_dev_run=True,
accelerator="gpu",
devices=2,
strategy=strategy,
default_root_dir=tmpdir,
sync_batchnorm=True,
enable_progress_bar=False,
enable_model_summary=False,
)
with pytest.raises(ValueError, match="Currently model averaging cannot work with a distributed optimizer"):
trainer.fit(model)
average_parameters_mock.assert_not_called()
| true
| true
|
f7077532ef595e987257cd5055e8eba48e9aa978
| 26,132
|
py
|
Python
|
alphagradient/utils.py
|
nathanheidacker/AlphaGradient
|
cf031058f3e91381575e2df44cc029bcc7f4cc73
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
alphagradient/utils.py
|
nathanheidacker/AlphaGradient
|
cf031058f3e91381575e2df44cc029bcc7f4cc73
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
alphagradient/utils.py
|
nathanheidacker/AlphaGradient
|
cf031058f3e91381575e2df44cc029bcc7f4cc73
|
[
"MIT",
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Standard utility functions used throughout AlphaGradient"""
# Standard Imports
from __future__ import annotations
from abc import ABC, abstractmethod
import builtins
from datetime import (
date,
datetime,
time,
timedelta,
)
import math
from pathlib import Path
# Third Party Imports
import numpy as np
import pandas as pd
# Typing
from typing import (
TYPE_CHECKING,
Any,
Literal,
Generator,
Generic,
Iterable,
Optional,
TypeVar,
Union,
)
T = TypeVar("T")
class PropertyType(Generic[T]):
    """A Type class for property objects themselves, before being bound to a class instance"""

    # Typing stub only: mirrors ``property.fget`` so checkers can infer the
    # property's value type ``T``; the body is intentionally ``...``.
    def fget(self, *args: Any) -> T:
        ...
Property = builtins.property
"""A Type for builtin properties that have been bound to a class instance"""
PyNumber = Union[int, float]
"""Numeric type that does not include complex numbers (only native python types)"""
Number = Union[PyNumber, np.number, pd.core.arrays.numeric.NumericDtype]
"""Numeric type that does not include complex numbers"""
DatetimeLike = Union[pd.Timestamp, np.datetime64, date, datetime, str]
"""Objects convertable to python datetimes"""
TimeLike = Union[time, str]
"""Objects convertable to python time objects"""
DateOrTime = Union[DatetimeLike, time]
"""Objects that are either DatetimeLike or TimeLike in nature"""
if TYPE_CHECKING:
from typeshed import SupportsLessThanT as SLTT
_global_persistent_path: PropertyType[Path]
def auto_batch(iterable: Iterable) -> Generator:
    """Yield automatically sized batches of *iterable*.

    Picks a multiprocessing-friendly batch size via ``auto_batch_size`` and
    hands the actual batching off to ``get_batches``.

    Parameters:
        iterable: An iterable from which to create a batch generator

    Returns:
        The batch generator of the iterable input
    """
    batch_size = auto_batch_size(iterable)
    return get_batches(iterable, batch_size)
def auto_batch_size(iterable: Iterable) -> int:
    """Return a multiprocessing-friendly batch size for *iterable*.

    The size follows a downward-opening parabola in the item count (vertex
    at 10,000 items), clamped to the range [30, 100].

    Parameters:
        iterable (Iterable): Sized iterable to determine optimal batch size for

    Returns:
        The optimal batch size for multiprocessing
    """
    # Materialize so len() works even for plain iterators/generators.
    items = list(iterable)

    # Parabola parameters.
    h_offset = 10000
    h_stretch = 70 / 100_000_000
    v_offset = 100

    # Same arithmetic sequence as the expanded quadratic:
    # -(n - h_offset)**2 * h_stretch + v_offset
    size: Number
    size = len(items) - h_offset
    size = size**2
    size *= -1
    size *= h_stretch
    size += v_offset

    # Clamp to sane multiprocessing bounds.
    return bounded(int(size), lower=30, upper=100)
def bounded(
    to_bound: SLTT, lower: Optional[SLTT] = None, upper: Optional[SLTT] = None
) -> SLTT:
    """
    Bounds an object between a lower and upper bound

    Given an object that defines behavior for comparison (__lt__, __gt__),
    returns the object bounded between the lower and upper bounds. Boundaries
    are omitted if they are not provided (None). If lower and upper are not
    None, they must be of the same type as to_bound.

    Type Explanation:
        SLTT (SupportsLessThanT): A TypeVar which implements the __lt__ method.

    Parameters:
        to_bound (SLTT): the object to be bounded
        lower (Optional[SLTT]): the lower boundary of the operation
        upper (Optional[SLTT]): the upper boundary of the operation

    Returns:
        The bounded object

    Raises:
        ValueError: When neither 'lower' nor 'upper' is provided
    """
    if lower is None and upper is None:
        raise ValueError(
            "Of the parameters 'lower' and 'upper', at least one must be "
            "specified"
        )
    # Compare against None explicitly so falsy bounds (eg. 0, empty) are
    # still honored; the previous truthiness check silently ignored them.
    if lower is not None:
        to_bound = max(to_bound, lower)
    if upper is not None:
        to_bound = min(to_bound, upper)
    return to_bound
def deconstruct_dt(dt: DateOrTime) -> dict[str, float]:
    """Return a dictionary of the date/time attribute values on ``dt``.

    Keys are the object's date- and/or time-related attribute names, and
    values are the associated attribute values. Timestrings are parsed first.

    Parameters:
        dt (DateOrTime): the dt to deconstruct

    Returns:
        A dictionary of attributes and their associated values on dt

    Raises:
        TypeError: Raised if dt is not a datetime-like object, as it wont have
            the proper attributes.
    """
    date_parts = ["year", "month", "day"]
    time_parts = ["hour", "minute", "second", "microsecond"]
    # Accept string arguments by converting them to a time first
    if isinstance(dt, str):
        dt = read_timestring(dt)
    # Pick the attribute set matching dt's type (datetime carries both)
    if isinstance(dt, datetime):
        parts = date_parts + time_parts
    elif isinstance(dt, time):
        parts = time_parts
    elif isinstance(dt, date):
        parts = date_parts
    else:
        raise TypeError(f"{dt=} is not a valid datetime object")
    return {part: getattr(dt, part) for part in parts}
def get_batches(iterable: Iterable, size: int = 100) -> Generator:
    """Yield batches of ``iterable`` with at most ``size`` elements each.

    Parameters:
        iterable: The iterable to yield batches of
        size: The batch size of the returned generator

    Returns:
        A generator which yields batches of size 'size' of the iterable
    """
    # Materialize so the input can be sliced (ie generators)
    items = list(iterable)
    total = len(items)
    # Slicing clamps automatically, so the final short batch needs no
    # special handling
    for batch_start in range(0, total, size):
        yield items[batch_start : batch_start + size]
def get_time(t: DateOrTime) -> time:
    """Return the datetime.time corresponding to ``t``.

    t can be an isoformat time string, a datetime string, a time object,
    or a datetime-like object.

    Parameters:
        t (DateOrTime): The time object to convert

    Returns:
        The converted datetime.time object
    """
    # Datetime-like inputs: convert, then take the time component
    if not isinstance(t, (time, str)):
        return to_datetime(t).time()
    # Time objects and timestrings convert directly
    return to_time(t)
def get_weekday(dt: DatetimeLike) -> str:
    """Return the day of the week on which a DatetimeLike object falls.

    Parameters:
        dt (DatetimeLike): The object whose weekday is determined

    Returns:
        String of the day of the week on which the DatetimeLike object falls
    """
    # datetime.weekday() numbers Monday as 0 through Sunday as 6
    names = (
        "Monday",
        "Tuesday",
        "Wednesday",
        "Thursday",
        "Friday",
        "Saturday",
        "Sunday",
    )
    return names[to_datetime(dt).weekday()]
def is_func(f: Any) -> bool:
    """Return True if ``f`` is a kind of function.

    Identifies python objects whose sole or primary purpose is to be called
    directly, rather than objects that merely implement __call__. Unlike
    inspect.isfunction, this includes methods (bound and unbound) as well as
    abstract, static, and class methods.

    A 'function' is an instance of any of the following:
        * function
        * method (bound or unbound)
        * staticmethod
        * classmethod
        * abstractmethod
        * lambda
        * built-in-function

    Parameters:
        f: The object who's status as a function is being determined

    Returns:
        True if f is a method, function, builtin-method-or-function, or
        lambda, else False
    """
    # Throwaway definitions used purely to harvest the callable types

    class _Proto:
        def bound(self):
            pass

    class _AbstractProto(ABC):
        @abstractmethod
        def abstract(self):
            pass

    def _plain():
        pass

    callable_types = (
        type(_plain),                   # function
        type(_Proto.bound),             # method (unbound)
        type(_Proto().bound),           # method (bound)
        type(lambda: None),             # lambda
        type(print),                    # built-in function
        type(_AbstractProto.abstract),  # abstractmethod
        type(classmethod(_plain)),      # classmethod
        type(staticmethod(_plain)),     # staticmethod
    )
    return isinstance(f, callable_types)
def nearest_expiry(
    expiry: DatetimeLike, method: Literal["after", "before", "both"] = "after"
) -> datetime:
    """Return the nearest valid options expiry to the input datetime.

    Valid expiries fall on a weekday at market close (4:00 PM). A valid
    input is returned with only its time normalized to 4 PM; weekend inputs
    are shifted to a weekday according to `method`:

        * "after": the nearest expiry AFTER the input expiry
        * "before": the nearest expiry BEFORE the input expiry
        * "both": whichever of the two is closer; ties resolve to "after"

    The default is "after" because "before"/"both" can return an expiry
    prior to an algorithm's current date, which can cause options contracts
    to initialize as expired. Only change the method argument if you are
    positive the returned expiry will be greater than the algorithm's
    current date.

    Parameters:
        expiry (DatetimeLike):
            The expiry who's closest valid expiry will be determined
        method:
            One of "after", "before", or "both"

    Returns:
        The nearest valid expiry
    """
    # Normalize to a python datetime at market close (4 PM)
    expiry = set_time(to_datetime(expiry), "4:00 PM")
    weekday = expiry.weekday()
    # Monday(0) through Friday(4) are already valid
    if weekday <= 4:
        return expiry
    days_back = weekday - 4
    days_forward = 7 - weekday
    if method == "after":
        expiry += timedelta(days=days_forward)
    elif method == "before":
        expiry -= timedelta(days=days_back)
    elif method == "both":
        # Prefer "after" when the distances tie
        if days_back < days_forward:
            expiry -= timedelta(days=days_back)
        else:
            expiry += timedelta(days=days_forward)
    return expiry
def optimal_start(
    start: datetime,
    max_start: datetime,
    min_end: datetime,
    end: Optional[DatetimeLike] = None,
    t: Optional[TimeLike] = None,
) -> datetime:
    """
    Based an Environment's instantiated/tracked assets, returns an optimal
    datetime for starting a backtest

    Returns a backtest starting datetime that:
        * Is guaranteed to be within the date range of all intantiated assets
        * | Is guaranteed to have ample time for calculations of historical
          | volatility, beta, percent change etc. BEFORE the start date
        * Automatically adjusts to accomodate shorter ending periods

    Parameters:
        start:
            A datetime object indictating the actual starting datetime
        max_start:
            A datetime object indicating the maximum possible starting datetime
        min_end:
            A datetime object indicating the minimum possible ending datetime
        end (Optional[DatetimeLike]):
            The desired endpoint on which to base the optimal start point
        t (Optional[TimeLike]):
            The returned optimal start's time

    Returns:
        The optimal starting datetime
    """
    end = min_end if end is None else to_datetime(end)
    # If the maximum start date is on/after the end date, no date range
    # allows backtesting of all available data; fall back to 'start'.
    if max_start >= end:
        return start
    # Aim for the midpoint of the viable window to avoid syncing to the edge
    optimal_date = max_start + (end - max_start) / 2
    # Default the start's time to midnight unless specified otherwise
    t = "00:00:00" if t is None else to_time(t)
    # Bug fix: the original computed set_time(...) but discarded the result,
    # so the requested time was never applied to the midpoint date.
    optimal_date = set_time(optimal_date, t)
    # Bounding the date to acceptable minimums and maximums
    lower_bound = set_time(max_start + timedelta(days=1), t)
    upper_bound = set_time(max_start + timedelta(days=365), t)
    return bounded(optimal_date, lower=lower_bound, upper=upper_bound)
def progress_print(to_print: Any, last: list[int] = [0]) -> None:
    """Print ``to_print``, overwriting whatever the previous call printed.

    NOTE: the mutable default `last` is intentional; it persists across
    calls so the width of the previous message can be blanked out.
    """
    # Erase the previous message, then write the new one in its place
    previous_width = last[0]
    print("\r" + " " * previous_width, end="\r", flush=True)
    print(to_print, end="", flush=True)
    last[0] = len(str(to_print))
def read_timestring(timestring: str) -> time:
    """
    Given a timestring, returns a datetime.time object representative of the
    time

    This function reads in 'timestrings', which are one of two things:

    #. | Strings based on 12 hour clocks
       | (see ag.utils.read_twelve_hour_timestring docs)
    #. | Isoformat times as strings, using 24 hours
       | (eg 04:00:00, 18:30, 02:59:59.99, etc)

    Parameters:
        timestring:
            string representing the time

    Returns:
        The time object corresponding to the time in the timestring
    """
    try:
        # 12-hour-clock strings (eg "4:30 PM") take precedence
        return read_twelve_hour_timestring(timestring)
    except (TypeError, ValueError):
        # Fix: dropped the unused 'as e' binding from the except clause.
        # Fall back to 24-hour isoformat parsing (eg "16:30:00")
        return time.fromisoformat(timestring)
def read_twelve_hour_timestring(timestring: str) -> time:
    """Reads a timestring based on a 12 hour clock and returns a time

    Given a timestring representing a time on a 12 hour clock, returns the
    appropriate time object

    Must be formatted as follows:
        * hour | This is the only required value, integer
        * minute | separated from hour by a colon, optional, integer
        * second | separated from minute by a colon, optional, float
        * AM/PM | string 'AM' or 'PM', separated from second by a space

    When AM or PM is not provided in the timestring, AM will be assumed.

    Valid Examples:
        * '4:30 PM'
        * '4:30 AM'
        * '1 PM'
        * '1'
        * '11:59:59.999 PM'
        * '12:00:00 AM'

    Invalid Examples:
        * '0:00'
        * '13:30'
        * '103 PM'
        * '0'
        * '22'
        * '4:30:99 PM'
        * '3:99 PM'

    Parameters:
        timestring: The string containing the time to convert to a time object

    Returns:
        The corresponding time object

    Raises:
        TypeError:
            When timestring is not a string. Only str objects can be parsed
        ValueError:
            When the timetring is invalid / improperly formatted.
    """
    # Timestrings must be strs
    if not isinstance(timestring, str):
        raise TypeError(f"timestring must be a string, got {type(timestring)}")
    # Split off the optional AM/PM suffix; AM is assumed when absent
    parts = timestring.split(" ")
    ampm = parts[1] if len(parts) > 1 else "AM"
    # Getting individual time components
    info = parts[0].split(":")
    if len(info) > 4:
        raise ValueError(f"Failed to parse timestring {timestring}")
    # Collect components; unspecified ones default to 0
    attrs = ["hour", "minute", "second", "microsecond"]
    tdict = {attr: 0 for attr in attrs}
    for attr, value in zip(attrs, info):
        if attr == "second" and "." in value:
            # Fix: fractional seconds ("59.999") previously crashed int();
            # the fraction carries the microsecond component.
            whole, _, frac = value.partition(".")
            tdict["second"] = int(whole)
            tdict["microsecond"] = round(float("0." + frac) * 1_000_000)
        else:
            tdict[attr] = int(value)
    # hours less than 1 and more than 12 are off limits in 12 hour clocks
    if not 1 <= tdict["hour"] <= 12:
        raise ValueError(f"Failed to parse timestring {timestring}")
    # 12:30 AM is 00:30 isoformat
    # Fix: this line previously used '==' (a no-op comparison) instead of '='
    if ampm == "AM" and tdict["hour"] == 12:
        tdict["hour"] = 0
    # 12:30 PM is 12:30 isoformat, 1:30 PM is 13:30 isoformat
    elif ampm == "PM" and tdict["hour"] < 12:
        tdict["hour"] += 12
    # Building and returning a time object
    return time(**tdict)
def set_time(dt: DatetimeLike, t: DateOrTime) -> datetime:
    """Return a datetime sharing dt's date and t's time.

    Very similar to datetime.combine, but also accepts timestrings and
    datetime objects for the time argument.

    Parameters:
        dt (DatetimeLike): Datetime supplying the date
        t (DateOrTime): Object supplying the time

    Returns:
        python datetime.datetime object with converted time
    """
    # Normalize t down to a plain datetime.time
    if isinstance(t, str):
        t = read_timestring(t)
    if not isinstance(t, time):
        t = to_datetime(t).time()
    # Overwrite dt's time fields with those of t
    return to_datetime(dt).replace(**deconstruct_dt(t))
def timestring(t: DateOrTime) -> str:
    """Converts a time-like object to a 12-hour-clock timestring

    Given a time-like object t, returns a timestring represented by the
    12-hour-clock (eg. 4:30 PM).

    Parameters:
        t (DateOrTime):
            date or time object to read into a 12-hour-clock-based timestring

    Returns:
        A string representing the time on a 12-hour-clock
    """
    # Ensuring that t is a time object
    if not isinstance(t, time):
        t = to_datetime(t).time()
    # Zero-pad single-digit minutes ("4:05" rather than "4:5")
    minute = t.minute if t.minute > 9 else f"0{t.minute}"
    hour = t.hour
    ampm = "AM"
    # Fix: the original only handled hour > 12, so noon rendered as
    # "12:xx AM" and midnight as "0:xx AM".
    if hour >= 12:
        ampm = "PM"
        if hour > 12:
            hour -= 12
    elif hour == 0:
        # The midnight hour is written as 12 AM on a 12-hour clock
        hour = 12
    return f"{hour}:{minute} {ampm}"
def to_datetime(dtlike: DatetimeLike) -> datetime:
    """Convert a datetime-like object to a python standard datetime.

    Parameters:
        dtlike (DatetimeLike):
            The Datetime-convertable object

    Returns:
        The converted python datetime

    Raises:
        TypeError: Only accepts python-datetime-convertable objects
    """
    # NOTE(review): pd.Timestamp subclasses datetime, so Timestamps are
    # returned by the first branch and the second is effectively shadowed —
    # preserved as-is; confirm whether returning the Timestamp is intended.
    if isinstance(dtlike, datetime):
        return dtlike
    if isinstance(dtlike, pd.Timestamp):
        return dtlike.to_pydatetime()
    if isinstance(dtlike, np.datetime64):
        return pd.Timestamp(dtlike).to_pydatetime()
    if isinstance(dtlike, date):
        # Bare dates are promoted to midnight of that day
        return datetime.combine(dtlike, datetime.min.time())
    if isinstance(dtlike, str):
        return datetime.fromisoformat(dtlike)
    raise TypeError(f"Can not convert passed object {dtlike} to python datetime")
def to_step(current: datetime, delta: Union[DateOrTime, timedelta, float]) -> timedelta:
    """Convert an ambiguous delta object to a python timedelta 'step'.

    Strings are tried first as timestrings placed on the current date, then
    as isoformat datetimes. Time objects are placed on the current date;
    numbers are read as a count of days; timedeltas are applied directly;
    anything else is converted as a datetime.

    Parameters:
        current:
            The 'current' time, which determines how to interpret the delta
        delta (Union[DateOrTime, timedelta, float]):
            The object being passed that may represent a 'step' in time

    Returns:
        the appropriate timedelta 'step'

    Raises:
        TypeError:
            When passed a type that can not be coerced/interpreted
        ValueError:
            When a type-appropriate object can not be coerced, or the step
            in time is not AFTER the current time
    """
    # Resolve delta into the absolute datetime it points at
    if isinstance(delta, str):
        try:
            target = set_time(current, read_timestring(delta))
        except ValueError:
            target = datetime.fromisoformat(delta)
    elif isinstance(delta, time):
        target = set_time(current, delta)
    elif isinstance(delta, (float, int)):
        target = current + timedelta(days=delta)
    elif isinstance(delta, timedelta):
        target = current + delta
    else:
        # Remaining datetime-like types (or a TypeError for anything else)
        target = to_datetime(delta)
    # The step must move strictly forward in time
    if target <= current:
        raise ValueError(
            f"Passed delta {target} is prior to current time {current}. Please "
            "choose a time AFTER the current date."
        )
    return target - current
def to_time(tlike: TimeLike) -> time:
    """Convert a TimeLike object to a python standard time object.

    Parameters:
        tlike:
            The time-convertable object

    Returns:
        The converted python time object

    Raises:
        TypeError: Only accepts python-time-convertable objects
    """
    # Already a time: nothing to do
    if isinstance(tlike, time):
        return tlike
    if isinstance(tlike, str):
        return read_timestring(tlike)
    raise TypeError(f"Can not convert passed object {tlike} to python time")
class NullClass:
    """A do-nothing stand-in for a function, class, or module attribute.

    Any call, attribute access, or context-manager use of a NullClass simply
    returns the same instance, so these operations can be chained safely and
    indefinitely. Instances are falsy, so they can also be truth-tested like
    an optional value.

    Use this class in place of another function or class in order to safely
    use an attribute without making constant checks. This is most useful in
    place of functions/classes that perform logging/printing, but also makes
    sense in place of functions that modify things in place or always return
    None.

    Examples:
        .. highlight:: python
        .. code-block:: python

            class MyClass:
                def __init__(self, data, verbose=False):
                    # Call sites need no 'if verbose:' checks
                    self.print = print if verbose else NullClass()
                    self.print("Initialized as Verbose!")

                    # Works as a context manager too (eg. in place of tqdm)
                    self.tqdm = tqdm.progress_bar if verbose else NullClass()
                    with self.tqdm(total=1000) as pbar:
                        while condition:
                            self.do_something()
                            pbar.update(1)  # Safe!
    """

    def __call__(self, *args: Any, **kwargs: Any) -> NullClass:
        # Calling is a no-op that permits further chaining
        return self

    def __getattr__(self, attr: str) -> NullClass:
        # Any attribute access resolves to the instance itself
        return self

    def __enter__(self, *args, **kwargs) -> NullClass:
        # Usable in 'with' statements
        return self

    def __exit__(self, *args, **kwargs) -> None:
        pass

    def __bool__(self) -> bool:
        # Falsy, so truth-tests treat it like an absent value
        return False
| 30.85242
| 94
| 0.641321
|
from __future__ import annotations
from abc import ABC, abstractmethod
import builtins
from datetime import (
date,
datetime,
time,
timedelta,
)
import math
from pathlib import Path
import numpy as np
import pandas as pd
from typing import (
TYPE_CHECKING,
Any,
Literal,
Generator,
Generic,
Iterable,
Optional,
TypeVar,
Union,
)
T = TypeVar("T")
class PropertyType(Generic[T]):
def fget(self, *args: Any) -> T:
...
Property = builtins.property
PyNumber = Union[int, float]
Number = Union[PyNumber, np.number, pd.core.arrays.numeric.NumericDtype]
DatetimeLike = Union[pd.Timestamp, np.datetime64, date, datetime, str]
TimeLike = Union[time, str]
DateOrTime = Union[DatetimeLike, time]
if TYPE_CHECKING:
from typeshed import SupportsLessThanT as SLTT
_global_persistent_path: PropertyType[Path]
def auto_batch(iterable: Iterable) -> Generator:
return get_batches(iterable, auto_batch_size(iterable))
def auto_batch_size(iterable: Iterable) -> int:
iterable = list(iterable)
horizontal_offset = 10000
horizontal_stretch = 70 / 100_000_000
vertical_offset = 100
output: Number
output = len(iterable) - horizontal_offset
output = output**2
output *= -1
output *= horizontal_stretch
output += vertical_offset
return bounded(int(output), lower=30, upper=100)
def bounded(
to_bound: SLTT, lower: Optional[SLTT] = None, upper: Optional[SLTT] = None
) -> SLTT:
if lower is None and upper is None:
raise ValueError(
"Of the parameters 'lower' and 'upper', at least one must be" "specified"
)
if lower:
to_bound = max(to_bound, lower)
if upper:
to_bound = min(to_bound, upper)
return to_bound
def deconstruct_dt(dt: DateOrTime) -> dict[str, float]:
d = ["year", "month", "day"]
t = ["hour", "minute", "second", "microsecond"]
attrs = []
if isinstance(dt, str):
dt = read_timestring(dt)
if isinstance(dt, datetime):
attrs = d + t
elif isinstance(dt, time):
attrs = t
elif isinstance(dt, date):
attrs = d
else:
raise TypeError(f"{dt=} is not a valid datetime object")
dtdict = {}
for attr in attrs:
dtdict[attr] = getattr(dt, attr)
return dtdict
def get_batches(iterable: Iterable, size: int = 100) -> Generator:
iterable = list(iterable)
last = len(iterable)
for i in range(math.ceil(last / size)):
start = i * size
end = start + size
end = end if end < last else last
yield iterable[start:end]
def get_time(t: DateOrTime) -> time:
if isinstance(t, (time, str)):
return to_time(t)
return to_datetime(t).time()
def get_weekday(dt: DatetimeLike) -> str:
weekdays = {
0: "Monday",
1: "Tuesday",
2: "Wednesday",
3: "Thursday",
4: "Friday",
5: "Saturday",
6: "Sunday",
}
return weekdays[to_datetime(dt).weekday()]
def is_func(f: Any) -> bool:
class C:
def method(self):
pass
class ABCC(ABC):
@abstractmethod
def amethod(self):
pass
def func():
pass
cmethod = classmethod(func)
smethod = staticmethod(func)
lamb = lambda: None
c = C()
functype = type(func)
methodtype = type(C.method)
classmethodtype = type(cmethod)
staticmethodtype = type(smethod)
abstractmethodtype = type(ABCC.amethod)
boundmethodtype = type(c.method)
lambdatype = type(lamb)
builtintype = type(print)
return isinstance(
f,
(
functype,
methodtype,
boundmethodtype,
lambdatype,
builtintype,
abstractmethodtype,
classmethodtype,
staticmethodtype,
),
)
def nearest_expiry(
expiry: DatetimeLike, method: Literal["after", "before", "both"] = "after"
) -> datetime:
expiry = to_datetime(expiry)
expiry = set_time(expiry, "4:00 PM")
if expiry.weekday() > 4:
if method == "after":
dist = 7 - expiry.weekday()
expiry += timedelta(days=dist)
elif method == "before":
dist = expiry.weekday() - 4
expiry -= timedelta(days=dist)
elif method == "both":
bdist = expiry.weekday() - 4
adist = 7 - expiry.weekday()
if bdist < adist:
expiry -= timedelta(days=bdist)
else:
expiry += timedelta(days=adist)
return expiry
def optimal_start(
start: datetime,
max_start: datetime,
min_end: datetime,
end: Optional[DatetimeLike] = None,
t: Optional[TimeLike] = None,
) -> datetime:
end = min_end if end is None else to_datetime(end)
if max_start >= end:
return start
optimal_delta = (end - max_start) / 2
optimal_date = max_start + optimal_delta
t = "00:00:00" if t is None else to_time(t)
set_time(optimal_date, t)
# Bounding the date to acceptable minimums and maximums
lower_bound = set_time(max_start + timedelta(days=1), t)
upper_bound = set_time(max_start + timedelta(days=365), t)
optimal_start = bounded(optimal_date, lower=lower_bound, upper=upper_bound)
return optimal_start
def progress_print(to_print: Any, last: list[int] = [0]) -> None:
print("\r" + (" " * last[0]), end="\r", flush=True) # type: ignore[operator]
print(to_print, end="", flush=True)
last[0] = len(str(to_print))
def read_timestring(timestring: str) -> time:
try:
return read_twelve_hour_timestring(timestring)
except (TypeError, ValueError) as e:
return time.fromisoformat(timestring)
def read_twelve_hour_timestring(timestring: str) -> time:
# Timestrings must be strs
if not isinstance(timestring, str):
raise TypeError(f"timestring must be a string, got {type(timestring)}")
# Variable Initialization
ampm = "AM"
info = []
timestring = timestring.split(" ") # type: ignore[assignment]
# Getting AM/PM component
if len(timestring) > 1:
ampm = timestring[1]
# Getting individual time components
info = timestring[0].split(":")
# isoformat is 00:00:00.00, max 3 colons
if len(info) > 4:
raise ValueError(f"Failed to parse timestring {timestring}")
# collecting the attributes necessary to create a time object
tdict = {}
attrs = ["hour", "minute", "second", "microsecond"]
for attr, value in zip(attrs, info):
tdict[attr] = int(value)
# Setting missing components to 0
for attr in attrs:
if not tdict.get(attr):
tdict[attr] = 0
# hours less and 1 and more than 12 are off limits in 12 hour clocks
if not 1 <= tdict["hour"] <= 12:
raise ValueError(f"Failed to parse timestring {timestring}")
# 12:30 AM is 00:30 isoformat
if ampm == "AM" and tdict["hour"] == 12:
tdict["hour"] == 0
# 12:30 PM is 12:30 isoformat, 1:30 PM is 13:30 isoformat
elif ampm == "PM" and tdict["hour"] < 12:
tdict["hour"] += 12
# Building and returning a time object
return time(**tdict) # type: ignore[arg-type]
def set_time(dt: DatetimeLike, t: DateOrTime) -> datetime:
# Initializing the new time that will be set
newtime: dict[str, float] = {}
# Reading the necessary time attributes
if isinstance(t, str):
t = read_timestring(t)
newtime = deconstruct_dt(t)
elif isinstance(t, time):
newtime = deconstruct_dt(t)
else:
newtime = deconstruct_dt(to_datetime(t).time())
# Creating the new datetime with t=t
return to_datetime(dt).replace(**newtime) # type: ignore [arg-type]
def timestring(t: DateOrTime) -> str:
# Ensuring that t is a time object
if not isinstance(t, time):
t = to_datetime(t).time()
# Deconstructing components to create a time string
ampm = "AM"
hour = t.hour
minute = t.minute if t.minute > 9 else f"0{t.minute}"
if hour > 12:
ampm = "PM"
hour -= 12
return f"{hour}:{minute} {ampm}"
def to_datetime(dtlike: DatetimeLike) -> datetime:
if isinstance(dtlike, datetime):
return dtlike
elif isinstance(dtlike, pd.Timestamp):
return dtlike.to_pydatetime()
elif isinstance(dtlike, np.datetime64):
return pd.Timestamp(dtlike).to_pydatetime()
elif isinstance(dtlike, date):
return datetime.combine(dtlike, datetime.min.time())
elif isinstance(dtlike, str):
return datetime.fromisoformat(dtlike)
raise TypeError(f"Can not convert passed object {dtlike} to python datetime")
def to_step(current: datetime, delta: Union[DateOrTime, timedelta, float]) -> timedelta:
# Multiple parses must be made on strings to successfully coerce all of them
if isinstance(delta, str):
try:
delta = set_time(current, read_timestring(delta))
except ValueError:
delta = datetime.fromisoformat(delta) # type: ignore[arg-type]
elif isinstance(delta, time):
delta = set_time(current, delta)
elif isinstance(delta, (float, int)):
delta = current + timedelta(days=delta)
elif isinstance(delta, timedelta):
delta = current + delta
# if isinstance(delta, DatetimeLike):
else:
delta = to_datetime(delta)
if delta > current:
return delta - current
raise ValueError(
f"Passed delta {delta} is prior to current time {current}. Please "
"choose a time AFTER the current date."
)
def to_time(tlike: TimeLike) -> time:
if isinstance(tlike, str):
return read_timestring(tlike)
elif isinstance(tlike, time):
return tlike
raise TypeError(f"Can not convert passed object {tlike} to python time")
class NullClass:
def __call__(self, *args: Any, **kwargs: Any) -> NullClass:
return self
def __getattr__(self, attr: str) -> NullClass:
return self
def __enter__(self, *args, **kwargs) -> NullClass:
return self
def __exit__(self, *args, **kwargs) -> None:
pass
def __bool__(self) -> bool:
return False
| true
| true
|
f7077629f4047cd4624eb6867fe46fb9a5aa1e11
| 420
|
py
|
Python
|
core/wsgi.py
|
diegolinkk/praise_songs_control
|
8bfb1234cfd2f6cce977dbebed5bf939479cdcbf
|
[
"MIT"
] | null | null | null |
core/wsgi.py
|
diegolinkk/praise_songs_control
|
8bfb1234cfd2f6cce977dbebed5bf939479cdcbf
|
[
"MIT"
] | 3
|
2021-03-18T22:31:55.000Z
|
2021-09-22T18:21:36.000Z
|
core/wsgi.py
|
diegolinkk/praise_songs_control
|
8bfb1234cfd2f6cce977dbebed5bf939479cdcbf
|
[
"MIT"
] | null | null | null |
"""
WSGI config for core project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
# Point Django at this project's settings module before building the app
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
# Cling wraps the WSGI application so static files are served by dj-static
application = Cling(get_wsgi_application())
| 23.333333
| 78
| 0.785714
|
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'core.settings')
application = Cling(get_wsgi_application())
| true
| true
|
f7077667b8e2b3f0de93363073e81d69a01b08d9
| 1,237
|
py
|
Python
|
assets/data_and_scripts/Parsing/write_records_cancer-expressed_2.py
|
GTPB/PPB18
|
fe19c9b8c6b91dce22c554bc1fa48c97834f8a87
|
[
"CC-BY-4.0"
] | 4
|
2020-01-14T18:23:24.000Z
|
2021-07-30T10:15:10.000Z
|
assets/data_and_scripts/Parsing/write_records_cancer-expressed_2.py
|
GTPB/PPB18
|
fe19c9b8c6b91dce22c554bc1fa48c97834f8a87
|
[
"CC-BY-4.0"
] | null | null | null |
assets/data_and_scripts/Parsing/write_records_cancer-expressed_2.py
|
GTPB/PPB18
|
fe19c9b8c6b91dce22c554bc1fa48c97834f8a87
|
[
"CC-BY-4.0"
] | 1
|
2021-07-30T10:11:35.000Z
|
2021-07-30T10:11:35.000Z
|
'''
Parsing - Exercise 16

The script reads a multiple sequence file in FASTA format and
only writes to a new file the records the Uniprot ACs of which
are present in the list created in Exercise 14).

This version of the script collects the header and the sequence
separately, in case you wanted to manipulate them.
'''

# Build the set of cancer-expressed Uniprot ACs.
# (a set gives O(1) membership tests; 'with' guarantees the file is closed)
with open('cancer-expressed.txt') as cancer_file:
    cancer_list = {line.strip() for line in cancer_file}

with open('SwissProt-Human.fasta') as human_fasta, \
        open('cancer-expressed_records.fasta', 'w') as Outfile:
    # Initialize so an empty FASTA input cannot raise NameError below
    header = ''
    AC = None
    seq = ''
    # Read the FASTA input and check if ACs are in cancer_list
    for line in human_fasta:
        if line[0] == '>' and seq == '':
            # First record: remember its header and AC
            header = line
            AC = line.split('|')[1]
        elif line[0] != '>':
            seq = seq + line
        elif line[0] == '>' and seq != '':
            # A new header closes the previous record: write it if selected
            if AC in cancer_list:
                Outfile.write(header + seq)
            # Re-initialise variables for the next record
            header = line
            AC = line.split('|')[1]
            seq = ''
    # The last record is not followed by a header, so handle it here
    if AC is not None and AC in cancer_list:
        Outfile.write(header + seq)
| 25.770833
| 63
| 0.65966
|
cancer_file = open('cancer-expressed.txt')
human_fasta = open('SwissProt-Human.fasta')
Outfile = open('cancer-expressed_records.fasta','w')
cancer_list = []
for line in cancer_file:
AC = line.strip()
cancer_list.append(AC)
seq = ""
for line in human_fasta:
if line[0] == '>' and seq == '':
header = line
AC = line.split('|')[1]
elif line[0] != '>':
seq = seq + line
elif line[0] == '>' and seq != '':
if AC in cancer_list:
Outfile.write(header + seq)
header = line
AC = line.split('|')[1]
seq = ''
if AC in cancer_list:
Outfile.write(header + seq)
Outfile.close()
| true
| true
|
f70777f09a9a9f21addb5306d2a17be6ccea84c0
| 1,699
|
py
|
Python
|
gym_wmgds/envs/mujoco/ant.py
|
ozcell/gym_wmgds_ma
|
c2cb22943913361947216b908d50decc46616e99
|
[
"Python-2.0",
"OLDAP-2.7"
] | 1
|
2020-12-23T16:38:15.000Z
|
2020-12-23T16:38:15.000Z
|
gym_wmgds/envs/mujoco/ant.py
|
ozcell/gym_wmgds_ma
|
c2cb22943913361947216b908d50decc46616e99
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
gym_wmgds/envs/mujoco/ant.py
|
ozcell/gym_wmgds_ma
|
c2cb22943913361947216b908d50decc46616e99
|
[
"Python-2.0",
"OLDAP-2.7"
] | null | null | null |
import numpy as np
from gym_wmgds import utils
from gym_wmgds.envs.mujoco import mujoco_env
class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """MuJoCo Ant locomotion task: reward forward torso progress minus
    control and contact costs, plus a constant survival bonus."""

    def __init__(self):
        mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5)
        utils.EzPickle.__init__(self)

    def step(self, a):
        """Apply action *a* for frame_skip steps; return (obs, reward, done, info)."""
        x_before = self.get_body_com("torso")[0]
        self.do_simulation(a, self.frame_skip)
        x_after = self.get_body_com("torso")[0]

        reward_fwd = (x_after - x_before) / self.dt
        cost_ctrl = .5 * np.square(a).sum()
        clipped_contact = np.clip(self.sim.data.cfrc_ext, -1, 1)
        cost_contact = 0.5 * 1e-3 * np.sum(np.square(clipped_contact))
        reward_alive = 1.0
        total_reward = reward_fwd - cost_ctrl - cost_contact + reward_alive

        # Episode ends when the state blows up or the torso height leaves
        # the healthy band [0.2, 1.0].
        state = self.state_vector()
        is_healthy = (np.isfinite(state).all()
                      and 0.2 <= state[2] <= 1.0)
        info = dict(
            reward_forward=reward_fwd,
            reward_ctrl=-cost_ctrl,
            reward_contact=-cost_contact,
            reward_survive=reward_alive)
        return self._get_obs(), total_reward, not is_healthy, info

    def _get_obs(self):
        """Observation: qpos minus root x/y, qvel, clipped external contact forces."""
        return np.concatenate([
            self.sim.data.qpos.flat[2:],
            self.sim.data.qvel.flat,
            np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
        ])

    def reset_model(self):
        """Perturb the initial pose with uniform noise and velocities with
        Gaussian noise, then return the first observation."""
        pos_noise = self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
        vel_noise = self.np_random.randn(self.model.nv) * .1
        self.set_state(self.init_qpos + pos_noise, self.init_qvel + vel_noise)
        return self._get_obs()

    def viewer_setup(self):
        """Place the camera at half the model extent."""
        self.viewer.cam.distance = self.model.stat.extent * 0.5
| 36.934783
| 92
| 0.615068
|
import numpy as np
from gym_wmgds import utils
from gym_wmgds.envs.mujoco import mujoco_env
class AntEnv(mujoco_env.MujocoEnv, utils.EzPickle):
def __init__(self):
mujoco_env.MujocoEnv.__init__(self, 'ant.xml', 5)
utils.EzPickle.__init__(self)
def step(self, a):
xposbefore = self.get_body_com("torso")[0]
self.do_simulation(a, self.frame_skip)
xposafter = self.get_body_com("torso")[0]
forward_reward = (xposafter - xposbefore)/self.dt
ctrl_cost = .5 * np.square(a).sum()
contact_cost = 0.5 * 1e-3 * np.sum(
np.square(np.clip(self.sim.data.cfrc_ext, -1, 1)))
survive_reward = 1.0
reward = forward_reward - ctrl_cost - contact_cost + survive_reward
state = self.state_vector()
notdone = np.isfinite(state).all() \
and state[2] >= 0.2 and state[2] <= 1.0
done = not notdone
ob = self._get_obs()
return ob, reward, done, dict(
reward_forward=forward_reward,
reward_ctrl=-ctrl_cost,
reward_contact=-contact_cost,
reward_survive=survive_reward)
def _get_obs(self):
return np.concatenate([
self.sim.data.qpos.flat[2:],
self.sim.data.qvel.flat,
np.clip(self.sim.data.cfrc_ext, -1, 1).flat,
])
def reset_model(self):
qpos = self.init_qpos + self.np_random.uniform(size=self.model.nq, low=-.1, high=.1)
qvel = self.init_qvel + self.np_random.randn(self.model.nv) * .1
self.set_state(qpos, qvel)
return self._get_obs()
def viewer_setup(self):
self.viewer.cam.distance = self.model.stat.extent * 0.5
| true
| true
|
f70777f94ac9e945ac459a52890901f6a1135b62
| 678
|
py
|
Python
|
gifsong/models.py
|
bigjust/gifsong
|
954df6b35c2b4d85773a223e63739c97fcb749dd
|
[
"MIT",
"Unlicense"
] | 1
|
2019-04-21T17:28:10.000Z
|
2019-04-21T17:28:10.000Z
|
gifsong/models.py
|
bigjust/gifsong
|
954df6b35c2b4d85773a223e63739c97fcb749dd
|
[
"MIT",
"Unlicense"
] | null | null | null |
gifsong/models.py
|
bigjust/gifsong
|
954df6b35c2b4d85773a223e63739c97fcb749dd
|
[
"MIT",
"Unlicense"
] | null | null | null |
from django.db import models
class SFWManager(models.Manager):
    """Manager that limits the default queryset to safe-for-work gifsongs."""

    def get_queryset(self):
        # Zero-argument super() is equivalent to super(SFWManager, self) here.
        base_qs = super().get_queryset()
        return base_qs.filter(sfwness=gifsong.SFW)
class gifsong(models.Model):
    """A pairing of an image URL and an audio URL, tagged with an SFW rating."""

    # SFW-ness ratings; UNKNOWN is the default until a gifsong is reviewed.
    SFW = 1
    NSFW = 2
    UNKNOWN = 3
    STATUS_CHOICES = (
        (SFW, 'SFW'),
        (NSFW, 'NSFW'),
        (UNKNOWN, 'Unknown'),
    )

    image_url = models.CharField(max_length=255)
    audio_url = models.CharField(max_length=255)
    sfwness = models.PositiveIntegerField(choices=STATUS_CHOICES, default=UNKNOWN)

    # Plain manager first so it remains the model's default manager.
    objects = models.Manager()
    sfw = SFWManager()

    @classmethod
    def create(cls, image_url, audio_url):
        """Alternate constructor: build an (unsaved) gifsong from its URLs.

        Fixes the original, which was missing @classmethod and silently
        ignored both arguments, always returning an empty instance.
        """
        gifsong = cls(image_url=image_url, audio_url=audio_url)
        return gifsong
| 25.111111
| 82
| 0.650442
|
from django.db import models
class SFWManager(models.Manager):
def get_queryset(self):
return super(SFWManager, self).get_queryset().filter(sfwness=gifsong.SFW)
class gifsong(models.Model):
SFW = 1
NSFW = 2
UNKNOWN = 3
STATUS_CHOICES = (
(SFW, 'SFW'),
(NSFW, 'NSFW'),
(UNKNOWN, 'Unknown'),
)
image_url = models.CharField(max_length=255)
audio_url = models.CharField(max_length=255)
sfwness = models.PositiveIntegerField(choices=STATUS_CHOICES, default=UNKNOWN)
objects = models.Manager()
sfw = SFWManager()
def create(cls, image_url, audio_url):
gifsong = cls()
return gifsong
| true
| true
|
f70778288e6a0299fe2b3813692e792b2c507dba
| 4,054
|
py
|
Python
|
onmt/encoders/bert.py
|
SivilTaram/dialogue-utterance-rewriter-pytorch
|
92c2254958b7a1ee9199836f7f2236575270983f
|
[
"MIT"
] | null | null | null |
onmt/encoders/bert.py
|
SivilTaram/dialogue-utterance-rewriter-pytorch
|
92c2254958b7a1ee9199836f7f2236575270983f
|
[
"MIT"
] | null | null | null |
onmt/encoders/bert.py
|
SivilTaram/dialogue-utterance-rewriter-pytorch
|
92c2254958b7a1ee9199836f7f2236575270983f
|
[
"MIT"
] | null | null | null |
"""
Implementation from: https://raw.githubusercontent.com/Zenglinxiao/OpenNMT-py/bert/onmt/encoders/bert.py
@Author: Zenglinxiao
"""
import torch.nn as nn
from onmt.encoders.transformer import TransformerEncoderLayer
from onmt.utils.misc import sequence_mask
class BertEncoder(nn.Module):
    """BERT Encoder: A Transformer Encoder with LayerNorm and BertPooler.

    :cite:`DBLP:journals/corr/abs-1810-04805`

    Args:
        embeddings (onmt.modules.BertEmbeddings): embeddings to use
        num_layers (int): number of encoder layers.
        d_model (int): size of the model
        heads (int): number of heads
        d_ff (int): size of the inner FF layer
        dropout (float): dropout parameters
    """
    def __init__(self, embeddings, num_layers=12, d_model=768, heads=12,
                 d_ff=3072, dropout=0.1, attention_dropout=0.1,
                 max_relative_positions=0):
        super(BertEncoder, self).__init__()
        self.num_layers = num_layers
        self.d_model = d_model
        self.heads = heads
        self.dropout = dropout
        # Feed-Forward size should be 4*d_model as in paper
        self.d_ff = d_ff
        self.embeddings = embeddings
        # Transformer Encoder Block: num_layers identical layers with GELU
        # activation, as in the BERT paper.
        self.encoder = nn.ModuleList(
            [TransformerEncoderLayer(d_model, heads, d_ff,
                                     dropout, attention_dropout,
                                     max_relative_positions=max_relative_positions,
                                     activation='gelu') for _ in range(num_layers)])
        # eps=1e-12 matches the reference BERT LayerNorm.
        self.layer_norm = nn.LayerNorm(d_model, eps=1e-12)
        self.pooler = BertPooler(d_model)

    @classmethod
    def from_opt(cls, opt, embeddings):
        """Alternate constructor: build the encoder from an options namespace.

        ``dropout``/``attention_dropout`` may arrive as single-element lists
        (per-stage settings); only the first value is used.
        """
        return cls(
            embeddings,
            opt.enc_layers,
            opt.word_vec_size,
            opt.heads,
            opt.transformer_ff,
            opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
            opt.attention_dropout[0] if type(opt.attention_dropout)
            is list else opt.attention_dropout,
            opt.max_relative_positions
        )

    def forward(self, input_ids, lengths, token_type_ids=None):
        """Run the embedding layer and all encoder layers.

        Args:
            input_ids (Tensor): ``(seq_len, batch_size, feature_dim)``, padding ids=0
            lengths (Tensor): ``(batch_size)``, record length of sequence
            token_type_ids (seq_len, batch_size): ``(B, S)``, A(0), B(1), pad(0)

        Returns:
            emb (Tensor): raw embedding output (sequence-first layout)
            out (Tensor): last encoder layer output after LayerNorm,
                transposed back to sequence-first
            lengths (Tensor): the input ``lengths``, passed through unchanged

        NOTE(review): the original docstring claimed this returns
        (all_encoder_layers, pooled_output); the code actually returns
        (emb, out, lengths) and never calls ``self.pooler``.
        """
        emb = self.embeddings(input_ids, token_type_ids)
        # Encoder layers consume batch-first tensors; embeddings are
        # presumably seq-first — transpose in, transpose back out.
        out = emb.transpose(0, 1).contiguous()
        # [batch, seq] -> [batch, 1, seq]; inverted sequence_mask, so True
        # presumably marks padding positions — confirm sequence_mask semantics.
        mask = ~sequence_mask(lengths).unsqueeze(1)
        for layer in self.encoder:
            out = layer(out, mask)
        out = self.layer_norm(out)
        return emb, out.transpose(0, 1).contiguous(), lengths

    def update_dropout(self, dropout):
        """Propagate a new dropout rate to the embeddings and every layer."""
        self.dropout = dropout
        self.embeddings.update_dropout(dropout)
        for layer in self.encoder:
            layer.update_dropout(dropout)
class BertPooler(nn.Module):
def __init__(self, hidden_size):
"""A pooling block (Linear layer followed by Tanh activation).
Args:
hidden_size (int): size of hidden layer.
"""
super(BertPooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation_fn = nn.Tanh()
def forward(self, hidden_states):
"""hidden_states[:, 0, :] --> {Linear, Tanh} --> Returns.
Args:
hidden_states (Tensor): last layer's hidden_states, ``(B, S, H)``
Returns:
pooled_output (Tensor): transformed output of last layer's hidden
"""
first_token_tensor = hidden_states[:, 0, :] # [batch, d_model]
pooled_output = self.activation_fn(self.dense(first_token_tensor))
return pooled_output
| 34.948276
| 104
| 0.619388
|
import torch.nn as nn
from onmt.encoders.transformer import TransformerEncoderLayer
from onmt.utils.misc import sequence_mask
class BertEncoder(nn.Module):
def __init__(self, embeddings, num_layers=12, d_model=768, heads=12,
d_ff=3072, dropout=0.1, attention_dropout=0.1,
max_relative_positions=0):
super(BertEncoder, self).__init__()
self.num_layers = num_layers
self.d_model = d_model
self.heads = heads
self.dropout = dropout
self.d_ff = d_ff
self.embeddings = embeddings
self.encoder = nn.ModuleList(
[TransformerEncoderLayer(d_model, heads, d_ff,
dropout, attention_dropout,
max_relative_positions=max_relative_positions,
activation='gelu') for _ in range(num_layers)])
self.layer_norm = nn.LayerNorm(d_model, eps=1e-12)
self.pooler = BertPooler(d_model)
@classmethod
def from_opt(cls, opt, embeddings):
return cls(
embeddings,
opt.enc_layers,
opt.word_vec_size,
opt.heads,
opt.transformer_ff,
opt.dropout[0] if type(opt.dropout) is list else opt.dropout,
opt.attention_dropout[0] if type(opt.attention_dropout)
is list else opt.attention_dropout,
opt.max_relative_positions
)
def forward(self, input_ids, lengths, token_type_ids=None):
emb = self.embeddings(input_ids, token_type_ids)
out = emb.transpose(0, 1).contiguous()
mask = ~sequence_mask(lengths).unsqueeze(1)
for layer in self.encoder:
out = layer(out, mask)
out = self.layer_norm(out)
return emb, out.transpose(0, 1).contiguous(), lengths
def update_dropout(self, dropout):
self.dropout = dropout
self.embeddings.update_dropout(dropout)
for layer in self.encoder:
layer.update_dropout(dropout)
class BertPooler(nn.Module):
def __init__(self, hidden_size):
super(BertPooler, self).__init__()
self.dense = nn.Linear(hidden_size, hidden_size)
self.activation_fn = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0, :]
pooled_output = self.activation_fn(self.dense(first_token_tensor))
return pooled_output
| true
| true
|
f707785c5c17239ff88272142d3ef7859dc477c1
| 1,559
|
py
|
Python
|
photo_app/models.py
|
newtonkiragu/photo_share
|
43344ddf32c89547ff4b2fb1f33aaaf4d502566d
|
[
"MIT"
] | 2
|
2019-01-23T11:14:42.000Z
|
2019-01-23T11:14:45.000Z
|
photo_app/models.py
|
silver230/photo_share
|
43344ddf32c89547ff4b2fb1f33aaaf4d502566d
|
[
"MIT"
] | null | null | null |
photo_app/models.py
|
silver230/photo_share
|
43344ddf32c89547ff4b2fb1f33aaaf4d502566d
|
[
"MIT"
] | 1
|
2018-12-04T14:36:27.000Z
|
2018-12-04T14:36:27.000Z
|
import uuid
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class Profile(models.Model):
    """Per-user profile (photo + bio), linked one-to-one to Django's User."""

    # UUID primary key instead of the default auto-increment integer.
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Optional avatar, stored under MEDIA_ROOT/profile_pictures/.
    profile_photo = models.ImageField(upload_to='profile_pictures/', blank=True, null=True)
    bio = models.TextField(max_length=200, blank=True)

    def __str__(self):
        # Display the UUID; the model has no separate human-readable name.
        return str(self.id)
class Image(models.Model):
    """An uploaded image with optional name and caption, owned by a User."""

    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    image = models.ImageField(upload_to='images/')
    image_name = models.CharField(max_length=50, blank=True)
    image_caption = models.TextField(blank=True)
    poster = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        return str(self.id)

    def save_image(self):
        """Persist this image record."""
        self.save()

    def delete_image(self):
        """Delete this image record."""
        self.delete()

    def update_caption(self, caption):
        """Replace the caption and persist it.

        Fixes the original, which called ``self.update(caption=...)`` —
        model instances have no ``update()`` (that is a QuerySet method)
        and the field is named ``image_caption``, so it always raised
        AttributeError.
        """
        self.image_caption = caption
        self.save(update_fields=['image_caption'])

    @classmethod
    def get_image_by_id(cls, n):
        """Return the Image with primary key *n*.

        The original dropped the ``return`` and always yielded None.
        Raises Image.DoesNotExist when no such row exists.
        """
        return cls.objects.get(id=n)
def create_user_profile(sender, **kwargs):
    """post_save hook: create a Profile whenever a new User row is inserted."""
    if kwargs['created']:
        # Only on initial creation, not on subsequent saves of the same User.
        user_profile = Profile.objects.create(user=kwargs['instance'])
# Register the hook so every newly created User automatically gets a Profile.
post_save.connect(create_user_profile, sender=User)
class Comment(models.Model):
    """A user comment attached to an Image."""

    text = models.TextField()
    image = models.ForeignKey(Image, on_delete=models.CASCADE)
    # NOTE(review): verbose_name="Comment" on the commenter FK looks like a
    # copy/paste slip ("Commenter" intended?) — confirm before changing.
    commenter = models.ForeignKey(User, verbose_name="Comment", on_delete=models.CASCADE)
| 29.415094
| 91
| 0.720334
|
import uuid
from django.db import models
from django.contrib.auth.models import User
from django.db.models.signals import post_save
class Profile(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
user = models.OneToOneField(User, on_delete=models.CASCADE)
profile_photo = models.ImageField(upload_to='profile_pictures/', blank=True, null=True)
bio = models.TextField(max_length=200, blank=True)
def __str__(self):
return str(self.id)
class Image(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
image = models.ImageField(upload_to='images/')
image_name = models.CharField(max_length=50, blank=True)
image_caption = models.TextField(blank=True)
poster = models.ForeignKey(User, on_delete=models.CASCADE)
def __str__(self):
return str(self.id)
def save_image(self):
self.save()
def delete_image(self):
self.delete()
def update_caption(self, caption):
self.update(caption=caption)
@classmethod
def get_image_by_id(cls, n):
Image.objects.get(id=n)
def create_user_profile(sender, **kwargs):
if kwargs['created']:
user_profile = Profile.objects.create(user=kwargs['instance'])
post_save.connect(create_user_profile, sender=User)
class Comment(models.Model):
text = models.TextField()
image = models.ForeignKey(Image, on_delete=models.CASCADE)
commenter = models.ForeignKey(User, verbose_name="Comment", on_delete=models.CASCADE)
| true
| true
|
f70778b35ade5ae9f9780b0e26a1ac6183840c0f
| 4,528
|
py
|
Python
|
test_cli.py
|
shede333/PyMobileProvision
|
281fdf35362a570739a35bbf45ee7c4b5f7e6120
|
[
"MIT"
] | 7
|
2019-05-21T03:15:22.000Z
|
2021-04-16T03:26:33.000Z
|
test_cli.py
|
shede333/PyMobileProvision
|
281fdf35362a570739a35bbf45ee7c4b5f7e6120
|
[
"MIT"
] | 1
|
2019-09-03T09:07:58.000Z
|
2020-03-18T07:03:28.000Z
|
test_cli.py
|
shede333/PyMobileProvision
|
281fdf35362a570739a35bbf45ee7c4b5f7e6120
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# _*_ coding:UTF-8 _*_
"""
__author__ = 'shede333'
"""
import plistlib
import shutil
from pathlib import Path
from mobileprovision import util
from mobileprovision import MobileProvisionModel
RESOURCE_PATH = Path(__file__).resolve().parent.joinpath("resource")
SRC_MP_PATH = RESOURCE_PATH.joinpath("sw-src.mobileprovision")
def test_cli_import():
    """Exercise util.import_mobileprovision against a scratch profile dir."""
    # Redirect the global profile directory to a clean scratch location.
    origin_path = util.MP_ROOT_PATH
    util.MP_ROOT_PATH = RESOURCE_PATH.joinpath("Provisioning Profiles")
    if util.MP_ROOT_PATH.is_dir():
        shutil.rmtree(util.MP_ROOT_PATH)
    util.MP_ROOT_PATH.mkdir()
    # Importing installs the profile under its UUID-based file name.
    mp_model = MobileProvisionModel(SRC_MP_PATH)
    file_name = "{}.mobileprovision".format(mp_model.uuid)
    dst_path = util.MP_ROOT_PATH.joinpath(file_name)
    assert not dst_path.is_file()
    util.import_mobileprovision(SRC_MP_PATH)
    assert dst_path.is_file()
    assert len(list(util.MP_ROOT_PATH.iterdir())) == 1
    # Plant a second copy of the same profile under a different file name.
    cp_mp_path = dst_path.with_name("123.mobileprovision")
    assert not cp_mp_path.is_file()
    shutil.copy(SRC_MP_PATH, cp_mp_path)
    assert cp_mp_path.is_file()
    assert len(list(util.MP_ROOT_PATH.iterdir())) == 2
    # replace_at_attrs=None keeps both copies; 'name' apparently dedupes by
    # profile name (count drops back to 1) — see util.import_mobileprovision.
    util.import_mobileprovision(SRC_MP_PATH, replace_at_attrs=None)
    assert len(list(util.MP_ROOT_PATH.iterdir())) == 2
    util.import_mobileprovision(SRC_MP_PATH, replace_at_attrs='name')
    assert len(list(util.MP_ROOT_PATH.iterdir())) == 1
    # Delete the test directory.
    shutil.rmtree(util.MP_ROOT_PATH)
    # Restore the original path.
    util.MP_ROOT_PATH = origin_path
def test_cli_convert():
    """Convert a .mobileprovision to a plist file and spot-check one key."""
    dst_path = SRC_MP_PATH.with_name("dst.plist")
    if dst_path.is_file():
        dst_path.unlink()
    MobileProvisionModel(SRC_MP_PATH).convert_to_plist_file(dst_path)
    assert dst_path.is_file()
    # The converted file must be valid plist with the expected AppIDName.
    p_obj = plistlib.loads(dst_path.read_bytes())
    assert p_obj["AppIDName"] == "XC xyz shede333 testFirst"
    dst_path.unlink()
def test_mp_property():
    """Check every exposed property of the fixture provisioning profile."""
    from datetime import datetime
    from datetime import timezone
    from datetime import timedelta
    mp_model = MobileProvisionModel(SRC_MP_PATH)
    # Dict-style access and attribute access must agree.
    assert mp_model["name"] == "iOS Team Provisioning Profile: xyz.shede333.testFirst"
    assert mp_model.app_id_name == "XC xyz shede333 testFirst"
    assert mp_model.name == "iOS Team Provisioning Profile: xyz.shede333.testFirst"
    assert len(mp_model.provisioned_devices) == 1
    assert mp_model.team_name == "ShaoWei Wang"
    assert mp_model.team_identifier == "RR23U62KET"
    assert mp_model.uuid == "5e3f9cc7-59d2-4cef-902b-97ba409e5874"
    assert isinstance(mp_model.entitlements, dict)
    assert mp_model.app_id_prefix == "RR23U62KET"
    assert mp_model.app_id() == "xyz.shede333.testFirst"
    assert mp_model.app_id(is_need_prefix=True) == "RR23U62KET.xyz.shede333.testFirst"
    assert mp_model.contain_device_id("00008020-0009306C1429002E")
    # Timestamps are interpreted as naive local datetimes of the fixture.
    assert mp_model.creation_timestamp == datetime(2019, 11, 19, 9, 27, 50).timestamp()
    assert mp_model.expiration_timestamp == datetime(2019, 11, 26, 9, 27, 50).timestamp()
    assert mp_model.date_is_valid() == (datetime.utcnow().timestamp() < mp_model.expiration_timestamp)
    # utc_dt = datetime.fromtimestamp(mp_model.creation_timestamp, tz=timezone.utc)
    utc_dt = datetime(2019, 11, 19, 9, 27, 50).replace(tzinfo=timezone.utc)
    assert utc_dt.strftime("%Y-%m-%d %H:%M:%S") == "2019-11-19 09:27:50"
    tz_8h = timezone(timedelta(hours=8))  # UTC+8 (China Standard Time)
    local_dt = utc_dt.astimezone(tz_8h)
    assert local_dt.strftime("%Y-%m-%d %H:%M:%S") == "2019-11-19 17:27:50"
    import tempfile
    with tempfile.TemporaryDirectory() as dir_path:
        # Exported entitlements must round-trip through plistlib.
        ent_dst_path = Path(dir_path).joinpath("entitlements.plist")
        if ent_dst_path.is_file():
            ent_dst_path.unlink()
        mp_model.export_entitlements_file(ent_dst_path)
        assert ent_dst_path.is_file()
        p_obj = plistlib.loads(ent_dst_path.read_bytes())
        assert p_obj["application-identifier"] == "RR23U62KET.xyz.shede333.testFirst"
    # Embedded signing certificates and their digests/validity window.
    assert len(mp_model.developer_certificates) == 2
    cer_model = mp_model.developer_certificates[0]
    assert cer_model.common_name == "iPhone Developer: 333wshw@163.com (6EWWJK58A9)"
    assert cer_model.sha256 == "122F041D0C659348CC9CB1C1CBC6A60BBB3C8184D9261C73F117DBE785F9AA20"
    assert cer_model.sha1 == "38C56BC325AF693E16E8B4C17CAAB50982868C32"
    assert cer_model.not_valid_before == datetime(2019, 5, 21, 4, 28, 15).replace(
        tzinfo=timezone.utc).timestamp()
    assert cer_model.not_valid_after == datetime(2020, 5, 20, 4, 28, 15).replace(
        tzinfo=timezone.utc).timestamp()
| 39.719298
| 102
| 0.730345
|
import plistlib
import shutil
from pathlib import Path
from mobileprovision import util
from mobileprovision import MobileProvisionModel
RESOURCE_PATH = Path(__file__).resolve().parent.joinpath("resource")
SRC_MP_PATH = RESOURCE_PATH.joinpath("sw-src.mobileprovision")
def test_cli_import():
origin_path = util.MP_ROOT_PATH
util.MP_ROOT_PATH = RESOURCE_PATH.joinpath("Provisioning Profiles")
if util.MP_ROOT_PATH.is_dir():
shutil.rmtree(util.MP_ROOT_PATH)
util.MP_ROOT_PATH.mkdir()
mp_model = MobileProvisionModel(SRC_MP_PATH)
file_name = "{}.mobileprovision".format(mp_model.uuid)
dst_path = util.MP_ROOT_PATH.joinpath(file_name)
assert not dst_path.is_file()
util.import_mobileprovision(SRC_MP_PATH)
assert dst_path.is_file()
assert len(list(util.MP_ROOT_PATH.iterdir())) == 1
cp_mp_path = dst_path.with_name("123.mobileprovision")
assert not cp_mp_path.is_file()
shutil.copy(SRC_MP_PATH, cp_mp_path)
assert cp_mp_path.is_file()
assert len(list(util.MP_ROOT_PATH.iterdir())) == 2
util.import_mobileprovision(SRC_MP_PATH, replace_at_attrs=None)
assert len(list(util.MP_ROOT_PATH.iterdir())) == 2
util.import_mobileprovision(SRC_MP_PATH, replace_at_attrs='name')
assert len(list(util.MP_ROOT_PATH.iterdir())) == 1
shutil.rmtree(util.MP_ROOT_PATH)
util.MP_ROOT_PATH = origin_path
def test_cli_convert():
dst_path = SRC_MP_PATH.with_name("dst.plist")
if dst_path.is_file():
dst_path.unlink()
MobileProvisionModel(SRC_MP_PATH).convert_to_plist_file(dst_path)
assert dst_path.is_file()
p_obj = plistlib.loads(dst_path.read_bytes())
assert p_obj["AppIDName"] == "XC xyz shede333 testFirst"
dst_path.unlink()
def test_mp_property():
from datetime import datetime
from datetime import timezone
from datetime import timedelta
mp_model = MobileProvisionModel(SRC_MP_PATH)
assert mp_model["name"] == "iOS Team Provisioning Profile: xyz.shede333.testFirst"
assert mp_model.app_id_name == "XC xyz shede333 testFirst"
assert mp_model.name == "iOS Team Provisioning Profile: xyz.shede333.testFirst"
assert len(mp_model.provisioned_devices) == 1
assert mp_model.team_name == "ShaoWei Wang"
assert mp_model.team_identifier == "RR23U62KET"
assert mp_model.uuid == "5e3f9cc7-59d2-4cef-902b-97ba409e5874"
assert isinstance(mp_model.entitlements, dict)
assert mp_model.app_id_prefix == "RR23U62KET"
assert mp_model.app_id() == "xyz.shede333.testFirst"
assert mp_model.app_id(is_need_prefix=True) == "RR23U62KET.xyz.shede333.testFirst"
assert mp_model.contain_device_id("00008020-0009306C1429002E")
assert mp_model.creation_timestamp == datetime(2019, 11, 19, 9, 27, 50).timestamp()
assert mp_model.expiration_timestamp == datetime(2019, 11, 26, 9, 27, 50).timestamp()
assert mp_model.date_is_valid() == (datetime.utcnow().timestamp() < mp_model.expiration_timestamp)
utc_dt = datetime(2019, 11, 19, 9, 27, 50).replace(tzinfo=timezone.utc)
assert utc_dt.strftime("%Y-%m-%d %H:%M:%S") == "2019-11-19 09:27:50"
tz_8h = timezone(timedelta(hours=8))
local_dt = utc_dt.astimezone(tz_8h)
assert local_dt.strftime("%Y-%m-%d %H:%M:%S") == "2019-11-19 17:27:50"
import tempfile
with tempfile.TemporaryDirectory() as dir_path:
ent_dst_path = Path(dir_path).joinpath("entitlements.plist")
if ent_dst_path.is_file():
ent_dst_path.unlink()
mp_model.export_entitlements_file(ent_dst_path)
assert ent_dst_path.is_file()
p_obj = plistlib.loads(ent_dst_path.read_bytes())
assert p_obj["application-identifier"] == "RR23U62KET.xyz.shede333.testFirst"
assert len(mp_model.developer_certificates) == 2
cer_model = mp_model.developer_certificates[0]
assert cer_model.common_name == "iPhone Developer: 333wshw@163.com (6EWWJK58A9)"
assert cer_model.sha256 == "122F041D0C659348CC9CB1C1CBC6A60BBB3C8184D9261C73F117DBE785F9AA20"
assert cer_model.sha1 == "38C56BC325AF693E16E8B4C17CAAB50982868C32"
assert cer_model.not_valid_before == datetime(2019, 5, 21, 4, 28, 15).replace(
tzinfo=timezone.utc).timestamp()
assert cer_model.not_valid_after == datetime(2020, 5, 20, 4, 28, 15).replace(
tzinfo=timezone.utc).timestamp()
| true
| true
|
f707797a77dc879868486481a5c6510730eb87b1
| 9,376
|
py
|
Python
|
install/cupy_builder/_compiler.py
|
hey-sagar/cupy
|
83a6d9efeee27f80ef000561f58454abd9d21533
|
[
"MIT"
] | null | null | null |
install/cupy_builder/_compiler.py
|
hey-sagar/cupy
|
83a6d9efeee27f80ef000561f58454abd9d21533
|
[
"MIT"
] | null | null | null |
install/cupy_builder/_compiler.py
|
hey-sagar/cupy
|
83a6d9efeee27f80ef000561f58454abd9d21533
|
[
"MIT"
] | 1
|
2022-03-21T20:19:12.000Z
|
2022-03-21T20:19:12.000Z
|
import distutils.ccompiler
import os
import os.path
import platform
import shutil
import sys
import subprocess
from typing import Optional, List
import setuptools
import setuptools.msvc
from setuptools import Extension
from cupy_builder._context import Context
import cupy_builder.install_build as build
def _nvcc_gencode_options(cuda_version: int) -> List[str]:
"""Returns NVCC GPU code generation options."""
if sys.argv == ['setup.py', 'develop']:
return []
envcfg = os.getenv('CUPY_NVCC_GENERATE_CODE', None)
if envcfg is not None and envcfg != 'current':
return ['--generate-code={}'.format(arch)
for arch in envcfg.split(';') if len(arch) > 0]
if envcfg == 'current' and build.get_compute_capabilities() is not None:
ccs = build.get_compute_capabilities()
arch_list = [
f'compute_{cc}' if cc < 60 else (f'compute_{cc}', f'sm_{cc}')
for cc in ccs]
else:
# The arch_list specifies virtual architectures, such as 'compute_61',
# and real architectures, such as 'sm_61', for which the CUDA
# input files are to be compiled.
#
# The syntax of an entry of the list is
#
# entry ::= virtual_arch | (virtual_arch, real_arch)
#
# where virtual_arch is a string which means a virtual architecture and
# real_arch is a string which means a real architecture.
#
# If a virtual architecture is supplied, NVCC generates a PTX code
# the virtual architecture. If a pair of a virtual architecture and a
# real architecture is supplied, NVCC generates a PTX code for the
# virtual architecture as well as a cubin code for the real one.
#
# For example, making NVCC generate a PTX code for 'compute_60' virtual
# architecture, the arch_list has an entry of 'compute_60'.
#
# arch_list = ['compute_60']
#
# For another, making NVCC generate a PTX code for 'compute_61' virtual
# architecture and a cubin code for 'sm_61' real architecture, the
# arch_list has an entry of ('compute_61', 'sm_61').
#
# arch_list = [('compute_61', 'sm_61')]
#
# See the documentation of each CUDA version for the list of supported
# architectures:
#
# https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#options-for-steering-gpu-code-generation
if cuda_version >= 11040:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
('compute_86', 'sm_86'),
('compute_87', 'sm_87'),
'compute_87']
elif cuda_version >= 11010:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
('compute_86', 'sm_86'),
'compute_86']
elif cuda_version >= 11000:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
'compute_80']
elif cuda_version >= 10000:
arch_list = ['compute_30',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
'compute_70']
else:
# This should not happen.
assert False
options = []
for arch in arch_list:
if type(arch) is tuple:
virtual_arch, real_arch = arch
options.append('--generate-code=arch={},code={}'.format(
virtual_arch, real_arch))
else:
options.append('--generate-code=arch={},code={}'.format(
arch, arch))
return options
class DeviceCompilerBase:
    """A class that invokes NVCC or HIPCC."""

    def __init__(self, ctx: Context):
        # Build context; subclasses read ctx.use_hip to pick the toolchain.
        self._context = ctx

    def _get_preprocess_options(self, ext: Extension) -> List[str]:
        """Translate the Extension's macros and include dirs into -D/-U/-I flags."""
        # https://setuptools.pypa.io/en/latest/deprecated/distutils/apiref.html#distutils.core.Extension
        # https://github.com/pypa/setuptools/blob/v60.0.0/setuptools/_distutils/command/build_ext.py#L524-L526
        incdirs = ext.include_dirs[:]  # type: ignore
        macros = ext.define_macros[:]  # type: ignore
        for undef in ext.undef_macros:  # type: ignore
            # A 1-tuple marks a macro to undefine (-U) for gen_preprocess_options.
            macros.append((undef,))
        return distutils.ccompiler.gen_preprocess_options(macros, incdirs)

    def spawn(self, commands: List[str]) -> None:
        """Echo and run *commands*; raises CalledProcessError on failure."""
        print('Command:', commands)
        subprocess.check_call(commands)
class DeviceCompilerUnix(DeviceCompilerBase):
    """Device-code compiler driver for Unix-like platforms (NVCC or HIPCC)."""

    def compile(self, obj: str, src: str, ext: Extension) -> None:
        """Compile *src* into object file *obj*, dispatching on CUDA vs ROCm."""
        if self._context.use_hip:
            self._compile_unix_hipcc(obj, src, ext)
        else:
            self._compile_unix_nvcc(obj, src, ext)

    def _compile_unix_nvcc(self, obj: str, src: str, ext: Extension) -> None:
        """Compile a CUDA source file with NVCC."""
        cc_args = self._get_preprocess_options(ext) + ['-c']
        # For CUDA C source files, compile them with NVCC.
        nvcc_path = build.get_nvcc_path()
        base_opts = build.get_compiler_base_options(nvcc_path)
        compiler_so = nvcc_path
        cuda_version = build.get_cuda_version()
        postargs = _nvcc_gencode_options(cuda_version) + [
            '-O2', '--compiler-options="-fPIC"']
        if cuda_version >= 11020:
            postargs += ['--std=c++14']
            # Multi-threaded compilation (-t) exists only on CUDA >= 11.2.
            num_threads = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
            postargs += [f'-t{num_threads}']
        else:
            postargs += ['--std=c++11']
        postargs += ['-Xcompiler=-fno-gnu-unique']
        print('NVCC options:', postargs)
        self.spawn(compiler_so + base_opts + cc_args + [src, '-o', obj] +
                   postargs)

    def _compile_unix_hipcc(self, obj: str, src: str, ext: Extension) -> None:
        """Compile a source file with HIPCC (ROCm)."""
        cc_args = self._get_preprocess_options(ext) + ['-c']
        # For CUDA C source files, compile them with HIPCC.
        rocm_path = build.get_hipcc_path()
        base_opts = build.get_compiler_base_options(rocm_path)
        compiler_so = rocm_path
        hip_version = build.get_hip_version()
        postargs = ['-O2', '-fPIC', '--include', 'hip_runtime.h']
        # C++14 requires ROCm >= 4.2 (hip_version 402).
        if hip_version >= 402:
            postargs += ['--std=c++14']
        else:
            postargs += ['--std=c++11']
        print('HIPCC options:', postargs)
        self.spawn(compiler_so + base_opts + cc_args + [src, '-o', obj] +
                   postargs)
class DeviceCompilerWin32(DeviceCompilerBase):
    """Device-code compiler driver for Windows (NVCC only)."""

    def compile(self, obj: str, src: str, ext: Extension) -> None:
        """Compile *src* into object file *obj* with NVCC + MSVC host compiler."""
        if self._context.use_hip:
            raise RuntimeError('ROCm is not supported on Windows')
        compiler_so = build.get_nvcc_path()
        cc_args = self._get_preprocess_options(ext) + ['-c']
        cuda_version = build.get_cuda_version()
        postargs = _nvcc_gencode_options(cuda_version) + ['-O2']
        if cuda_version >= 11020:
            # MSVC 14.0 (2015) is deprecated for CUDA 11.2 but we need it
            # to build CuPy because some Python versions were built using it.
            # REF: https://wiki.python.org/moin/WindowsCompilers
            postargs += ['-allow-unsupported-compiler']
        postargs += ['-Xcompiler', '/MD', '-D_USE_MATH_DEFINES']
        # This is to compile thrust with MSVC2015
        if cuda_version >= 11020:
            postargs += ['--std=c++14']
            # Multi-threaded compilation (-t) exists only on CUDA >= 11.2.
            num_threads = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
            postargs += [f'-t{num_threads}']
        cl_exe_path = self._find_host_compiler_path()
        if cl_exe_path is not None:
            print(f'Using host compiler at {cl_exe_path}')
            postargs += ['--compiler-bindir', cl_exe_path]
        print('NVCC options:', postargs)
        self.spawn(compiler_so + cc_args + [src, '-o', obj] + postargs)

    def _find_host_compiler_path(self) -> Optional[str]:
        """Locate the MSVC cl.exe directory, or None if it is already on PATH.

        Returns the directory to pass to NVCC's --compiler-bindir, or None
        when no extra path is needed (or cl.exe cannot be found at all).
        """
        # c.f. cupy.cuda.compiler._get_extra_path_for_msvc
        cl_exe = shutil.which('cl.exe')
        if cl_exe:
            # The compiler is already on PATH, no extra path needed.
            return None
        vctools: List[str] = setuptools.msvc.EnvironmentInfo(
            platform.machine()).VCTools
        for path in vctools:
            cl_exe = os.path.join(path, 'cl.exe')
            if os.path.exists(cl_exe):
                return path
        print(f'Warning: cl.exe could not be found in {vctools}')
        return None
| 40.240343
| 118
| 0.564526
|
import distutils.ccompiler
import os
import os.path
import platform
import shutil
import sys
import subprocess
from typing import Optional, List
import setuptools
import setuptools.msvc
from setuptools import Extension
from cupy_builder._context import Context
import cupy_builder.install_build as build
def _nvcc_gencode_options(cuda_version: int) -> List[str]:
if sys.argv == ['setup.py', 'develop']:
return []
envcfg = os.getenv('CUPY_NVCC_GENERATE_CODE', None)
if envcfg is not None and envcfg != 'current':
return ['--generate-code={}'.format(arch)
for arch in envcfg.split(';') if len(arch) > 0]
if envcfg == 'current' and build.get_compute_capabilities() is not None:
ccs = build.get_compute_capabilities()
arch_list = [
f'compute_{cc}' if cc < 60 else (f'compute_{cc}', f'sm_{cc}')
for cc in ccs]
else:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
('compute_86', 'sm_86'),
('compute_87', 'sm_87'),
'compute_87']
elif cuda_version >= 11010:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
('compute_86', 'sm_86'),
'compute_86']
elif cuda_version >= 11000:
arch_list = ['compute_35',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
('compute_80', 'sm_80'),
'compute_80']
elif cuda_version >= 10000:
arch_list = ['compute_30',
'compute_50',
('compute_60', 'sm_60'),
('compute_61', 'sm_61'),
('compute_70', 'sm_70'),
('compute_75', 'sm_75'),
'compute_70']
else:
assert False
options = []
for arch in arch_list:
if type(arch) is tuple:
virtual_arch, real_arch = arch
options.append('--generate-code=arch={},code={}'.format(
virtual_arch, real_arch))
else:
options.append('--generate-code=arch={},code={}'.format(
arch, arch))
return options
class DeviceCompilerBase:
    """Base class for device (GPU) code compiler drivers.

    Subclasses implement platform-specific compilation; this base provides
    preprocessor-flag generation and subprocess spawning.
    """

    def __init__(self, ctx: 'Context'):
        # String annotation defers evaluation so the class can be defined
        # without the Context import being resolvable.
        self._context = ctx

    def _get_preprocess_options(self, ext: 'Extension') -> List[str]:
        """Return -I/-D/-U preprocessor flags for extension *ext*.

        The first statement was garbled in this snapshot ("lude_dirs[:]");
        restored to copy ``ext.include_dirs`` so the extension object is
        never mutated by appending undef macros below.
        """
        try:
            from distutils import ccompiler
        except ImportError:
            # Python >= 3.12 removed distutils; setuptools vendors a copy.
            from setuptools._distutils import ccompiler
        incdirs = ext.include_dirs[:]
        macros = ext.define_macros[:]
        for undef in ext.undef_macros:
            # A 1-tuple marks a macro for undefinition (-U<name>).
            macros.append((undef,))
        return ccompiler.gen_preprocess_options(macros, incdirs)

    def spawn(self, commands: List[str]) -> None:
        """Echo *commands* and run them, raising CalledProcessError on failure."""
        print('Command:', commands)
        subprocess.check_call(commands)
class DeviceCompilerUnix(DeviceCompilerBase):
    """Device-code compiler driver for Unix-like systems (nvcc or hipcc)."""

    def compile(self, obj: str, src: str, ext: Extension) -> None:
        """Compile *src* into object file *obj*, dispatching on the backend."""
        if not self._context.use_hip:
            self._compile_unix_nvcc(obj, src, ext)
        else:
            self._compile_unix_hipcc(obj, src, ext)

    def _compile_unix_nvcc(self, obj: str, src: str, ext: Extension) -> None:
        """Invoke nvcc on a single CUDA translation unit."""
        preprocess_args = self._get_preprocess_options(ext) + ['-c']
        nvcc = build.get_nvcc_path()
        base = build.get_compiler_base_options(nvcc)
        cuda_ver = build.get_cuda_version()

        extra = _nvcc_gencode_options(cuda_ver) + [
            '-O2', '--compiler-options="-fPIC"']
        if cuda_ver >= 11020:
            # From CUDA 11.2 on: C++14 plus parallel compilation (-t).
            workers = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
            extra += ['--std=c++14', f'-t{workers}']
        else:
            extra += ['--std=c++11']
        extra += ['-Xcompiler=-fno-gnu-unique']

        print('NVCC options:', extra)
        self.spawn(nvcc + base + preprocess_args + [src, '-o', obj] + extra)

    def _compile_unix_hipcc(self, obj: str, src: str, ext: Extension) -> None:
        """Invoke hipcc on a single HIP translation unit."""
        preprocess_args = self._get_preprocess_options(ext) + ['-c']
        hipcc = build.get_hipcc_path()
        base = build.get_compiler_base_options(hipcc)
        hip_ver = build.get_hip_version()

        extra = ['-O2', '-fPIC', '--include', 'hip_runtime.h']
        # C++14 is enabled from HIP version code 402 on.
        extra += ['--std=c++14'] if hip_ver >= 402 else ['--std=c++11']

        print('HIPCC options:', extra)
        self.spawn(hipcc + base + preprocess_args + [src, '-o', obj] + extra)
class DeviceCompilerWin32(DeviceCompilerBase):
    """Device-code compiler driver for Windows (CUDA only)."""

    def compile(self, obj: str, src: str, ext: Extension) -> None:
        """Invoke nvcc on a single CUDA translation unit."""
        if self._context.use_hip:
            raise RuntimeError('ROCm is not supported on Windows')

        nvcc = build.get_nvcc_path()
        preprocess_args = self._get_preprocess_options(ext) + ['-c']
        cuda_ver = build.get_cuda_version()

        extra = _nvcc_gencode_options(cuda_ver) + ['-O2']
        if cuda_ver >= 11020:
            # Tolerate host compilers newer than this CUDA release knows.
            extra += ['-allow-unsupported-compiler']
        extra += ['-Xcompiler', '/MD', '-D_USE_MATH_DEFINES']
        if cuda_ver >= 11020:
            workers = int(os.environ.get('CUPY_NUM_NVCC_THREADS', '2'))
            extra += ['--std=c++14', f'-t{workers}']

        host_dir = self._find_host_compiler_path()
        if host_dir is not None:
            print(f'Using host compiler at {host_dir}')
            extra += ['--compiler-bindir', host_dir]

        print('NVCC options:', extra)
        self.spawn(nvcc + preprocess_args + [src, '-o', obj] + extra)

    def _find_host_compiler_path(self) -> Optional[str]:
        """Return the MSVC directory nvcc should use, or None if not needed."""
        if shutil.which('cl.exe'):
            # cl.exe is already on PATH, so nvcc can locate it by itself.
            return None
        # Otherwise probe the Visual C++ tool directories for cl.exe.
        tool_dirs: List[str] = setuptools.msvc.EnvironmentInfo(
            platform.machine()).VCTools
        for tool_dir in tool_dirs:
            if os.path.exists(os.path.join(tool_dir, 'cl.exe')):
                return tool_dir
        print(f'Warning: cl.exe could not be found in {tool_dirs}')
        return None
| true
| true
|
f70779a887ba49ecdcd744e1cbc233a53a5fed12
| 154
|
py
|
Python
|
test.py
|
KuangenZhang/XsensIMUReader
|
24eb04d363d574cfbbfe6f9b27f22ea26116dc7b
|
[
"MIT"
] | 1
|
2021-09-14T15:55:42.000Z
|
2021-09-14T15:55:42.000Z
|
test.py
|
KuangenZhang/XsensIMUReader
|
24eb04d363d574cfbbfe6f9b27f22ea26116dc7b
|
[
"MIT"
] | null | null | null |
test.py
|
KuangenZhang/XsensIMUReader
|
24eb04d363d574cfbbfe6f9b27f22ea26116dc7b
|
[
"MIT"
] | null | null | null |
import keyboard
import time

# Poll the keyboard at roughly 1 kHz until the Escape key is observed.
while not keyboard.is_pressed('esc'):
    time.sleep(1e-3)
print('Pressed ESC')
| 15.4
| 34
| 0.597403
|
import keyboard
import time
while True:
if keyboard.is_pressed('esc'):
print('Pressed ESC')
break
else:
time.sleep(1e-3)
| true
| true
|
f7077a064f15130f8f179abed20b25de3a326f42
| 21,354
|
py
|
Python
|
tests/cosmology/test_cosmology_apsuite.py
|
emaballarin/phytorch
|
68cf0a630e2fee9dd98f08639edcceb2389adf35
|
[
"MIT"
] | 1
|
2022-01-21T06:59:20.000Z
|
2022-01-21T06:59:20.000Z
|
tests/cosmology/test_cosmology_apsuite.py
|
emaballarin/phytorch
|
68cf0a630e2fee9dd98f08639edcceb2389adf35
|
[
"MIT"
] | null | null | null |
tests/cosmology/test_cosmology_apsuite.py
|
emaballarin/phytorch
|
68cf0a630e2fee9dd98f08639edcceb2389adf35
|
[
"MIT"
] | 1
|
2021-04-27T00:45:47.000Z
|
2021-04-27T00:45:47.000Z
|
# Based on the astropy test suite (v4.2.1)
# (https://github.com/astropy/astropy/blob/v4.2.1/astropy/cosmology/tests/test_cosmology.py)
from io import StringIO
from typing import Type
import numpy as np
import pytest
import torch
from pytest import mark
from torch import tensor
import phytorch.cosmology.drivers.analytic
import phytorch.cosmology.drivers.analytic_diff
import phytorch.cosmology.special
from phytorch.constants import codata2014, G as Newton_G
from phytorch.cosmology.special import AbstractFlatLambdaCDMR, AbstractLambdaCDMR
from phytorch.units.astro import Gpc, Gyr, Mpc
from phytorch.units.si import cm, gram, kelvin, km, s
from phytorch.units.unit import Unit
from tests.common.closeness import close
from tests.common.dtypes import with_default_double
ZERO = torch.zeros(())
ONE = torch.ones(())
SMALL = 1e-16
Z = tensor([0, 0.5, 1, 2])
H70 = 70 * km/s/Mpc
H704 = 70.4 * km/s/Mpc
def test_critical_density():
    """Critical density at z=0 and at z>0 against astropy reference values."""
    # Correction factor for the slightly different G between the local
    # constants and CODATA 2014.
    fac = (Newton_G / codata2014.G).to(Unit())
    cosmo = AbstractFlatLambdaCDMR()
    cosmo.H0 = H704
    cosmo.Om0 = 0.272
    # constants defined only so accurately -- wrap in abs() so that a
    # *negative* deviation cannot slip through the tolerance check
    # (the original comparison was signed).
    assert abs((cosmo.critical_density0 * fac).to(gram / cm**3)
               - 9.309668456020899e-30) < 1e-9
    assert cosmo.critical_density0 == cosmo.critical_density(0)
    assert close((cosmo.critical_density(tensor([1, 5])) * fac).to(gram / cm**3).value,
                 [2.70352772e-29, 5.53739080e-28])
def test_xtfuncs():
    """Integrands of the lookback-time and absorption-distance integrals."""
    cosmo = AbstractLambdaCDMR()
    cosmo.H0, cosmo.Om0, cosmo.Ode0, cosmo.Neff, cosmo.Tcmb0 = H70, 0.3, 0.5, 3.04, 2.725 * kelvin
    z = tensor([2, 3.2])
    # Scalar and tensor arguments; reference values from the astropy test
    # suite for the same cosmological parameters.
    assert close(cosmo.lookback_time_integrand(tensor(3)), 0.052218976654969378)
    assert close(cosmo.lookback_time_integrand(z), [0.10333179, 0.04644541])
    assert close(cosmo.abs_distance_integrand(tensor(3)), 3.3420145059180402)
    assert close(cosmo.abs_distance_integrand(z), [2.7899584, 3.44104758])
def test_zeroing():
    """Density components explicitly set to zero stay zero at all redshifts."""
    cosmo = AbstractLambdaCDMR()
    cosmo.Om0 = 0.27
    cosmo.Ode0 = 0
    cosmo.Or0 = 0
    # Both scalar and tensor redshift arguments must evaluate to zero.
    assert cosmo.Ode(1.5) == 0
    assert (cosmo.Ode(Z) == ZERO).all()
    assert cosmo.Or(1.5) == 0
    assert (cosmo.Or(Z) == ZERO).all()
    # TODO: add neutrinos
    # assert allclose(cosmo.Onu(1.5), [0, 0, 0, 0])
    # assert allclose(cosmo.Onu(z), [0, 0, 0, 0])
    # Ob0 was never assigned here; presumably it defaults to zero -- verify
    # against the AbstractLambdaCDMR defaults.
    assert (cosmo.Ob(Z) == ZERO).all()
def test_matter():
    """Matter density and its split into baryonic and dark components."""
    cosmo = AbstractFlatLambdaCDMR()
    cosmo.Om0, cosmo.Ob0 = 0.3, 0.045
    # Present-day values come straight back out at z = 0.
    assert cosmo.Om(0) == 0.3
    assert cosmo.Ob(0) == 0.045
    # Redshift evolution against precomputed reference values.
    reference = (
        (cosmo.Om, [0.3, 0.59124088, 0.77419355, 0.92045455]),
        (cosmo.Ob, [0.045, 0.08868613, 0.11612903, 0.13806818]),
        (cosmo.Odm, [0.255, 0.50255474, 0.65806452, 0.78238636]),
    )
    for density, expected in reference:
        assert close(density(Z), expected)
    # Baryons plus dark matter account for all matter.
    assert close(cosmo.Ob(Z) + cosmo.Odm(Z), cosmo.Om(Z))
def test_ocurv():
    """Curvature density parameter for flat and open cosmologies."""
    # Flat cosmology: curvature vanishes at every redshift.
    cosmo = AbstractFlatLambdaCDMR()
    cosmo.Om0 = 0.3
    assert cosmo.Ok0 == 0
    assert cosmo.Ok(0) == 0
    assert (cosmo.Ok(Z) == ZERO).all()
    # Open cosmology: Ok0 = 1 - Om0 - Ode0 = 0.2, up to float rounding
    # (hence the SMALL tolerance).
    cosmo = AbstractLambdaCDMR()
    cosmo.Om0 = 0.3
    cosmo.Ode0 = 0.5
    assert abs(cosmo.Ok0 - 0.2) < SMALL
    assert abs(cosmo.Ok(0) - 0.2) < SMALL
    assert close(cosmo.Ok(Z), [0.2, 0.22929936, 0.21621622, 0.17307692])
    # The density parameters must sum exactly to unity at every redshift.
    assert (cosmo.Ok(Z) + cosmo.Om(Z) + cosmo.Ode(Z) == ONE).all()
def test_ode():
    """Dark-energy density parameter evolution in a flat cosmology."""
    cosmo = AbstractFlatLambdaCDMR()
    cosmo.Om0 = 0.3
    # At z = 0 the dark-energy density reproduces its present-day value.
    assert cosmo.Ode(0) == cosmo.Ode0
    assert close(cosmo.Ode(Z), [0.7, 0.408759, 0.2258065, 0.07954545])
def test_tcmb():
    """CMB temperature scales as (1 + z) times its present-day value."""
    cosmo = AbstractFlatLambdaCDMR()
    cosmo.H0, cosmo.Om0, cosmo.Tcmb0 = H704, 0.272, 2.5 * kelvin
    # Scalar argument: T(z=2) = 3 * Tcmb0.
    assert cosmo.Tcmb(2) == 7.5 * kelvin
    # Tensor argument: compare element-wise in kelvin.
    redshifts = tensor([0, 1, 2, 3, 9.])
    expected = tensor([2.5, 5, 7.5, 10, 25])
    assert (cosmo.Tcmb(redshifts).to(kelvin).value == expected).all()
def test_efunc_vs_invefunc():
    """efunc and inv_efunc are exact reciprocals of each other."""
    cosmo = AbstractLambdaCDMR()
    cosmo.Om0, cosmo.Ode0 = 0.3, 0.7
    # Scalar argument.
    scalar_product = cosmo.efunc(0.5) * cosmo.inv_efunc(0.5)
    assert scalar_product == 1
    # Tensor argument.
    tensor_product = cosmo.efunc(Z) * cosmo.inv_efunc(Z)
    assert (tensor_product == ONE).all()
    # TODO: test this for subclasses?
# TODO: test this for subclasses?
class BaseLambdaCDMDriverTest:
    """Base for driver tests; concrete subclasses bind the driver classes."""
    # Assigned by concrete subclasses (see the TestAnalytic* classes below).
    flat_cosmo_cls: Type[phytorch.cosmology.special.BaseFlatLambdaCDM]
    cosmo_cls: Type[phytorch.cosmology.special.BaseLambdaCDM]
class BaseLambdaCDMTest(BaseLambdaCDMDriverTest):
    """Distance-measure tests shared by all (Flat)LambdaCDM drivers."""

    # Bound to concrete driver implementations by subclasses.
    flat_cosmo_cls: Type[phytorch.cosmology.special.FlatLambdaCDM]
    cosmo_cls: Type[phytorch.cosmology.special.LambdaCDM]

    @with_default_double
    @mark.parametrize(('func', 'vals', 'unit', 'rtol'), (
        # From the astropy test suite:
        # Test values were taken from the following web cosmology
        # calculators on 27th Feb 2012:
        # Wright: http://www.astro.ucla.edu/~wright/CosmoCalc.html
        # (https://ui.adsabs.harvard.edu/abs/2006PASP..118.1711W)
        # Kempner: http://www.kempner.net/cosmic.php
        # iCosmos: http://www.icosmos.co.uk/index.html
        (phytorch.cosmology.special.FlatLambdaCDM.comoving_distance,
         (3364.5, 3364.8, 3364.7988), Mpc, 1e-4),
        (phytorch.cosmology.special.FlatLambdaCDM.angular_diameter_distance,
         (1682.3, 1682.4, 1682.3994), Mpc, 1e-4),
        (phytorch.cosmology.special.FlatLambdaCDM.luminosity_distance,
         (6729.2, 6729.6, 6729.5976), Mpc, 1e-4),
        (phytorch.cosmology.special.FlatLambdaCDM.lookback_time,
         (7.841, 7.84178, 7.843), Gyr, 1e-3),
        (phytorch.cosmology.special.FlatLambdaCDM.lookback_distance,
         (2404.0, 2404.24, 2404.4), Mpc, 1e-3),
    ))
    def test_flat_z1(self, func, vals, unit, rtol):
        """Each distance measure at z=1 vs the three reference calculators."""
        cosmo = self.flat_cosmo_cls()
        cosmo.H0 = H70
        cosmo.Om0 = 0.27
        assert close(getattr(cosmo, func.__name__)(1).to(unit).value, vals, rtol=rtol)

    @mark.parametrize('Om0, Ode0, vals', (
        (0.27, 0.73, (29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802)),
        (0.27, 0, (20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814)),
        (2, 0, (12.619, 44.708, 114.904, 173.709, 258.82, 358.992))
    ))
    def test_comoving_volume(self, Om0, Ode0, vals):
        """Comoving volume in Gpc^3 vs Ned Wright's calculator (low precision)."""
        z = tensor([0.5, 1, 2, 3, 5, 9])
        # for (Om0, Ode0), vals in zip(
        # ((0.27, 0.73), (0.27, 0), (2, 0)),
        # # Form Ned Wright's calculator: not very *accurate* (sic), so
        # # like astropy, test to very low precision
        # ((29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802),
        # (20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814),
        # (12.619, 44.708, 114.904, 173.709, 258.82, 358.992))
        # ):
        c = self.cosmo_cls()
        c.H0, c.Om0, c.Ode0 = H70, Om0, Ode0
        assert close(c.comoving_volume(z).to(Gpc**3).value, vals, rtol=1e-2)

    # TODO: (requires integration) test_differential_comoving_volume

    # Reference tables from the iCosmo calculator (z, D_M, D_A, D_L),
    # parsed with np.loadtxt in test_flat_open_closed_icosmo below.
    icosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 669.77536 576.15085 778.61386
0.32500000 1285.5964 970.26143 1703.4152
0.50000000 1888.6254 1259.0836 2832.9381
0.66250000 2395.5489 1440.9317 3982.6000
0.82500000 2855.5732 1564.6976 5211.4210
1.0000000 3303.8288 1651.9144 6607.6577
1.1625000 3681.1867 1702.2829 7960.5663
1.3250000 4025.5229 1731.4077 9359.3408
1.5000000 4363.8558 1745.5423 10909.640
1.6625000 4651.4830 1747.0359 12384.573
1.8250000 4916.5970 1740.3883 13889.387
2.0000000 5179.8621 1726.6207 15539.586
2.1625000 5406.0204 1709.4136 17096.540
2.3250000 5616.5075 1689.1752 18674.888
2.5000000 5827.5418 1665.0120 20396.396
2.6625000 6010.4886 1641.0890 22013.414
2.8250000 6182.1688 1616.2533 23646.796
3.0000000 6355.6855 1588.9214 25422.742
3.1625000 6507.2491 1563.3031 27086.425
3.3250000 6650.4520 1537.6768 28763.205
3.5000000 6796.1499 1510.2555 30582.674
3.6625000 6924.2096 1485.0852 32284.127
3.8250000 7045.8876 1460.2876 33996.408
4.0000000 7170.3664 1434.0733 35851.832
4.1625000 7280.3423 1410.2358 37584.767
4.3250000 7385.3277 1386.9160 39326.870
4.5000000 7493.2222 1362.4040 41212.722
4.6625000 7588.9589 1340.2135 42972.480
"""

    icosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 643.08185 553.18868 747.58265
0.32500000 1200.9858 906.40441 1591.3062
0.50000000 1731.6262 1154.4175 2597.4393
0.66250000 2174.3252 1307.8648 3614.8157
0.82500000 2578.7616 1413.0201 4706.2399
1.0000000 2979.3460 1489.6730 5958.6920
1.1625000 3324.2002 1537.2024 7188.5829
1.3250000 3646.8432 1568.5347 8478.9104
1.5000000 3972.8407 1589.1363 9932.1017
1.6625000 4258.1131 1599.2913 11337.226
1.8250000 4528.5346 1603.0211 12793.110
2.0000000 4804.9314 1601.6438 14414.794
2.1625000 5049.2007 1596.5852 15968.097
2.3250000 5282.6693 1588.7727 17564.875
2.5000000 5523.0914 1578.0261 19330.820
2.6625000 5736.9813 1566.4113 21011.694
2.8250000 5942.5803 1553.6158 22730.370
3.0000000 6155.4289 1538.8572 24621.716
3.1625000 6345.6997 1524.4924 26413.975
3.3250000 6529.3655 1509.6799 28239.506
3.5000000 6720.2676 1493.3928 30241.204
3.6625000 6891.5474 1478.0799 32131.840
3.8250000 7057.4213 1462.6780 34052.058
4.0000000 7230.3723 1446.0745 36151.862
4.1625000 7385.9998 1430.7021 38130.224
4.3250000 7537.1112 1415.4199 40135.117
4.5000000 7695.0718 1399.1040 42322.895
4.6625000 7837.5510 1384.1150 44380.133
"""

    icosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 601.80160 517.67879 699.59436
0.32500000 1057.9502 798.45297 1401.7840
0.50000000 1438.2161 958.81076 2157.3242
0.66250000 1718.6778 1033.7912 2857.3019
0.82500000 1948.2400 1067.5288 3555.5381
1.0000000 2152.7954 1076.3977 4305.5908
1.1625000 2312.3427 1069.2914 5000.4410
1.3250000 2448.9755 1053.3228 5693.8681
1.5000000 2575.6795 1030.2718 6439.1988
1.6625000 2677.9671 1005.8092 7130.0873
1.8250000 2768.1157 979.86398 7819.9270
2.0000000 2853.9222 951.30739 8561.7665
2.1625000 2924.8116 924.84161 9249.7167
2.3250000 2988.5333 898.80701 9936.8732
2.5000000 3050.3065 871.51614 10676.073
2.6625000 3102.1909 847.01459 11361.774
2.8250000 3149.5043 823.39982 12046.854
3.0000000 3195.9966 798.99915 12783.986
3.1625000 3235.5334 777.30533 13467.908
3.3250000 3271.9832 756.52790 14151.327
3.5000000 3308.1758 735.15017 14886.791
3.6625000 3339.2521 716.19347 15569.263
3.8250000 3368.1489 698.06195 16251.319
4.0000000 3397.0803 679.41605 16985.401
4.1625000 3422.1142 662.87926 17666.664
4.3250000 3445.5542 647.05243 18347.576
4.5000000 3469.1805 630.76008 19080.493
4.6625000 3489.7534 616.29199 19760.729
"""

    @mark.parametrize('Om0, Ode0, data', (
        (0.3, 0.7, icosmo_flat), (0.3, 0.1, icosmo_open), (2, 0.1, icosmo_closed)
    ))
    def test_flat_open_closed_icosmo(self, Om0, Ode0, data):
        """D_M, D_A, D_L vs the iCosmo tables for flat/open/closed geometry."""
        cosmo = self.cosmo_cls()
        cosmo.H0, cosmo.Om0, cosmo.Ode0 = H70, Om0, Ode0
        z, dm, da, dl = (tensor(_, dtype=torch.get_default_dtype())
                         for _ in np.loadtxt(StringIO(data), unpack=True))
        assert close(cosmo.comoving_transverse_distance(z).to(Mpc).value, dm)
        assert close(cosmo.angular_diameter_distance(z).to(Mpc).value, da)
        assert close(cosmo.luminosity_distance(z).to(Mpc).value, dl)

    def test_distmod(self):
        """Distance modulus and Hubble distance reference values."""
        cosmo = self.flat_cosmo_cls()
        cosmo.H0, cosmo.Om0 = H704, 0.272
        assert cosmo.hubble_distance.to(Mpc) == 4258.415596590909
        assert close(cosmo.distmod(tensor([1, 5])), [44.124857, 48.40167258])

    @with_default_double
    def test_negdistmod(self):
        """Distance modulus where the luminosity distance goes negative."""
        cosmo = self.cosmo_cls()
        cosmo.H0, cosmo.Om0, cosmo.Ode0 = H70, 0.2, 1.3
        z = tensor([50, 100])
        assert close(cosmo.luminosity_distance(z).to(Mpc).value, [16612.44047622, -46890.79092244])
        assert close(cosmo.distmod(z), [46.102167189, 48.355437790944])

    def test_comoving_distance_z1z2(self):
        """Two-redshift comoving distance: shape checks and antisymmetry."""
        cosmo = self.cosmo_cls()
        cosmo.Om0, cosmo.Ode0 = 0.3, 0.8
        # Mismatched input shapes must raise.
        with pytest.raises(RuntimeError):
            cosmo.comoving_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))
        # d(z1, z2) == -d(z2, z1)
        assert cosmo.comoving_distance_z1z2(1, 2) == - cosmo.comoving_distance_z1z2(2, 1)
        assert close(
            cosmo.comoving_distance_z1z2(tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])).to(Mpc).value,
            [3767.90579253, 2386.25591391, -1381.64987862, 2893.11776663, 174.1524683]
        )

    @with_default_double
    @mark.parametrize('Om0, val', (
        # (0, 2997.92458),  # TODO: cannot do Om0=0 with LambdaCDM, need special cosmology
        (1, 1756.1435599923348),
    ))
    def test_distance_in_special_cosmologies(self, Om0, val):
        """Comoving distance in limiting cosmologies (e.g. Einstein-de Sitter)."""
        cosmo = self.flat_cosmo_cls()
        cosmo.Om0 = Om0
        assert close(cosmo.comoving_distance(0).to(Mpc).value, 0)
        assert close(cosmo.comoving_distance(1).to(Mpc).value, val)

    @with_default_double
    def test_comoving_transverse_distance_z1z2(self):
        """Two-redshift transverse comoving distance for flat/open/closed cases."""
        z1, z2 = tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])
        cosmo = self.flat_cosmo_cls()
        cosmo.Om0 = 0.3
        with pytest.raises(RuntimeError):
            cosmo.comoving_transverse_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))
        assert close(cosmo.comoving_transverse_distance_z1z2(1, 2).to(Mpc).value, 1313.2232194828466)
        # In a flat cosmology D_M coincides with the comoving distance.
        assert close(cosmo.comoving_distance_z1z2(z1, z2).to(Mpc).value,
                     cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value)
        cosmo = self.flat_cosmo_cls()
        cosmo.Om0 = 1.5
        assert close(
            cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value,
            [2202.72682564, 1559.51679971, -643.21002593, 1408.36365679, 85.09286258]
        )
        assert close(cosmo.comoving_distance_z1z2(z1, z2).to(Mpc).value,
                     cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value)
        cosmo = self.cosmo_cls()
        cosmo.Om0, cosmo.Ode0 = 0.3, 0.5
        assert close(
            cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value,
            [3535.931375645655, 2226.430046551708, -1208.6817970036532, 2595.567367601969, 151.36592003406884]
        )
        cosmo = self.cosmo_cls()
        cosmo.Om0, cosmo.Ode0 = 1, 0.2
        assert close(
            cosmo.comoving_transverse_distance_z1z2(0.1, tensor([0, 0.1, 0.2, 0.5, 1.1, 2])).to(Mpc).value,
            [-281.31602666724865, 0, 248.58093707820436, 843.9331377460543, 1618.6104987686672, 2287.5626543279927]
        )

    def test_angular_diameter_distance_z1z2(self):
        """Two-redshift angular diameter distance for flat and curved cases."""
        cosmo = self.flat_cosmo_cls()
        cosmo.H0, cosmo.Om0 = H704, 0.272
        with pytest.raises(RuntimeError):
            cosmo.angular_diameter_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))
        assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 646.22968662822018)
        assert close(
            cosmo.angular_diameter_distance_z1z2(tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])).to(Mpc).value,
            [1760.0628637762106, 1670.7497657219858, -969.34452994, 1159.0970895962193, 115.72768186186921]
        )
        assert close(
            cosmo.angular_diameter_distance_z1z2(0.1, tensor([0.1, 0.2, 0.5, 1.1, 2])).to(Mpc).value,
            [0, 332.09893173, 986.35635069, 1508.37010062, 1621.07937976]
        )
        # Non-flat (positive Ok0) test
        cosmo = self.cosmo_cls()
        cosmo.H0, cosmo.Om0, cosmo.Ode0 = H704, 0.2, 0.5
        assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 620.1175337852428)
        # Non-flat (negative Ok0) test
        cosmo = self.cosmo_cls()
        cosmo.Om0, cosmo.Ode0 = 2, 1
        assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 228.42914659246014)

    def test_absorption_distance(self):
        """Dimensionless absorption distance for scalar and tensor redshifts."""
        cosmo = self.flat_cosmo_cls()
        cosmo.H0, cosmo.Om0 = H704, 0.272
        assert close(cosmo.absorption_distance(3), 7.98685853)
        assert close(cosmo.absorption_distance(tensor([1, 3])), [1.72576635, 7.98685853])
class BaseLambdaCDMRTest(BaseLambdaCDMDriverTest):
    """Tests for LambdaCDM-with-radiation drivers (photon/neutrino density)."""

    # Bound to concrete radiation-aware driver implementations by subclasses.
    flat_cosmo_cls: Type[phytorch.cosmology.special.FlatLambdaCDMR]
    cosmo_cls: Type[phytorch.cosmology.special.LambdaCDMR]

    @with_default_double
    def test_ogamma(self):
        """Angular-diameter and comoving distances including radiation."""
        z = tensor([1, 10, 500, 1000])
        # Reference values for several (Neff, Tcmb0) combinations.
        for Neff, Tcmb0, vals in (
            # (3, 0, [1651.9, 858.2, 26.855, 13.642]),  # cannot have Or0=0
            (3, 2.725, [1651.8, 857.9, 26.767, 13.582]),
            (3, 4, [1651.4, 856.6, 26.489, 13.405]),
            # (3.04, 0, [1651.91, 858.205, 26.8586, 13.6469]),  # cannot have Or0=0
            (3.04, 2.725, [1651.76, 857.817, 26.7688, 13.5841]),
            (3.04, 4, [1651.21, 856.411, 26.4845, 13.4028]),
        ):
            cosmo = self.flat_cosmo_cls()
            cosmo.H0, cosmo.Om0, cosmo.Neff, cosmo.Tcmb0 = H70, 0.3, Neff, Tcmb0*kelvin
            assert close(cosmo.angular_diameter_distance(z).to(Mpc).value, vals, rtol=5e-4)

        # from astropy: Just to be really sure, we also do a version where the
        # integral is analytic, which is a Ode = 0 flat universe. In this case
        # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
        # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
        hubdis = (299792.458 / 70.0)
        Neff = 3.04
        for Tcmb0 in (2.725, 5):
            # Photon and neutrino densities from the CMB temperature.
            Ogamma0h2 = 4 * 5.670373e-8 / 299792458**3 * Tcmb0**4 / 1.87837e-26
            Onu0h2 = Ogamma0h2 * 7/8 * (4 / 11)**(4/3) * Neff
            Or0 = (Ogamma0h2 + Onu0h2) / 0.7**2
            vals = 2 * hubdis * (((1 + Or0*z) / (1+z))**0.5 - 1) / (Or0 - 1)
            cosmo = self.flat_cosmo_cls()
            cosmo.H0, cosmo.Neff, cosmo.Tcmb0, cosmo.Ode0 = H70, Neff, Tcmb0 * kelvin, 0
            assert close(cosmo.comoving_distance(z).to(Mpc).value, vals)
class TestAnalyticLambdaCDM(BaseLambdaCDMTest):
    """LambdaCDM suite bound to the closed-form `analytic` driver."""
    flat_cosmo_cls = phytorch.cosmology.drivers.analytic.FlatLambdaCDM
    cosmo_cls = phytorch.cosmology.drivers.analytic.LambdaCDM
class TestAnalyticCDMR(BaseLambdaCDMRTest):
    """Radiation suite bound to the closed-form `analytic` driver."""
    flat_cosmo_cls = phytorch.cosmology.drivers.analytic.FlatLambdaCDMR
    cosmo_cls = phytorch.cosmology.drivers.analytic.LambdaCDMR
class TestAnalyticDiffLambdaCDM(BaseLambdaCDMTest):
    """LambdaCDM suite bound to the differentiable `analytic_diff` driver."""
    flat_cosmo_cls = phytorch.cosmology.drivers.analytic_diff.FlatLambdaCDM
    cosmo_cls = phytorch.cosmology.drivers.analytic_diff.LambdaCDM
class TestAnalyticDiffCDMR(BaseLambdaCDMRTest):
    """Radiation suite bound to the differentiable `analytic_diff` driver."""
    flat_cosmo_cls = phytorch.cosmology.drivers.analytic_diff.FlatLambdaCDMR
    cosmo_cls = phytorch.cosmology.drivers.analytic_diff.LambdaCDMR
# TODO: (age...) test_age
# TODO: (age...) test_age_in_special_cosmologies
# TODO: (neutrinos, weird models...) test_distances
| 44.028866
| 119
| 0.588929
|
from io import StringIO
from typing import Type
import numpy as np
import pytest
import torch
from pytest import mark
from torch import tensor
import phytorch.cosmology.drivers.analytic
import phytorch.cosmology.drivers.analytic_diff
import phytorch.cosmology.special
from phytorch.constants import codata2014, G as Newton_G
from phytorch.cosmology.special import AbstractFlatLambdaCDMR, AbstractLambdaCDMR
from phytorch.units.astro import Gpc, Gyr, Mpc
from phytorch.units.si import cm, gram, kelvin, km, s
from phytorch.units.unit import Unit
from tests.common.closeness import close
from tests.common.dtypes import with_default_double
ZERO = torch.zeros(())
ONE = torch.ones(())
SMALL = 1e-16
Z = tensor([0, 0.5, 1, 2])
H70 = 70 * km/s/Mpc
H704 = 70.4 * km/s/Mpc
def test_critical_density():
fac = (Newton_G / codata2014.G).to(Unit())
cosmo = AbstractFlatLambdaCDMR()
cosmo.H0 = H704
cosmo.Om0 = 0.272
assert ((cosmo.critical_density0 * fac).to(gram / cm**3) - 9.309668456020899e-30) < 1e-9
assert cosmo.critical_density0 == cosmo.critical_density(0)
assert close((cosmo.critical_density(tensor([1, 5])) * fac).to(gram / cm**3).value,
[2.70352772e-29, 5.53739080e-28])
def test_xtfuncs():
cosmo = AbstractLambdaCDMR()
cosmo.H0, cosmo.Om0, cosmo.Ode0, cosmo.Neff, cosmo.Tcmb0 = H70, 0.3, 0.5, 3.04, 2.725 * kelvin
z = tensor([2, 3.2])
assert close(cosmo.lookback_time_integrand(tensor(3)), 0.052218976654969378)
assert close(cosmo.lookback_time_integrand(z), [0.10333179, 0.04644541])
assert close(cosmo.abs_distance_integrand(tensor(3)), 3.3420145059180402)
assert close(cosmo.abs_distance_integrand(z), [2.7899584, 3.44104758])
def test_zeroing():
cosmo = AbstractLambdaCDMR()
cosmo.Om0 = 0.27
cosmo.Ode0 = 0
cosmo.Or0 = 0
assert cosmo.Ode(1.5) == 0
assert (cosmo.Ode(Z) == ZERO).all()
assert cosmo.Or(1.5) == 0
assert (cosmo.Or(Z) == ZERO).all()
assert (cosmo.Ob(Z) == ZERO).all()
def test_matter():
cosmo = AbstractFlatLambdaCDMR()
cosmo.Om0 = 0.3
cosmo.Ob0 = 0.045
assert cosmo.Om(0) == 0.3
assert cosmo.Ob(0) == 0.045
assert close(cosmo.Om(Z), [0.3, 0.59124088, 0.77419355, 0.92045455])
assert close(cosmo.Ob(Z), [0.045, 0.08868613, 0.11612903, 0.13806818])
assert close(cosmo.Odm(Z), [0.255, 0.50255474, 0.65806452, 0.78238636])
assert close(cosmo.Ob(Z) + cosmo.Odm(Z), cosmo.Om(Z))
def test_ocurv():
cosmo = AbstractFlatLambdaCDMR()
cosmo.Om0 = 0.3
assert cosmo.Ok0 == 0
assert cosmo.Ok(0) == 0
assert (cosmo.Ok(Z) == ZERO).all()
cosmo = AbstractLambdaCDMR()
cosmo.Om0 = 0.3
cosmo.Ode0 = 0.5
assert abs(cosmo.Ok0 - 0.2) < SMALL
assert abs(cosmo.Ok(0) - 0.2) < SMALL
assert close(cosmo.Ok(Z), [0.2, 0.22929936, 0.21621622, 0.17307692])
assert (cosmo.Ok(Z) + cosmo.Om(Z) + cosmo.Ode(Z) == ONE).all()
def test_ode():
cosmo = AbstractFlatLambdaCDMR()
cosmo.Om0 = 0.3
assert cosmo.Ode(0) == cosmo.Ode0
assert close(cosmo.Ode(Z), [0.7, 0.408759, 0.2258065, 0.07954545])
def test_tcmb():
cosmo = AbstractFlatLambdaCDMR()
cosmo.H0 = H704
cosmo.Om0 = 0.272
cosmo.Tcmb0 = 2.5 * kelvin
assert cosmo.Tcmb(2) == 7.5 * kelvin
assert (cosmo.Tcmb(tensor([0, 1, 2, 3, 9.])).to(kelvin).value == tensor([2.5, 5, 7.5, 10, 25])).all()
def test_efunc_vs_invefunc():
cosmo = AbstractLambdaCDMR()
cosmo.Om0 = 0.3
cosmo.Ode0 = 0.7
assert cosmo.efunc(0.5) * cosmo.inv_efunc(0.5) == 1
assert (cosmo.efunc(Z) * cosmo.inv_efunc(Z) == ONE).all()
class BaseLambdaCDMDriverTest:
flat_cosmo_cls: Type[phytorch.cosmology.special.BaseFlatLambdaCDM]
cosmo_cls: Type[phytorch.cosmology.special.BaseLambdaCDM]
class BaseLambdaCDMTest(BaseLambdaCDMDriverTest):
flat_cosmo_cls: Type[phytorch.cosmology.special.FlatLambdaCDM]
cosmo_cls: Type[phytorch.cosmology.special.LambdaCDM]
@with_default_double
@mark.parametrize(('func', 'vals', 'unit', 'rtol'), (
(phytorch.cosmology.special.FlatLambdaCDM.comoving_distance,
(3364.5, 3364.8, 3364.7988), Mpc, 1e-4),
(phytorch.cosmology.special.FlatLambdaCDM.angular_diameter_distance,
(1682.3, 1682.4, 1682.3994), Mpc, 1e-4),
(phytorch.cosmology.special.FlatLambdaCDM.luminosity_distance,
(6729.2, 6729.6, 6729.5976), Mpc, 1e-4),
(phytorch.cosmology.special.FlatLambdaCDM.lookback_time,
(7.841, 7.84178, 7.843), Gyr, 1e-3),
(phytorch.cosmology.special.FlatLambdaCDM.lookback_distance,
(2404.0, 2404.24, 2404.4), Mpc, 1e-3),
))
def test_flat_z1(self, func, vals, unit, rtol):
cosmo = self.flat_cosmo_cls()
cosmo.H0 = H70
cosmo.Om0 = 0.27
assert close(getattr(cosmo, func.__name__)(1).to(unit).value, vals, rtol=rtol)
@mark.parametrize('Om0, Ode0, vals', (
(0.27, 0.73, (29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802)),
(0.27, 0, (20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814)),
(2, 0, (12.619, 44.708, 114.904, 173.709, 258.82, 358.992))
))
def test_comoving_volume(self, Om0, Ode0, vals):
z = tensor([0.5, 1, 2, 3, 5, 9])
# ((29.123, 159.529, 630.427, 1178.531, 2181.485, 3654.802),
# (20.501, 99.019, 380.278, 747.049, 1558.363, 3123.814),
# (12.619, 44.708, 114.904, 173.709, 258.82, 358.992))
# ):
c = self.cosmo_cls()
c.H0, c.Om0, c.Ode0 = H70, Om0, Ode0
assert close(c.comoving_volume(z).to(Gpc**3).value, vals, rtol=1e-2)
# TODO: (requires integration) test_differential_comoving_volume
icosmo_flat = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.7
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 669.77536 576.15085 778.61386
0.32500000 1285.5964 970.26143 1703.4152
0.50000000 1888.6254 1259.0836 2832.9381
0.66250000 2395.5489 1440.9317 3982.6000
0.82500000 2855.5732 1564.6976 5211.4210
1.0000000 3303.8288 1651.9144 6607.6577
1.1625000 3681.1867 1702.2829 7960.5663
1.3250000 4025.5229 1731.4077 9359.3408
1.5000000 4363.8558 1745.5423 10909.640
1.6625000 4651.4830 1747.0359 12384.573
1.8250000 4916.5970 1740.3883 13889.387
2.0000000 5179.8621 1726.6207 15539.586
2.1625000 5406.0204 1709.4136 17096.540
2.3250000 5616.5075 1689.1752 18674.888
2.5000000 5827.5418 1665.0120 20396.396
2.6625000 6010.4886 1641.0890 22013.414
2.8250000 6182.1688 1616.2533 23646.796
3.0000000 6355.6855 1588.9214 25422.742
3.1625000 6507.2491 1563.3031 27086.425
3.3250000 6650.4520 1537.6768 28763.205
3.5000000 6796.1499 1510.2555 30582.674
3.6625000 6924.2096 1485.0852 32284.127
3.8250000 7045.8876 1460.2876 33996.408
4.0000000 7170.3664 1434.0733 35851.832
4.1625000 7280.3423 1410.2358 37584.767
4.3250000 7385.3277 1386.9160 39326.870
4.5000000 7493.2222 1362.4040 41212.722
4.6625000 7588.9589 1340.2135 42972.480
"""
icosmo_open = """\
# from icosmo (icosmo.org)
# Om 0.3 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 643.08185 553.18868 747.58265
0.32500000 1200.9858 906.40441 1591.3062
0.50000000 1731.6262 1154.4175 2597.4393
0.66250000 2174.3252 1307.8648 3614.8157
0.82500000 2578.7616 1413.0201 4706.2399
1.0000000 2979.3460 1489.6730 5958.6920
1.1625000 3324.2002 1537.2024 7188.5829
1.3250000 3646.8432 1568.5347 8478.9104
1.5000000 3972.8407 1589.1363 9932.1017
1.6625000 4258.1131 1599.2913 11337.226
1.8250000 4528.5346 1603.0211 12793.110
2.0000000 4804.9314 1601.6438 14414.794
2.1625000 5049.2007 1596.5852 15968.097
2.3250000 5282.6693 1588.7727 17564.875
2.5000000 5523.0914 1578.0261 19330.820
2.6625000 5736.9813 1566.4113 21011.694
2.8250000 5942.5803 1553.6158 22730.370
3.0000000 6155.4289 1538.8572 24621.716
3.1625000 6345.6997 1524.4924 26413.975
3.3250000 6529.3655 1509.6799 28239.506
3.5000000 6720.2676 1493.3928 30241.204
3.6625000 6891.5474 1478.0799 32131.840
3.8250000 7057.4213 1462.6780 34052.058
4.0000000 7230.3723 1446.0745 36151.862
4.1625000 7385.9998 1430.7021 38130.224
4.3250000 7537.1112 1415.4199 40135.117
4.5000000 7695.0718 1399.1040 42322.895
4.6625000 7837.5510 1384.1150 44380.133
"""
icosmo_closed = """\
# from icosmo (icosmo.org)
# Om 2 w -1 h 0.7 Ol 0.1
# z comoving_transvers_dist angular_diameter_dist luminosity_dist
0.0000000 0.0000000 0.0000000 0.0000000
0.16250000 601.80160 517.67879 699.59436
0.32500000 1057.9502 798.45297 1401.7840
0.50000000 1438.2161 958.81076 2157.3242
0.66250000 1718.6778 1033.7912 2857.3019
0.82500000 1948.2400 1067.5288 3555.5381
1.0000000 2152.7954 1076.3977 4305.5908
1.1625000 2312.3427 1069.2914 5000.4410
1.3250000 2448.9755 1053.3228 5693.8681
1.5000000 2575.6795 1030.2718 6439.1988
1.6625000 2677.9671 1005.8092 7130.0873
1.8250000 2768.1157 979.86398 7819.9270
2.0000000 2853.9222 951.30739 8561.7665
2.1625000 2924.8116 924.84161 9249.7167
2.3250000 2988.5333 898.80701 9936.8732
2.5000000 3050.3065 871.51614 10676.073
2.6625000 3102.1909 847.01459 11361.774
2.8250000 3149.5043 823.39982 12046.854
3.0000000 3195.9966 798.99915 12783.986
3.1625000 3235.5334 777.30533 13467.908
3.3250000 3271.9832 756.52790 14151.327
3.5000000 3308.1758 735.15017 14886.791
3.6625000 3339.2521 716.19347 15569.263
3.8250000 3368.1489 698.06195 16251.319
4.0000000 3397.0803 679.41605 16985.401
4.1625000 3422.1142 662.87926 17666.664
4.3250000 3445.5542 647.05243 18347.576
4.5000000 3469.1805 630.76008 19080.493
4.6625000 3489.7534 616.29199 19760.729
"""
@mark.parametrize('Om0, Ode0, data', (
(0.3, 0.7, icosmo_flat), (0.3, 0.1, icosmo_open), (2, 0.1, icosmo_closed)
))
def test_flat_open_closed_icosmo(self, Om0, Ode0, data):
cosmo = self.cosmo_cls()
cosmo.H0, cosmo.Om0, cosmo.Ode0 = H70, Om0, Ode0
z, dm, da, dl = (tensor(_, dtype=torch.get_default_dtype())
for _ in np.loadtxt(StringIO(data), unpack=True))
assert close(cosmo.comoving_transverse_distance(z).to(Mpc).value, dm)
assert close(cosmo.angular_diameter_distance(z).to(Mpc).value, da)
assert close(cosmo.luminosity_distance(z).to(Mpc).value, dl)
def test_distmod(self):
cosmo = self.flat_cosmo_cls()
cosmo.H0, cosmo.Om0 = H704, 0.272
assert cosmo.hubble_distance.to(Mpc) == 4258.415596590909
assert close(cosmo.distmod(tensor([1, 5])), [44.124857, 48.40167258])
@with_default_double
def test_negdistmod(self):
cosmo = self.cosmo_cls()
cosmo.H0, cosmo.Om0, cosmo.Ode0 = H70, 0.2, 1.3
z = tensor([50, 100])
assert close(cosmo.luminosity_distance(z).to(Mpc).value, [16612.44047622, -46890.79092244])
assert close(cosmo.distmod(z), [46.102167189, 48.355437790944])
def test_comoving_distance_z1z2(self):
cosmo = self.cosmo_cls()
cosmo.Om0, cosmo.Ode0 = 0.3, 0.8
with pytest.raises(RuntimeError):
cosmo.comoving_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))
assert cosmo.comoving_distance_z1z2(1, 2) == - cosmo.comoving_distance_z1z2(2, 1)
assert close(
cosmo.comoving_distance_z1z2(tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])).to(Mpc).value,
[3767.90579253, 2386.25591391, -1381.64987862, 2893.11776663, 174.1524683]
)
@with_default_double
@mark.parametrize('Om0, val', (
# (0, 2997.92458), # TODO: cannot do Om0=0 with LambdaCDM, need special cosmology
(1, 1756.1435599923348),
))
def test_distance_in_special_cosmologies(self, Om0, val):
cosmo = self.flat_cosmo_cls()
cosmo.Om0 = Om0
assert close(cosmo.comoving_distance(0).to(Mpc).value, 0)
assert close(cosmo.comoving_distance(1).to(Mpc).value, val)
    @with_default_double
    def test_comoving_transverse_distance_z1z2(self):
        """Transverse comoving distance between redshift pairs, flat and curved cases.

        Reference values match astropy's LambdaCDM implementation; negative
        entries correspond to z2 < z1 (the distance is signed).
        """
        z1, z2 = tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])
        cosmo = self.flat_cosmo_cls()
        cosmo.Om0 = 0.3
        # Mismatched endpoint shapes are rejected.
        with pytest.raises(RuntimeError):
            cosmo.comoving_transverse_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))
        assert close(cosmo.comoving_transverse_distance_z1z2(1, 2).to(Mpc).value, 1313.2232194828466)
        # In a flat cosmology the transverse distance equals the line-of-sight one.
        assert close(cosmo.comoving_distance_z1z2(z1, z2).to(Mpc).value,
                     cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value)
        # Still flat (Ode0 is derived), but matter-dominated.
        cosmo = self.flat_cosmo_cls()
        cosmo.Om0 = 1.5
        assert close(
            cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value,
            [2202.72682564, 1559.51679971, -643.21002593, 1408.36365679, 85.09286258]
        )
        assert close(cosmo.comoving_distance_z1z2(z1, z2).to(Mpc).value,
                     cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value)
        # Non-flat, Ok0 > 0 (open).
        cosmo = self.cosmo_cls()
        cosmo.Om0, cosmo.Ode0 = 0.3, 0.5
        assert close(
            cosmo.comoving_transverse_distance_z1z2(z1, z2).to(Mpc).value,
            [3535.931375645655, 2226.430046551708, -1208.6817970036532, 2595.567367601969, 151.36592003406884]
        )
        # Non-flat, Ok0 < 0 (closed); scalar z1 broadcast against a tensor z2.
        cosmo = self.cosmo_cls()
        cosmo.Om0, cosmo.Ode0 = 1, 0.2
        assert close(
            cosmo.comoving_transverse_distance_z1z2(0.1, tensor([0, 0.1, 0.2, 0.5, 1.1, 2])).to(Mpc).value,
            [-281.31602666724865, 0, 248.58093707820436, 843.9331377460543, 1618.6104987686672, 2287.5626543279927]
        )
    def test_angular_diameter_distance_z1z2(self):
        """Angular diameter distance between redshift pairs in flat and curved models.

        Reference values match astropy; negative entries correspond to z2 < z1.
        """
        cosmo = self.flat_cosmo_cls()
        cosmo.H0, cosmo.Om0 = H704, 0.272
        # Mismatched endpoint shapes are rejected.
        with pytest.raises(RuntimeError):
            cosmo.angular_diameter_distance_z1z2(tensor((1, 2)), tensor((3, 4, 5)))
        assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 646.22968662822018)
        assert close(
            cosmo.angular_diameter_distance_z1z2(tensor([0, 0, 2, 0.5, 1]), tensor([2, 1, 1, 2.5, 1.1])).to(Mpc).value,
            [1760.0628637762106, 1670.7497657219858, -969.34452994, 1159.0970895962193, 115.72768186186921]
        )
        # Scalar z1 broadcast against a tensor of z2 values.
        assert close(
            cosmo.angular_diameter_distance_z1z2(0.1, tensor([0.1, 0.2, 0.5, 1.1, 2])).to(Mpc).value,
            [0, 332.09893173, 986.35635069, 1508.37010062, 1621.07937976]
        )
        # Non-flat (positive Ok0) test
        cosmo = self.cosmo_cls()
        cosmo.H0, cosmo.Om0, cosmo.Ode0 = H704, 0.2, 0.5
        assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 620.1175337852428)
        # Non-flat (negative Ok0) test
        cosmo = self.cosmo_cls()
        cosmo.Om0, cosmo.Ode0 = 2, 1
        assert close(cosmo.angular_diameter_distance_z1z2(1, 2).to(Mpc).value, 228.42914659246014)
def test_absorption_distance(self):
cosmo = self.flat_cosmo_cls()
cosmo.H0, cosmo.Om0 = H704, 0.272
assert close(cosmo.absorption_distance(3), 7.98685853)
assert close(cosmo.absorption_distance(tensor([1, 3])), [1.72576635, 7.98685853])
class BaseLambdaCDMRTest(BaseLambdaCDMDriverTest):
    """Shared tests for LambdaCDM + radiation (photons and neutrinos) drivers."""
    # Concrete subclasses bind these to a specific driver implementation.
    flat_cosmo_cls: Type[phytorch.cosmology.special.FlatLambdaCDMR]
    cosmo_cls: Type[phytorch.cosmology.special.LambdaCDMR]
    @with_default_double
    def test_ogamma(self):
        """Radiation density from Tcmb0/Neff affects distances as in astropy."""
        z = tensor([1, 10, 500, 1000])
        # Reference angular diameter distances for several (Neff, Tcmb0) pairs.
        for Neff, Tcmb0, vals in (
            # (3, 0, [1651.9, 858.2, 26.855, 13.642]),  # cannot have Or0=0
            (3, 2.725, [1651.8, 857.9, 26.767, 13.582]),
            (3, 4, [1651.4, 856.6, 26.489, 13.405]),
            # (3.04, 0, [1651.91, 858.205, 26.8586, 13.6469]),  # cannot have Or0=0
            (3.04, 2.725, [1651.76, 857.817, 26.7688, 13.5841]),
            (3.04, 4, [1651.21, 856.411, 26.4845, 13.4028]),
        ):
            cosmo = self.flat_cosmo_cls()
            cosmo.H0, cosmo.Om0, cosmo.Neff, cosmo.Tcmb0 = H70, 0.3, Neff, Tcmb0*kelvin
            assert close(cosmo.angular_diameter_distance(z).to(Mpc).value, vals, rtol=5e-4)
        # from astropy: Just to be really sure, we also do a version where the
        # integral is analytic, which is a Ode = 0 flat universe. In this case
        # Integrate(1/E(x),{x,0,z}) = 2 ( sqrt((1+Or z)/(1+z)) - 1 )/(Or - 1)
        # Recall that c/H0 * Integrate(1/E) is FLRW.comoving_distance.
        hubdis = (299792.458 / 70.0)
        Neff = 3.04
        for Tcmb0 in (2.725, 5):
            # Radiation density parameters from the CMB temperature; constants
            # here are (presumably) sigma_sb-related factors in SI — TODO confirm
            # against astropy's FLRW implementation.
            Ogamma0h2 = 4 * 5.670373e-8 / 299792458**3 * Tcmb0**4 / 1.87837e-26
            Onu0h2 = Ogamma0h2 * 7/8 * (4 / 11)**(4/3) * Neff
            Or0 = (Ogamma0h2 + Onu0h2) / 0.7**2
            vals = 2 * hubdis * (((1 + Or0*z) / (1+z))**0.5 - 1) / (Or0 - 1)
            cosmo = self.flat_cosmo_cls()
            cosmo.H0, cosmo.Neff, cosmo.Tcmb0, cosmo.Ode0 = H70, Neff, Tcmb0 * kelvin, 0
            assert close(cosmo.comoving_distance(z).to(Mpc).value, vals)
class TestAnalyticLambdaCDM(BaseLambdaCDMTest):
    # Runs the shared LambdaCDM test suite against the closed-form "analytic" driver.
    flat_cosmo_cls = phytorch.cosmology.drivers.analytic.FlatLambdaCDM
    cosmo_cls = phytorch.cosmology.drivers.analytic.LambdaCDM
class TestAnalyticCDMR(BaseLambdaCDMRTest):
    # Runs the radiation-inclusive test suite against the "analytic" driver.
    flat_cosmo_cls = phytorch.cosmology.drivers.analytic.FlatLambdaCDMR
    cosmo_cls = phytorch.cosmology.drivers.analytic.LambdaCDMR
class TestAnalyticDiffLambdaCDM(BaseLambdaCDMTest):
    # Runs the shared LambdaCDM test suite against the differentiable driver.
    flat_cosmo_cls = phytorch.cosmology.drivers.analytic_diff.FlatLambdaCDM
    cosmo_cls = phytorch.cosmology.drivers.analytic_diff.LambdaCDM
class TestAnalyticDiffCDMR(BaseLambdaCDMRTest):
    # Runs the radiation-inclusive test suite against the differentiable driver.
    flat_cosmo_cls = phytorch.cosmology.drivers.analytic_diff.FlatLambdaCDMR
    cosmo_cls = phytorch.cosmology.drivers.analytic_diff.LambdaCDMR
# TODO: (age...) test_age
# TODO: (age...) test_age_in_special_cosmologies
# TODO: (neutrinos, weird models...) test_distances
| true
| true
|
f7077aff83afe4368eaf95ebf3fc072b46c40a11
| 3,765
|
py
|
Python
|
src/selectedtests/datasource/datasource_cli.py
|
isabella232/selected-tests
|
890cd5f39f5571d50f0406b4c25a1a2eef1006a3
|
[
"Apache-2.0"
] | 2
|
2020-04-13T11:26:57.000Z
|
2022-01-21T00:03:52.000Z
|
src/selectedtests/datasource/datasource_cli.py
|
mongodb/selected-tests
|
467f71f1d45b06ac3cc5db252f18658f8cd93083
|
[
"Apache-2.0"
] | 54
|
2019-09-26T18:56:34.000Z
|
2022-03-12T01:07:00.000Z
|
src/selectedtests/datasource/datasource_cli.py
|
isabella232/selected-tests
|
890cd5f39f5571d50f0406b4c25a1a2eef1006a3
|
[
"Apache-2.0"
] | 6
|
2019-10-01T14:24:27.000Z
|
2020-02-13T15:53:47.000Z
|
"""Cli entry point to setup db indexes."""
import click
import structlog
from click import Context
from miscutils.logging_config import Verbosity
from pymongo import ASCENDING, IndexModel
from pymongo.collection import Collection
from selectedtests.config.logging_config import config_logging
from selectedtests.datasource.mongo_wrapper import MongoWrapper
LOGGER = structlog.get_logger()
def setup_queue_indexes(collection: Collection) -> None:
    """
    Create appropriate indexes for ProjectTestMappingWorkItems.

    :param collection: Collection to add indexes to.
    """
    project_index = IndexModel([("project", ASCENDING)], unique=True)
    collection.create_indexes([project_index])
    LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_indexes(collection: Collection) -> None:
    """
    Create appropriate indexes for the test and task mappings collections.

    :param collection: Collection to add indexes to.
    """
    # (project, source_file) would almost be unique on its own, but repo and
    # branch are needed to disambiguate mappings that come from a module.
    keys = [
        ("project", ASCENDING),
        ("repo", ASCENDING),
        ("branch", ASCENDING),
        ("source_file", ASCENDING),
    ]
    collection.create_indexes([IndexModel(keys, unique=True)])
    LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_tasks_indexes(collection: Collection) -> None:
    """
    Create appropriate indexes for the mapping tasks collection.

    The indexes must support both the $lookup operation and uniqueness constraints.

    :param collection: Collection to add indexes to.
    """
    keys = [("task_mapping_id", ASCENDING), ("name", ASCENDING), ("variant", ASCENDING)]
    collection.create_indexes([IndexModel(keys, unique=True)])
    LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_test_files_indexes(collection: Collection) -> None:
    """
    Create appropriate indexes for the mapping test files collection.

    The indexes must support both the $lookup operation and uniqueness constraints.

    :param collection: Collection to add indexes to.
    """
    keys = [("test_mapping_id", ASCENDING), ("name", ASCENDING)]
    collection.create_indexes([IndexModel(keys, unique=True)])
    LOGGER.info("Adding indexes for collection", collection=collection.name)
@click.group()
@click.option("--verbose", is_flag=True, default=False, help="Enable verbose logging.")
@click.option("--mongo-uri", required=True, type=str, help="Mongo URI to connect to.")
@click.pass_context
def cli(ctx: Context, verbose: bool, mongo_uri: str) -> None:
    """Suite of MongoDB related commands, see the commands help for more details."""
    # Configure logging *before* touching Mongo so that anything emitted during
    # the connection attempt (errors included) goes through the configured
    # structlog pipeline rather than being lost or unformatted.
    verbosity = Verbosity.DEBUG if verbose else Verbosity.INFO
    config_logging(verbosity, human_readable=False)
    ctx.ensure_object(dict)
    ctx.obj["mongo"] = MongoWrapper.connect(mongo_uri)
@cli.command()
@click.pass_context
def create_indexes(ctx: Context) -> None:
    """Initialize the mongo database with proper indexes."""
    mongo = ctx.obj["mongo"]
    # Creating an index is a no-op if it already exists, so this is idempotent.
    for setup_fn, get_collection in (
        (setup_queue_indexes, mongo.test_mappings_queue),
        (setup_queue_indexes, mongo.task_mappings_queue),
        (setup_mappings_indexes, mongo.test_mappings),
        (setup_mappings_indexes, mongo.task_mappings),
        (setup_mappings_test_files_indexes, mongo.test_mappings_test_files),
        (setup_mappings_tasks_indexes, mongo.task_mappings_tasks),
    ):
        setup_fn(get_collection())
def main() -> None:
    """Entry point for setting up selected-tests db indexes."""
    # auto_envvar_prefix lets every option also be supplied as a
    # SELECTED_TESTS_* environment variable.
    return cli(obj={}, auto_envvar_prefix="SELECTED_TESTS")
| 35.186916
| 98
| 0.724037
|
import click
import structlog
from click import Context
from miscutils.logging_config import Verbosity
from pymongo import ASCENDING, IndexModel
from pymongo.collection import Collection
from selectedtests.config.logging_config import config_logging
from selectedtests.datasource.mongo_wrapper import MongoWrapper
LOGGER = structlog.get_logger()
def setup_queue_indexes(collection: Collection) -> None:
    """
    Create appropriate indexes for ProjectTestMappingWorkItems.

    :param collection: Collection to add indexes to.
    """
    index = IndexModel([("project", ASCENDING)], unique=True)
    collection.create_indexes([index])
    LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_indexes(collection: Collection) -> None:
    """
    Create appropriate indexes for the test and task mappings collections.

    :param collection: Collection to add indexes to.
    """
    # project + source_file alone could be unique, but repo and branch are
    # needed when there is a module.
    index = IndexModel(
        [
            ("project", ASCENDING),
            ("repo", ASCENDING),
            ("branch", ASCENDING),
            ("source_file", ASCENDING),
        ],
        unique=True,
    )
    collection.create_indexes([index])
    LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_tasks_indexes(collection: Collection) -> None:
    """
    Create appropriate indexes for the mapping tasks collection.

    The indexes must support both the $lookup operation and uniqueness constraints.

    :param collection: Collection to add indexes to.
    """
    index = IndexModel(
        [("task_mapping_id", ASCENDING), ("name", ASCENDING), ("variant", ASCENDING)], unique=True
    )
    collection.create_indexes([index])
    LOGGER.info("Adding indexes for collection", collection=collection.name)
def setup_mappings_test_files_indexes(collection: Collection) -> None:
    """
    Create appropriate indexes for the mapping test files collection.

    The indexes must support both the $lookup operation and uniqueness constraints.

    :param collection: Collection to add indexes to.
    """
    index = IndexModel([("test_mapping_id", ASCENDING), ("name", ASCENDING)], unique=True)
    collection.create_indexes([index])
    LOGGER.info("Adding indexes for collection", collection=collection.name)
@click.group()
@click.option("--verbose", is_flag=True, default=False, help="Enable verbose logging.")
@click.option("--mongo-uri", required=True, type=str, help="Mongo URI to connect to.")
@click.pass_context
def cli(ctx: Context, verbose: bool, mongo_uri: str) -> None:
    """Suite of MongoDB related commands, see the commands help for more details."""
    ctx.ensure_object(dict)
    ctx.obj["mongo"] = MongoWrapper.connect(mongo_uri)
    # NOTE(review): logging is configured only *after* the Mongo connection is
    # made; anything logged during the connection attempt bypasses this config.
    verbosity = Verbosity.DEBUG if verbose else Verbosity.INFO
    config_logging(verbosity, human_readable=False)
@cli.command()
@click.pass_context
def create_indexes(ctx: Context) -> None:
    """Initialize the mongo database with proper indexes."""
    # Creating an index is a no-op if it already exists, so this is idempotent.
    setup_queue_indexes(ctx.obj["mongo"].test_mappings_queue())
    setup_queue_indexes(ctx.obj["mongo"].task_mappings_queue())
    setup_mappings_indexes(ctx.obj["mongo"].test_mappings())
    setup_mappings_indexes(ctx.obj["mongo"].task_mappings())
    setup_mappings_test_files_indexes(ctx.obj["mongo"].test_mappings_test_files())
    setup_mappings_tasks_indexes(ctx.obj["mongo"].task_mappings_tasks())
def main() -> None:
    """Entry point for setting up selected-tests db indexes."""
    # auto_envvar_prefix lets every option also be supplied as a
    # SELECTED_TESTS_* environment variable.
    return cli(obj={}, auto_envvar_prefix="SELECTED_TESTS")
| true
| true
|
f7077b3a0499cf10523ce9abd87eec6b3738fc49
| 11,466
|
py
|
Python
|
sdk/python/pulumi_azure_native/network/v20200701/route.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200701/route.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/network/v20200701/route.py
|
pulumi-bot/pulumi-azure-native
|
f7b9490b5211544318e455e5cceafe47b628e12c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['Route']
class Route(pulumi.CustomResource):
    """A route in an Azure route table (network API version 2020-07-01).

    NOTE: generated by the Pulumi SDK Generator — do not edit by hand.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 address_prefix: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 next_hop_ip_address: Optional[pulumi.Input[str]] = None,
                 next_hop_type: Optional[pulumi.Input[Union[str, 'RouteNextHopType']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 route_name: Optional[pulumi.Input[str]] = None,
                 route_table_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Route resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] address_prefix: The destination CIDR to which the route applies.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] name: The name of the resource that is unique within a resource group. This name can be used to access the resource.
        :param pulumi.Input[str] next_hop_ip_address: The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
        :param pulumi.Input[Union[str, 'RouteNextHopType']] next_hop_type: The type of Azure hop the packet should be sent to.
        :param pulumi.Input[str] resource_group_name: The name of the resource group.
        :param pulumi.Input[str] route_name: The name of the route.
        :param pulumi.Input[str] route_table_name: The name of the route table.
        """
        # Legacy calling conventions (__name__/__opts__) are still honoured but
        # deprecated in favour of resource_name/opts.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build the
            # property bag sent to the provider.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['address_prefix'] = address_prefix
            __props__['id'] = id
            __props__['name'] = name
            __props__['next_hop_ip_address'] = next_hop_ip_address
            if next_hop_type is None and not opts.urn:
                raise TypeError("Missing required property 'next_hop_type'")
            __props__['next_hop_type'] = next_hop_type
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['route_name'] = route_name
            if route_table_name is None and not opts.urn:
                raise TypeError("Missing required property 'route_table_name'")
            __props__['route_table_name'] = route_table_name
            # Output-only properties (populated by the service).
            __props__['etag'] = None
            __props__['provisioning_state'] = None
        # Aliases for every other API version of this resource type so that
        # existing stack state can migrate to this version without replacement.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200701:Route"), pulumi.Alias(type_="azure-native:network:Route"), pulumi.Alias(type_="azure-nextgen:network:Route"), pulumi.Alias(type_="azure-native:network/latest:Route"), pulumi.Alias(type_="azure-nextgen:network/latest:Route"), pulumi.Alias(type_="azure-native:network/v20150501preview:Route"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:Route"), pulumi.Alias(type_="azure-native:network/v20150615:Route"), pulumi.Alias(type_="azure-nextgen:network/v20150615:Route"), pulumi.Alias(type_="azure-native:network/v20160330:Route"), pulumi.Alias(type_="azure-nextgen:network/v20160330:Route"), pulumi.Alias(type_="azure-native:network/v20160601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20160601:Route"), pulumi.Alias(type_="azure-native:network/v20160901:Route"), pulumi.Alias(type_="azure-nextgen:network/v20160901:Route"), pulumi.Alias(type_="azure-native:network/v20161201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20161201:Route"), pulumi.Alias(type_="azure-native:network/v20170301:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170301:Route"), pulumi.Alias(type_="azure-native:network/v20170601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170601:Route"), pulumi.Alias(type_="azure-native:network/v20170801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170801:Route"), pulumi.Alias(type_="azure-native:network/v20170901:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170901:Route"), pulumi.Alias(type_="azure-native:network/v20171001:Route"), pulumi.Alias(type_="azure-nextgen:network/v20171001:Route"), pulumi.Alias(type_="azure-native:network/v20171101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20171101:Route"), pulumi.Alias(type_="azure-native:network/v20180101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180101:Route"), pulumi.Alias(type_="azure-native:network/v20180201:Route"), 
pulumi.Alias(type_="azure-nextgen:network/v20180201:Route"), pulumi.Alias(type_="azure-native:network/v20180401:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180401:Route"), pulumi.Alias(type_="azure-native:network/v20180601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180601:Route"), pulumi.Alias(type_="azure-native:network/v20180701:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180701:Route"), pulumi.Alias(type_="azure-native:network/v20180801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180801:Route"), pulumi.Alias(type_="azure-native:network/v20181001:Route"), pulumi.Alias(type_="azure-nextgen:network/v20181001:Route"), pulumi.Alias(type_="azure-native:network/v20181101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20181101:Route"), pulumi.Alias(type_="azure-native:network/v20181201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20181201:Route"), pulumi.Alias(type_="azure-native:network/v20190201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190201:Route"), pulumi.Alias(type_="azure-native:network/v20190401:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190401:Route"), pulumi.Alias(type_="azure-native:network/v20190601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190601:Route"), pulumi.Alias(type_="azure-native:network/v20190701:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190701:Route"), pulumi.Alias(type_="azure-native:network/v20190801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190801:Route"), pulumi.Alias(type_="azure-native:network/v20190901:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190901:Route"), pulumi.Alias(type_="azure-native:network/v20191101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20191101:Route"), pulumi.Alias(type_="azure-native:network/v20191201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20191201:Route"), pulumi.Alias(type_="azure-native:network/v20200301:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200301:Route"), 
pulumi.Alias(type_="azure-native:network/v20200401:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200401:Route"), pulumi.Alias(type_="azure-native:network/v20200501:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200501:Route"), pulumi.Alias(type_="azure-native:network/v20200601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200601:Route"), pulumi.Alias(type_="azure-native:network/v20200801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200801:Route")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Route, __self__).__init__(
            'azure-native:network/v20200701:Route',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Route':
        """
        Get an existing Route resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from provider state.
        __props__ = dict()
        __props__["address_prefix"] = None
        __props__["etag"] = None
        __props__["name"] = None
        __props__["next_hop_ip_address"] = None
        __props__["next_hop_type"] = None
        __props__["provisioning_state"] = None
        return Route(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> pulumi.Output[Optional[str]]:
        """
        The destination CIDR to which the route applies.
        """
        return pulumi.get(self, "address_prefix")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """
        A unique read-only string that changes whenever the resource is updated.
        """
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[Optional[str]]:
        """
        The name of the resource that is unique within a resource group. This name can be used to access the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nextHopIpAddress")
    def next_hop_ip_address(self) -> pulumi.Output[Optional[str]]:
        """
        The IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
        """
        return pulumi.get(self, "next_hop_ip_address")
    @property
    @pulumi.getter(name="nextHopType")
    def next_hop_type(self) -> pulumi.Output[str]:
        """
        The type of Azure hop the packet should be sent to.
        """
        return pulumi.get(self, "next_hop_type")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """
        The provisioning state of the route resource.
        """
        return pulumi.get(self, "provisioning_state")
    def translate_output_property(self, prop):
        # Provider outputs arrive camelCased; expose them as snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Snake_case Python inputs are sent to the provider as camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 70.343558
| 4,458
| 0.703646
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from ._enums import *
__all__ = ['Route']
class Route(pulumi.CustomResource):
    """A route in an Azure route table (network API version 2020-07-01).

    NOTE: generated code (Pulumi SDK Generator style); edits may be overwritten.
    """
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 address_prefix: Optional[pulumi.Input[str]] = None,
                 id: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 next_hop_ip_address: Optional[pulumi.Input[str]] = None,
                 next_hop_type: Optional[pulumi.Input[Union[str, 'RouteNextHopType']]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 route_name: Optional[pulumi.Input[str]] = None,
                 route_table_name: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Create a Route resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] address_prefix: The destination CIDR to which the route applies.
        :param pulumi.Input[str] id: Resource ID.
        :param pulumi.Input[str] name: Resource name, unique within the resource group.
        :param pulumi.Input[str] next_hop_ip_address: IP address packets should be forwarded to
            (only valid when the next hop type is VirtualAppliance).
        :param pulumi.Input[Union[str, 'RouteNextHopType']] next_hop_type: The type of Azure hop
            the packet should be sent to. Required.
        :param pulumi.Input[str] resource_group_name: The name of the resource group. Required.
        :param pulumi.Input[str] route_name: The name of the route.
        :param pulumi.Input[str] route_table_name: The name of the route table. Required.
        """
        # Legacy calling conventions (__name__/__opts__) are honoured but deprecated.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs and build the
            # property bag sent to the provider.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['address_prefix'] = address_prefix
            __props__['id'] = id
            __props__['name'] = name
            __props__['next_hop_ip_address'] = next_hop_ip_address
            if next_hop_type is None and not opts.urn:
                raise TypeError("Missing required property 'next_hop_type'")
            __props__['next_hop_type'] = next_hop_type
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            __props__['route_name'] = route_name
            if route_table_name is None and not opts.urn:
                raise TypeError("Missing required property 'route_table_name'")
            __props__['route_table_name'] = route_table_name
            # Output-only properties (populated by the service).
            __props__['etag'] = None
            __props__['provisioning_state'] = None
        # Aliases for every other API version of this resource type so that
        # existing stack state can migrate to this version without replacement.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200701:Route"), pulumi.Alias(type_="azure-native:network:Route"), pulumi.Alias(type_="azure-nextgen:network:Route"), pulumi.Alias(type_="azure-native:network/latest:Route"), pulumi.Alias(type_="azure-nextgen:network/latest:Route"), pulumi.Alias(type_="azure-native:network/v20150501preview:Route"), pulumi.Alias(type_="azure-nextgen:network/v20150501preview:Route"), pulumi.Alias(type_="azure-native:network/v20150615:Route"), pulumi.Alias(type_="azure-nextgen:network/v20150615:Route"), pulumi.Alias(type_="azure-native:network/v20160330:Route"), pulumi.Alias(type_="azure-nextgen:network/v20160330:Route"), pulumi.Alias(type_="azure-native:network/v20160601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20160601:Route"), pulumi.Alias(type_="azure-native:network/v20160901:Route"), pulumi.Alias(type_="azure-nextgen:network/v20160901:Route"), pulumi.Alias(type_="azure-native:network/v20161201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20161201:Route"), pulumi.Alias(type_="azure-native:network/v20170301:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170301:Route"), pulumi.Alias(type_="azure-native:network/v20170601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170601:Route"), pulumi.Alias(type_="azure-native:network/v20170801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170801:Route"), pulumi.Alias(type_="azure-native:network/v20170901:Route"), pulumi.Alias(type_="azure-nextgen:network/v20170901:Route"), pulumi.Alias(type_="azure-native:network/v20171001:Route"), pulumi.Alias(type_="azure-nextgen:network/v20171001:Route"), pulumi.Alias(type_="azure-native:network/v20171101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20171101:Route"), pulumi.Alias(type_="azure-native:network/v20180101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180101:Route"), pulumi.Alias(type_="azure-native:network/v20180201:Route"), 
pulumi.Alias(type_="azure-nextgen:network/v20180201:Route"), pulumi.Alias(type_="azure-native:network/v20180401:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180401:Route"), pulumi.Alias(type_="azure-native:network/v20180601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180601:Route"), pulumi.Alias(type_="azure-native:network/v20180701:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180701:Route"), pulumi.Alias(type_="azure-native:network/v20180801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20180801:Route"), pulumi.Alias(type_="azure-native:network/v20181001:Route"), pulumi.Alias(type_="azure-nextgen:network/v20181001:Route"), pulumi.Alias(type_="azure-native:network/v20181101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20181101:Route"), pulumi.Alias(type_="azure-native:network/v20181201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20181201:Route"), pulumi.Alias(type_="azure-native:network/v20190201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190201:Route"), pulumi.Alias(type_="azure-native:network/v20190401:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190401:Route"), pulumi.Alias(type_="azure-native:network/v20190601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190601:Route"), pulumi.Alias(type_="azure-native:network/v20190701:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190701:Route"), pulumi.Alias(type_="azure-native:network/v20190801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190801:Route"), pulumi.Alias(type_="azure-native:network/v20190901:Route"), pulumi.Alias(type_="azure-nextgen:network/v20190901:Route"), pulumi.Alias(type_="azure-native:network/v20191101:Route"), pulumi.Alias(type_="azure-nextgen:network/v20191101:Route"), pulumi.Alias(type_="azure-native:network/v20191201:Route"), pulumi.Alias(type_="azure-nextgen:network/v20191201:Route"), pulumi.Alias(type_="azure-native:network/v20200301:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200301:Route"), 
pulumi.Alias(type_="azure-native:network/v20200401:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200401:Route"), pulumi.Alias(type_="azure-native:network/v20200501:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200501:Route"), pulumi.Alias(type_="azure-native:network/v20200601:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200601:Route"), pulumi.Alias(type_="azure-native:network/v20200801:Route"), pulumi.Alias(type_="azure-nextgen:network/v20200801:Route")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Route, __self__).__init__(
            'azure-native:network/v20200701:Route',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Route':
        """
        Get an existing Route resource's state by name and provider id.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start as None; the engine fills them from provider state.
        __props__ = dict()
        __props__["address_prefix"] = None
        __props__["etag"] = None
        __props__["name"] = None
        __props__["next_hop_ip_address"] = None
        __props__["next_hop_type"] = None
        __props__["provisioning_state"] = None
        return Route(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="addressPrefix")
    def address_prefix(self) -> pulumi.Output[Optional[str]]:
        """The destination CIDR to which the route applies."""
        return pulumi.get(self, "address_prefix")
    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[str]:
        """A unique read-only string that changes whenever the resource is updated."""
        return pulumi.get(self, "etag")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[Optional[str]]:
        """Resource name, unique within the resource group."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nextHopIpAddress")
    def next_hop_ip_address(self) -> pulumi.Output[Optional[str]]:
        """IP address packets should be forwarded to (VirtualAppliance next hops only)."""
        return pulumi.get(self, "next_hop_ip_address")
    @property
    @pulumi.getter(name="nextHopType")
    def next_hop_type(self) -> pulumi.Output[str]:
        """The type of Azure hop the packet should be sent to."""
        return pulumi.get(self, "next_hop_type")
    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> pulumi.Output[str]:
        """The provisioning state of the route resource."""
        return pulumi.get(self, "provisioning_state")
    def translate_output_property(self, prop):
        # Provider outputs arrive camelCased; expose them as snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Snake_case Python inputs are sent to the provider as camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true
| true
|
f7077bba967cf6529a48e062c8de879f6a9a44de
| 10,325
|
py
|
Python
|
tensorflow_gan/examples/progressive_gan/networks_test.py
|
Ankuraxz/gan
|
b956c7d571539fd1053b3df3dddddbcbd27be65c
|
[
"Apache-2.0"
] | 2
|
2021-03-29T04:39:29.000Z
|
2021-05-14T05:51:40.000Z
|
tensorflow_gan/examples/progressive_gan/networks_test.py
|
Ankuraxz/gan
|
b956c7d571539fd1053b3df3dddddbcbd27be65c
|
[
"Apache-2.0"
] | 2
|
2020-08-18T20:47:45.000Z
|
2020-08-19T18:38:39.000Z
|
tensorflow_gan/examples/progressive_gan/networks_test.py
|
Ankuraxz/gan
|
b956c7d571539fd1053b3df3dddddbcbd27be65c
|
[
"Apache-2.0"
] | 1
|
2020-08-18T16:12:39.000Z
|
2020-08-18T16:12:39.000Z
|
# coding=utf-8
# Copyright 2020 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python2 python3
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_gan.examples.progressive_gan import layers
from tensorflow_gan.examples.progressive_gan import networks
def _get_grad_norm(ys, xs):
  """Return the 2-norm over all gradients of `ys` with respect to `xs`."""
  grads = tf.gradients(ys=ys, xs=xs)
  squared_sums = [tf.reduce_sum(input_tensor=tf.square(g)) for g in grads]
  return tf.sqrt(tf.add_n(squared_sums))
def _num_filters_stub(block_id):
  """Delegate to networks.num_filters with small fixed capacity constants."""
  small = 8
  return networks.num_filters(block_id, small, 1, small)
class NetworksTest(tf.test.TestCase):
  """Tests for progressive GAN network-construction utilities in `networks`."""

  def test_resolution_schedule_correct(self):
    """ResolutionSchedule exposes its config and scale factors per block."""
    rs = networks.ResolutionSchedule(
        start_resolutions=[5, 3], scale_base=2, num_resolutions=3)
    self.assertEqual(rs.start_resolutions, (5, 3))
    self.assertEqual(rs.scale_base, 2)
    self.assertEqual(rs.num_resolutions, 3)
    self.assertEqual(rs.final_resolutions, (20, 12))
    self.assertEqual(rs.scale_factor(1), 4)
    self.assertEqual(rs.scale_factor(2), 2)
    self.assertEqual(rs.scale_factor(3), 1)
    # Block ids outside [1, num_resolutions] are rejected.
    with self.assertRaises(ValueError):
      rs.scale_factor(0)
    with self.assertRaises(ValueError):
      rs.scale_factor(4)

  def test_block_name(self):
    """block_name formats the variable-scope name for a block id."""
    self.assertEqual(networks.block_name(10), 'progressive_gan_block_10')

  def test_min_total_num_images(self):
    """min_total_num_images computes the image budget for a full schedule."""
    self.assertEqual(networks.min_total_num_images(7, 8, 4), 52)

  def test_compute_progress(self):
    """compute_progress ramps from 0 to 1 across stable/transition stages."""
    if tf.executing_eagerly():
      progress_output = []
      for current_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]:
        progress = networks.compute_progress(
            current_image_id,
            stable_stage_num_images=7,
            transition_stage_num_images=8,
            num_blocks=2)
        with self.cached_session(use_gpu=True) as sess:
          progress_output.append(sess.run(progress))
    else:
      # Graph mode: feed image ids through a placeholder instead.
      current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])
      progress = networks.compute_progress(
          current_image_id_ph,
          stable_stage_num_images=7,
          transition_stage_num_images=8,
          num_blocks=2)
      with self.cached_session(use_gpu=True) as sess:
        progress_output = [
            sess.run(progress, feed_dict={current_image_id_ph: cur_image_id})
            for cur_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]
        ]
    self.assertArrayNear(progress_output,
                         [0.0, 0.0, 0.0, 0.0, 0.125, 0.375, 1.0, 1.0, 1.0],
                         1.0e-6)

  def test_generator_alpha(self):
    """_generator_alpha is piecewise-linear in progress and in block id."""
    with self.cached_session(use_gpu=True) as sess:
      alpha_fixed_block_id = [
          sess.run(
              networks._generator_alpha(2, tf.constant(progress, tf.float32)))
          for progress in [0, 0.2, 1, 1.2, 2, 2.2, 3]
      ]
      alpha_fixed_progress = [
          sess.run(
              networks._generator_alpha(block_id, tf.constant(1.2, tf.float32)))
          for block_id in range(1, 5)
      ]
    self.assertArrayNear(alpha_fixed_block_id, [0, 0.2, 1, 0.8, 0, 0, 0],
                         1.0e-6)
    self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 0.2, 0], 1.0e-6)

  def test_discriminator_alpha(self):
    """_discriminator_alpha saturates at 1 below its block, unlike generator."""
    with self.cached_session(use_gpu=True) as sess:
      alpha_fixed_block_id = [sess.run(networks._discriminator_alpha(
          2, tf.constant(progress, tf.float32))) for progress in
                              [0, 0.2, 1, 1.2, 2, 2.2, 3]]
      alpha_fixed_progress = [sess.run(networks._discriminator_alpha(
          block_id, tf.constant(1.2, tf.float32))) for block_id in range(1, 5)]
    self.assertArrayNear(alpha_fixed_block_id, [1, 1, 1, 0.8, 0, 0, 0], 1.0e-6)
    self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 1, 1], 1.0e-6)

  def test_blend_images_in_stable_stage(self):
    """At progress 0, blend_images equals a downscale-then-upscale of x."""
    x_np = np.random.normal(size=[2, 8, 8, 3])
    x = tf.constant(x_np, tf.float32)
    x_blend = networks.blend_images(
        x,
        progress=tf.constant(0.0),
        resolution_schedule=networks.ResolutionSchedule(
            scale_base=2, num_resolutions=2),
        num_blocks=2)
    with self.cached_session(use_gpu=True) as sess:
      x_blend_np = sess.run(x_blend)
      x_blend_expected_np = sess.run(layers.upscale(layers.downscale(x, 2), 2))
    self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)

  def test_blend_images_in_transition_stage(self):
    """At progress 0.2, blend_images mixes 80% low-res with 20% original."""
    x_np = np.random.normal(size=[2, 8, 8, 3])
    x = tf.constant(x_np, tf.float32)
    x_blend = networks.blend_images(
        x,
        tf.constant(0.2),
        resolution_schedule=networks.ResolutionSchedule(
            scale_base=2, num_resolutions=2),
        num_blocks=2)
    with self.cached_session(use_gpu=True) as sess:
      x_blend_np = sess.run(x_blend)
      x_blend_expected_np = 0.8 * sess.run(
          layers.upscale(layers.downscale(x, 2), 2)) + 0.2 * x_np
    self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)

  def test_num_filters(self):
    """num_filters decays with block id and is capped by the max argument."""
    self.assertEqual(networks.num_filters(1, 4096, 1, 256), 256)
    self.assertEqual(networks.num_filters(5, 4096, 1, 256), 128)

  def test_generator_grad_norm_progress(self):
    """Generator block gradients switch on as training progress advances."""
    if tf.executing_eagerly():
      # tf.placeholder() is not compatible with eager execution.
      return
    stable_stage_num_images = 2
    transition_stage_num_images = 3
    current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])
    progress = networks.compute_progress(
        current_image_id_ph,
        stable_stage_num_images,
        transition_stage_num_images,
        num_blocks=3)
    z = tf.random.normal([2, 10], dtype=tf.float32)
    x, _ = networks.generator(
        z, progress, _num_filters_stub,
        networks.ResolutionSchedule(
            start_resolutions=(4, 4), scale_base=2, num_resolutions=3))
    fake_loss = tf.reduce_sum(input_tensor=tf.square(x))
    grad_norms = [
        _get_grad_norm(
            fake_loss,
            tf.compat.v1.trainable_variables('.*/progressive_gan_block_1/.*')),
        _get_grad_norm(
            fake_loss,
            tf.compat.v1.trainable_variables('.*/progressive_gan_block_2/.*')),
        _get_grad_norm(
            fake_loss,
            tf.compat.v1.trainable_variables('.*/progressive_gan_block_3/.*'))
    ]
    grad_norms_output = None
    with self.cached_session(use_gpu=True) as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      # NOTE(review): float image ids fed to an int32 placeholder — TF casts
      # at feed time; presumably intentional to probe mid-stage values.
      x1_np = sess.run(x, feed_dict={current_image_id_ph: 0.12})
      x2_np = sess.run(x, feed_dict={current_image_id_ph: 1.8})
      grad_norms_output = np.array([
          sess.run(grad_norms, feed_dict={current_image_id_ph: i})
          for i in range(15)  # total num of images
      ])
    self.assertEqual((2, 16, 16, 3), x1_np.shape)
    self.assertEqual((2, 16, 16, 3), x2_np.shape)
    # The gradient of block_1 is always on.
    self.assertEqual(
        np.argmax(grad_norms_output[:, 0] > 0), 0,
        'gradient norms {} for block 1 is not always on'.format(
            grad_norms_output[:, 0]))
    # The gradient of block_2 is on after 1 stable stage.
    self.assertEqual(
        np.argmax(grad_norms_output[:, 1] > 0), 3,
        'gradient norms {} for block 2 is not on at step 3'.format(
            grad_norms_output[:, 1]))
    # The gradient of block_3 is on after 2 stable stage + 1 transition stage.
    self.assertEqual(
        np.argmax(grad_norms_output[:, 2] > 0), 8,
        'gradient norms {} for block 3 is not on at step 8'.format(
            grad_norms_output[:, 2]))

  def test_discriminator_grad_norm_progress(self):
    """Discriminator block gradients switch on as training progress advances."""
    if tf.executing_eagerly():
      # tf.placeholder() is not compatible with eager execution.
      return
    stable_stage_num_images = 2
    transition_stage_num_images = 3
    current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])
    progress = networks.compute_progress(
        current_image_id_ph,
        stable_stage_num_images,
        transition_stage_num_images,
        num_blocks=3)
    x = tf.random.normal([2, 16, 16, 3])
    logits, _ = networks.discriminator(
        x, progress, _num_filters_stub,
        networks.ResolutionSchedule(
            start_resolutions=(4, 4), scale_base=2, num_resolutions=3))
    fake_loss = tf.reduce_sum(input_tensor=tf.square(logits))
    grad_norms = [
        _get_grad_norm(
            fake_loss,
            tf.compat.v1.trainable_variables('.*/progressive_gan_block_1/.*')),
        _get_grad_norm(
            fake_loss,
            tf.compat.v1.trainable_variables('.*/progressive_gan_block_2/.*')),
        _get_grad_norm(
            fake_loss,
            tf.compat.v1.trainable_variables('.*/progressive_gan_block_3/.*'))
    ]
    grad_norms_output = None
    with self.cached_session(use_gpu=True) as sess:
      sess.run(tf.compat.v1.global_variables_initializer())
      grad_norms_output = np.array([
          sess.run(grad_norms, feed_dict={current_image_id_ph: i})
          for i in range(15)  # total num of images
      ])
    # The gradient of block_1 is always on.
    self.assertEqual(
        np.argmax(grad_norms_output[:, 0] > 0), 0,
        'gradient norms {} for block 1 is not always on'.format(
            grad_norms_output[:, 0]))
    # The gradient of block_2 is on after 1 stable stage.
    self.assertEqual(
        np.argmax(grad_norms_output[:, 1] > 0), 3,
        'gradient norms {} for block 2 is not on at step 3'.format(
            grad_norms_output[:, 1]))
    # The gradient of block_3 is on after 2 stable stage + 1 transition stage.
    self.assertEqual(
        np.argmax(grad_norms_output[:, 2] > 0), 8,
        'gradient norms {} for block 3 is not on at step 8'.format(
            grad_norms_output[:, 2]))
# Run the suite via the TensorFlow test runner when executed as a script.
if __name__ == '__main__':
  tf.test.main()
| 38.240741
| 80
| 0.657821
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_gan.examples.progressive_gan import layers
from tensorflow_gan.examples.progressive_gan import networks
def _get_grad_norm(ys, xs):
return tf.sqrt(
tf.add_n([
tf.reduce_sum(input_tensor=tf.square(g))
for g in tf.gradients(ys=ys, xs=xs)
]))
def _num_filters_stub(block_id):
return networks.num_filters(block_id, 8, 1, 8)
class NetworksTest(tf.test.TestCase):
def test_resolution_schedule_correct(self):
rs = networks.ResolutionSchedule(
start_resolutions=[5, 3], scale_base=2, num_resolutions=3)
self.assertEqual(rs.start_resolutions, (5, 3))
self.assertEqual(rs.scale_base, 2)
self.assertEqual(rs.num_resolutions, 3)
self.assertEqual(rs.final_resolutions, (20, 12))
self.assertEqual(rs.scale_factor(1), 4)
self.assertEqual(rs.scale_factor(2), 2)
self.assertEqual(rs.scale_factor(3), 1)
with self.assertRaises(ValueError):
rs.scale_factor(0)
with self.assertRaises(ValueError):
rs.scale_factor(4)
def test_block_name(self):
self.assertEqual(networks.block_name(10), 'progressive_gan_block_10')
def test_min_total_num_images(self):
self.assertEqual(networks.min_total_num_images(7, 8, 4), 52)
def test_compute_progress(self):
if tf.executing_eagerly():
progress_output = []
for current_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]:
progress = networks.compute_progress(
current_image_id,
stable_stage_num_images=7,
transition_stage_num_images=8,
num_blocks=2)
with self.cached_session(use_gpu=True) as sess:
progress_output.append(sess.run(progress))
else:
current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])
progress = networks.compute_progress(
current_image_id_ph,
stable_stage_num_images=7,
transition_stage_num_images=8,
num_blocks=2)
with self.cached_session(use_gpu=True) as sess:
progress_output = [
sess.run(progress, feed_dict={current_image_id_ph: cur_image_id})
for cur_image_id in [0, 3, 6, 7, 8, 10, 15, 29, 100]
]
self.assertArrayNear(progress_output,
[0.0, 0.0, 0.0, 0.0, 0.125, 0.375, 1.0, 1.0, 1.0],
1.0e-6)
def test_generator_alpha(self):
with self.cached_session(use_gpu=True) as sess:
alpha_fixed_block_id = [
sess.run(
networks._generator_alpha(2, tf.constant(progress, tf.float32)))
for progress in [0, 0.2, 1, 1.2, 2, 2.2, 3]
]
alpha_fixed_progress = [
sess.run(
networks._generator_alpha(block_id, tf.constant(1.2, tf.float32)))
for block_id in range(1, 5)
]
self.assertArrayNear(alpha_fixed_block_id, [0, 0.2, 1, 0.8, 0, 0, 0],
1.0e-6)
self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 0.2, 0], 1.0e-6)
def test_discriminator_alpha(self):
with self.cached_session(use_gpu=True) as sess:
alpha_fixed_block_id = [sess.run(networks._discriminator_alpha(
2, tf.constant(progress, tf.float32))) for progress in
[0, 0.2, 1, 1.2, 2, 2.2, 3]]
alpha_fixed_progress = [sess.run(networks._discriminator_alpha(
block_id, tf.constant(1.2, tf.float32))) for block_id in range(1, 5)]
self.assertArrayNear(alpha_fixed_block_id, [1, 1, 1, 0.8, 0, 0, 0], 1.0e-6)
self.assertArrayNear(alpha_fixed_progress, [0, 0.8, 1, 1], 1.0e-6)
def test_blend_images_in_stable_stage(self):
x_np = np.random.normal(size=[2, 8, 8, 3])
x = tf.constant(x_np, tf.float32)
x_blend = networks.blend_images(
x,
progress=tf.constant(0.0),
resolution_schedule=networks.ResolutionSchedule(
scale_base=2, num_resolutions=2),
num_blocks=2)
with self.cached_session(use_gpu=True) as sess:
x_blend_np = sess.run(x_blend)
x_blend_expected_np = sess.run(layers.upscale(layers.downscale(x, 2), 2))
self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)
def test_blend_images_in_transition_stage(self):
x_np = np.random.normal(size=[2, 8, 8, 3])
x = tf.constant(x_np, tf.float32)
x_blend = networks.blend_images(
x,
tf.constant(0.2),
resolution_schedule=networks.ResolutionSchedule(
scale_base=2, num_resolutions=2),
num_blocks=2)
with self.cached_session(use_gpu=True) as sess:
x_blend_np = sess.run(x_blend)
x_blend_expected_np = 0.8 * sess.run(
layers.upscale(layers.downscale(x, 2), 2)) + 0.2 * x_np
self.assertNDArrayNear(x_blend_np, x_blend_expected_np, 1.0e-6)
def test_num_filters(self):
self.assertEqual(networks.num_filters(1, 4096, 1, 256), 256)
self.assertEqual(networks.num_filters(5, 4096, 1, 256), 128)
def test_generator_grad_norm_progress(self):
if tf.executing_eagerly():
return
stable_stage_num_images = 2
transition_stage_num_images = 3
current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])
progress = networks.compute_progress(
current_image_id_ph,
stable_stage_num_images,
transition_stage_num_images,
num_blocks=3)
z = tf.random.normal([2, 10], dtype=tf.float32)
x, _ = networks.generator(
z, progress, _num_filters_stub,
networks.ResolutionSchedule(
start_resolutions=(4, 4), scale_base=2, num_resolutions=3))
fake_loss = tf.reduce_sum(input_tensor=tf.square(x))
grad_norms = [
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_1/.*')),
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_2/.*')),
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_3/.*'))
]
grad_norms_output = None
with self.cached_session(use_gpu=True) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
x1_np = sess.run(x, feed_dict={current_image_id_ph: 0.12})
x2_np = sess.run(x, feed_dict={current_image_id_ph: 1.8})
grad_norms_output = np.array([
sess.run(grad_norms, feed_dict={current_image_id_ph: i})
for i in range(15)
])
self.assertEqual((2, 16, 16, 3), x1_np.shape)
self.assertEqual((2, 16, 16, 3), x2_np.shape)
self.assertEqual(
np.argmax(grad_norms_output[:, 0] > 0), 0,
'gradient norms {} for block 1 is not always on'.format(
grad_norms_output[:, 0]))
self.assertEqual(
np.argmax(grad_norms_output[:, 1] > 0), 3,
'gradient norms {} for block 2 is not on at step 3'.format(
grad_norms_output[:, 1]))
self.assertEqual(
np.argmax(grad_norms_output[:, 2] > 0), 8,
'gradient norms {} for block 3 is not on at step 8'.format(
grad_norms_output[:, 2]))
def test_discriminator_grad_norm_progress(self):
if tf.executing_eagerly():
return
stable_stage_num_images = 2
transition_stage_num_images = 3
current_image_id_ph = tf.compat.v1.placeholder(tf.int32, [])
progress = networks.compute_progress(
current_image_id_ph,
stable_stage_num_images,
transition_stage_num_images,
num_blocks=3)
x = tf.random.normal([2, 16, 16, 3])
logits, _ = networks.discriminator(
x, progress, _num_filters_stub,
networks.ResolutionSchedule(
start_resolutions=(4, 4), scale_base=2, num_resolutions=3))
fake_loss = tf.reduce_sum(input_tensor=tf.square(logits))
grad_norms = [
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_1/.*')),
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_2/.*')),
_get_grad_norm(
fake_loss,
tf.compat.v1.trainable_variables('.*/progressive_gan_block_3/.*'))
]
grad_norms_output = None
with self.cached_session(use_gpu=True) as sess:
sess.run(tf.compat.v1.global_variables_initializer())
grad_norms_output = np.array([
sess.run(grad_norms, feed_dict={current_image_id_ph: i})
for i in range(15)
])
self.assertEqual(
np.argmax(grad_norms_output[:, 0] > 0), 0,
'gradient norms {} for block 1 is not always on'.format(
grad_norms_output[:, 0]))
self.assertEqual(
np.argmax(grad_norms_output[:, 1] > 0), 3,
'gradient norms {} for block 2 is not on at step 3'.format(
grad_norms_output[:, 1]))
self.assertEqual(
np.argmax(grad_norms_output[:, 2] > 0), 8,
'gradient norms {} for block 3 is not on at step 8'.format(
grad_norms_output[:, 2]))
if __name__ == '__main__':
tf.test.main()
| true
| true
|
f7077d34d2c10e6a2d7526ede57dfb4554d44700
| 13,018
|
py
|
Python
|
kubernetes/client/models/extensions_v1beta1_deployment_spec.py
|
Prahladk09/python-1
|
2dfb3035535e4be52ba549f1ff47acbe573b73f6
|
[
"Apache-2.0"
] | 11
|
2020-10-13T05:27:59.000Z
|
2021-09-23T02:56:32.000Z
|
kubernetes/client/models/extensions_v1beta1_deployment_spec.py
|
Prahladk09/python-1
|
2dfb3035535e4be52ba549f1ff47acbe573b73f6
|
[
"Apache-2.0"
] | 48
|
2020-10-15T09:53:36.000Z
|
2021-07-05T15:33:24.000Z
|
kubernetes/client/models/extensions_v1beta1_deployment_spec.py
|
Prahladk09/python-1
|
2dfb3035535e4be52ba549f1ff47acbe573b73f6
|
[
"Apache-2.0"
] | 4
|
2020-12-04T08:51:35.000Z
|
2022-03-27T09:42:20.000Z
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ExtensionsV1beta1DeploymentSpec(object):
    """ExtensionsV1beta1DeploymentSpec - a model defined in Swagger.

    NOTE: This class is auto generated by the swagger code generator program
    and then modernized to Python 3 idioms (``dict.items`` and comprehensions
    replace ``six.iteritems`` and ``map``/``lambda``). Behavior and the public
    interface are unchanged.
    """

    # Attribute name -> Swagger-declared attribute type.
    swagger_types = {
        'min_ready_seconds': 'int',
        'paused': 'bool',
        'progress_deadline_seconds': 'int',
        'replicas': 'int',
        'revision_history_limit': 'int',
        'rollback_to': 'ExtensionsV1beta1RollbackConfig',
        'selector': 'V1LabelSelector',
        'strategy': 'ExtensionsV1beta1DeploymentStrategy',
        'template': 'V1PodTemplateSpec'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'min_ready_seconds': 'minReadySeconds',
        'paused': 'paused',
        'progress_deadline_seconds': 'progressDeadlineSeconds',
        'replicas': 'replicas',
        'revision_history_limit': 'revisionHistoryLimit',
        'rollback_to': 'rollbackTo',
        'selector': 'selector',
        'strategy': 'strategy',
        'template': 'template'
    }

    def __init__(self, min_ready_seconds=None, paused=None, progress_deadline_seconds=None, replicas=None, revision_history_limit=None, rollback_to=None, selector=None, strategy=None, template=None):
        """Initialize the model.

        ``template`` is the only attribute assigned unconditionally; setting
        it to ``None`` raises ``ValueError`` (see the ``template`` setter).
        """
        self._min_ready_seconds = None
        self._paused = None
        self._progress_deadline_seconds = None
        self._replicas = None
        self._revision_history_limit = None
        self._rollback_to = None
        self._selector = None
        self._strategy = None
        self._template = None
        self.discriminator = None

        if min_ready_seconds is not None:
            self.min_ready_seconds = min_ready_seconds
        if paused is not None:
            self.paused = paused
        if progress_deadline_seconds is not None:
            self.progress_deadline_seconds = progress_deadline_seconds
        if replicas is not None:
            self.replicas = replicas
        if revision_history_limit is not None:
            self.revision_history_limit = revision_history_limit
        if rollback_to is not None:
            self.rollback_to = rollback_to
        if selector is not None:
            self.selector = selector
        if strategy is not None:
            self.strategy = strategy
        self.template = template

    @property
    def min_ready_seconds(self):
        """Minimum seconds a new pod must be ready (no container crash) to count as available. Defaults to 0.

        :rtype: int
        """
        return self._min_ready_seconds

    @min_ready_seconds.setter
    def min_ready_seconds(self, min_ready_seconds):
        """Set min_ready_seconds (int)."""
        self._min_ready_seconds = min_ready_seconds

    @property
    def paused(self):
        """Whether the deployment is paused and skipped by the deployment controller.

        :rtype: bool
        """
        return self._paused

    @paused.setter
    def paused(self, paused):
        """Set paused (bool)."""
        self._paused = paused

    @property
    def progress_deadline_seconds(self):
        """Max seconds for the deployment to make progress before it is considered failed.

        Defaults to max int32 (2147483647), i.e. "no deadline".

        :rtype: int
        """
        return self._progress_deadline_seconds

    @progress_deadline_seconds.setter
    def progress_deadline_seconds(self, progress_deadline_seconds):
        """Set progress_deadline_seconds (int)."""
        self._progress_deadline_seconds = progress_deadline_seconds

    @property
    def replicas(self):
        """Number of desired pods; pointer to distinguish explicit zero from unspecified. Defaults to 1.

        :rtype: int
        """
        return self._replicas

    @replicas.setter
    def replicas(self, replicas):
        """Set replicas (int)."""
        self._replicas = replicas

    @property
    def revision_history_limit(self):
        """Number of old ReplicaSets retained for rollback.

        Defaults to max int32 (2147483647), i.e. "retain all old ReplicaSets".

        :rtype: int
        """
        return self._revision_history_limit

    @revision_history_limit.setter
    def revision_history_limit(self, revision_history_limit):
        """Set revision_history_limit (int)."""
        self._revision_history_limit = revision_history_limit

    @property
    def rollback_to(self):
        """DEPRECATED. Config this deployment is rolling back to; cleared after rollback.

        :rtype: ExtensionsV1beta1RollbackConfig
        """
        return self._rollback_to

    @rollback_to.setter
    def rollback_to(self, rollback_to):
        """Set rollback_to (ExtensionsV1beta1RollbackConfig)."""
        self._rollback_to = rollback_to

    @property
    def selector(self):
        """Label selector for pods; selected ReplicaSets are affected by this deployment.

        :rtype: V1LabelSelector
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """Set selector (V1LabelSelector)."""
        self._selector = selector

    @property
    def strategy(self):
        """Deployment strategy used to replace existing pods with new ones.

        :rtype: ExtensionsV1beta1DeploymentStrategy
        """
        return self._strategy

    @strategy.setter
    def strategy(self, strategy):
        """Set strategy (ExtensionsV1beta1DeploymentStrategy)."""
        self._strategy = strategy

    @property
    def template(self):
        """Template describing the pods that will be created.

        :rtype: V1PodTemplateSpec
        """
        return self._template

    @template.setter
    def template(self, template):
        """Set template (V1PodTemplateSpec); required, must not be None."""
        if template is None:
            raise ValueError("Invalid value for `template`, must not be `None`")
        self._template = template

    def to_dict(self):
        """Return the model properties as a dict, recursing into nested models."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True if both objects are equal."""
        if not isinstance(other, ExtensionsV1beta1DeploymentSpec):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Return True if both objects are not equal."""
        return not self == other
| 36.982955
| 453
| 0.666308
|
from pprint import pformat
from six import iteritems
import re
class ExtensionsV1beta1DeploymentSpec(object):
swagger_types = {
'min_ready_seconds': 'int',
'paused': 'bool',
'progress_deadline_seconds': 'int',
'replicas': 'int',
'revision_history_limit': 'int',
'rollback_to': 'ExtensionsV1beta1RollbackConfig',
'selector': 'V1LabelSelector',
'strategy': 'ExtensionsV1beta1DeploymentStrategy',
'template': 'V1PodTemplateSpec'
}
attribute_map = {
'min_ready_seconds': 'minReadySeconds',
'paused': 'paused',
'progress_deadline_seconds': 'progressDeadlineSeconds',
'replicas': 'replicas',
'revision_history_limit': 'revisionHistoryLimit',
'rollback_to': 'rollbackTo',
'selector': 'selector',
'strategy': 'strategy',
'template': 'template'
}
def __init__(self, min_ready_seconds=None, paused=None, progress_deadline_seconds=None, replicas=None, revision_history_limit=None, rollback_to=None, selector=None, strategy=None, template=None):
self._min_ready_seconds = None
self._paused = None
self._progress_deadline_seconds = None
self._replicas = None
self._revision_history_limit = None
self._rollback_to = None
self._selector = None
self._strategy = None
self._template = None
self.discriminator = None
if min_ready_seconds is not None:
self.min_ready_seconds = min_ready_seconds
if paused is not None:
self.paused = paused
if progress_deadline_seconds is not None:
self.progress_deadline_seconds = progress_deadline_seconds
if replicas is not None:
self.replicas = replicas
if revision_history_limit is not None:
self.revision_history_limit = revision_history_limit
if rollback_to is not None:
self.rollback_to = rollback_to
if selector is not None:
self.selector = selector
if strategy is not None:
self.strategy = strategy
self.template = template
@property
def min_ready_seconds(self):
return self._min_ready_seconds
@min_ready_seconds.setter
def min_ready_seconds(self, min_ready_seconds):
self._min_ready_seconds = min_ready_seconds
@property
def paused(self):
return self._paused
@paused.setter
def paused(self, paused):
self._paused = paused
@property
def progress_deadline_seconds(self):
return self._progress_deadline_seconds
@progress_deadline_seconds.setter
def progress_deadline_seconds(self, progress_deadline_seconds):
self._progress_deadline_seconds = progress_deadline_seconds
@property
def replicas(self):
return self._replicas
@replicas.setter
def replicas(self, replicas):
self._replicas = replicas
@property
def revision_history_limit(self):
return self._revision_history_limit
@revision_history_limit.setter
def revision_history_limit(self, revision_history_limit):
self._revision_history_limit = revision_history_limit
@property
def rollback_to(self):
return self._rollback_to
@rollback_to.setter
def rollback_to(self, rollback_to):
self._rollback_to = rollback_to
@property
def selector(self):
return self._selector
@selector.setter
def selector(self, selector):
self._selector = selector
@property
def strategy(self):
return self._strategy
@strategy.setter
def strategy(self, strategy):
self._strategy = strategy
@property
def template(self):
return self._template
@template.setter
def template(self, template):
if template is None:
raise ValueError("Invalid value for `template`, must not be `None`")
self._template = template
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ExtensionsV1beta1DeploymentSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
f7077db102990be3657c8aaccdcd149bcdc1e042
| 978
|
py
|
Python
|
nomadgram/users/migrations/0002_auto_20171213_2310.py
|
BryanSHRyu/nomadgram
|
5ee49d1f384d5cc7fc9fc57d8518e9762019ee15
|
[
"MIT"
] | null | null | null |
nomadgram/users/migrations/0002_auto_20171213_2310.py
|
BryanSHRyu/nomadgram
|
5ee49d1f384d5cc7fc9fc57d8518e9762019ee15
|
[
"MIT"
] | 18
|
2020-06-05T16:48:54.000Z
|
2022-03-08T22:48:59.000Z
|
nomadgram/users/migrations/0002_auto_20171213_2310.py
|
bshryu/nomadgram
|
5ee49d1f384d5cc7fc9fc57d8518e9762019ee15
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2017-12-13 14:10
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='bio',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='user',
name='gender',
field=models.CharField(choices=[('male', 'Male'), ('female', 'Female'), ('not-specified', 'Not specified')], max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='phone',
field=models.CharField(max_length=140, null=True),
),
migrations.AddField(
model_name='user',
name='website',
field=models.URLField(null=True),
),
]
| 27.166667
| 147
| 0.551125
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='bio',
field=models.TextField(null=True),
),
migrations.AddField(
model_name='user',
name='gender',
field=models.CharField(choices=[('male', 'Male'), ('female', 'Female'), ('not-specified', 'Not specified')], max_length=80, null=True),
),
migrations.AddField(
model_name='user',
name='phone',
field=models.CharField(max_length=140, null=True),
),
migrations.AddField(
model_name='user',
name='website',
field=models.URLField(null=True),
),
]
| true
| true
|
f7077dd0a24962ac99cf887a355fd7e25b0c8fe9
| 475
|
py
|
Python
|
polityper/wsgi.py
|
kubasikora/WPAM-Projekt-Backend
|
09419b9d3b881c8e741ec86e985db0740f28c0c4
|
[
"MIT"
] | 1
|
2020-11-06T18:25:20.000Z
|
2020-11-06T18:25:20.000Z
|
polityper/wsgi.py
|
kubasikora/WPAM-Projekt-Backend
|
09419b9d3b881c8e741ec86e985db0740f28c0c4
|
[
"MIT"
] | 20
|
2020-11-07T00:10:16.000Z
|
2021-03-25T15:04:12.000Z
|
polityper/wsgi.py
|
kubasikora/WPAM-Projekt-Backend
|
09419b9d3b881c8e741ec86e985db0740f28c0c4
|
[
"MIT"
] | null | null | null |
"""
WSGI config for polityper project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from dotenv import load_dotenv
from django.core.wsgi import get_wsgi_application
load_dotenv(dotenv_path="prod.env", verbose=True)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'polityper.settings')
application = get_wsgi_application()
| 26.388889
| 78
| 0.795789
|
import os
from dotenv import load_dotenv
from django.core.wsgi import get_wsgi_application
load_dotenv(dotenv_path="prod.env", verbose=True)
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'polityper.settings')
application = get_wsgi_application()
| true
| true
|
f7077e6a47717ee76b83b0cd1f2ab5b0bc01b6e4
| 1,063
|
py
|
Python
|
assistant/products/urls.py
|
kapiak/ware_prod
|
ae61256890834c434d2e38cc2ccacf00b638665a
|
[
"MIT"
] | null | null | null |
assistant/products/urls.py
|
kapiak/ware_prod
|
ae61256890834c434d2e38cc2ccacf00b638665a
|
[
"MIT"
] | null | null | null |
assistant/products/urls.py
|
kapiak/ware_prod
|
ae61256890834c434d2e38cc2ccacf00b638665a
|
[
"MIT"
] | null | null | null |
from django.urls import path
from .views import (
product_orders_modal,
make_product_purchase,
allocate_product_to_order,
receive_product_stock,
ProductListView,
product_add_to_purchase,
product_search,
)
app_name = "products"
urlpatterns = [
path('', ProductListView.as_view(), name="product-list"),
path('search/', product_search, name="product-search"),
path(
'add-to-purchase/<uuid:guid>/',
product_add_to_purchase,
name="product-add-to-purchase",
),
path(
"product-orders-modal/<uuid:guid>/",
product_orders_modal,
name="product_orders_modal_workflow",
),
path(
"products/make-purchase/<uuid:guid>",
make_product_purchase,
name="make_product_purchase",
),
path(
"products/allocation/<uuid:guid>/",
allocate_product_to_order,
name="allocate_product_to_order",
),
path(
"products/receive/<uuid:guid>/",
receive_product_stock,
name="receive_product_stock",
),
]
| 24.159091
| 61
| 0.636877
|
from django.urls import path
from .views import (
product_orders_modal,
make_product_purchase,
allocate_product_to_order,
receive_product_stock,
ProductListView,
product_add_to_purchase,
product_search,
)
app_name = "products"
urlpatterns = [
path('', ProductListView.as_view(), name="product-list"),
path('search/', product_search, name="product-search"),
path(
'add-to-purchase/<uuid:guid>/',
product_add_to_purchase,
name="product-add-to-purchase",
),
path(
"product-orders-modal/<uuid:guid>/",
product_orders_modal,
name="product_orders_modal_workflow",
),
path(
"products/make-purchase/<uuid:guid>",
make_product_purchase,
name="make_product_purchase",
),
path(
"products/allocation/<uuid:guid>/",
allocate_product_to_order,
name="allocate_product_to_order",
),
path(
"products/receive/<uuid:guid>/",
receive_product_stock,
name="receive_product_stock",
),
]
| true
| true
|
f7077eff780853479ec63093b2da83cd37972231
| 1,645
|
py
|
Python
|
package/spack-perl-file-sharedir-install/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | 1
|
2018-07-17T07:45:09.000Z
|
2018-07-17T07:45:09.000Z
|
package/spack-perl-file-sharedir-install/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
package/spack-perl-file-sharedir-install/package.py
|
ctuning/ck-spack
|
307934efce1be2d4f104251275c82fbc70127105
|
[
"BSD-3-Clause"
] | null | null | null |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PerlFileSharedirInstall(PerlPackage):
"""Install shared files"""
homepage = "http://search.cpan.org/~ether/File-ShareDir-Install-0.11/lib/File/ShareDir/Install.pm"
url = "http://search.cpan.org/CPAN/authors/id/E/ET/ETHER/File-ShareDir-Install-0.11.tar.gz"
version('0.11', '61107e6ce6eee42bf29525b1a4d029e0')
depends_on('perl-module-build', type='build')
| 44.459459
| 102
| 0.682067
| true
| true
|
|
f7077f937e44a1c495e4cf9ba12d72a24f64ccef
| 963
|
py
|
Python
|
src/spacemapping_curve/combinations.py
|
wenqian157/spacemapping_curve
|
6acceb003dfd78b144f80fa8ba6b96956ba6b330
|
[
"MIT"
] | null | null | null |
src/spacemapping_curve/combinations.py
|
wenqian157/spacemapping_curve
|
6acceb003dfd78b144f80fa8ba6b96956ba6b330
|
[
"MIT"
] | null | null | null |
src/spacemapping_curve/combinations.py
|
wenqian157/spacemapping_curve
|
6acceb003dfd78b144f80fa8ba6b96956ba6b330
|
[
"MIT"
] | null | null | null |
class BooleanData:
def __init__(self, objects):
self.objects = list(objects)
@ property
def head(self):
return(self.objects[0])
@ property
def tail(self):
if len(self.objects) > 1:
return self.objects[1:]
else:
return []
class BooleanUnion(BooleanData):
def get_distance(self, pt):
loc_diss = []
for loc_obj in self.objects:
loc_diss.append(loc_obj.get_distance(pt))
return min(loc_diss)
class BooleanDifference(BooleanData):
def get_distance(self, pt):
loc_diss = [self.head.get_distance(pt)]
for loc_obj in self.tail:
loc_diss.append(- loc_obj.get_distance(pt))
return max(loc_diss)
class BooleanIntersection(BooleanData):
def get_distance(self, pt):
loc_diss = []
for loc_obj in self.objects:
loc_diss.append(loc_obj.get_distance(pt))
return max(loc_diss)
| 24.692308
| 55
| 0.610592
|
class BooleanData:
def __init__(self, objects):
self.objects = list(objects)
@ property
def head(self):
return(self.objects[0])
@ property
def tail(self):
if len(self.objects) > 1:
return self.objects[1:]
else:
return []
class BooleanUnion(BooleanData):
def get_distance(self, pt):
loc_diss = []
for loc_obj in self.objects:
loc_diss.append(loc_obj.get_distance(pt))
return min(loc_diss)
class BooleanDifference(BooleanData):
def get_distance(self, pt):
loc_diss = [self.head.get_distance(pt)]
for loc_obj in self.tail:
loc_diss.append(- loc_obj.get_distance(pt))
return max(loc_diss)
class BooleanIntersection(BooleanData):
def get_distance(self, pt):
loc_diss = []
for loc_obj in self.objects:
loc_diss.append(loc_obj.get_distance(pt))
return max(loc_diss)
| true
| true
|
f7077fba0abe383ff9dbf4784a50f8fffffe4ad7
| 666
|
py
|
Python
|
ansible-devel/test/integration/targets/cli/test_k_and_K.py
|
satishcarya/ansible
|
ed091e174c26316f621ac16344a95c99f56bdc43
|
[
"MIT"
] | null | null | null |
ansible-devel/test/integration/targets/cli/test_k_and_K.py
|
satishcarya/ansible
|
ed091e174c26316f621ac16344a95c99f56bdc43
|
[
"MIT"
] | null | null | null |
ansible-devel/test/integration/targets/cli/test_k_and_K.py
|
satishcarya/ansible
|
ed091e174c26316f621ac16344a95c99f56bdc43
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import pexpect
os.environ['ANSIBLE_NOCOLOR'] = '1'
out = pexpect.run(
'ansible -c ssh -i localhost, -u cliuser1 -e ansible_python_interpreter={0} '
'-m command -a whoami -Kkb --become-user cliuser2 localhost'.format(sys.argv[1]),
events={
'SSH password:': 'secretpassword\n',
'BECOME password': 'secretpassword\n',
},
timeout=10
)
print(out)
assert b'cliuser2' in out
| 23.785714
| 92
| 0.696697
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import sys
import pexpect
os.environ['ANSIBLE_NOCOLOR'] = '1'
out = pexpect.run(
'ansible -c ssh -i localhost, -u cliuser1 -e ansible_python_interpreter={0} '
'-m command -a whoami -Kkb --become-user cliuser2 localhost'.format(sys.argv[1]),
events={
'SSH password:': 'secretpassword\n',
'BECOME password': 'secretpassword\n',
},
timeout=10
)
print(out)
assert b'cliuser2' in out
| true
| true
|
f707802812c78956b7db89420facbe5e35dccd50
| 1,373
|
py
|
Python
|
parsers/disk.py
|
jazevedo620/wise-kubernetes
|
a39daa1bb4b742c974a43f3d5e44f6036d1d16ad
|
[
"Apache-2.0"
] | 1
|
2020-03-13T06:10:18.000Z
|
2020-03-13T06:10:18.000Z
|
parsers/disk.py
|
elba-kubernetes/experiment
|
a39daa1bb4b742c974a43f3d5e44f6036d1d16ad
|
[
"Apache-2.0"
] | 1
|
2020-09-18T20:14:38.000Z
|
2020-09-18T20:14:38.000Z
|
parsers/disk.py
|
elba-kubernetes/experiment
|
a39daa1bb4b742c974a43f3d5e44f6036d1d16ad
|
[
"Apache-2.0"
] | null | null | null |
import datetime
import sys
from collections import OrderedDict
class DiskEntry:
"""A disk entry."""
def __init__(self, read_in_kb, write_in_kb, timestamp):
"""Initialize a DiskEntry."""
self._read_in_kb = read_in_kb
self._write_in_kb = write_in_kb
self._timestamp = timestamp
def read_in_kb(self):
return self._read_in_kb
def write_in_kb(self):
return self._write_in_kb
def timestamp(self):
return self._timestamp
def main(iterator):
# List of timestamps and DiskEntry.
timestamps = []
disk_entries = OrderedDict()
# Process disk raw file.
for disk_line in iterator:
# Check if it is a comment.
if disk_line[0] == '#':
continue
disk_entry_data = disk_line.split()
timestamp = datetime.datetime.strptime(disk_entry_data[1], "%H:%M:%S.%f")
total_read_in_kb = 0
total_write_in_kb = 0
for disk_no in range((len(disk_entry_data) - 2) // 14):
total_read_in_kb += int(disk_entry_data[disk_no * 14 + 5])
total_write_in_kb += int(disk_entry_data[disk_no * 14 + 9])
# if len(disk_entries) < disk_no + 1:
# disk_entries.append(OrderedDict())
disk_entries[timestamp] = DiskEntry(total_read_in_kb, total_write_in_kb, timestamp)
return disk_entries
| 31.930233
| 91
| 0.638019
|
import datetime
import sys
from collections import OrderedDict
class DiskEntry:
def __init__(self, read_in_kb, write_in_kb, timestamp):
self._read_in_kb = read_in_kb
self._write_in_kb = write_in_kb
self._timestamp = timestamp
def read_in_kb(self):
return self._read_in_kb
def write_in_kb(self):
return self._write_in_kb
def timestamp(self):
return self._timestamp
def main(iterator):
timestamps = []
disk_entries = OrderedDict()
for disk_line in iterator:
if disk_line[0] == '#':
continue
disk_entry_data = disk_line.split()
timestamp = datetime.datetime.strptime(disk_entry_data[1], "%H:%M:%S.%f")
total_read_in_kb = 0
total_write_in_kb = 0
for disk_no in range((len(disk_entry_data) - 2) // 14):
total_read_in_kb += int(disk_entry_data[disk_no * 14 + 5])
total_write_in_kb += int(disk_entry_data[disk_no * 14 + 9])
disk_entries[timestamp] = DiskEntry(total_read_in_kb, total_write_in_kb, timestamp)
return disk_entries
| true
| true
|
f70782141243795c016e0d2561dc97be4a8bf135
| 12,442
|
py
|
Python
|
examples/speech_to_text/prep_covost_data.py
|
adrienxu/SATE
|
a932859287b2d3a944f7b0ae6670c84c98db7965
|
[
"MIT"
] | 11
|
2021-07-01T19:52:36.000Z
|
2022-03-09T09:33:47.000Z
|
examples/speech_to_text/prep_covost_data.py
|
adrienxu/SATE
|
a932859287b2d3a944f7b0ae6670c84c98db7965
|
[
"MIT"
] | null | null | null |
examples/speech_to_text/prep_covost_data.py
|
adrienxu/SATE
|
a932859287b2d3a944f7b0ae6670c84c98db7965
|
[
"MIT"
] | 1
|
2021-09-11T08:02:21.000Z
|
2021-09-11T08:02:21.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import string
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url, extract_archive
from tqdm import tqdm
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class CoVoST(Dataset):
"""Create a Dataset for CoVoST (https://github.com/facebookresearch/covost).
Args:
root (str): root path to the dataset and generated manifests/features
source_language (str): source (audio) language
target_language (str, optional): target (text) language,
None for no translation (default: None)
version (int, optional): CoVoST version. (default: 2)
download (bool, optional): Whether to download the dataset if it is not
found at root path. (default: ``False``).
"""
COVOST_URL_TEMPLATE = (
"https://dl.fbaipublicfiles.com/covost/"
"covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz"
)
VERSIONS = {2}
# SPLITS = ["train", "dev", "test"]
SPLITS = ["train"]
XX_EN_LANGUAGES = {
1: ["fr", "de", "nl", "ru", "es", "it", "tr", "fa", "sv-SE", "mn", "zh-CN"],
2: [
"fr",
"de",
"es",
"ca",
"it",
"ru",
"zh-CN",
"pt",
"fa",
"et",
"mn",
"nl",
"tr",
"ar",
"sv-SE",
"lv",
"sl",
"ta",
"ja",
"id",
"cy",
],
}
EN_XX_LANGUAGES = {
1: [],
2: [
"de",
"tr",
"fa",
"sv-SE",
"mn",
"zh-CN",
"cy",
"ca",
"sl",
"et",
"id",
"ar",
"ta",
"lv",
"ja",
],
}
def __init__(
self,
root: str,
split: str,
source_language: str,
target_language: Optional[str] = None,
version: int = 2,
) -> None:
assert version in self.VERSIONS and split in self.SPLITS
assert source_language is not None
self.no_translation = target_language is None
if not self.no_translation:
assert "en" in {source_language, target_language}
if source_language == "en":
assert target_language in self.EN_XX_LANGUAGES[version]
else:
assert source_language in self.XX_EN_LANGUAGES[version]
else:
# Hack here so that we can get "split" column from CoVoST TSV.
# Note that we use CoVoST train split for ASR which is an extension
# to Common Voice train split.
target_language = "de" if source_language == "en" else "en"
self.root: Path = Path(root)
cv_tsv_path = self.root / "validated.tsv"
assert cv_tsv_path.is_file()
cv_tsv = load_df_from_tsv(cv_tsv_path)
if self.no_translation:
print("No target translation.")
df = cv_tsv[["path", "sentence", "client_id"]]
df = df.set_index(["path"], drop=False)
else:
covost_url = self.COVOST_URL_TEMPLATE.format(
src_lang=source_language, tgt_lang=target_language
)
covost_archive = self.root / Path(covost_url).name
if not covost_archive.is_file():
download_url(covost_url, self.root.as_posix(), hash_value=None)
extract_archive(covost_archive.as_posix())
covost_tsv = load_df_from_tsv(
self.root / Path(covost_url).name.replace(".tar.gz", "")
)
df = pd.merge(
left=cv_tsv[["path", "sentence", "client_id"]],
right=covost_tsv[["path", "translation", "split"]],
how="inner",
on="path",
)
if split == "train":
df = df[(df["split"] == split) | (df["split"] == f"{split}_covost")]
else:
df = df[df["split"] == split]
data = df.to_dict(orient="index").items()
data = [v for k, v in sorted(data, key=lambda x: x[0])]
self.data = []
for e in data:
try:
path = self.root / "wav" / e["path"]
_ = torchaudio.info(path.as_posix())
self.data.append(e)
except RuntimeError:
pass
def __getitem__(
self, n: int
) -> Tuple[Path, int, int, str, str, str, str]:
"""Load the n-th sample from the dataset.
Args:
n (int): The index of the sample to be loaded
Returns:
tuple: ``(wav_path, sample_rate, n_frames, sentence, translation, speaker_id,
sample_id)``
"""
data = self.data[n]
path = self.root / "wav" / data["path"]
info = torchaudio.info(path)
sample_rate = info.sample_rate
n_frames = info.num_frames
sentence = data["sentence"]
translation = None if self.no_translation else data["translation"]
speaker_id = data["client_id"]
_id = data["path"].replace(".mp3", "")
return path, sample_rate, n_frames, sentence, translation, speaker_id, _id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute() / args.src_lang
output_root = Path(args.output_root).absolute()
if args.tgt_lang is not None:
output_root = output_root / f"{args.src_lang}-{args.tgt_lang}"
else:
output_root = output_root / f"{args.src_lang}"
if not root.is_dir():
raise NotADirectoryError(f"{root} does not exist")
zip_path = output_root / "fbank80.zip"
if not zip_path.exists():
# Extract features
feature_root = output_root / "fbank80"
feature_root.mkdir(exist_ok=True)
for split in CoVoST.SPLITS:
print(f"Fetching split {split}...")
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
print("Extracting log mel filter bank features...")
for wav_path, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
waveform, sample_rate = torchaudio.load(wav_path)
extract_fbank_features(
waveform, sample_rate, feature_root / f"{utt_id}.npy"
)
# Pack features into ZIP
print("ZIPing features...")
create_zip(feature_root, zip_path)
# # Clean up
# shutil.rmtree(feature_root)
print("Fetching ZIP manifest...")
zip_manifest = get_zip_manifest(zip_path)
# Generate TSV manifest
print("Generating manifest...")
train_text = []
task = args.task
# if args.tgt_lang is not None:
# task = f"st_{args.src_lang}_{args.tgt_lang}"
for split in CoVoST.SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
if args.task == "st" and args.add_src:
manifest["src_text"] = []
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
for _, sr, n_frames, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(zip_manifest[utt_id])
duration_ms = int(n_frames / sr * 1000)
manifest["n_frames"].append(int(1 + (duration_ms - 25) / 10))
if args.lowercase_src:
src_utt = src_utt.lower()
if args.rm_punc_src:
for w in string.punctuation:
src_utt = src_utt.replace(w, "")
src_utt = src_utt.replace(" ", "")
manifest["tgt_text"].append(src_utt if args.tgt_lang is None else tgt_utt)
if args.task == "st" and args.add_src:
manifest["src_text"].append(src_utt)
manifest["speaker"].append(speaker_id)
is_train_split = split.startswith("train")
if is_train_split:
if args.task == "st" and args.add_src and args.share:
train_text.extend(manifest["src_text"])
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, output_root / f"{split}_{task}.tsv")
# Generate vocab
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{task}"
asr_spm_filename = None
gen_vocab_flag = True
if args.task == "st" and args.add_src:
if args.share:
if args.st_spm_prefix is not None:
gen_vocab_flag = False
spm_filename_prefix = args.st_spm_prefix
else:
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}_share"
asr_spm_filename = spm_filename_prefix + ".model"
else:
if args.st_spm_prefix is not None:
gen_vocab_flag = False
spm_filename_prefix = args.st_spm_prefix
assert args.asr_prefix is not None
asr_spm_filename = args.asr_prefix + ".model"
elif args.task == "asr":
if args.asr_prefix is not None:
gen_vocab_flag = False
spm_filename_prefix = args.asr_prefix
if gen_vocab_flag:
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
output_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size
)
# Generate config YAML
gen_config_yaml(
output_root,
spm_filename_prefix + ".model",
yaml_filename=f"config_{task}.yaml",
specaugment_policy="lb",
cmvn_type=args.cmvn_type,
asr_spm_filename=asr_spm_filename,
share_src_and_tgt=True if args.task == "asr" else False
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data-root", "-d", required=True, type=str,
help="data root with sub-folders for each language <root>/<src_lang>"
)
parser.add_argument(
"--output-root", "-o", required=True, type=str,
help="output root to save the results"
)
parser.add_argument(
"--vocab-type",
default="unigram",
required=True,
type=str,
choices=["bpe", "unigram", "char"],
),
parser.add_argument("--vocab-size", default=1000, type=int)
parser.add_argument("--src-lang", "-s", required=True, type=str)
parser.add_argument("--task", type=str, default="asr", choices=["asr", "st"])
parser.add_argument("--tgt-lang", "-t", type=str)
parser.add_argument("--share", action="store_true",
help="share the tokenizer and dictionary of the transcription and translation")
parser.add_argument("--add-src", action="store_true", help="add the src text for st task")
parser.add_argument("--asr-prefix", type=str, help="prefix of the asr dict")
parser.add_argument("--st-spm-prefix", type=str, default=None, help="prefix of the existing st dict")
parser.add_argument("--lowercase-src", action="store_true", help="lowercase the source text")
parser.add_argument("--rm-punc-src", action="store_true", help="remove the punctuation of the source text")
parser.add_argument("--cmvn-type", default="utterance",
choices=["global", "utterance"],
help="The type of cepstral mean and variance normalization")
args = parser.parse_args()
process(args)
if __name__ == "__main__":
main()
| 34.949438
| 111
| 0.571371
|
import argparse
import logging
from pathlib import Path
import shutil
from tempfile import NamedTemporaryFile
from typing import Optional, Tuple
import string
import pandas as pd
import torchaudio
from examples.speech_to_text.data_utils import (
create_zip,
extract_fbank_features,
filter_manifest_df,
gen_config_yaml,
gen_vocab,
get_zip_manifest,
load_df_from_tsv,
save_df_to_tsv,
)
from torch import Tensor
from torch.utils.data import Dataset
from torchaudio.datasets.utils import download_url, extract_archive
from tqdm import tqdm
log = logging.getLogger(__name__)
MANIFEST_COLUMNS = ["id", "audio", "n_frames", "tgt_text", "speaker"]
class CoVoST(Dataset):
COVOST_URL_TEMPLATE = (
"https://dl.fbaipublicfiles.com/covost/"
"covost_v2.{src_lang}_{tgt_lang}.tsv.tar.gz"
)
VERSIONS = {2}
SPLITS = ["train"]
XX_EN_LANGUAGES = {
1: ["fr", "de", "nl", "ru", "es", "it", "tr", "fa", "sv-SE", "mn", "zh-CN"],
2: [
"fr",
"de",
"es",
"ca",
"it",
"ru",
"zh-CN",
"pt",
"fa",
"et",
"mn",
"nl",
"tr",
"ar",
"sv-SE",
"lv",
"sl",
"ta",
"ja",
"id",
"cy",
],
}
EN_XX_LANGUAGES = {
1: [],
2: [
"de",
"tr",
"fa",
"sv-SE",
"mn",
"zh-CN",
"cy",
"ca",
"sl",
"et",
"id",
"ar",
"ta",
"lv",
"ja",
],
}
def __init__(
self,
root: str,
split: str,
source_language: str,
target_language: Optional[str] = None,
version: int = 2,
) -> None:
assert version in self.VERSIONS and split in self.SPLITS
assert source_language is not None
self.no_translation = target_language is None
if not self.no_translation:
assert "en" in {source_language, target_language}
if source_language == "en":
assert target_language in self.EN_XX_LANGUAGES[version]
else:
assert source_language in self.XX_EN_LANGUAGES[version]
else:
target_language = "de" if source_language == "en" else "en"
self.root: Path = Path(root)
cv_tsv_path = self.root / "validated.tsv"
assert cv_tsv_path.is_file()
cv_tsv = load_df_from_tsv(cv_tsv_path)
if self.no_translation:
print("No target translation.")
df = cv_tsv[["path", "sentence", "client_id"]]
df = df.set_index(["path"], drop=False)
else:
covost_url = self.COVOST_URL_TEMPLATE.format(
src_lang=source_language, tgt_lang=target_language
)
covost_archive = self.root / Path(covost_url).name
if not covost_archive.is_file():
download_url(covost_url, self.root.as_posix(), hash_value=None)
extract_archive(covost_archive.as_posix())
covost_tsv = load_df_from_tsv(
self.root / Path(covost_url).name.replace(".tar.gz", "")
)
df = pd.merge(
left=cv_tsv[["path", "sentence", "client_id"]],
right=covost_tsv[["path", "translation", "split"]],
how="inner",
on="path",
)
if split == "train":
df = df[(df["split"] == split) | (df["split"] == f"{split}_covost")]
else:
df = df[df["split"] == split]
data = df.to_dict(orient="index").items()
data = [v for k, v in sorted(data, key=lambda x: x[0])]
self.data = []
for e in data:
try:
path = self.root / "wav" / e["path"]
_ = torchaudio.info(path.as_posix())
self.data.append(e)
except RuntimeError:
pass
def __getitem__(
self, n: int
) -> Tuple[Path, int, int, str, str, str, str]:
data = self.data[n]
path = self.root / "wav" / data["path"]
info = torchaudio.info(path)
sample_rate = info.sample_rate
n_frames = info.num_frames
sentence = data["sentence"]
translation = None if self.no_translation else data["translation"]
speaker_id = data["client_id"]
_id = data["path"].replace(".mp3", "")
return path, sample_rate, n_frames, sentence, translation, speaker_id, _id
def __len__(self) -> int:
return len(self.data)
def process(args):
root = Path(args.data_root).absolute() / args.src_lang
output_root = Path(args.output_root).absolute()
if args.tgt_lang is not None:
output_root = output_root / f"{args.src_lang}-{args.tgt_lang}"
else:
output_root = output_root / f"{args.src_lang}"
if not root.is_dir():
raise NotADirectoryError(f"{root} does not exist")
zip_path = output_root / "fbank80.zip"
if not zip_path.exists():
feature_root = output_root / "fbank80"
feature_root.mkdir(exist_ok=True)
for split in CoVoST.SPLITS:
print(f"Fetching split {split}...")
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
print("Extracting log mel filter bank features...")
for wav_path, sample_rate, _, _, _, _, utt_id in tqdm(dataset):
waveform, sample_rate = torchaudio.load(wav_path)
extract_fbank_features(
waveform, sample_rate, feature_root / f"{utt_id}.npy"
)
print("ZIPing features...")
create_zip(feature_root, zip_path)
print("Fetching ZIP manifest...")
zip_manifest = get_zip_manifest(zip_path)
print("Generating manifest...")
train_text = []
task = args.task
for split in CoVoST.SPLITS:
manifest = {c: [] for c in MANIFEST_COLUMNS}
if args.task == "st" and args.add_src:
manifest["src_text"] = []
dataset = CoVoST(root, split, args.src_lang, args.tgt_lang)
for _, sr, n_frames, src_utt, tgt_utt, speaker_id, utt_id in tqdm(dataset):
manifest["id"].append(utt_id)
manifest["audio"].append(zip_manifest[utt_id])
duration_ms = int(n_frames / sr * 1000)
manifest["n_frames"].append(int(1 + (duration_ms - 25) / 10))
if args.lowercase_src:
src_utt = src_utt.lower()
if args.rm_punc_src:
for w in string.punctuation:
src_utt = src_utt.replace(w, "")
src_utt = src_utt.replace(" ", "")
manifest["tgt_text"].append(src_utt if args.tgt_lang is None else tgt_utt)
if args.task == "st" and args.add_src:
manifest["src_text"].append(src_utt)
manifest["speaker"].append(speaker_id)
is_train_split = split.startswith("train")
if is_train_split:
if args.task == "st" and args.add_src and args.share:
train_text.extend(manifest["src_text"])
train_text.extend(manifest["tgt_text"])
df = pd.DataFrame.from_dict(manifest)
df = filter_manifest_df(df, is_train_split=is_train_split)
save_df_to_tsv(df, output_root / f"{split}_{task}.tsv")
v_size_str = "" if args.vocab_type == "char" else str(args.vocab_size)
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{task}"
asr_spm_filename = None
gen_vocab_flag = True
if args.task == "st" and args.add_src:
if args.share:
if args.st_spm_prefix is not None:
gen_vocab_flag = False
spm_filename_prefix = args.st_spm_prefix
else:
spm_filename_prefix = f"spm_{args.vocab_type}{v_size_str}_{args.task}_share"
asr_spm_filename = spm_filename_prefix + ".model"
else:
if args.st_spm_prefix is not None:
gen_vocab_flag = False
spm_filename_prefix = args.st_spm_prefix
assert args.asr_prefix is not None
asr_spm_filename = args.asr_prefix + ".model"
elif args.task == "asr":
if args.asr_prefix is not None:
gen_vocab_flag = False
spm_filename_prefix = args.asr_prefix
if gen_vocab_flag:
with NamedTemporaryFile(mode="w") as f:
for t in train_text:
f.write(t + "\n")
gen_vocab(
Path(f.name),
output_root / spm_filename_prefix,
args.vocab_type,
args.vocab_size
)
gen_config_yaml(
output_root,
spm_filename_prefix + ".model",
yaml_filename=f"config_{task}.yaml",
specaugment_policy="lb",
cmvn_type=args.cmvn_type,
asr_spm_filename=asr_spm_filename,
share_src_and_tgt=True if args.task == "asr" else False
)
def main():
    """Parse command-line arguments and run the dataset preparation pipeline."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--data-root", "-d", required=True, type=str,
        help="data root with sub-folders for each language <root>/<src_lang>"
    )
    parser.add_argument(
        "--output-root", "-o", required=True, type=str,
        help="output root to save the results"
    )
    # NOTE(review): ``default`` is dead code while ``required=True`` is set;
    # kept so the visible CLI surface is unchanged.
    parser.add_argument(
        "--vocab-type",
        default="unigram",
        required=True,
        type=str,
        choices=["bpe", "unigram", "char"],
    )  # fixed: removed stray trailing comma that built a useless 1-tuple
    parser.add_argument("--vocab-size", default=1000, type=int)
    parser.add_argument("--src-lang", "-s", required=True, type=str)
    parser.add_argument("--task", type=str, default="asr", choices=["asr", "st"])
    parser.add_argument("--tgt-lang", "-t", type=str)
    parser.add_argument("--share", action="store_true",
                        help="share the tokenizer and dictionary of the transcription and translation")
    parser.add_argument("--add-src", action="store_true", help="add the src text for st task")
    parser.add_argument("--asr-prefix", type=str, help="prefix of the asr dict")
    parser.add_argument("--st-spm-prefix", type=str, default=None, help="prefix of the existing st dict")
    parser.add_argument("--lowercase-src", action="store_true", help="lowercase the source text")
    parser.add_argument("--rm-punc-src", action="store_true", help="remove the punctuation of the source text")
    parser.add_argument("--cmvn-type", default="utterance",
                        choices=["global", "utterance"],
                        help="The type of cepstral mean and variance normalization")
    args = parser.parse_args()
    process(args)


if __name__ == "__main__":
    main()
| true
| true
|
f70782746902e562e89870e2bce98b502c29402b
| 2,205
|
py
|
Python
|
test/posix/integration/udp/test.py
|
AndreasAakesson/IncludeOS
|
891b960a0a7473c08cd0d93a2bba7569c6d88b48
|
[
"Apache-2.0"
] | null | null | null |
test/posix/integration/udp/test.py
|
AndreasAakesson/IncludeOS
|
891b960a0a7473c08cd0d93a2bba7569c6d88b48
|
[
"Apache-2.0"
] | 1
|
2016-04-03T16:24:09.000Z
|
2016-04-03T16:24:09.000Z
|
test/posix/integration/udp/test.py
|
AndreasAakesson/IncludeOS
|
891b960a0a7473c08cd0d93a2bba7569c6d88b48
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
import sys
import os
import subprocess
import atexit
from vmrunner import vmrunner
vm = vmrunner.vms[0]
import socket
# Set up a temporary interface
import platform
if platform.system() == 'Darwin':
subprocess.call(["sudo", "ifconfig", "bridge43", "alias", "10.0.0.3/24"])
else:
subprocess.call(["sudo", "ip", "addr", "add", "10.0.0.3/24", "dev", "bridge43", "label", "bridge43:1"])
# Tear down interface on exit
@atexit.register
def tear_down():
    """Remove the temporary 10.0.0.3 alias from bridge43 at interpreter exit."""
    if platform.system() == 'Darwin':
        cmd = ["sudo", "ifconfig", "bridge43", "-alias", "10.0.0.3"]
    else:
        cmd = ["sudo", "ip", "addr", "del", "10.0.0.3/24", "dev", "bridge43", "label", "bridge43:1"]
    subprocess.call(cmd)
S_HOST, S_PORT = '10.0.0.3', 4242
S_MESSAGE = "Only hipsters uses POSIX"
server = socket.socket
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind((S_HOST, S_PORT))
HOST, PORT = '10.0.0.58', 1042
RECEIVED = ''
def UDP_send(trigger_line):
    """Fire a single datagram at the VM once it reports recvfrom()."""
    payload = "POSIX is for hipsters".encode()
    sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    # Bind one port above the listening server so the source differs.
    sender.bind((S_HOST, S_PORT + 1))
    sender.sendto(payload, (HOST, PORT))
def UDP_recv():
    """Background-thread receiver: stash the next datagram in RECEIVED.

    Fixed: the assignment previously created a dead local, so the payload
    never reached the module-level RECEIVED that verify_recv() inspects.
    """
    global RECEIVED
    RECEIVED = server.recv(1024)
def verify_recv(trigger_line):
    """Check that the receiver thread saw the expected message, then reset.

    Fixed two defects: (1) assigning RECEIVED without a ``global``
    declaration made it function-local, so reading it first raised
    UnboundLocalError; (2) ``socket.recv`` yields bytes, which never
    compared equal to the str S_MESSAGE — decode before comparing.
    """
    global RECEIVED
    received = RECEIVED.decode() if isinstance(RECEIVED, bytes) else RECEIVED
    ok = received == S_MESSAGE
    RECEIVED = ''
    return ok
def UDP_send_much(trigger_line):
    """Stream five numbered datagrams to the VM over a connected UDP socket."""
    prefix = "Message #"
    conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    conn.connect((HOST, PORT))
    for n in range(5):
        datagram = (prefix + repr(n)).encode()
        conn.send(datagram)
        print("Sending {}".format(datagram))
import _thread
_thread.start_new_thread(UDP_recv, ())
# Add custom event-handler
vm.on_output("recvfrom()", UDP_send)
vm.on_output("sendto() called", verify_recv)
vm.on_output("sendto() called", verify_recv)
vm.on_output("reading from buffer", UDP_send_much)
# Boot the VM, taking a timeout as parameter
if len(sys.argv) > 1:
vm.boot(image_name=str(sys.argv[1]))
else:
vm.cmake().boot(10,image_name='posix_udp').clean()
| 25.344828
| 111
| 0.693424
|
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import range
import sys
import os
import subprocess
import atexit
from vmrunner import vmrunner
vm = vmrunner.vms[0]
import socket
import platform
if platform.system() == 'Darwin':
subprocess.call(["sudo", "ifconfig", "bridge43", "alias", "10.0.0.3/24"])
else:
subprocess.call(["sudo", "ip", "addr", "add", "10.0.0.3/24", "dev", "bridge43", "label", "bridge43:1"])
@atexit.register
def tear_down():
if platform.system() == 'Darwin':
subprocess.call(["sudo", "ifconfig", "bridge43", "-alias", "10.0.0.3"])
else:
subprocess.call(["sudo", "ip", "addr", "del", "10.0.0.3/24", "dev", "bridge43", "label", "bridge43:1"])
S_HOST, S_PORT = '10.0.0.3', 4242
S_MESSAGE = "Only hipsters uses POSIX"
server = socket.socket
server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server.bind((S_HOST, S_PORT))
HOST, PORT = '10.0.0.58', 1042
RECEIVED = ''
def UDP_send(trigger_line):
MESSAGE = str.encode("POSIX is for hipsters")
sock = socket.socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((S_HOST, S_PORT + 1))
sock.sendto(MESSAGE, (HOST, PORT))
def UDP_recv():
RECEIVED = server.recv(1024)
def verify_recv(trigger_line):
ok = RECEIVED == S_MESSAGE
RECEIVED = ''
return ok
def UDP_send_much(trigger_line):
MESSAGE = "Message #"
sock = socket.socket
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((HOST, PORT))
for i in range(0, 5):
msg = str.encode(MESSAGE + repr(i))
sock.send(msg)
print("Sending {}".format(msg))
import _thread
_thread.start_new_thread(UDP_recv, ())
vm.on_output("recvfrom()", UDP_send)
vm.on_output("sendto() called", verify_recv)
vm.on_output("sendto() called", verify_recv)
vm.on_output("reading from buffer", UDP_send_much)
if len(sys.argv) > 1:
vm.boot(image_name=str(sys.argv[1]))
else:
vm.cmake().boot(10,image_name='posix_udp').clean()
| true
| true
|
f7078323428be9f40d3247f9d5889a1af4f5f73c
| 2,771
|
py
|
Python
|
python3/koans/about_string_manipulation.py
|
leahein/python_koans
|
382d6b10397df22202af2402c93979d588c0093c
|
[
"MIT"
] | null | null | null |
python3/koans/about_string_manipulation.py
|
leahein/python_koans
|
382d6b10397df22202af2402c93979d588c0093c
|
[
"MIT"
] | null | null | null |
python3/koans/about_string_manipulation.py
|
leahein/python_koans
|
382d6b10397df22202af2402c93979d588c0093c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStringManipulation(Koan):
    """Koans exploring formatting, slicing, splitting and case-folding of strings."""

    def test_use_format_to_interpolate_variables(self):
        first, second = 'one', 2
        rendered = "The values are {0} and {1}".format(first, second)
        self.assertEqual("The values are one and 2", rendered)

    def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
        lower, upper = 'doh', 'DOH'
        rendered = "The values are {1}, {0}, {0} and {1}!".format(lower, upper)
        self.assertEqual("The values are DOH, doh, doh and DOH!", rendered)

    def test_any_python_expression_may_be_interpolated(self):
        import math  # standard-library math functions
        precision = 4
        rendered = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5), precision)
        self.assertEqual("The square root of 5 is 2.2361", rendered)

    def test_you_can_get_a_substring_from_a_string(self):
        sandwich = "Bacon, lettuce and tomato"
        self.assertEqual("let", sandwich[7:10])

    def test_you_can_get_a_single_character_from_a_string(self):
        sandwich = "Bacon, lettuce and tomato"
        self.assertEqual('a', sandwich[1])

    def test_single_characters_can_be_represented_by_integers(self):
        self.assertEqual(97, ord('a'))
        self.assertEqual(True, ord('b') == (ord('a') + 1))

    def test_strings_can_be_split(self):
        breakfast = "Sausage Egg Cheese"
        self.assertListEqual(["Sausage", "Egg", "Cheese"], breakfast.split())

    def test_strings_can_be_split_with_different_patterns(self):
        import re  # standard-library regular expressions
        weather = "the,rain;in,spain"
        # The pattern matches either ',' or ';' as a separator.
        splitter = re.compile(',|;')
        self.assertListEqual(["the", "rain", "in", "spain"], splitter.split(weather))

    def test_raw_strings_do_not_interpret_escape_characters(self):
        raw = r'\n'
        self.assertNotEqual('\n', raw)
        self.assertEqual(r'\n', raw)
        self.assertEqual(2, len(raw))
        # Handy for regular expressions, file paths, URLs, etc.

    def test_strings_can_be_joined(self):
        pieces = ["Now", "is", "the", "time"]
        self.assertEqual("Now is the time", ' '.join(pieces))

    def test_strings_can_change_case(self):
        self.assertEqual('Guido', 'guido'.capitalize())
        self.assertEqual('GUIDO', 'guido'.upper())
        self.assertEqual('timbot', 'TimBot'.lower())
        self.assertEqual('Guido Van Rossum', 'guido van rossum'.title())
        self.assertEqual('tOtAlLy AwEsOmE', 'ToTaLlY aWeSoMe'.swapcase())
| 36.946667
| 81
| 0.648863
|
from runner.koan import *
class AboutStringManipulation(Koan):
def test_use_format_to_interpolate_variables(self):
value1 = 'one'
value2 = 2
string = "The values are {0} and {1}".format(value1, value2)
self.assertEqual("The values are one and 2", string)
def test_formatted_values_can_be_shown_in_any_order_or_be_repeated(self):
value1 = 'doh'
value2 = 'DOH'
string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
self.assertEqual("The values are DOH, doh, doh and DOH!", string)
def test_any_python_expression_may_be_interpolated(self):
import math
decimal_places = 4
string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5),
decimal_places)
self.assertEqual("The square root of 5 is 2.2361", string)
def test_you_can_get_a_substring_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual("let", string[7:10])
def test_you_can_get_a_single_character_from_a_string(self):
string = "Bacon, lettuce and tomato"
self.assertEqual('a', string[1])
def test_single_characters_can_be_represented_by_integers(self):
self.assertEqual(97, ord('a'))
self.assertEqual(True, ord('b') == (ord('a') + 1))
def test_strings_can_be_split(self):
string = "Sausage Egg Cheese"
words = string.split()
self.assertListEqual(["Sausage", "Egg", "Cheese"], words)
def test_strings_can_be_split_with_different_patterns(self):
import re
string = "the,rain;in,spain"
pattern = re.compile(',|;')
words = pattern.split(string)
self.assertListEqual(["the", "rain", "in", "spain"], words)
def test_raw_strings_do_not_interpret_escape_characters(self):
string = r'\n'
self.assertNotEqual('\n', string)
self.assertEqual(r'\n', string)
self.assertEqual(2, len(string))
def test_strings_can_be_joined(self):
words = ["Now", "is", "the", "time"]
self.assertEqual("Now is the time", ' '.join(words))
def test_strings_can_change_case(self):
self.assertEqual('Guido', 'guido'.capitalize())
self.assertEqual('GUIDO', 'guido'.upper())
self.assertEqual('timbot', 'TimBot'.lower())
self.assertEqual('Guido Van Rossum', 'guido van rossum'.title())
self.assertEqual('tOtAlLy AwEsOmE', 'ToTaLlY aWeSoMe'.swapcase())
| true
| true
|
f70783d2ccd40fcc6acaed1d94cedfa8b5d90036
| 643
|
py
|
Python
|
frappe/patches/v5_3/rename_chinese_languages.py
|
ashokrajbathu/secondrep
|
6e6a469a0956db01b5640c8bb16c5752556a219e
|
[
"MIT"
] | null | null | null |
frappe/patches/v5_3/rename_chinese_languages.py
|
ashokrajbathu/secondrep
|
6e6a469a0956db01b5640c8bb16c5752556a219e
|
[
"MIT"
] | null | null | null |
frappe/patches/v5_3/rename_chinese_languages.py
|
ashokrajbathu/secondrep
|
6e6a469a0956db01b5640c8bb16c5752556a219e
|
[
"MIT"
] | 2
|
2016-02-09T20:17:59.000Z
|
2018-03-21T16:48:48.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: migrate the renamed Chinese language labels.

    Applies the rename both to the site-wide System Settings language and to
    every user's per-user language preference.
    """
    renames = {
        "中国(简体)": "簡體中文",
        "中國(繁體)": "正體中文"
    }
    current = frappe.db.get_single_value("System Settings", "language")
    if current in renames:
        frappe.db.set_value("System Settings", "System Settings", "language", renames[current])
    for stale, fresh in renames.items():
        frappe.db.sql("""update `tabUser` set language=%(new_name)s where language=%(old_name)s""",
            { "old_name": stale, "new_name": fresh })
| 33.842105
| 93
| 0.73717
|
from __future__ import unicode_literals
import frappe
def execute():
language_map = {
"中国(简体)": "簡體中文",
"中國(繁體)": "正體中文"
}
language_in_system_settings = frappe.db.get_single_value("System Settings", "language")
if language_in_system_settings in language_map:
new_language_name = language_map[language_in_system_settings]
frappe.db.set_value("System Settings", "System Settings", "language", new_language_name)
for old_name, new_name in language_map.items():
frappe.db.sql("""update `tabUser` set language=%(new_name)s where language=%(old_name)s""",
{ "old_name": old_name, "new_name": new_name })
| true
| true
|
f70783e180b4ca13da6bef7c7d91799a6f5e0db2
| 717
|
py
|
Python
|
src/main/scripts/modules/json.py
|
Kynarth/ryrycipe
|
e689ba5859a3641c7ff9ea0b868280bfeaf34ec9
|
[
"MIT"
] | null | null | null |
src/main/scripts/modules/json.py
|
Kynarth/ryrycipe
|
e689ba5859a3641c7ff9ea0b868280bfeaf34ec9
|
[
"MIT"
] | null | null | null |
src/main/scripts/modules/json.py
|
Kynarth/ryrycipe
|
e689ba5859a3641c7ff9ea0b868280bfeaf34ec9
|
[
"MIT"
] | null | null | null |
class JsonRes:
    """Registry of JSON resource paths, keyed by attribute name.

    Paths are relative to the application's resource directory; the ``en``
    and ``fr`` sub-folders hold the per-language variants.
    """
    craftplan = 'json/craftplan.json'
    category_to_icon_en = 'json/en/category_to_icon_en.json'
    words_en = 'json/en/words_en.json'
    items_en = 'json/en/items_en.json'
    material_to_icon_en = 'json/en/material_to_icon_en.json'
    plans_en = 'json/en/plans_en.json'
    materials_en = 'json/en/materials_en.json'
    items_fr = 'json/fr/items_fr.json'
    materials_fr = 'json/fr/materials_fr.json'
    words_fr = 'json/fr/words_fr.json'
    plans_fr = 'json/fr/plans_fr.json'
    category_to_icon_fr = 'json/fr/category_to_icon_fr.json'
    material_to_icon_fr = 'json/fr/material_to_icon_fr.json'

    @classmethod
    def get(cls, string):
        """Return the resource path stored under attribute name *string*.

        Raises AttributeError for unknown names, as before. Fixed:
        ``cls.__getattribute__(cls, string)`` invoked the slot wrapper in an
        unsupported way; ``getattr`` is the idiomatic equivalent.
        """
        return getattr(cls, string)
| 39.833333
| 60
| 0.716876
|
class JsonRes:
    """Registry of JSON resource paths, keyed by attribute name.

    Paths are relative to the application's resource directory; the ``en``
    and ``fr`` sub-folders hold the per-language variants.
    """
    craftplan = 'json/craftplan.json'
    category_to_icon_en = 'json/en/category_to_icon_en.json'
    words_en = 'json/en/words_en.json'
    items_en = 'json/en/items_en.json'
    material_to_icon_en = 'json/en/material_to_icon_en.json'
    plans_en = 'json/en/plans_en.json'
    materials_en = 'json/en/materials_en.json'
    items_fr = 'json/fr/items_fr.json'
    materials_fr = 'json/fr/materials_fr.json'
    words_fr = 'json/fr/words_fr.json'
    plans_fr = 'json/fr/plans_fr.json'
    category_to_icon_fr = 'json/fr/category_to_icon_fr.json'
    material_to_icon_fr = 'json/fr/material_to_icon_fr.json'

    @classmethod
    def get(cls, string):
        """Return the resource path stored under attribute name *string*.

        Raises AttributeError for unknown names, as before. Fixed:
        ``cls.__getattribute__(cls, string)`` invoked the slot wrapper in an
        unsupported way; ``getattr`` is the idiomatic equivalent.
        """
        return getattr(cls, string)
| true
| true
|
f70785ab9a87e136a0182edf5fd0d30f99328817
| 468
|
py
|
Python
|
hc/wsgi.py
|
IfBkg/healthchecks
|
dcd8a74c6b0bcdb0065e7c27d5b6639823400562
|
[
"BSD-3-Clause"
] | null | null | null |
hc/wsgi.py
|
IfBkg/healthchecks
|
dcd8a74c6b0bcdb0065e7c27d5b6639823400562
|
[
"BSD-3-Clause"
] | null | null | null |
hc/wsgi.py
|
IfBkg/healthchecks
|
dcd8a74c6b0bcdb0065e7c27d5b6639823400562
|
[
"BSD-3-Clause"
] | null | null | null |
"""
WSGI config for hc project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from django.contrib.staticfiles.handlers import StaticFilesHandler
# Default to the project settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hc.settings")
# StaticFilesHandler lets this WSGI process serve static assets itself.
application = StaticFilesHandler(get_wsgi_application())
| 26
| 78
| 0.801282
|
import os
from django.core.wsgi import get_wsgi_application
from django.contrib.staticfiles.handlers import StaticFilesHandler
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hc.settings")
application = StaticFilesHandler(get_wsgi_application())
| true
| true
|
f70785e5a2ed47f8a01bd91f0ef1227f58618b06
| 24,923
|
py
|
Python
|
keepercommander/importer/lastpass/lastpass.py
|
dstromberg/Commander
|
db521cbd1ef0367e95e120011d12bfe9ad6034f8
|
[
"MIT"
] | null | null | null |
keepercommander/importer/lastpass/lastpass.py
|
dstromberg/Commander
|
db521cbd1ef0367e95e120011d12bfe9ad6034f8
|
[
"MIT"
] | null | null | null |
keepercommander/importer/lastpass/lastpass.py
|
dstromberg/Commander
|
db521cbd1ef0367e95e120011d12bfe9ad6034f8
|
[
"MIT"
] | null | null | null |
# _ __
# | |/ /___ ___ _ __ ___ _ _ ®
# | ' </ -_) -_) '_ \/ -_) '_|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2021 Keeper Security Inc.
# Contact: ops@keepersecurity.com
#
import calendar
import datetime
import getpass
import json
import logging
from typing import Optional, List
from ..importer import BaseImporter, Record, Folder, RecordField, RecordReferences, SharedFolder, Permission
from .account import Account
from .exceptions import LastPassUnknownError
from .vault import Vault
class LastPassImporter(BaseImporter):
def __init__(self):
self.addresses = [] # type: List[LastPassAddress]
self.months = {}
_months = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August',
'September', 'October', 'November', 'December']
for i in range(len(_months)):
if _months[i]:
month = _months[i].casefold()
if month not in self.months:
self.months[month] = i
for i in range(len(calendar.month_name)):
if calendar.month_name[i]:
month = calendar.month_name[i].casefold()
if month not in self.months:
self.months[month] = i
def card_expiration(self, from_lastpass): # type: (str) -> str
if from_lastpass:
comp = [x.strip().casefold() for x in from_lastpass.split(',')]
if len(comp) == 2 and all(comp):
try:
year = int(comp[1])
if year < 200:
year += 2000
comp[1] = str(year)
except ValueError:
pass
if comp[0] in self.months:
return f'{self.months[comp[0]]:0>2}/{comp[1]}'
return from_lastpass
def lastpass_date(self, from_lastpass): # type: (str) -> int
if from_lastpass:
comp = [x.strip().casefold() for x in from_lastpass.split(',')]
if len(comp) == 3 and all(comp):
try:
month = self.months[comp[0]]
day = int(comp[1])
year = int(comp[2])
dt = datetime.date(year, month, day)
return int(datetime.datetime.fromordinal(dt.toordinal()).timestamp() * 1000)
except:
pass
return -1
def find_address(self, address): # type: (LastPassAddress) -> Optional[int]
for i in range(len(self.addresses)):
if self.addresses[i] == address:
return i + 1
def append_address(self, address): # type: (LastPassAddress) -> Optional[int]
if isinstance(address, LastPassAddress):
self.addresses.append(address)
return len(self.addresses)
    def parse_typed_notes(self, notes):  # type: (str) -> dict
        """Split a LastPass secure-note blob into a {field-name: value} dict.

        The note format is one ``Key:Value`` pair per line, but values may
        span multiple lines and may themselves contain ``:`` (notably the
        'Notes' free-text field and PEM 'Private Key' blocks), so this is a
        small stateful line scanner rather than a simple split.
        """
        lines = notes.split('\n')
        fields = {}
        key = ''
        value = ''
        for line in lines:
            k, s, v = line.partition(':')
            if s == ':':
                if key:
                    if key == 'Notes':
                        # 'Notes' is the trailing free-text field: swallow
                        # everything, even lines that look like new keys.
                        value += line
                    elif key == 'Private Key':
                        # Key material contains ':' lines; only a literal
                        # 'Public Key' header terminates this field.
                        if k == 'Public Key':
                            fields[key] = value
                            key = k
                            value = v
                        else:
                            value += '\n' + line
                    else:
                        # A new key begins: flush the previous pair.
                        fields[key] = value
                        key = k
                        value = v
                else:
                    key = k
                    value = v
            else:
                # Continuation line (no ':'): belongs to the current value.
                if key:
                    value += '\n' + line
        if key:
            # Flush the final pending pair.
            fields[key] = value
        return fields
def do_import(self, name):
username = name
password = getpass.getpass(prompt='...' + 'LastPass Password'.rjust(30) + ': ', stream=None)
print('Press <Enter> if account is not protected with Multifactor Authentication')
twofa_code = getpass.getpass(prompt='...' + 'Multifactor Password'.rjust(30) + ': ', stream=None)
if not twofa_code:
twofa_code = None
try:
vault = Vault.open_remote(username, password, multifactor_password=twofa_code)
except LastPassUnknownError as lpe:
logging.warning(lpe)
return
else:
if len(vault.errors) > 0:
err_list = '\n'.join(vault.errors)
logging.warning(f'The following errors occurred retrieving Lastpass shared folder members:\n{err_list}')
for shared_folder in vault.shared_folders:
folder = SharedFolder()
folder.path = shared_folder.name
folder.permissions = []
for member in shared_folder.members:
perm = Permission()
perm.name = member['username']
perm.manage_records = member['readonly'] == '0'
perm.manage_users = member['can_administer'] == '1'
folder.permissions.append(perm)
for team in shared_folder.teams:
perm = Permission()
perm.name = team['name']
perm.manage_records = team['readonly'] == '0'
perm.manage_users = team['can_administer'] == '1'
folder.permissions.append(perm)
yield folder
for account in vault.accounts: # type: Account
record = Record()
if account.name:
record.title = account.name.decode('utf-8')
if account.username:
record.login = account.username.decode('utf-8')
if account.password:
record.password = account.password.decode('utf-8')
if account.url:
record.login_url = account.url.decode('utf-8')
if record.login_url == 'http://sn':
record.login_url = None
elif record.login_url == 'http://group':
continue
if account.notes:
notes = account.notes.decode('utf-8')
if notes.startswith('NoteType:'):
typed_values = self.parse_typed_notes(notes)
if 'NoteType' in typed_values:
note_type = typed_values.pop('NoteType', '')
notes = typed_values.pop('Notes', '')
typed_values.pop('Language', None)
if note_type == 'Bank Account':
self.populate_bank_account(record, typed_values)
elif note_type == 'Credit Card':
self.populate_credit_card(record, typed_values)
elif note_type == 'Address':
address = LastPassAddress.from_lastpass(typed_values)
if address:
addr_ref = self.append_address(address)
if addr_ref:
record.uid = addr_ref
self.populate_address_only(record, address)
self.populate_address(record, typed_values)
elif note_type == 'Driver\'s License':
address_record = self.populate_driver_license(record, typed_values)
if address_record is not None:
yield address_record
elif note_type == 'Passport':
self.populate_passport(record, typed_values)
elif note_type == 'Social Security':
self.populate_ssn_card(record, typed_values)
elif note_type == 'Health Insurance' or note_type == 'Insurance':
self.populate_health_insurance(record, typed_values)
elif note_type == 'Membership':
self.populate_membership(record, typed_values)
elif note_type == 'Email Account' or note_type == 'Instant Messenger':
record.type = 'login'
elif note_type == 'Database':
self.populate_database(record, typed_values)
elif note_type == 'Server':
self.populate_server(record, typed_values)
elif note_type == 'SSH Key':
self.populate_ssh_key(record, typed_values)
elif note_type == 'Software License':
self.populate_software_license(record, typed_values)
username = typed_values.pop('Username', '')
if username:
if record.login:
if record.login != username:
cf = RecordField(label='Username', value=username)
if record.type:
cf.type = 'login'
record.fields.append(cf)
else:
record.login = username
password = typed_values.pop('Password', '')
if password:
if record.password:
if record.password != password:
cf = RecordField(label='Password', value=password)
if record.type:
cf.type = 'password'
record.fields.append(cf)
else:
record.password = password
url = typed_values.pop('URL', '')
if url:
if record.login_url:
if record.login_url != url:
cf = RecordField(label='URL', value=url)
if record.type:
cf.type = 'url'
record.fields.append(cf)
else:
record.login_url = url
for key in typed_values:
value = typed_values[key]
if value:
if record.type:
cf = RecordField(type='text', label=key, value=str(value))
else:
cf = RecordField(label=key, value=str(value))
record.fields.append(cf)
record.notes = notes
if account.group or account.shared_folder:
fol = Folder()
if account.shared_folder:
fol.domain = account.shared_folder.name
if account.group:
fol.path = account.group.decode('utf-8')
record.folders = [fol]
yield record
def populate_address(self, record, notes): # type: (Record, dict) -> None
person = LastPassPersonName()
person.first = notes.pop('First Name', '')
person.middle = notes.pop('Middle Name', '')
person.last = notes.pop('Last Name', '')
if person.first or person.last:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
dt = self.lastpass_date(notes.pop('Birthday', None))
if dt != -1:
dtf = RecordField(type='birthDate', value=dt)
record.fields.append(dtf)
email = notes.pop('Email Address', None)
if email:
dtf = RecordField(type='email', value=email)
record.fields.append(dtf)
for phone_type in ['Phone', 'Evening Phone', 'Mobile Phone', 'Fax']:
phone = notes.pop(phone_type, '')
if phone:
try:
phone_dict = json.loads(phone)
if isinstance(phone_dict, dict):
if 'num' in phone_dict:
phone_number = phone_dict['num']
phone_ext = phone_dict.get('ext') or ''
phone_country_code = phone_dict.get('cc3l') or ''
phf = RecordField(type='phone', label=phone_type)
phf.value = {
# 'region': phone_country_code,
'number': phone_number,
'ext': phone_ext,
'type': ('Mobile' if phone_type.startswith('Mobile') else
'Home' if phone_type.startswith('Evening') else
'Work')
}
record.fields.append(phf)
except:
pass
def populate_address_only(self, record, lastpass_address): # type: (Record, LastPassAddress) -> None
if lastpass_address:
record.type = 'address'
address = RecordField(type='address')
address.value = {
'street1': lastpass_address.street1 or '',
'street2': lastpass_address.street2 or '',
'city': lastpass_address.city or '',
'state': lastpass_address.state or '',
'zip': lastpass_address.zip or '',
'country': lastpass_address.country or '',
}
record.fields.append(address)
def populate_credit_card(self, record, notes): # type: (Record, dict) -> None
record.type = 'bankCard'
card = RecordField(type='paymentCard')
card.value = {
'cardNumber': notes.pop('Number', ''),
'cardExpirationDate': self.card_expiration(notes.pop('Expiration Date', '')),
'cardSecurityCode': notes.pop('Security Code', '')
}
record.fields.append(card)
card_holder = RecordField(type='text', label='cardholderName', value=notes.pop('Name on Card', ''))
record.fields.append(card_holder)
dt = self.lastpass_date(notes.pop('Start Date', None))
if dt != -1:
dtf = RecordField(type='date', label='Start Date', value=dt)
record.fields.append(dtf)
def populate_bank_account(self, record, notes): # type: (Record, dict) -> None
record.type = 'bankAccount'
bank = RecordField(type='bankAccount')
bank.value = {
'accountType': notes.pop('Account Type', ''),
'routingNumber': notes.pop('Routing Number', ''),
'accountNumber': notes.pop('Account Number', ''),
}
record.fields.append(bank)
bank_name = notes.pop('Bank Name', '')
if bank_name:
record.title = bank_name
def populate_passport(self, record, notes): # type: (Record, dict) -> None
record.type = 'passport'
number = RecordField(type='accountNumber', label='passportNumber', value=notes.pop('Number', None))
record.fields.append(number)
person = LastPassPersonName.from_lastpass(notes.pop('Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
dt = self.lastpass_date(notes.pop('Date of Birth', None))
if dt != -1:
dtf = RecordField(type='birthDate', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Expiration Date', None))
if dt != -1:
dtf = RecordField(type='expirationDate', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Issued Date', None))
if dt != -1:
dtf = RecordField(type='date', label='dateIssued', value=dt)
record.fields.append(dtf)
def populate_driver_license(self, record, notes): # type: (Record, dict) -> Optional[Record]
record.type = 'driverLicense'
account_number = RecordField(type='accountNumber', label='dlNumber', value=notes.pop('Number', ''))
record.fields.append(account_number)
dt = self.lastpass_date(notes.pop('Expiration Date', None))
if dt != -1:
dtf = RecordField(type='expirationDate', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Date of Birth', None))
if dt != -1:
dtf = RecordField(type='birthDate', value=dt)
record.fields.append(dtf)
person = LastPassPersonName.from_lastpass(notes.pop('Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
address = LastPassAddress.from_lastpass(notes)
address_record = None
if address:
ref_no = self.find_address(address)
if ref_no:
if record.references is None:
record.references = []
address_ref = next((x for x in record.references if x.type == 'address'), None)
if address_ref is None:
address_ref = RecordReferences(type='address')
record.references.append(address_ref)
address_ref.uids.append(ref_no)
return address_record
def populate_ssn_card(self, record, notes): # type: (Record, dict) -> None
record.type = 'ssnCard'
number = RecordField(type='accountNumber', label='identityNumber', value=notes.pop('Number', None))
record.fields.append(number)
person = LastPassPersonName.from_lastpass(notes.pop('Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
def populate_health_insurance(self, record, notes): # type: (Record, dict) -> None
record.type = 'healthInsurance'
number = RecordField(type='accountNumber', value=notes.pop('Policy Number', None))
record.fields.append(number)
def populate_membership(self, record, notes): # type: (Record, dict) -> None
record.type = 'membership'
number = RecordField(type='accountNumber', value=notes.pop('Membership Number', None))
record.fields.append(number)
person = LastPassPersonName.from_lastpass(notes.pop('Member Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
dt = self.lastpass_date(notes.pop('Start Date', None))
if dt != -1:
dtf = RecordField(type='date', label='Start Date', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Expiration Date', None))
if dt != -1:
dtf = RecordField(type='date', label='Expiration Date', value=dt)
record.fields.append(dtf)
def populate_database(self, record, notes): # type: (Record, dict) -> None
record.type = 'databaseCredentials'
db_type = RecordField(type='text', label='type', value=notes.pop('Type', None))
record.fields.append(db_type)
host = RecordField(type='host')
host.value = {
'hostName': notes.pop('Hostname', ''),
'port': notes.pop('Port', ''),
}
record.fields.append(host)
record.login_url = ''
    def populate_server(self, record, notes):  # type: (Record, dict) -> None
        """Map a LastPass 'Server' note onto a Keeper serverCredentials record.

        Consumes the 'Hostname'/'Port' keys from *notes*. (Fixed type
        comment: the original listed three argument types for two
        parameters.)
        """
        record.type = 'serverCredentials'
        host = RecordField(type='host')
        host.value = {
            'hostName': notes.pop('Hostname', ''),
            'port': notes.pop('Port', ''),
        }
        record.fields.append(host)
def populate_ssh_key(self, record, notes): # type: (Record, dict) -> None
record.type = 'sshKeys'
passphrase = notes.pop('Passphrase', None)
if passphrase:
if record.password:
if record.password != passphrase:
passphrase = RecordField(type='password', label='passphrase', value=passphrase)
record.fields.append(passphrase)
else:
record.password = passphrase
host = RecordField(type='host')
host.value = {
'hostName': notes.pop('Hostname', ''),
'port': notes.pop('Port', ''),
}
record.fields.append(host)
private_key = notes.pop('Private Key', None)
public_key = notes.pop('Public Key', None)
if private_key or public_key:
value = {
'privateKey': private_key,
'publicKey': public_key
}
pk = RecordField(type='keyPair', value=value)
record.fields.append(pk)
dt = self.lastpass_date(notes.pop('Date', None))
if dt != -1:
dtf = RecordField(type='date', value=dt)
record.fields.append(dtf)
def populate_software_license(self, record, notes): # type: (Record, dict) -> None
record.type = 'softwareLicense'
number = RecordField(type='licenseNumber', value=notes.pop('License Key', None))
record.fields.append(number)
dt = self.lastpass_date(notes.pop('Purchase Date', None))
if dt != -1:
dtf = RecordField(type='date', label='dateActive', value=dt)
record.fields.append(dtf)
class LastPassPersonName(object):
    """A person's name split into first / middle / last parts."""

    def __init__(self):
        self.first = ''
        self.middle = ''
        self.last = ''

    @staticmethod
    def from_lastpass(name):  # type: (str) -> 'Optional[LastPassPersonName]'
        """Parse a LastPass name string.

        Accepts either 'Last, First [Middle...]' or 'First [Middle...] Last'.
        Returns None for empty, non-string, or unusable input.
        """
        if not name or not isinstance(name, str):
            return None
        person = LastPassPersonName()
        before, comma, after = name.partition(',')
        if comma:
            # 'Last, First Middle' form.
            person.last = before.strip()
            pieces = [p for p in after.strip().split(' ') if p]
        else:
            # 'First Middle Last' form: the final word is the surname.
            pieces = [p for p in name.split(' ') if p]
            person.last = pieces.pop(-1)
        if pieces:
            person.first = pieces.pop(0)
        if pieces:
            person.middle = ' '.join(pieces)
        if not (person.first or person.last):
            return None
        return person
class LastPassAddress(object):
    """Street address extracted from LastPass 'Address' note fields."""

    def __init__(self):
        self.street1 = ''
        self.street2 = ''
        self.city = ''
        self.state = ''
        self.zip = ''
        self.country = ''

    @staticmethod
    def _compare_case_insensitive(s1, s2):  # type: (any, any) -> bool
        """Case-insensitive equality; two Nones match, mixed types do not."""
        if isinstance(s1, str) and isinstance(s2, str):
            return s1.casefold() == s2.casefold()
        return s1 is None and s2 is None

    def __eq__(self, other):
        # Only street1/street2/city/state participate in equality;
        # zip and country are not compared.
        if not isinstance(other, LastPassAddress):
            return False
        same = self._compare_case_insensitive
        return (same(self.street1, other.street1) and
                same(self.street2, other.street2) and
                same(self.city, other.city) and
                same(self.state, other.state))

    @staticmethod
    def from_lastpass(notes):  # type: (dict) -> 'Optional[LastPassAddress]'
        """Build an address from a note dict, popping the keys it consumes.

        Returns None when *notes* is not a dict or contains no address key.
        """
        if not isinstance(notes, dict):
            return None
        address = LastPassAddress()
        if 'Address 1' in notes:
            address.street1 = notes.pop('Address 1', '')
            address.street2 = notes.pop('Address 2', '')
        elif 'Address' in notes:
            # A single 'Address' value may hold 'street1, street2'.
            first_part, sep, second_part = notes.pop('Address', '').partition(',')
            address.street1 = first_part.strip()
            if sep == ',':
                address.street2 = second_part.strip()
        else:
            return None
        address.city = notes.pop('City / Town', '')
        address.state = notes.pop('State', '')
        address.zip = notes.pop('Zip / Postal Code', '')
        address.country = notes.pop('Country', '')
        return address
| 41.677258
| 120
| 0.508968
|
# |_|\_\___\___| .__/\___|_|
# |_|
#
# Keeper Commander
# Copyright 2021 Keeper Security Inc.
# Contact: ops@keepersecurity.com
#
import calendar
import datetime
import getpass
import json
import logging
from typing import Optional, List
from ..importer import BaseImporter, Record, Folder, RecordField, RecordReferences, SharedFolder, Permission
from .account import Account
from .exceptions import LastPassUnknownError
from .vault import Vault
class LastPassImporter(BaseImporter):
def __init__(self):
self.addresses = [] # type: List[LastPassAddress]
self.months = {}
_months = ['', 'January', 'February', 'March', 'April', 'May', 'June', 'July', 'August',
'September', 'October', 'November', 'December']
for i in range(len(_months)):
if _months[i]:
month = _months[i].casefold()
if month not in self.months:
self.months[month] = i
for i in range(len(calendar.month_name)):
if calendar.month_name[i]:
month = calendar.month_name[i].casefold()
if month not in self.months:
self.months[month] = i
def card_expiration(self, from_lastpass): # type: (str) -> str
if from_lastpass:
comp = [x.strip().casefold() for x in from_lastpass.split(',')]
if len(comp) == 2 and all(comp):
try:
year = int(comp[1])
if year < 200:
year += 2000
comp[1] = str(year)
except ValueError:
pass
if comp[0] in self.months:
return f'{self.months[comp[0]]:0>2}/{comp[1]}'
return from_lastpass
def lastpass_date(self, from_lastpass): # type: (str) -> int
if from_lastpass:
comp = [x.strip().casefold() for x in from_lastpass.split(',')]
if len(comp) == 3 and all(comp):
try:
month = self.months[comp[0]]
day = int(comp[1])
year = int(comp[2])
dt = datetime.date(year, month, day)
return int(datetime.datetime.fromordinal(dt.toordinal()).timestamp() * 1000)
except:
pass
return -1
def find_address(self, address): # type: (LastPassAddress) -> Optional[int]
for i in range(len(self.addresses)):
if self.addresses[i] == address:
return i + 1
def append_address(self, address): # type: (LastPassAddress) -> Optional[int]
if isinstance(address, LastPassAddress):
self.addresses.append(address)
return len(self.addresses)
    def parse_typed_notes(self, notes):
        """Split a typed LastPass note body into a key -> value dict.

        Lines look like 'Key:Value'; a line without a colon continues the
        previous value.  Two keys get special continuation handling:
        'Notes' absorbs all subsequent lines, and 'Private Key' keeps
        absorbing lines (PEM bodies contain colons) until a 'Public Key'
        line starts.
        """
        lines = notes.split('\n')
        fields = {}
        key = ''
        value = ''
        for line in lines:
            k, s, v = line.partition(':')
            if s == ':':
                if key:
                    if key == 'Notes':
                        # 'Notes' is terminal free text: keep the raw line.
                        value += line
                    elif key == 'Private Key':
                        if k == 'Public Key':
                            fields[key] = value
                            key = k
                            value = v
                        else:
                            # Colons inside the key body do not end it.
                            value += '\n' + line
                    else:
                        fields[key] = value
                        key = k
                        value = v
                else:
                    key = k
                    value = v
            else:
                if key:
                    value += '\n' + line
        if key:
            # Flush the final pending key/value pair.
            fields[key] = value
        return fields
    def do_import(self, name):
        """Import a LastPass vault interactively.

        *name* is the LastPass username; the password and an optional
        multifactor code are prompted for on the terminal.  Yields
        SharedFolder objects first, then one Record per vault account.
        """
        username = name
        password = getpass.getpass(prompt='...' + 'LastPass Password'.rjust(30) + ': ', stream=None)
        print('Press <Enter> if account is not protected with Multifactor Authentication')
        twofa_code = getpass.getpass(prompt='...' + 'Multifactor Password'.rjust(30) + ': ', stream=None)
        if not twofa_code:
            twofa_code = None
        try:
            vault = Vault.open_remote(username, password, multifactor_password=twofa_code)
        except LastPassUnknownError as lpe:
            logging.warning(lpe)
            return
        else:
            if len(vault.errors) > 0:
                err_list = '\n'.join(vault.errors)
                logging.warning(f'The following errors occurred retrieving Lastpass shared folder members:\n{err_list}')
        # Shared folders (with member/team permissions) are yielded first.
        for shared_folder in vault.shared_folders:
            folder = SharedFolder()
            folder.path = shared_folder.name
            folder.permissions = []
            for member in shared_folder.members:
                perm = Permission()
                perm.name = member['username']
                perm.manage_records = member['readonly'] == '0'
                perm.manage_users = member['can_administer'] == '1'
                folder.permissions.append(perm)
            for team in shared_folder.teams:
                perm = Permission()
                perm.name = team['name']
                perm.manage_records = team['readonly'] == '0'
                perm.manage_users = team['can_administer'] == '1'
                folder.permissions.append(perm)
            yield folder
        for account in vault.accounts:  # type: Account
            record = Record()
            if account.name:
                record.title = account.name.decode('utf-8')
            if account.username:
                record.login = account.username.decode('utf-8')
            if account.password:
                record.password = account.password.decode('utf-8')
            if account.url:
                record.login_url = account.url.decode('utf-8')
                # LastPass uses sentinel URLs: 'http://sn' marks a secure
                # note, 'http://group' marks a folder placeholder (skipped).
                if record.login_url == 'http://sn':
                    record.login_url = None
                elif record.login_url == 'http://group':
                    continue
            if account.notes:
                notes = account.notes.decode('utf-8')
                if notes.startswith('NoteType:'):
                    # Typed secure note: dispatch on NoteType to the
                    # matching populate_* helper, which consumes the keys
                    # it understands from typed_values.
                    typed_values = self.parse_typed_notes(notes)
                    if 'NoteType' in typed_values:
                        note_type = typed_values.pop('NoteType', '')
                        notes = typed_values.pop('Notes', '')
                        typed_values.pop('Language', None)
                        if note_type == 'Bank Account':
                            self.populate_bank_account(record, typed_values)
                        elif note_type == 'Credit Card':
                            self.populate_credit_card(record, typed_values)
                        elif note_type == 'Address':
                            address = LastPassAddress.from_lastpass(typed_values)
                            if address:
                                addr_ref = self.append_address(address)
                                if addr_ref:
                                    record.uid = addr_ref
                                self.populate_address_only(record, address)
                            self.populate_address(record, typed_values)
                        elif note_type == 'Driver\'s License':
                            address_record = self.populate_driver_license(record, typed_values)
                            if address_record is not None:
                                yield address_record
                        elif note_type == 'Passport':
                            self.populate_passport(record, typed_values)
                        elif note_type == 'Social Security':
                            self.populate_ssn_card(record, typed_values)
                        elif note_type == 'Health Insurance' or note_type == 'Insurance':
                            self.populate_health_insurance(record, typed_values)
                        elif note_type == 'Membership':
                            self.populate_membership(record, typed_values)
                        elif note_type == 'Email Account' or note_type == 'Instant Messenger':
                            record.type = 'login'
                        elif note_type == 'Database':
                            self.populate_database(record, typed_values)
                        elif note_type == 'Server':
                            self.populate_server(record, typed_values)
                        elif note_type == 'SSH Key':
                            self.populate_ssh_key(record, typed_values)
                        elif note_type == 'Software License':
                            self.populate_software_license(record, typed_values)
                    # Username/Password/URL from the note fill empty record
                    # slots; conflicting values become extra custom fields.
                    username = typed_values.pop('Username', '')
                    if username:
                        if record.login:
                            if record.login != username:
                                cf = RecordField(label='Username', value=username)
                                if record.type:
                                    cf.type = 'login'
                                record.fields.append(cf)
                        else:
                            record.login = username
                    password = typed_values.pop('Password', '')
                    if password:
                        if record.password:
                            if record.password != password:
                                cf = RecordField(label='Password', value=password)
                                if record.type:
                                    cf.type = 'password'
                                record.fields.append(cf)
                        else:
                            record.password = password
                    url = typed_values.pop('URL', '')
                    if url:
                        if record.login_url:
                            if record.login_url != url:
                                cf = RecordField(label='URL', value=url)
                                if record.type:
                                    cf.type = 'url'
                                record.fields.append(cf)
                        else:
                            record.login_url = url
                    # Any keys the populate_* helper did not consume are
                    # preserved as generic fields.
                    for key in typed_values:
                        value = typed_values[key]
                        if value:
                            if record.type:
                                cf = RecordField(type='text', label=key, value=str(value))
                            else:
                                cf = RecordField(label=key, value=str(value))
                            record.fields.append(cf)
                record.notes = notes
            if account.group or account.shared_folder:
                fol = Folder()
                if account.shared_folder:
                    fol.domain = account.shared_folder.name
                if account.group:
                    fol.path = account.group.decode('utf-8')
                record.folders = [fol]
            yield record
    def populate_address(self, record, notes):
        """Add name, birthday, email and phone fields from an 'Address' note.

        Consumes the keys it understands from *notes*; the street address
        itself is handled separately by populate_address_only().
        """
        person = LastPassPersonName()
        person.first = notes.pop('First Name', '')
        person.middle = notes.pop('Middle Name', '')
        person.last = notes.pop('Last Name', '')
        if person.first or person.last:
            pf = RecordField(type='name')
            pf.value = {
                'first': person.first,
                'middle': person.middle,
                'last': person.last
            }
            record.fields.append(pf)
        dt = self.lastpass_date(notes.pop('Birthday', None))
        if dt != -1:
            dtf = RecordField(type='birthDate', value=dt)
            record.fields.append(dtf)
        email = notes.pop('Email Address', None)
        if email:
            dtf = RecordField(type='email', value=email)
            record.fields.append(dtf)
        for phone_type in ['Phone', 'Evening Phone', 'Mobile Phone', 'Fax']:
            phone = notes.pop(phone_type, '')
            if phone:
                # Phone values are JSON; keys read below: num, ext, cc3l.
                try:
                    phone_dict = json.loads(phone)
                    if isinstance(phone_dict, dict):
                        if 'num' in phone_dict:
                            phone_number = phone_dict['num']
                            phone_ext = phone_dict.get('ext') or ''
                            # NOTE(review): the country code is extracted but
                            # never used below — confirm whether intentional.
                            phone_country_code = phone_dict.get('cc3l') or ''
                            phf = RecordField(type='phone', label=phone_type)
                            phf.value = {
                                'number': phone_number,
                                'ext': phone_ext,
                                'type': ('Mobile' if phone_type.startswith('Mobile') else
                                         'Home' if phone_type.startswith('Evening') else
                                         'Work')
                            }
                            record.fields.append(phf)
                except:  # noqa: E722 - malformed phone JSON is skipped
                    pass
def populate_address_only(self, record, lastpass_address):
if lastpass_address:
record.type = 'address'
address = RecordField(type='address')
address.value = {
'street1': lastpass_address.street1 or '',
'street2': lastpass_address.street2 or '',
'city': lastpass_address.city or '',
'state': lastpass_address.state or '',
'zip': lastpass_address.zip or '',
'country': lastpass_address.country or '',
}
record.fields.append(address)
def populate_credit_card(self, record, notes):
record.type = 'bankCard'
card = RecordField(type='paymentCard')
card.value = {
'cardNumber': notes.pop('Number', ''),
'cardExpirationDate': self.card_expiration(notes.pop('Expiration Date', '')),
'cardSecurityCode': notes.pop('Security Code', '')
}
record.fields.append(card)
card_holder = RecordField(type='text', label='cardholderName', value=notes.pop('Name on Card', ''))
record.fields.append(card_holder)
dt = self.lastpass_date(notes.pop('Start Date', None))
if dt != -1:
dtf = RecordField(type='date', label='Start Date', value=dt)
record.fields.append(dtf)
def populate_bank_account(self, record, notes):
record.type = 'bankAccount'
bank = RecordField(type='bankAccount')
bank.value = {
'accountType': notes.pop('Account Type', ''),
'routingNumber': notes.pop('Routing Number', ''),
'accountNumber': notes.pop('Account Number', ''),
}
record.fields.append(bank)
bank_name = notes.pop('Bank Name', '')
if bank_name:
record.title = bank_name
def populate_passport(self, record, notes):
record.type = 'passport'
number = RecordField(type='accountNumber', label='passportNumber', value=notes.pop('Number', None))
record.fields.append(number)
person = LastPassPersonName.from_lastpass(notes.pop('Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
dt = self.lastpass_date(notes.pop('Date of Birth', None))
if dt != -1:
dtf = RecordField(type='birthDate', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Expiration Date', None))
if dt != -1:
dtf = RecordField(type='expirationDate', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Issued Date', None))
if dt != -1:
dtf = RecordField(type='date', label='dateIssued', value=dt)
record.fields.append(dtf)
def populate_driver_license(self, record, notes):
record.type = 'driverLicense'
account_number = RecordField(type='accountNumber', label='dlNumber', value=notes.pop('Number', ''))
record.fields.append(account_number)
dt = self.lastpass_date(notes.pop('Expiration Date', None))
if dt != -1:
dtf = RecordField(type='expirationDate', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Date of Birth', None))
if dt != -1:
dtf = RecordField(type='birthDate', value=dt)
record.fields.append(dtf)
person = LastPassPersonName.from_lastpass(notes.pop('Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
address = LastPassAddress.from_lastpass(notes)
address_record = None
if address:
ref_no = self.find_address(address)
if ref_no:
if record.references is None:
record.references = []
address_ref = next((x for x in record.references if x.type == 'address'), None)
if address_ref is None:
address_ref = RecordReferences(type='address')
record.references.append(address_ref)
address_ref.uids.append(ref_no)
return address_record
def populate_ssn_card(self, record, notes):
record.type = 'ssnCard'
number = RecordField(type='accountNumber', label='identityNumber', value=notes.pop('Number', None))
record.fields.append(number)
person = LastPassPersonName.from_lastpass(notes.pop('Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
def populate_health_insurance(self, record, notes):
record.type = 'healthInsurance'
number = RecordField(type='accountNumber', value=notes.pop('Policy Number', None))
record.fields.append(number)
def populate_membership(self, record, notes):
record.type = 'membership'
number = RecordField(type='accountNumber', value=notes.pop('Membership Number', None))
record.fields.append(number)
person = LastPassPersonName.from_lastpass(notes.pop('Member Name', None))
if person:
pf = RecordField(type='name')
pf.value = {
'first': person.first,
'middle': person.middle,
'last': person.last
}
record.fields.append(pf)
dt = self.lastpass_date(notes.pop('Start Date', None))
if dt != -1:
dtf = RecordField(type='date', label='Start Date', value=dt)
record.fields.append(dtf)
dt = self.lastpass_date(notes.pop('Expiration Date', None))
if dt != -1:
dtf = RecordField(type='date', label='Expiration Date', value=dt)
record.fields.append(dtf)
def populate_database(self, record, notes):
record.type = 'databaseCredentials'
db_type = RecordField(type='text', label='type', value=notes.pop('Type', None))
record.fields.append(db_type)
host = RecordField(type='host')
host.value = {
'hostName': notes.pop('Hostname', ''),
'port': notes.pop('Port', ''),
}
record.fields.append(host)
record.login_url = ''
def populate_server(self, record, notes):
record.type = 'serverCredentials'
host = RecordField(type='host')
host.value = {
'hostName': notes.pop('Hostname', ''),
'port': notes.pop('Port', ''),
}
record.fields.append(host)
    def populate_ssh_key(self, record, notes):
        """Map a LastPass 'SSH Key' note onto an sshKeys record."""
        record.type = 'sshKeys'
        passphrase = notes.pop('Passphrase', None)
        if passphrase:
            if record.password:
                # An existing, different password is kept; the passphrase
                # is stored as a separate field instead.
                if record.password != passphrase:
                    passphrase = RecordField(type='password', label='passphrase', value=passphrase)
                    record.fields.append(passphrase)
            else:
                record.password = passphrase
        host = RecordField(type='host')
        host.value = {
            'hostName': notes.pop('Hostname', ''),
            'port': notes.pop('Port', ''),
        }
        record.fields.append(host)
        private_key = notes.pop('Private Key', None)
        public_key = notes.pop('Public Key', None)
        if private_key or public_key:
            value = {
                'privateKey': private_key,
                'publicKey': public_key
            }
            pk = RecordField(type='keyPair', value=value)
            record.fields.append(pk)
        dt = self.lastpass_date(notes.pop('Date', None))
        if dt != -1:
            dtf = RecordField(type='date', value=dt)
            record.fields.append(dtf)
    def populate_software_license(self, record, notes):
        """Map a LastPass 'Software License' note onto a softwareLicense record."""
        record.type = 'softwareLicense'
        number = RecordField(type='licenseNumber', value=notes.pop('License Key', None))
        record.fields.append(number)
        dt = self.lastpass_date(notes.pop('Purchase Date', None))
        if dt != -1:
            dtf = RecordField(type='date', label='dateActive', value=dt)
            record.fields.append(dtf)
class LastPassPersonName(object):
    """Person name parsed from a LastPass free-text 'Name' value."""

    def __init__(self):
        self.first = ''
        self.middle = ''
        self.last = ''

    @staticmethod
    def from_lastpass(name):
        """Parse 'Last, First Middle' or 'First Middle Last'.

        Returns None when *name* is empty, not a string, or yields
        neither a first nor a last name.
        """
        if not name:
            return None
        if not isinstance(name, str):
            return None
        person = LastPassPersonName()
        last, sep, other = name.partition(',')
        if sep == ',':
            # 'Last, First [Middle ...]' format.
            person.last = last.strip()
            comps = [x for x in other.strip().split(' ') if x]
        else:
            # 'First [Middle ...] Last' format.
            comps = [x for x in name.split(' ') if x]
            # Guard: a whitespace-only name used to raise IndexError here.
            if comps:
                person.last = comps.pop(-1)
        if len(comps) > 0:
            person.first = comps.pop(0)
        if len(comps) > 0:
            person.middle = ' '.join(comps)
        if not person.first and not person.last:
            return None
        return person
class LastPassAddress(object):
    """Street address extracted from LastPass 'Address' note fields."""

    def __init__(self):
        self.street1 = ''
        self.street2 = ''
        self.city = ''
        self.state = ''
        self.zip = ''
        self.country = ''

    @staticmethod
    def _compare_case_insensitive(s1, s2):
        """Case-insensitive string equality; two Nones compare equal."""
        if isinstance(s1, str) and isinstance(s2, str):
            return s1.casefold() == s2.casefold()
        if s1 is None and s2 is None:
            return True
        return False

    def __eq__(self, other):
        # NOTE: zip and country do not participate in equality.
        if not isinstance(other, LastPassAddress):
            return False
        return (self._compare_case_insensitive(self.street1, other.street1) and
                self._compare_case_insensitive(self.street2, other.street2) and
                self._compare_case_insensitive(self.city, other.city) and
                self._compare_case_insensitive(self.state, other.state))

    @staticmethod
    def from_lastpass(notes):
        """Build an address from a note dict, popping the keys it uses.

        Returns None when *notes* is not a dict or has no address key.
        """
        if not isinstance(notes, dict):
            return None
        address = LastPassAddress()
        if 'Address 1' in notes:
            address.street1 = notes.pop('Address 1', '')
            address.street2 = notes.pop('Address 2', '')
        elif 'Address' in notes:
            # A single 'Address' value may hold 'street1, street2'.
            s1, sep, s2 = notes.pop('Address', '').partition(',')
            address.street1 = s1.strip()
            if sep == ',':
                address.street2 = s2.strip()
        else:
            return None
        address.city = notes.pop('City / Town', '')
        address.state = notes.pop('State', '')
        address.zip = notes.pop('Zip / Postal Code', '')
        address.country = notes.pop('Country', '')
        return address
| true
| true
|
f70786a5c6e06916448c7c65a8d05f9d6a96e774
| 29,777
|
py
|
Python
|
neutron/tests/functional/agent/l3/framework.py
|
swiftchao/neutron
|
f253d676b8f96b52f16382830349da50c3e366bc
|
[
"Apache-2.0"
] | 1
|
2018-10-19T01:48:37.000Z
|
2018-10-19T01:48:37.000Z
|
neutron/tests/functional/agent/l3/framework.py
|
swiftchao/neutron
|
f253d676b8f96b52f16382830349da50c3e366bc
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/functional/agent/l3/framework.py
|
swiftchao/neutron
|
f253d676b8f96b52f16382830349da50c3e366bc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import functools
import textwrap
import mock
import netaddr
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import testtools
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import agent as neutron_l3_agent
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as l3_router_info
from neutron.agent import l3_agent as l3_agent_main
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import keepalived
from neutron.common import constants as n_const
from neutron.common import utils as common_utils
from neutron.conf.agent import common as agent_config
from neutron.conf import common as common_config
from neutron.tests.common import l3_test_common
from neutron.tests.common import net_helpers
from neutron.tests.functional import base
_uuid = uuidutils.generate_uuid
OVS_INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver'
def get_ovs_bridge(br_name):
    """Return an ovs_lib.OVSBridge handle for the named bridge."""
    return ovs_lib.OVSBridge(br_name)
class L3AgentTestFramework(base.BaseSudoTestCase):
INTERFACE_DRIVER = OVS_INTERFACE_DRIVER
NESTED_NAMESPACE_SEPARATOR = '@'
    def setUp(self):
        """Stub out the L3 plugin RPC APIs and start a fresh test agent."""
        super(L3AgentTestFramework, self).setUp()
        # The plugin RPC is mocked so tests fully control what the agent sees.
        self.mock_plugin_api = mock.patch(
            'neutron.agent.l3.agent.L3PluginApi').start().return_value
        mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
        self.conf = self._configure_agent('agent1')
        self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1',
                                                                self.conf)
def _get_config_opts(self):
config = cfg.ConfigOpts()
config.register_opts(common_config.core_opts)
config.register_opts(common_config.core_cli_opts)
logging.register_options(config)
agent_config.register_process_monitor_opts(config)
return config
    def _configure_agent(self, host, agent_mode='dvr_snat'):
        """Build an agent config with private OVS bridges and temp paths."""
        conf = self._get_config_opts()
        l3_agent_main.register_opts(conf)
        conf.set_override('interface_driver', self.INTERFACE_DRIVER)
        # Each test gets its own integration and external bridges.
        br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
        br_ex = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
        conf.set_override('ovs_integration_bridge', br_int.br_name)
        conf.set_override('external_network_bridge', br_ex.br_name)
        # All mutable agent state lives under a per-test temp directory.
        temp_dir = self.get_new_temp_dir()
        get_temp_file_path = functools.partial(self.get_temp_file_path,
                                               root=temp_dir)
        conf.set_override('state_path', temp_dir.path)
        conf.set_override('log_file',
                          get_temp_file_path('log_file'))
        conf.set_override('metadata_proxy_socket',
                          get_temp_file_path('metadata_proxy'))
        conf.set_override('ha_confs_path',
                          get_temp_file_path('ha_confs'))
        conf.set_override('external_pids',
                          get_temp_file_path('external/pids'))
        conf.set_override('host', host)
        conf.set_override('agent_mode', agent_mode)
        return conf
def _get_agent_ovs_integration_bridge(self, agent):
return get_ovs_bridge(agent.conf.ovs_integration_bridge)
    def generate_router_info(self, enable_ha,
                             ip_version=constants.IP_VERSION_4,
                             extra_routes=True,
                             enable_fip=True, enable_snat=True,
                             num_internal_ports=1,
                             dual_stack=False, v6_ext_gw_with_sub=True,
                             qos_policy_id=None):
        """Return router test data built by l3_test_common."""
        # SNAT, floating IPs and extra routes are disabled for
        # IPv6-only (non-dual-stack) routers.
        if ip_version == constants.IP_VERSION_6 and not dual_stack:
            enable_snat = False
            enable_fip = False
            extra_routes = False
        return l3_test_common.prepare_router_data(ip_version=ip_version,
                                                  enable_snat=enable_snat,
                                                  num_internal_ports=(
                                                      num_internal_ports),
                                                  enable_floating_ip=enable_fip,
                                                  enable_ha=enable_ha,
                                                  extra_routes=extra_routes,
                                                  dual_stack=dual_stack,
                                                  v6_ext_gw_with_sub=(
                                                      v6_ext_gw_with_sub),
                                                  qos_policy_id=qos_policy_id)
    def _test_conntrack_disassociate_fip(self, ha):
        '''Test that conntrack immediately drops stateful connection
        that uses floating IP once it's disassociated.
        '''
        router_info = self.generate_router_info(enable_ha=ha)
        router = self.manage_router(self.agent, router_info)
        port = net_helpers.get_free_namespace_port(
            constants.PROTO_NAME_TCP, router.ns_name)
        client_address = '19.4.4.3'
        server_address = '35.4.0.4'
        def clean_fips(router):
            # Drop every floating IP from the router's data.
            router.router[constants.FLOATINGIP_KEY] = []
        clean_fips(router)
        self._add_fip(router, client_address, fixed_address=server_address)
        router.process()
        router_ns = ip_lib.IPWrapper(namespace=router.ns_name)
        netcat = net_helpers.NetcatTester(
            router.ns_name, router.ns_name, client_address, port,
            protocol=net_helpers.NetcatTester.TCP)
        self.addCleanup(netcat.stop_processes)
        def assert_num_of_conntrack_rules(n):
            # Count conntrack entries originating from the client address.
            out = router_ns.netns.execute(["conntrack", "-L",
                                           "--orig-src", client_address])
            self.assertEqual(
                n, len([line for line in out.strip().split('\n') if line]))
        if ha:
            common_utils.wait_until_true(lambda: router.ha_state == 'master')
        with self.assert_max_execution_time(100):
            assert_num_of_conntrack_rules(0)
            self.assertTrue(netcat.test_connectivity())
            assert_num_of_conntrack_rules(1)
            # Removing the FIP must flush the conntrack entry and break
            # the established connection.
            clean_fips(router)
            router.process()
            assert_num_of_conntrack_rules(0)
            with testtools.ExpectedException(RuntimeError):
                netcat.test_connectivity()
    def _test_update_floatingip_statuses(self, router_info):
        """Check that processing a router reports its FIPs ACTIVE via RPC."""
        router = self.manage_router(self.agent, router_info)
        rpc = self.agent.plugin_rpc.update_floatingip_statuses
        self.assertTrue(rpc.called)
        # Assert that every defined FIP is updated via RPC
        expected_fips = set([
            (fip['id'], constants.FLOATINGIP_STATUS_ACTIVE) for fip in
            router.router[constants.FLOATINGIP_KEY]])
        call = [args[0] for args in rpc.call_args_list][0]
        actual_fips = set(
            [(fip_id, status) for fip_id, status in call[2].items()])
        self.assertEqual(expected_fips, actual_fips)
def _gateway_check(self, gateway_ip, external_device):
expected_gateway = gateway_ip
ip_vers = netaddr.IPAddress(expected_gateway).version
existing_gateway = (external_device.route.get_gateway(
ip_version=ip_vers).get('gateway'))
self.assertEqual(expected_gateway, existing_gateway)
def _assert_ha_device(self, router):
def ha_router_dev_name_getter(not_used):
return router.get_ha_device_name()
self.assertTrue(self.device_exists_with_ips_and_mac(
router.router[constants.HA_INTERFACE_KEY],
ha_router_dev_name_getter, router.ns_name))
def _assert_gateway(self, router, v6_ext_gw_with_sub=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
external_device = ip_lib.IPDevice(external_device_name,
namespace=router.ns_name)
for subnet in external_port['subnets']:
self._gateway_check(subnet['gateway_ip'], external_device)
if not v6_ext_gw_with_sub:
self._gateway_check(self.agent.conf.ipv6_gateway,
external_device)
def _check_external_device(self, router):
external_port = router.get_ex_gw_port()
return (self.device_exists_with_ips_and_mac(
external_port, router.get_external_device_name,
router.ns_name))
def _assert_external_device(self, router):
self.assertTrue(self._check_external_device(router))
def _assert_ipv6_accept_ra(self, router, enabled=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name)
ra_state = ip_wrapper.netns.execute(['sysctl', '-b',
'net.ipv6.conf.%s.accept_ra' % external_device_name])
self.assertEqual(enabled, int(ra_state) != n_const.ACCEPT_RA_DISABLED)
def _assert_ipv6_forwarding(self, router, enabled=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name)
fwd_state = ip_wrapper.netns.execute(['sysctl', '-b',
'net.ipv6.conf.%s.forwarding' % external_device_name])
self.assertEqual(int(enabled), int(fwd_state))
    def _router_lifecycle(self, enable_ha, ip_version=constants.IP_VERSION_4,
                          dual_stack=False, v6_ext_gw_with_sub=True,
                          router_info=None):
        """Create, fully verify, then delete a router.

        Returns a deep copy of the router dict taken before processing so
        callers can recreate the same router later.
        """
        router_info = router_info or self.generate_router_info(
            enable_ha, ip_version, dual_stack=dual_stack,
            v6_ext_gw_with_sub=(v6_ext_gw_with_sub))
        return_copy = copy.deepcopy(router_info)
        router = self.manage_router(self.agent, router_info)
        # Add multiple-IPv6-prefix internal router port
        slaac = constants.IPV6_SLAAC
        slaac_mode = {'ra_mode': slaac, 'address_mode': slaac}
        subnet_modes = [slaac_mode] * 2
        self._add_internal_interface_by_subnet(router.router,
                                               count=2, ip_version=constants.IP_VERSION_6,
                                               ipv6_subnet_modes=subnet_modes)
        router.process()
        if enable_ha:
            # The external device carries no IPs until this node is master.
            port = router.get_ex_gw_port()
            interface_name = router.get_external_device_name(port['id'])
            self._assert_no_ip_addresses_on_interface(router.ns_name,
                                                      interface_name)
            common_utils.wait_until_true(lambda: router.ha_state == 'master')
            # Keepalived notifies of a state transition when it starts,
            # not when it ends. Thus, we have to wait until keepalived finishes
            # configuring everything. We verify this by waiting until the last
            # device has an IP address.
            device = router.router[constants.INTERFACE_KEY][-1]
            device_exists = functools.partial(
                self.device_exists_with_ips_and_mac,
                device,
                router.get_internal_device_name,
                router.ns_name)
            common_utils.wait_until_true(device_exists)
        self.assertTrue(self._namespace_exists(router.ns_name))
        common_utils.wait_until_true(
            lambda: self._metadata_proxy_exists(self.agent.conf, router))
        self._assert_internal_devices(router)
        self._assert_external_device(router)
        if not (enable_ha and
                (ip_version == constants.IP_VERSION_6 or dual_stack)):
            # Note(SridharG): enable the assert_gateway for IPv6 once
            # keepalived on Ubuntu14.04 (i.e., check-neutron-dsvm-functional
            # platform) is updated to 1.2.10 (or above).
            # For more details: https://review.openstack.org/#/c/151284/
            self._assert_gateway(router, v6_ext_gw_with_sub)
            self.assertTrue(self.floating_ips_configured(router))
            self._assert_snat_chains(router)
            self._assert_floating_ip_chains(router)
            self._assert_iptables_rules_converged(router)
            self._assert_extra_routes(router)
            if (ip_version == constants.IP_VERSION_6 or dual_stack):
                ip_versions = [constants.IP_VERSION_4, constants.IP_VERSION_6]
            else:
                ip_versions = [constants.IP_VERSION_4]
            self._assert_onlink_subnet_routes(router, ip_versions)
        self._assert_metadata_chains(router)
        # Verify router gateway interface is configured to receive Router Advts
        # when IPv6 is enabled and no IPv6 gateway is configured.
        if router.use_ipv6 and not v6_ext_gw_with_sub:
            if not self.agent.conf.ipv6_gateway:
                self._assert_ipv6_accept_ra(router)
        if enable_ha:
            self._assert_ha_device(router)
            common_utils.wait_until_true(
                lambda: router.keepalived_manager.get_process().active,
                timeout=15)
        self._delete_router(self.agent, router.router_id)
        self._assert_interfaces_deleted_from_ovs()
        self._assert_router_does_not_exist(router)
        if enable_ha:
            common_utils.wait_until_true(
                lambda: not router.keepalived_manager.get_process().active,
                timeout=15)
        return return_copy
    def manage_router(self, agent, router):
        """Add *router* to *agent* and return the resulting RouterInfo."""
        self.addCleanup(agent._safe_router_removed, router['id'])
        # NOTE(mangelajo): Neutron functional for l3 don't rely on openvswitch
        #                  agent tagging ports, and all ports remain untagged
        #                  during test execution.
        #                  Workaround related to lp#1767422 plugs new ports as
        #                  dead vlan (4095) to avoid issues, we need to remove
        #                  such tag during functional l3 testing.
        original_plug_new = interface.OVSInterfaceDriver.plug_new
        def new_ovs_plug(self, *args, **kwargs):
            # Plug as usual, then strip the dead-vlan tag added by the
            # workaround described in the NOTE above.
            original_plug_new(self, *args, **kwargs)
            bridge = (kwargs.get('bridge') or args[4] or
                      self.conf.ovs_integration_bridge)
            device_name = kwargs.get('device_name') or args[2]
            ovsbr = ovs_lib.OVSBridge(bridge)
            ovsbr.clear_db_attribute('Port', device_name, 'tag')
        with mock.patch(OVS_INTERFACE_DRIVER + '.plug_new', autospec=True) as (
                ovs_plug):
            ovs_plug.side_effect = new_ovs_plug
            agent._process_added_router(router)
        return agent.router_info[router['id']]
    def _delete_router(self, agent, router_id):
        """Ask the agent to remove the router."""
        agent._router_removed(router_id)
def _add_fip(self, router, fip_address, fixed_address='10.0.0.2',
host=None, fixed_ip_address_scope=None):
fip = {'id': _uuid(),
'port_id': _uuid(),
'floating_ip_address': fip_address,
'fixed_ip_address': fixed_address,
'host': host,
'fixed_ip_address_scope': fixed_ip_address_scope}
router.router[constants.FLOATINGIP_KEY].append(fip)
def _add_internal_interface_by_subnet(self, router, count=1,
ip_version=constants.IP_VERSION_4,
ipv6_subnet_modes=None,
interface_id=None):
return l3_test_common.router_append_subnet(router, count,
ip_version, ipv6_subnet_modes, interface_id)
def _namespace_exists(self, namespace):
return ip_lib.network_namespace_exists(namespace)
def _metadata_proxy_exists(self, conf, router):
pm = external_process.ProcessManager(
conf,
router.router_id,
router.ns_name)
return pm.active
def device_exists_with_ips_and_mac(self, expected_device, name_getter,
namespace):
ip_cidrs = common_utils.fixed_ip_cidrs(expected_device['fixed_ips'])
return ip_lib.device_exists_with_ips_and_mac(
name_getter(expected_device['id']), ip_cidrs,
expected_device['mac_address'], namespace)
@staticmethod
def _port_first_ip_cidr(port):
fixed_ip = port['fixed_ips'][0]
return common_utils.ip_to_cidr(fixed_ip['ip_address'],
fixed_ip['prefixlen'])
def get_device_mtu(self, target_device, name_getter, namespace):
device = ip_lib.IPDevice(name_getter(target_device), namespace)
return device.link.mtu
def get_expected_keepalive_configuration(self, router):
ha_device_name = router.get_ha_device_name()
external_port = router.get_ex_gw_port()
ex_port_ipv6 = ip_lib.get_ipv6_lladdr(external_port['mac_address'])
ex_device_name = router.get_external_device_name(
external_port['id'])
external_device_cidr = self._port_first_ip_cidr(external_port)
internal_port = router.router[constants.INTERFACE_KEY][0]
int_port_ipv6 = ip_lib.get_ipv6_lladdr(internal_port['mac_address'])
internal_device_name = router.get_internal_device_name(
internal_port['id'])
internal_device_cidr = self._port_first_ip_cidr(internal_port)
floating_ip_cidr = common_utils.ip_to_cidr(
router.get_floating_ips()[0]['floating_ip_address'])
default_gateway_ip = external_port['subnets'][0].get('gateway_ip')
extra_subnet_cidr = external_port['extra_subnets'][0].get('cidr')
return textwrap.dedent("""\
global_defs {
notification_email_from %(email_from)s
router_id %(router_id)s
}
vrrp_instance VR_1 {
state BACKUP
interface %(ha_device_name)s
virtual_router_id 1
priority 50
garp_master_delay 60
nopreempt
advert_int 2
track_interface {
%(ha_device_name)s
}
virtual_ipaddress {
169.254.0.1/24 dev %(ha_device_name)s
}
virtual_ipaddress_excluded {
%(floating_ip_cidr)s dev %(ex_device_name)s
%(external_device_cidr)s dev %(ex_device_name)s
%(internal_device_cidr)s dev %(internal_device_name)s
%(ex_port_ipv6)s dev %(ex_device_name)s scope link
%(int_port_ipv6)s dev %(internal_device_name)s scope link
}
virtual_routes {
0.0.0.0/0 via %(default_gateway_ip)s dev %(ex_device_name)s
8.8.8.0/24 via 19.4.4.4
%(extra_subnet_cidr)s dev %(ex_device_name)s scope link
}
}""") % {
'email_from': keepalived.KEEPALIVED_EMAIL_FROM,
'router_id': keepalived.KEEPALIVED_ROUTER_ID,
'ha_device_name': ha_device_name,
'ex_device_name': ex_device_name,
'external_device_cidr': external_device_cidr,
'internal_device_name': internal_device_name,
'internal_device_cidr': internal_device_cidr,
'floating_ip_cidr': floating_ip_cidr,
'default_gateway_ip': default_gateway_ip,
'int_port_ipv6': int_port_ipv6,
'ex_port_ipv6': ex_port_ipv6,
'extra_subnet_cidr': extra_subnet_cidr,
}
def _get_rule(self, iptables_manager, table, chain, predicate):
rules = iptables_manager.get_chain(table, chain)
result = next(rule for rule in rules if predicate(rule))
return result
def _assert_router_does_not_exist(self, router):
# If the namespace assertion succeeds
# then the devices and iptable rules have also been deleted,
# so there's no need to check that explicitly.
self.assertFalse(self._namespace_exists(router.ns_name))
common_utils.wait_until_true(
lambda: not self._metadata_proxy_exists(self.agent.conf, router))
def _assert_snat_chains(self, router):
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'snat'))
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'POSTROUTING'))
def _assert_floating_ip_chains(self, router, snat_bound_fip=False):
if snat_bound_fip:
self.assertFalse(router.snat_iptables_manager.is_chain_empty(
'nat', 'float-snat'))
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'float-snat'))
    def _assert_iptables_rules_converged(self, router):
        """Assert a re-apply of the router's iptables rules is a no-op."""
        # if your code is failing on this line, it means you are not generating
        # your iptables rules in the same format that iptables-save returns
        # them. run iptables-save to see the format they should be in
        self.assertFalse(router.iptables_manager.apply())
def _assert_metadata_chains(self, router):
metadata_port_filter = lambda rule: (
str(self.agent.conf.metadata_port) in rule.rule)
self.assertTrue(self._get_rule(router.iptables_manager,
'nat',
'PREROUTING',
metadata_port_filter))
self.assertTrue(self._get_rule(router.iptables_manager,
'filter',
'INPUT',
metadata_port_filter))
def _assert_internal_devices(self, router):
internal_devices = router.router[constants.INTERFACE_KEY]
self.assertGreater(len(internal_devices), 0)
for device in internal_devices:
self.assertTrue(self.device_exists_with_ips_and_mac(
device, router.get_internal_device_name, router.ns_name))
def _assert_extra_routes(self, router, namespace=None):
if namespace is None:
namespace = router.ns_name
routes = ip_lib.get_routing_table(4, namespace=namespace)
routes = [{'nexthop': route['nexthop'],
'destination': route['destination']} for route in routes]
for extra_route in router.router['routes']:
self.assertIn(extra_route, routes)
def _assert_onlink_subnet_routes(
self, router, ip_versions, namespace=None):
ns_name = namespace or router.ns_name
routes = []
for ip_version in ip_versions:
_routes = ip_lib.get_routing_table(ip_version,
namespace=ns_name)
routes.extend(_routes)
routes = set(route['destination'] for route in routes)
extra_subnets = router.get_ex_gw_port()['extra_subnets']
for extra_subnet in (route['cidr'] for route in extra_subnets):
self.assertIn(extra_subnet, routes)
def _assert_interfaces_deleted_from_ovs(self):
def assert_ovs_bridge_empty(bridge_name):
bridge = ovs_lib.OVSBridge(bridge_name)
self.assertFalse(bridge.get_port_name_list())
assert_ovs_bridge_empty(self.agent.conf.ovs_integration_bridge)
assert_ovs_bridge_empty(self.agent.conf.external_network_bridge)
def floating_ips_configured(self, router):
floating_ips = router.router[constants.FLOATINGIP_KEY]
external_port = router.get_ex_gw_port()
return len(floating_ips) and all(
ip_lib.device_exists_with_ips_and_mac(
router.get_external_device_name(external_port['id']),
['%s/32' % fip['floating_ip_address']],
external_port['mac_address'],
namespace=router.ns_name) for fip in floating_ips)
def _create_router(self, router_info, agent):
ns_name = "%s%s%s" % (
'qrouter-' + router_info['id'],
self.NESTED_NAMESPACE_SEPARATOR, agent.host)
ext_name = "qg-%s-%s" % (agent.host, _uuid()[-4:])
int_name = "qr-%s-%s" % (agent.host, _uuid()[-4:])
get_ns_name = mock.patch.object(
namespaces.RouterNamespace, '_get_ns_name').start()
get_ns_name.return_value = ns_name
get_ext_name = mock.patch.object(l3_router_info.RouterInfo,
'get_external_device_name').start()
get_ext_name.return_value = ext_name
get_int_name = mock.patch.object(l3_router_info.RouterInfo,
'get_internal_device_name').start()
get_int_name.return_value = int_name
router = self.manage_router(agent, router_info)
router_ext_name = mock.patch.object(router,
'get_external_device_name').start()
router_ext_name.return_value = get_ext_name.return_value
router_int_name = mock.patch.object(router,
'get_internal_device_name').start()
router_int_name.return_value = get_int_name.return_value
return router
def create_ha_routers(self):
router_info = self.generate_router_info(enable_ha=True)
router1 = self._create_router(router_info, self.agent)
self._add_fip(router1, '192.168.111.12')
r1_br = ip_lib.IPDevice(router1.driver.conf.external_network_bridge)
r1_br.addr.add('19.4.4.1/24')
r1_br.link.set_up()
router_info_2 = copy.deepcopy(router_info)
router_info_2[constants.HA_INTERFACE_KEY] = (
l3_test_common.get_ha_interface(ip='169.254.192.2',
mac='22:22:22:22:22:22'))
router2 = self._create_router(router_info_2, self.failover_agent)
r2_br = ip_lib.IPDevice(router2.driver.conf.external_network_bridge)
r2_br.addr.add('19.4.4.1/24')
r2_br.link.set_up()
return (router1, router2)
def _get_master_and_slave_routers(self, router1, router2):
try:
common_utils.wait_until_true(
lambda: router1.ha_state == 'master')
common_utils.wait_until_true(
lambda: self._check_external_device(router1))
master_router = router1
slave_router = router2
except common_utils.WaitTimeout:
common_utils.wait_until_true(
lambda: router2.ha_state == 'master')
common_utils.wait_until_true(
lambda: self._check_external_device(router2))
master_router = router2
slave_router = router1
common_utils.wait_until_true(
lambda: master_router.ha_state == 'master')
common_utils.wait_until_true(
lambda: self._check_external_device(master_router))
common_utils.wait_until_true(
lambda: slave_router.ha_state == 'backup')
return master_router, slave_router
def fail_ha_router(self, router):
device_name = router.get_ha_device_name()
ha_device = ip_lib.IPDevice(device_name, router.ha_namespace)
ha_device.link.set_down()
@staticmethod
def fail_gw_router_port(router):
r_br = ip_lib.IPDevice(router.driver.conf.external_network_bridge)
r_br.link.set_down()
@staticmethod
def restore_gw_router_port(router):
r_br = ip_lib.IPDevice(router.driver.conf.external_network_bridge)
r_br.link.set_up()
@classmethod
def _get_addresses_on_device(cls, namespace, interface):
return [address['cidr'] for address in
ip_lib.IPDevice(interface, namespace=namespace).addr.list()]
def _assert_no_ip_addresses_on_interface(self, namespace, interface):
self.assertEqual(
[], self._get_addresses_on_device(namespace, interface))
def _assert_ip_addresses_on_interface(self,
namespace, interface, ip_addresses):
for ip_address in ip_addresses:
self._assert_ip_address_on_interface(namespace, interface,
ip_address)
def _assert_ip_address_on_interface(self,
namespace, interface, ip_address):
self.assertIn(
ip_address, self._get_addresses_on_device(namespace, interface))
def _assert_ping_reply_from_expected_address(
self, ping_result, expected_address):
ping_results = ping_result.split('\n')
self.assertGreater(
len(ping_results), 1,
"The result from ping should be multiple lines")
self.assertIn(
expected_address, ping_results[1],
("Expect to see %s in the reply of ping, but failed" %
expected_address))
| 44.048817
| 79
| 0.634181
|
import copy
import functools
import textwrap
import mock
import netaddr
from neutron_lib import constants
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import testtools
from neutron.agent.common import ovs_lib
from neutron.agent.l3 import agent as neutron_l3_agent
from neutron.agent.l3 import namespaces
from neutron.agent.l3 import router_info as l3_router_info
from neutron.agent import l3_agent as l3_agent_main
from neutron.agent.linux import external_process
from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib
from neutron.agent.linux import keepalived
from neutron.common import constants as n_const
from neutron.common import utils as common_utils
from neutron.conf.agent import common as agent_config
from neutron.conf import common as common_config
from neutron.tests.common import l3_test_common
from neutron.tests.common import net_helpers
from neutron.tests.functional import base
# Shorthand for generating unique IDs in test fixtures.
_uuid = uuidutils.generate_uuid
# Dotted path of the OVS interface driver; used both as the agent's
# configured driver and as a mock.patch target.
OVS_INTERFACE_DRIVER = 'neutron.agent.linux.interface.OVSInterfaceDriver'
def get_ovs_bridge(br_name):
    """Return an OVSBridge helper wrapping the bridge named *br_name*."""
    bridge = ovs_lib.OVSBridge(br_name)
    return bridge
class L3AgentTestFramework(base.BaseSudoTestCase):
    """Base harness for functional L3-agent tests (requires root/sudo)."""

    # Interface driver the agents under test are configured with.
    INTERFACE_DRIVER = OVS_INTERFACE_DRIVER
    # Separator used to build per-agent-host namespace names so two agents
    # can host the same router on one machine.
    NESTED_NAMESPACE_SEPARATOR = '@'
    def setUp(self):
        """Stub out the RPC plumbing and build a configured L3 agent."""
        super(L3AgentTestFramework, self).setUp()
        # Replace the plugin RPC API with a mock so no controller is needed;
        # keep a handle on it so tests can program return values.
        self.mock_plugin_api = mock.patch(
            'neutron.agent.l3.agent.L3PluginApi').start().return_value
        # State reporting would try to reach a message bus; silence it.
        mock.patch('neutron.agent.rpc.PluginReportStateAPI').start()
        self.conf = self._configure_agent('agent1')
        self.agent = neutron_l3_agent.L3NATAgentWithStateReport('agent1',
                                                                self.conf)
def _get_config_opts(self):
config = cfg.ConfigOpts()
config.register_opts(common_config.core_opts)
config.register_opts(common_config.core_cli_opts)
logging.register_options(config)
agent_config.register_process_monitor_opts(config)
return config
    def _configure_agent(self, host, agent_mode='dvr_snat'):
        """Return an agent config wired to fresh OVS bridges and temp paths.

        :param host: hostname the agent will report as.
        :param agent_mode: L3 agent mode (defaults to 'dvr_snat').
        """
        conf = self._get_config_opts()
        l3_agent_main.register_opts(conf)
        conf.set_override('interface_driver', self.INTERFACE_DRIVER)
        # Dedicated throwaway bridges so concurrent tests cannot interfere.
        br_int = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
        br_ex = self.useFixture(net_helpers.OVSBridgeFixture()).bridge
        conf.set_override('ovs_integration_bridge', br_int.br_name)
        conf.set_override('external_network_bridge', br_ex.br_name)
        # All mutable agent state lives under a per-test temp dir.
        temp_dir = self.get_new_temp_dir()
        get_temp_file_path = functools.partial(self.get_temp_file_path,
                                               root=temp_dir)
        conf.set_override('state_path', temp_dir.path)
        conf.set_override('log_file',
                          get_temp_file_path('log_file'))
        conf.set_override('metadata_proxy_socket',
                          get_temp_file_path('metadata_proxy'))
        conf.set_override('ha_confs_path',
                          get_temp_file_path('ha_confs'))
        conf.set_override('external_pids',
                          get_temp_file_path('external/pids'))
        conf.set_override('host', host)
        conf.set_override('agent_mode', agent_mode)
        return conf
def _get_agent_ovs_integration_bridge(self, agent):
return get_ovs_bridge(agent.conf.ovs_integration_bridge)
def generate_router_info(self, enable_ha,
ip_version=constants.IP_VERSION_4,
extra_routes=True,
enable_fip=True, enable_snat=True,
num_internal_ports=1,
dual_stack=False, v6_ext_gw_with_sub=True,
qos_policy_id=None):
if ip_version == constants.IP_VERSION_6 and not dual_stack:
enable_snat = False
enable_fip = False
extra_routes = False
return l3_test_common.prepare_router_data(ip_version=ip_version,
enable_snat=enable_snat,
num_internal_ports=(
num_internal_ports),
enable_floating_ip=enable_fip,
enable_ha=enable_ha,
extra_routes=extra_routes,
dual_stack=dual_stack,
v6_ext_gw_with_sub=(
v6_ext_gw_with_sub),
qos_policy_id=qos_policy_id)
    def _test_conntrack_disassociate_fip(self, ha):
        """Verify conntrack entries are purged when a FIP is disassociated.

        Establishes a TCP connection through a floating IP, then removes
        the FIP and checks both that the conntrack entry disappears and
        that connectivity is broken.
        """
        router_info = self.generate_router_info(enable_ha=ha)
        router = self.manage_router(self.agent, router_info)
        port = net_helpers.get_free_namespace_port(
            constants.PROTO_NAME_TCP, router.ns_name)
        client_address = '19.4.4.3'
        server_address = '35.4.0.4'
        def clean_fips(router):
            # Drop every floating IP from the router's data model.
            router.router[constants.FLOATINGIP_KEY] = []
        clean_fips(router)
        self._add_fip(router, client_address, fixed_address=server_address)
        router.process()
        router_ns = ip_lib.IPWrapper(namespace=router.ns_name)
        netcat = net_helpers.NetcatTester(
            router.ns_name, router.ns_name, client_address, port,
            protocol=net_helpers.NetcatTester.TCP)
        self.addCleanup(netcat.stop_processes)
        def assert_num_of_conntrack_rules(n):
            # Count conntrack entries originating from the client address.
            out = router_ns.netns.execute(["conntrack", "-L",
                                           "--orig-src", client_address])
            self.assertEqual(
                n, len([line for line in out.strip().split('\n') if line]))
        if ha:
            common_utils.wait_until_true(lambda: router.ha_state == 'master')
        with self.assert_max_execution_time(100):
            assert_num_of_conntrack_rules(0)
            self.assertTrue(netcat.test_connectivity())
            assert_num_of_conntrack_rules(1)
            # Disassociating the FIP must flush the conntrack entry ...
            clean_fips(router)
            router.process()
            assert_num_of_conntrack_rules(0)
            # ... and connectivity through the old FIP must now fail.
            with testtools.ExpectedException(RuntimeError):
                netcat.test_connectivity()
def _test_update_floatingip_statuses(self, router_info):
router = self.manage_router(self.agent, router_info)
rpc = self.agent.plugin_rpc.update_floatingip_statuses
self.assertTrue(rpc.called)
expected_fips = set([
(fip['id'], constants.FLOATINGIP_STATUS_ACTIVE) for fip in
router.router[constants.FLOATINGIP_KEY]])
call = [args[0] for args in rpc.call_args_list][0]
actual_fips = set(
[(fip_id, status) for fip_id, status in call[2].items()])
self.assertEqual(expected_fips, actual_fips)
def _gateway_check(self, gateway_ip, external_device):
expected_gateway = gateway_ip
ip_vers = netaddr.IPAddress(expected_gateway).version
existing_gateway = (external_device.route.get_gateway(
ip_version=ip_vers).get('gateway'))
self.assertEqual(expected_gateway, existing_gateway)
def _assert_ha_device(self, router):
def ha_router_dev_name_getter(not_used):
return router.get_ha_device_name()
self.assertTrue(self.device_exists_with_ips_and_mac(
router.router[constants.HA_INTERFACE_KEY],
ha_router_dev_name_getter, router.ns_name))
def _assert_gateway(self, router, v6_ext_gw_with_sub=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
external_device = ip_lib.IPDevice(external_device_name,
namespace=router.ns_name)
for subnet in external_port['subnets']:
self._gateway_check(subnet['gateway_ip'], external_device)
if not v6_ext_gw_with_sub:
self._gateway_check(self.agent.conf.ipv6_gateway,
external_device)
def _check_external_device(self, router):
external_port = router.get_ex_gw_port()
return (self.device_exists_with_ips_and_mac(
external_port, router.get_external_device_name,
router.ns_name))
    def _assert_external_device(self, router):
        """Assert the external gateway device exists with its IPs and MAC."""
        self.assertTrue(self._check_external_device(router))
def _assert_ipv6_accept_ra(self, router, enabled=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name)
ra_state = ip_wrapper.netns.execute(['sysctl', '-b',
'net.ipv6.conf.%s.accept_ra' % external_device_name])
self.assertEqual(enabled, int(ra_state) != n_const.ACCEPT_RA_DISABLED)
def _assert_ipv6_forwarding(self, router, enabled=True):
external_port = router.get_ex_gw_port()
external_device_name = router.get_external_device_name(
external_port['id'])
ip_wrapper = ip_lib.IPWrapper(namespace=router.ns_name)
fwd_state = ip_wrapper.netns.execute(['sysctl', '-b',
'net.ipv6.conf.%s.forwarding' % external_device_name])
self.assertEqual(int(enabled), int(fwd_state))
    def _router_lifecycle(self, enable_ha, ip_version=constants.IP_VERSION_4,
                          dual_stack=False, v6_ext_gw_with_sub=True,
                          router_info=None):
        """Exercise create -> verify -> delete for one router.

        Creates the router, asserts namespaces, devices, gateway, NAT
        chains, routes and metadata rules, then deletes it and asserts
        full cleanup. Returns a deep copy of the router data used.
        """
        router_info = router_info or self.generate_router_info(
            enable_ha, ip_version, dual_stack=dual_stack,
            v6_ext_gw_with_sub=(v6_ext_gw_with_sub))
        return_copy = copy.deepcopy(router_info)
        router = self.manage_router(self.agent, router_info)
        # Add two SLAAC v6 subnets so IPv6/dual-stack code paths run too.
        slaac = constants.IPV6_SLAAC
        slaac_mode = {'ra_mode': slaac, 'address_mode': slaac}
        subnet_modes = [slaac_mode] * 2
        self._add_internal_interface_by_subnet(router.router,
                count=2, ip_version=constants.IP_VERSION_6,
                ipv6_subnet_modes=subnet_modes)
        router.process()
        if enable_ha:
            # Before keepalived elects a master the gateway device must
            # carry no addresses at all.
            port = router.get_ex_gw_port()
            interface_name = router.get_external_device_name(port['id'])
            self._assert_no_ip_addresses_on_interface(router.ns_name,
                                                      interface_name)
            common_utils.wait_until_true(lambda: router.ha_state == 'master')
            # Keepalived configures VIPs asynchronously after the state
            # transition; wait until the last device has its address.
            device = router.router[constants.INTERFACE_KEY][-1]
            device_exists = functools.partial(
                self.device_exists_with_ips_and_mac,
                device,
                router.get_internal_device_name,
                router.ns_name)
            common_utils.wait_until_true(device_exists)
        self.assertTrue(self._namespace_exists(router.ns_name))
        common_utils.wait_until_true(
            lambda: self._metadata_proxy_exists(self.agent.conf, router))
        self._assert_internal_devices(router)
        self._assert_external_device(router)
        # NOTE(review): for HA+IPv6 the gateway/NAT assertions are skipped
        # (keepalived owns the addresses) — confirm exact branch contents
        # against upstream; indentation was lost in this dump.
        if not (enable_ha and
                (ip_version == constants.IP_VERSION_6 or dual_stack)):
            self._assert_gateway(router, v6_ext_gw_with_sub)
            self.assertTrue(self.floating_ips_configured(router))
            self._assert_snat_chains(router)
            self._assert_floating_ip_chains(router)
            self._assert_iptables_rules_converged(router)
            self._assert_extra_routes(router)
            if (ip_version == constants.IP_VERSION_6 or dual_stack):
                ip_versions = [constants.IP_VERSION_4, constants.IP_VERSION_6]
            else:
                ip_versions = [constants.IP_VERSION_4]
            self._assert_onlink_subnet_routes(router, ip_versions)
        self._assert_metadata_chains(router)
        if router.use_ipv6 and not v6_ext_gw_with_sub:
            if not self.agent.conf.ipv6_gateway:
                # No static v6 gateway configured: the device must accept
                # Router Advertisements instead.
                self._assert_ipv6_accept_ra(router)
        if enable_ha:
            self._assert_ha_device(router)
            common_utils.wait_until_true(
                lambda: router.keepalived_manager.get_process().active,
                timeout=15)
        self._delete_router(self.agent, router.router_id)
        self._assert_interfaces_deleted_from_ovs()
        self._assert_router_does_not_exist(router)
        if enable_ha:
            common_utils.wait_until_true(
                lambda: not router.keepalived_manager.get_process().active,
                timeout=15)
        return return_copy
    def manage_router(self, agent, router):
        """Create *router* on *agent* and return the resulting RouterInfo.

        Registers a cleanup that safely removes the router at teardown.
        """
        self.addCleanup(agent._safe_router_removed, router['id'])
        # agent tagging ports, and all ports remain untagged
        # during test execution.
        # Workaround related to lp#1767422 plugs new ports as
        # dead vlan (4095) to avoid issues, we need to remove
        # such tag during functional l3 testing.
        original_plug_new = interface.OVSInterfaceDriver.plug_new
        def new_ovs_plug(self, *args, **kwargs):
            # Plug normally, then strip the dead-vlan tag from the port.
            original_plug_new(self, *args, **kwargs)
            bridge = (kwargs.get('bridge') or args[4] or
                      self.conf.ovs_integration_bridge)
            device_name = kwargs.get('device_name') or args[2]
            ovsbr = ovs_lib.OVSBridge(bridge)
            ovsbr.clear_db_attribute('Port', device_name, 'tag')
        with mock.patch(OVS_INTERFACE_DRIVER + '.plug_new', autospec=True) as (
                ovs_plug):
            ovs_plug.side_effect = new_ovs_plug
            agent._process_added_router(router)
        return agent.router_info[router['id']]
    def _delete_router(self, agent, router_id):
        """Ask *agent* to tear down the router identified by *router_id*."""
        agent._router_removed(router_id)
def _add_fip(self, router, fip_address, fixed_address='10.0.0.2',
host=None, fixed_ip_address_scope=None):
fip = {'id': _uuid(),
'port_id': _uuid(),
'floating_ip_address': fip_address,
'fixed_ip_address': fixed_address,
'host': host,
'fixed_ip_address_scope': fixed_ip_address_scope}
router.router[constants.FLOATINGIP_KEY].append(fip)
def _add_internal_interface_by_subnet(self, router, count=1,
ip_version=constants.IP_VERSION_4,
ipv6_subnet_modes=None,
interface_id=None):
return l3_test_common.router_append_subnet(router, count,
ip_version, ipv6_subnet_modes, interface_id)
    def _namespace_exists(self, namespace):
        """Return True when the network namespace exists on the host."""
        return ip_lib.network_namespace_exists(namespace)
def _metadata_proxy_exists(self, conf, router):
pm = external_process.ProcessManager(
conf,
router.router_id,
router.ns_name)
return pm.active
def device_exists_with_ips_and_mac(self, expected_device, name_getter,
namespace):
ip_cidrs = common_utils.fixed_ip_cidrs(expected_device['fixed_ips'])
return ip_lib.device_exists_with_ips_and_mac(
name_getter(expected_device['id']), ip_cidrs,
expected_device['mac_address'], namespace)
@staticmethod
def _port_first_ip_cidr(port):
fixed_ip = port['fixed_ips'][0]
return common_utils.ip_to_cidr(fixed_ip['ip_address'],
fixed_ip['prefixlen'])
def get_device_mtu(self, target_device, name_getter, namespace):
device = ip_lib.IPDevice(name_getter(target_device), namespace)
return device.link.mtu
    def get_expected_keepalive_configuration(self, router):
        """Render the keepalived.conf expected for *router*.

        Pulls device names, CIDRs and IPv6 link-local addresses from the
        router's ports and substitutes them into the canonical template.
        """
        ha_device_name = router.get_ha_device_name()
        external_port = router.get_ex_gw_port()
        ex_port_ipv6 = ip_lib.get_ipv6_lladdr(external_port['mac_address'])
        ex_device_name = router.get_external_device_name(
            external_port['id'])
        external_device_cidr = self._port_first_ip_cidr(external_port)
        internal_port = router.router[constants.INTERFACE_KEY][0]
        int_port_ipv6 = ip_lib.get_ipv6_lladdr(internal_port['mac_address'])
        internal_device_name = router.get_internal_device_name(
            internal_port['id'])
        internal_device_cidr = self._port_first_ip_cidr(internal_port)
        floating_ip_cidr = common_utils.ip_to_cidr(
            router.get_floating_ips()[0]['floating_ip_address'])
        default_gateway_ip = external_port['subnets'][0].get('gateway_ip')
        extra_subnet_cidr = external_port['extra_subnets'][0].get('cidr')
        # NOTE(review): template indentation reconstructed from upstream;
        # dedent strips the common leading whitespace — confirm byte-equality
        # against the original file.
        return textwrap.dedent("""\
            global_defs {
                notification_email_from %(email_from)s
                router_id %(router_id)s
            }
            vrrp_instance VR_1 {
                state BACKUP
                interface %(ha_device_name)s
                virtual_router_id 1
                priority 50
                garp_master_delay 60
                nopreempt
                advert_int 2
                track_interface {
                    %(ha_device_name)s
                }
                virtual_ipaddress {
                    169.254.0.1/24 dev %(ha_device_name)s
                }
                virtual_ipaddress_excluded {
                    %(floating_ip_cidr)s dev %(ex_device_name)s
                    %(external_device_cidr)s dev %(ex_device_name)s
                    %(internal_device_cidr)s dev %(internal_device_name)s
                    %(ex_port_ipv6)s dev %(ex_device_name)s scope link
                    %(int_port_ipv6)s dev %(internal_device_name)s scope link
                }
                virtual_routes {
                    0.0.0.0/0 via %(default_gateway_ip)s dev %(ex_device_name)s
                    8.8.8.0/24 via 19.4.4.4
                    %(extra_subnet_cidr)s dev %(ex_device_name)s scope link
                }
            }""") % {
            'email_from': keepalived.KEEPALIVED_EMAIL_FROM,
            'router_id': keepalived.KEEPALIVED_ROUTER_ID,
            'ha_device_name': ha_device_name,
            'ex_device_name': ex_device_name,
            'external_device_cidr': external_device_cidr,
            'internal_device_name': internal_device_name,
            'internal_device_cidr': internal_device_cidr,
            'floating_ip_cidr': floating_ip_cidr,
            'default_gateway_ip': default_gateway_ip,
            'int_port_ipv6': int_port_ipv6,
            'ex_port_ipv6': ex_port_ipv6,
            'extra_subnet_cidr': extra_subnet_cidr,
        }
def _get_rule(self, iptables_manager, table, chain, predicate):
rules = iptables_manager.get_chain(table, chain)
result = next(rule for rule in rules if predicate(rule))
return result
def _assert_router_does_not_exist(self, router):
# If the namespace assertion succeeds
# then the devices and iptable rules have also been deleted,
# so there's no need to check that explicitly.
self.assertFalse(self._namespace_exists(router.ns_name))
common_utils.wait_until_true(
lambda: not self._metadata_proxy_exists(self.agent.conf, router))
def _assert_snat_chains(self, router):
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'snat'))
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'POSTROUTING'))
def _assert_floating_ip_chains(self, router, snat_bound_fip=False):
if snat_bound_fip:
self.assertFalse(router.snat_iptables_manager.is_chain_empty(
'nat', 'float-snat'))
self.assertFalse(router.iptables_manager.is_chain_empty(
'nat', 'float-snat'))
    def _assert_iptables_rules_converged(self, router):
        """Assert a re-apply of the router's iptables rules is a no-op.

        apply() returns the rules it had to change; a non-empty result
        means rules are not generated in canonical iptables-save format.
        """
        self.assertFalse(router.iptables_manager.apply())
def _assert_metadata_chains(self, router):
metadata_port_filter = lambda rule: (
str(self.agent.conf.metadata_port) in rule.rule)
self.assertTrue(self._get_rule(router.iptables_manager,
'nat',
'PREROUTING',
metadata_port_filter))
self.assertTrue(self._get_rule(router.iptables_manager,
'filter',
'INPUT',
metadata_port_filter))
def _assert_internal_devices(self, router):
internal_devices = router.router[constants.INTERFACE_KEY]
self.assertGreater(len(internal_devices), 0)
for device in internal_devices:
self.assertTrue(self.device_exists_with_ips_and_mac(
device, router.get_internal_device_name, router.ns_name))
def _assert_extra_routes(self, router, namespace=None):
if namespace is None:
namespace = router.ns_name
routes = ip_lib.get_routing_table(4, namespace=namespace)
routes = [{'nexthop': route['nexthop'],
'destination': route['destination']} for route in routes]
for extra_route in router.router['routes']:
self.assertIn(extra_route, routes)
def _assert_onlink_subnet_routes(
self, router, ip_versions, namespace=None):
ns_name = namespace or router.ns_name
routes = []
for ip_version in ip_versions:
_routes = ip_lib.get_routing_table(ip_version,
namespace=ns_name)
routes.extend(_routes)
routes = set(route['destination'] for route in routes)
extra_subnets = router.get_ex_gw_port()['extra_subnets']
for extra_subnet in (route['cidr'] for route in extra_subnets):
self.assertIn(extra_subnet, routes)
def _assert_interfaces_deleted_from_ovs(self):
def assert_ovs_bridge_empty(bridge_name):
bridge = ovs_lib.OVSBridge(bridge_name)
self.assertFalse(bridge.get_port_name_list())
assert_ovs_bridge_empty(self.agent.conf.ovs_integration_bridge)
assert_ovs_bridge_empty(self.agent.conf.external_network_bridge)
def floating_ips_configured(self, router):
floating_ips = router.router[constants.FLOATINGIP_KEY]
external_port = router.get_ex_gw_port()
return len(floating_ips) and all(
ip_lib.device_exists_with_ips_and_mac(
router.get_external_device_name(external_port['id']),
['%s/32' % fip['floating_ip_address']],
external_port['mac_address'],
namespace=router.ns_name) for fip in floating_ips)
    def _create_router(self, router_info, agent):
        """Create a router on *agent* with per-agent-unique device names.

        Namespace and device name lookups are patched so two agents can
        host copies of the same router on one machine without collisions.
        """
        ns_name = "%s%s%s" % (
            'qrouter-' + router_info['id'],
            self.NESTED_NAMESPACE_SEPARATOR, agent.host)
        ext_name = "qg-%s-%s" % (agent.host, _uuid()[-4:])
        int_name = "qr-%s-%s" % (agent.host, _uuid()[-4:])
        get_ns_name = mock.patch.object(
            namespaces.RouterNamespace, '_get_ns_name').start()
        get_ns_name.return_value = ns_name
        get_ext_name = mock.patch.object(l3_router_info.RouterInfo,
                                         'get_external_device_name').start()
        get_ext_name.return_value = ext_name
        get_int_name = mock.patch.object(l3_router_info.RouterInfo,
                                         'get_internal_device_name').start()
        get_int_name.return_value = int_name
        router = self.manage_router(agent, router_info)
        # Re-patch on the concrete router object so later lookups keep
        # returning the per-agent names after the class patches are undone.
        router_ext_name = mock.patch.object(router,
                                            'get_external_device_name').start()
        router_ext_name.return_value = get_ext_name.return_value
        router_int_name = mock.patch.object(router,
                                            'get_internal_device_name').start()
        router_int_name.return_value = get_int_name.return_value
        return router
    def create_ha_routers(self):
        """Create the same HA router on both agents and return the pair."""
        router_info = self.generate_router_info(enable_ha=True)
        router1 = self._create_router(router_info, self.agent)
        self._add_fip(router1, '192.168.111.12')
        # Give the external bridge an address so the gateway is reachable.
        r1_br = ip_lib.IPDevice(router1.driver.conf.external_network_bridge)
        r1_br.addr.add('19.4.4.1/24')
        r1_br.link.set_up()
        # Second copy of the router, with its own HA interface (distinct
        # VRRP IP/MAC), hosted on the failover agent.
        router_info_2 = copy.deepcopy(router_info)
        router_info_2[constants.HA_INTERFACE_KEY] = (
            l3_test_common.get_ha_interface(ip='169.254.192.2',
                                            mac='22:22:22:22:22:22'))
        router2 = self._create_router(router_info_2, self.failover_agent)
        r2_br = ip_lib.IPDevice(router2.driver.conf.external_network_bridge)
        r2_br.addr.add('19.4.4.1/24')
        r2_br.link.set_up()
        return (router1, router2)
    def _get_master_and_slave_routers(self, router1, router2):
        """Return (master, slave) once VRRP has settled on a master.

        Tries router1 first; on timeout assumes router2 won the election.
        Both states are re-checked before returning to rule out a
        transient split-brain window.
        """
        try:
            common_utils.wait_until_true(
                lambda: router1.ha_state == 'master')
            common_utils.wait_until_true(
                lambda: self._check_external_device(router1))
            master_router = router1
            slave_router = router2
        except common_utils.WaitTimeout:
            common_utils.wait_until_true(
                lambda: router2.ha_state == 'master')
            common_utils.wait_until_true(
                lambda: self._check_external_device(router2))
            master_router = router2
            slave_router = router1
        common_utils.wait_until_true(
            lambda: master_router.ha_state == 'master')
        common_utils.wait_until_true(
            lambda: self._check_external_device(master_router))
        common_utils.wait_until_true(
            lambda: slave_router.ha_state == 'backup')
        return master_router, slave_router
def fail_ha_router(self, router):
device_name = router.get_ha_device_name()
ha_device = ip_lib.IPDevice(device_name, router.ha_namespace)
ha_device.link.set_down()
@staticmethod
def fail_gw_router_port(router):
r_br = ip_lib.IPDevice(router.driver.conf.external_network_bridge)
r_br.link.set_down()
@staticmethod
def restore_gw_router_port(router):
r_br = ip_lib.IPDevice(router.driver.conf.external_network_bridge)
r_br.link.set_up()
@classmethod
def _get_addresses_on_device(cls, namespace, interface):
return [address['cidr'] for address in
ip_lib.IPDevice(interface, namespace=namespace).addr.list()]
    def _assert_no_ip_addresses_on_interface(self, namespace, interface):
        """Assert the interface carries no IP addresses at all."""
        self.assertEqual(
            [], self._get_addresses_on_device(namespace, interface))
def _assert_ip_addresses_on_interface(self,
namespace, interface, ip_addresses):
for ip_address in ip_addresses:
self._assert_ip_address_on_interface(namespace, interface,
ip_address)
    def _assert_ip_address_on_interface(self,
                                        namespace, interface, ip_address):
        """Assert *ip_address* is configured on the interface."""
        self.assertIn(
            ip_address, self._get_addresses_on_device(namespace, interface))
def _assert_ping_reply_from_expected_address(
self, ping_result, expected_address):
ping_results = ping_result.split('\n')
self.assertGreater(
len(ping_results), 1,
"The result from ping should be multiple lines")
self.assertIn(
expected_address, ping_results[1],
("Expect to see %s in the reply of ping, but failed" %
expected_address))
| true
| true
|
f707879e076cd18ac6fe4a33eb20c917b29be644
| 12,427
|
py
|
Python
|
pressure_control_interface/utils/comm_handler.py
|
cbteeple/pressure_control_interface
|
803491533da2bfd0cc980f3b9553c691b6dec0d8
|
[
"MIT"
] | null | null | null |
pressure_control_interface/utils/comm_handler.py
|
cbteeple/pressure_control_interface
|
803491533da2bfd0cc980f3b9553c691b6dec0d8
|
[
"MIT"
] | 10
|
2020-08-12T00:23:27.000Z
|
2022-01-03T19:54:02.000Z
|
pressure_control_interface/utils/comm_handler.py
|
cbteeple/pressure_control_interface
|
803491533da2bfd0cc980f3b9553c691b6dec0d8
|
[
"MIT"
] | null | null | null |
import serial
import time
from datetime import datetime
import sys
import os
import yaml
import csv
import re
from validate_commands import CommandValidator
def build_cmd_string(command, values=None, format="%0.3f"):
    """Serialize a command and optional values into a terminated string.

    Values (a scalar, list or tuple) are appended semicolon-separated,
    each rendered with the printf-style ``format``; a trailing newline
    terminates the command.  NOTE: ``format`` shadows the builtin, but
    the name is kept for backward compatibility with keyword callers.
    """
    txt = command
    if values is not None:
        if isinstance(values, (list, tuple)):
            # An empty sequence adds nothing (loop over [] is a no-op).
            for val in values:
                txt += ";" + format % (val)
        else:
            txt += ";" + format % (values)
    return txt + '\n'
class CommandHandler:
    """Split and validate commands for a chain of pressure controllers.

    Each entry of ``comm_list`` describes one controller (channel count
    and command spec); one CommandValidator is built per controller.
    """

    def __init__(self, comm_list):
        self.comm_list = comm_list
        self.validators = []
        self.num_chans = []
        self.cmd_specs = []
        for settings in self.comm_list:
            self.num_chans.append(settings['num_channels'])
            self.cmd_specs.append(settings['cmd_spec'])
            self.validators.append(CommandValidator(settings["cmd_spec"], settings["num_channels"]))

    def split_command(self, command, values, format="%0.3f"):
        """Split one command into per-controller commands.

        Returns one entry per controller: either a dict
        ``{'command': ..., 'values': ...}`` or ``None`` when the command
        does not apply to that controller.
        """
        commands_out = []
        for idx, _ in enumerate(self.comm_list):
            spec = self.validators[idx].get_spec(command)
            if spec is None:
                split_how = None
            else:
                split_how = spec.get('split_how', None)
            split_idx = None
            switch_idx = None
            # If we are not splitting the command, send the same thing to each controller
            if split_how is None:
                commands_out.append({'command': command, 'values': values})
            # If we are splitting the command, determine how
            else:
                if isinstance(values, (list, tuple)):
                    values = list(values)
                    if len(values) == 0:
                        commands_out.append({'command': command, 'values': values})
                        continue
                    split_how_single = split_how.get('single_arg', None)
                    if spec['num_args'][0] == len(values):
                        if split_how_single is None:
                            commands_out.append({'command': command, 'values': values})
                        elif 'channel' in split_how_single:
                            channel = 0
                            # NOTE(review): split_how comes from the spec file;
                            # eval() of an untrusted spec would be unsafe.
                            split_idx = eval(split_how_single)
                        elif 'idx' in split_how_single:
                            channel = 0
                            switch_idx = int(re.match('.*?([0-9]+)$', split_how_single).group(1))
                        else:
                            commands_out.append(None)
                    else:
                        split_how_multi = split_how.get('multi_arg', None)
                        if split_how_multi is None:
                            commands_out.append({'command': command, 'values': values})
                        elif 'channel' in split_how_multi:
                            channel = 0
                            split_idx = eval(split_how_multi)
                        elif 'idx' in split_how_multi:
                            channel = 0
                            # BUGFIX: previously parsed split_how_single here,
                            # which belongs to the single-arg branch (and may
                            # be None in this one).
                            switch_idx = int(re.match('.*?([0-9]+)$', split_how_multi).group(1))
                        else:
                            commands_out.append(None)
                else:
                    commands_out.append({'command': command, 'values': values})
            # Channel window owned by this controller within the full chain.
            max_chan = sum(self.num_chans[0:idx + 1])
            min_chan = sum(self.num_chans[0:idx])
            if split_idx is not None:
                # Keep the leading shared args, then this controller's slice.
                curr_vals = values[0:split_idx]
                if len(values) >= max_chan + split_idx:
                    curr_vals.extend(values[min_chan + split_idx:max_chan + split_idx])
                else:
                    curr_vals.extend(values[min_chan + split_idx:])
                commands_out.append({'command': command, 'values': curr_vals})
            elif switch_idx is not None:
                # Re-base the channel index into this controller's window.
                if values[switch_idx] < max_chan and values[switch_idx] >= min_chan:
                    values[switch_idx] = float(values[switch_idx]) - min_chan
                    commands_out.append({'command': command, 'values': values})
                else:
                    commands_out.append(None)
        return commands_out
class CommHandler:
    """Own the serial links to the pressure controllers.

    Fans commands out over every configured port (via CommandHandler),
    reads and parses incoming lines, and forwards parsed data to one
    DataSaver logger per port.
    """

    def __init__(self):
        self.serial_settings = None
        self.s = None  # list of open serial objects once initialize() runs
        # BUGFIX: loggers must exist even before initialize() so that
        # shutdown()/__del__ never raise AttributeError.
        self.loggers = []

    def initialize(self, devname=None, baudrate=None, ser=None):
        """Open the serial connection(s).

        Accepts an explicit devname+baudrate, an already-open serial
        object, or falls back to previously loaded serial settings.

        Raises:
            ValueError: when no connection information is available.
        """
        if devname is not None and baudrate is not None:
            self.s = [serial.Serial(devname, baudrate)]
        elif ser is not None:
            self.s = [ser]
        elif self.serial_settings is not None:
            self.s = []
            for settings in self.serial_settings:
                self.s.append(serial.Serial(settings["devname"], settings["baudrate"]))
        else:
            self.s = None
            raise ValueError("CommHandler expects either a devname and baudrate, or and existing serial object")
        self.command_handler = CommandHandler(self.serial_settings)
        self.validator = []
        self.loggers = []
        for idx, settings in enumerate(self.serial_settings):
            validator_curr = self.command_handler.validators[idx]
            self.validator.append(validator_curr)
            self.loggers.append(DataSaver(settings, validator_curr))

    # Get serial settings from a file
    def read_serial_settings(self, file=None):
        """Load the comms + hardware YAML config; return the settings list."""
        file_path = os.path.dirname(os.path.realpath(__file__))
        if file is None:
            file = os.path.join(file_path, "..", "config", "comms", "comms_config.yaml")
        with open(file) as f:
            hardware_settings = yaml.safe_load(f)
        hw_file = hardware_settings.get('hardware')
        devnames = hardware_settings.get('devnames')
        hw_fullfile = os.path.join(file_path, "..", "config", "hardware", hw_file + ".yaml")
        with open(hw_fullfile) as f:
            serial_settings = yaml.safe_load(f)
        # Attach each device name from the comms config to its hardware entry.
        for idx, obj in enumerate(serial_settings):
            obj['devname'] = devnames[idx]
        self.serial_settings = serial_settings
        return serial_settings

    # Get serial settings currently in use
    def get_serial_settings(self):
        return self.serial_settings

    # Set serial settings directly
    def set_serial_settings(self, serial_settings):
        self.serial_settings = serial_settings

    # Send a command (split per controller) out over every port
    def send_command(self, command, values=None, format="%0.3f"):
        cmd_obj = self.command_handler.split_command(command, values)
        cmd = []
        for cmd_curr in cmd_obj:
            if cmd_curr is None:
                cmd.append(None)
            else:
                cmd.append(build_cmd_string(cmd_curr['command'], cmd_curr['values'], format))
        print(cmd, len(cmd))
        for ser, cmd_curr in zip(self.s, cmd):
            if cmd_curr is not None:
                ser.write(cmd_curr.encode())

    # Send a raw string out on every port
    def send_string(self, string, eol='\n'):
        string += eol
        for ser in self.s:
            ser.write(string.encode())

    def read_line(self, display=False, raw=False):
        """Read at most one pending line from each port.

        Returns None when nothing was available; otherwise a per-port
        list of raw strings (raw=True) or validator-parsed entries.
        """
        out = [None] * len(self.s)
        for idx, ser in enumerate(self.s):
            curr_line = None
            if ser.in_waiting:
                curr_line = ser.readline().decode().strip()
                out[idx] = curr_line
            if curr_line is not None and display:
                print(curr_line)
        if out == [None] * len(self.s):
            return None
        elif raw:
            return out
        else:
            new_out = []
            for idx, _ in enumerate(out):
                line = self.validator[idx].process_line(out[idx])
                if line is None:
                    new_out.append(None)
                else:
                    new_out.append(line[0])
            if new_out == [None] * len(self.s):
                return None
            return new_out

    def read_all(self, display=False, raw=False):
        """Drain every pending line; return the list of reads or None."""
        out = []
        new_line = []
        while new_line is not None:
            new_line = self.read_line(display, raw)
            if new_line is not None:
                out.append(new_line)
        if len(out) == 0:
            return None
        else:
            return out

    def save_init(self, filename, filetype='csv'):
        """Open one log file per port, suffixed with the port index."""
        for idx, logger in enumerate(self.loggers):
            file, file_extension = os.path.splitext(filename)
            logger.save_init(file + "_%02d" % (idx) + file_extension, filetype)

    def save_data_lines(self, data_lines):
        for line in data_lines:
            self.save_data_line(line)

    def save_data_line(self, data_line):
        # data_line holds one entry per port, routed to that port's logger.
        for idx, _ in enumerate(self.serial_settings):
            self.loggers[idx].save_data_line(data_line[idx])

    # Upon shutdown, close the serial instances and log files
    def shutdown(self):
        if self.s is not None:
            for ser in self.s:
                ser.close()
        for logger in self.loggers:
            logger.shutdown()

    # Upon object deletion, shut down the serial handler
    def __del__(self):
        self.shutdown()
class DataSaver:
    """Write parsed controller data to a CSV file (one saver per port)."""

    def __init__(self, serial_settings, validator):
        self.serial_settings = serial_settings
        self.validator = validator
        self.out_file = None  # open file handle once save_init() runs
        self.file_writer = None

    def save_init(self, filename, filetype='csv'):
        """Open the output file and write the flattened header row.

        ``filetype`` is kept for interface compatibility; only CSV output
        is implemented.
        """
        num_channels = self.serial_settings['num_channels']
        data_to_save = self.validator.cmd_data['types'].keys()
        data_flat_labels = ['time']
        data_labels = ['time']
        data_lens = [1]
        for data_type in data_to_save:
            curr_type = self.validator.cmd_data['types'][data_type]
            curr_label = curr_type['label']
            curr_len = curr_type['length']
            # 'num_channels' in the spec means one column per channel.
            if curr_len == 'num_channels':
                curr_len = num_channels
            data_labels.append(curr_label)
            data_lens.append(curr_len)
            if curr_len > 1:
                for idx in range(curr_len):
                    data_flat_labels.append(curr_label + "[%d]" % (idx))
            else:
                data_flat_labels.append(curr_label)
        data_labels.extend(['_command', '_args'])
        data_flat_labels.extend(['_command', '_args'])
        data_lens.extend([1, 1])
        self.data_to_save = data_to_save
        self.data_labels = data_labels
        self.data_lens = data_lens
        self.data_flat_labels = data_flat_labels
        # BUGFIX: newline='' is required for files handed to csv.writer;
        # without it the writer emits extra blank lines on Windows.
        self.out_file = open(filename, "w+", newline='')
        self.file_writer = csv.writer(self.out_file)
        self.file_writer.writerow(self.data_flat_labels)

    def save_data_lines(self, data_lines):
        """Log a list of parsed data lines."""
        for line in data_lines:
            self.save_data_line(line)

    def save_data_line(self, data_line):
        """Log one parsed data line (a dict keyed by data label).

        Missing fields are padded with empty cells so every row has the
        full flattened width.
        """
        try:
            if data_line is None:
                return
            data = []
            for idx, key in enumerate(self.data_labels):
                expected_len = self.data_lens[idx]
                dat = data_line.get(key, None)
                if isinstance(dat, list):
                    data.extend(dat)
                    if expected_len > len(dat):
                        # Pad short vectors with empty cells.
                        data.extend([""] * (expected_len - len(dat)))
                    if expected_len < len(dat):
                        print("data array is longer than we expected")
                elif dat is not None:
                    data.append(dat)
                else:
                    data.extend([""] * expected_len)
            self.file_writer.writerow(data)
        except IOError:
            print("I/O error")

    # Upon shutdown, close the output file
    def shutdown(self):
        if self.out_file is not None:
            self.out_file.close()

    def __del__(self):
        self.shutdown()
| 31.381313
| 112
| 0.540114
|
import serial
import time
from datetime import datetime
import sys
import os
import yaml
import csv
import re
from validate_commands import CommandValidator
def build_cmd_string(command, values=None, format="%0.3f"):
    """Join a command and optional values into a ';'-separated,
    newline-terminated string, formatting each value with ``format``."""
    pieces = [command]
    if values is not None:
        if isinstance(values, (list, tuple)):
            pieces.extend(format % v for v in values)
        else:
            pieces.append(format % values)
    return ";".join(pieces) + '\n'
class CommandHandler:
    """Split and validate commands for a chain of pressure controllers.

    Each entry of ``comm_list`` describes one controller (channel count
    and command spec); one CommandValidator is built per controller.
    """

    def __init__(self, comm_list):
        self.comm_list = comm_list
        self.validators = []
        self.num_chans = []
        self.cmd_specs = []
        for settings in self.comm_list:
            self.num_chans.append(settings['num_channels'])
            self.cmd_specs.append(settings['cmd_spec'])
            self.validators.append(CommandValidator(settings["cmd_spec"], settings["num_channels"]))

    def split_command(self, command, values, format="%0.3f"):
        """Split one command into per-controller commands.

        Returns one entry per controller: either a dict
        ``{'command': ..., 'values': ...}`` or ``None`` when the command
        does not apply to that controller.
        """
        commands_out = []
        for idx, _ in enumerate(self.comm_list):
            spec = self.validators[idx].get_spec(command)
            if spec is None:
                split_how = None
            else:
                split_how = spec.get('split_how', None)
            split_idx = None
            switch_idx = None
            # No split rule: send the same command to every controller.
            if split_how is None:
                commands_out.append({'command': command, 'values': values})
            else:
                if isinstance(values, (list, tuple)):
                    values = list(values)
                    if len(values) == 0:
                        commands_out.append({'command': command, 'values': values})
                        continue
                    split_how_single = split_how.get('single_arg', None)
                    if spec['num_args'][0] == len(values):
                        if split_how_single is None:
                            commands_out.append({'command': command, 'values': values})
                        elif 'channel' in split_how_single:
                            channel = 0
                            # NOTE(review): split_how comes from the spec file;
                            # eval() of an untrusted spec would be unsafe.
                            split_idx = eval(split_how_single)
                        elif 'idx' in split_how_single:
                            channel = 0
                            switch_idx = int(re.match('.*?([0-9]+)$', split_how_single).group(1))
                        else:
                            commands_out.append(None)
                    else:
                        split_how_multi = split_how.get('multi_arg', None)
                        if split_how_multi is None:
                            commands_out.append({'command': command, 'values': values})
                        elif 'channel' in split_how_multi:
                            channel = 0
                            split_idx = eval(split_how_multi)
                        elif 'idx' in split_how_multi:
                            channel = 0
                            # BUGFIX: previously parsed split_how_single here,
                            # which belongs to the single-arg branch (and may
                            # be None in this one).
                            switch_idx = int(re.match('.*?([0-9]+)$', split_how_multi).group(1))
                        else:
                            commands_out.append(None)
                else:
                    commands_out.append({'command': command, 'values': values})
            # Channel window owned by this controller within the full chain.
            max_chan = sum(self.num_chans[0:idx + 1])
            min_chan = sum(self.num_chans[0:idx])
            if split_idx is not None:
                # Keep the leading shared args, then this controller's slice.
                curr_vals = values[0:split_idx]
                if len(values) >= max_chan + split_idx:
                    curr_vals.extend(values[min_chan + split_idx:max_chan + split_idx])
                else:
                    curr_vals.extend(values[min_chan + split_idx:])
                commands_out.append({'command': command, 'values': curr_vals})
            elif switch_idx is not None:
                # Re-base the channel index into this controller's window.
                if values[switch_idx] < max_chan and values[switch_idx] >= min_chan:
                    values[switch_idx] = float(values[switch_idx]) - min_chan
                    commands_out.append({'command': command, 'values': values})
                else:
                    commands_out.append(None)
        return commands_out
class CommHandler:
    """Own the serial links to the pressure controllers.

    Fans commands out over every configured port (via CommandHandler),
    reads and parses incoming lines, and forwards parsed data to one
    DataSaver logger per port.
    """

    def __init__(self):
        self.serial_settings = None
        self.s = None  # list of open serial objects once initialize() runs
        # BUGFIX: loggers must exist even before initialize() so that
        # shutdown()/__del__ never raise AttributeError.
        self.loggers = []

    def initialize(self, devname=None, baudrate=None, ser=None):
        """Open the serial connection(s).

        Accepts an explicit devname+baudrate, an already-open serial
        object, or falls back to previously loaded serial settings.

        Raises:
            ValueError: when no connection information is available.
        """
        if devname is not None and baudrate is not None:
            self.s = [serial.Serial(devname, baudrate)]
        elif ser is not None:
            self.s = [ser]
        elif self.serial_settings is not None:
            self.s = []
            for settings in self.serial_settings:
                self.s.append(serial.Serial(settings["devname"], settings["baudrate"]))
        else:
            self.s = None
            raise ValueError("CommHandler expects either a devname and baudrate, or and existing serial object")
        self.command_handler = CommandHandler(self.serial_settings)
        self.validator = []
        self.loggers = []
        for idx, settings in enumerate(self.serial_settings):
            validator_curr = self.command_handler.validators[idx]
            self.validator.append(validator_curr)
            self.loggers.append(DataSaver(settings, validator_curr))

    def read_serial_settings(self, file=None):
        """Load the comms + hardware YAML config; return the settings list."""
        file_path = os.path.dirname(os.path.realpath(__file__))
        if file is None:
            file = os.path.join(file_path, "..", "config", "comms", "comms_config.yaml")
        with open(file) as f:
            hardware_settings = yaml.safe_load(f)
        hw_file = hardware_settings.get('hardware')
        devnames = hardware_settings.get('devnames')
        hw_fullfile = os.path.join(file_path, "..", "config", "hardware", hw_file + ".yaml")
        with open(hw_fullfile) as f:
            serial_settings = yaml.safe_load(f)
        # Attach each device name from the comms config to its hardware entry.
        for idx, obj in enumerate(serial_settings):
            obj['devname'] = devnames[idx]
        self.serial_settings = serial_settings
        return serial_settings

    def get_serial_settings(self):
        """Return the serial settings currently in use."""
        return self.serial_settings

    def set_serial_settings(self, serial_settings):
        """Set the serial settings directly."""
        self.serial_settings = serial_settings

    def send_command(self, command, values=None, format="%0.3f"):
        """Split a command per controller and write it to each port."""
        cmd_obj = self.command_handler.split_command(command, values)
        cmd = []
        for cmd_curr in cmd_obj:
            if cmd_curr is None:
                cmd.append(None)
            else:
                cmd.append(build_cmd_string(cmd_curr['command'], cmd_curr['values'], format))
        print(cmd, len(cmd))
        for ser, cmd_curr in zip(self.s, cmd):
            if cmd_curr is not None:
                ser.write(cmd_curr.encode())

    def send_string(self, string, eol='\n'):
        """Send a raw string (plus terminator) out on every port."""
        string += eol
        for ser in self.s:
            ser.write(string.encode())

    def read_line(self, display=False, raw=False):
        """Read at most one pending line from each port.

        Returns None when nothing was available; otherwise a per-port
        list of raw strings (raw=True) or validator-parsed entries.
        """
        out = [None] * len(self.s)
        for idx, ser in enumerate(self.s):
            curr_line = None
            if ser.in_waiting:
                curr_line = ser.readline().decode().strip()
                out[idx] = curr_line
            if curr_line is not None and display:
                print(curr_line)
        if out == [None] * len(self.s):
            return None
        elif raw:
            return out
        else:
            new_out = []
            for idx, _ in enumerate(out):
                line = self.validator[idx].process_line(out[idx])
                if line is None:
                    new_out.append(None)
                else:
                    new_out.append(line[0])
            if new_out == [None] * len(self.s):
                return None
            return new_out

    def read_all(self, display=False, raw=False):
        """Drain every pending line; return the list of reads or None."""
        out = []
        new_line = []
        while new_line is not None:
            new_line = self.read_line(display, raw)
            if new_line is not None:
                out.append(new_line)
        if len(out) == 0:
            return None
        else:
            return out

    def save_init(self, filename, filetype='csv'):
        """Open one log file per port, suffixed with the port index."""
        for idx, logger in enumerate(self.loggers):
            file, file_extension = os.path.splitext(filename)
            logger.save_init(file + "_%02d" % (idx) + file_extension, filetype)

    def save_data_lines(self, data_lines):
        for line in data_lines:
            self.save_data_line(line)

    def save_data_line(self, data_line):
        # data_line holds one entry per port, routed to that port's logger.
        for idx, _ in enumerate(self.serial_settings):
            self.loggers[idx].save_data_line(data_line[idx])

    def shutdown(self):
        """Close every serial instance and log file."""
        if self.s is not None:
            for ser in self.s:
                ser.close()
        for logger in self.loggers:
            logger.shutdown()

    def __del__(self):
        self.shutdown()
class DataSaver:
    """Write parsed controller data to a CSV file (one saver per port)."""

    def __init__(self, serial_settings, validator):
        self.serial_settings = serial_settings
        self.validator = validator
        self.out_file = None  # open file handle once save_init() runs
        self.file_writer = None

    def save_init(self, filename, filetype='csv'):
        """Open the output file and write the flattened header row.

        ``filetype`` is kept for interface compatibility; only CSV output
        is implemented.
        """
        num_channels = self.serial_settings['num_channels']
        data_to_save = self.validator.cmd_data['types'].keys()
        data_flat_labels = ['time']
        data_labels = ['time']
        data_lens = [1]
        for data_type in data_to_save:
            curr_type = self.validator.cmd_data['types'][data_type]
            curr_label = curr_type['label']
            curr_len = curr_type['length']
            # 'num_channels' in the spec means one column per channel.
            if curr_len == 'num_channels':
                curr_len = num_channels
            data_labels.append(curr_label)
            data_lens.append(curr_len)
            if curr_len > 1:
                for idx in range(curr_len):
                    data_flat_labels.append(curr_label + "[%d]" % (idx))
            else:
                data_flat_labels.append(curr_label)
        data_labels.extend(['_command', '_args'])
        data_flat_labels.extend(['_command', '_args'])
        data_lens.extend([1, 1])
        self.data_to_save = data_to_save
        self.data_labels = data_labels
        self.data_lens = data_lens
        self.data_flat_labels = data_flat_labels
        # BUGFIX: newline='' is required for files handed to csv.writer;
        # without it the writer emits extra blank lines on Windows.
        self.out_file = open(filename, "w+", newline='')
        self.file_writer = csv.writer(self.out_file)
        self.file_writer.writerow(self.data_flat_labels)

    def save_data_lines(self, data_lines):
        """Log a list of parsed data lines."""
        for line in data_lines:
            self.save_data_line(line)

    def save_data_line(self, data_line):
        """Log one parsed data line (a dict keyed by data label).

        Missing fields are padded with empty cells so every row has the
        full flattened width.
        """
        try:
            if data_line is None:
                return
            data = []
            for idx, key in enumerate(self.data_labels):
                expected_len = self.data_lens[idx]
                dat = data_line.get(key, None)
                if isinstance(dat, list):
                    data.extend(dat)
                    if expected_len > len(dat):
                        # Pad short vectors with empty cells.
                        data.extend([""] * (expected_len - len(dat)))
                    if expected_len < len(dat):
                        print("data array is longer than we expected")
                elif dat is not None:
                    data.append(dat)
                else:
                    data.extend([""] * expected_len)
            self.file_writer.writerow(data)
        except IOError:
            print("I/O error")

    def shutdown(self):
        """Close the output file if it was opened."""
        if self.out_file is not None:
            self.out_file.close()

    def __del__(self):
        self.shutdown()
| true
| true
|
f70789ebeed4d28a5c9bc51ca8340e29d07e47a9
| 34,157
|
py
|
Python
|
release/scripts/startup/bl_ui/properties_freestyle.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 39
|
2020-05-26T15:21:14.000Z
|
2022-03-24T04:46:31.000Z
|
release/scripts/startup/bl_ui/properties_freestyle.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 7
|
2020-05-11T14:04:54.000Z
|
2020-06-03T15:00:20.000Z
|
release/scripts/startup/bl_ui/properties_freestyle.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 4
|
2020-04-25T14:38:01.000Z
|
2021-03-03T08:48:58.000Z
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from bpy.types import Menu, Panel, UIList
# Render properties
class RenderFreestyleButtonsPanel:
    # Mixin placing subclass panels in Properties > Render.
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "render"
    # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here

    @classmethod
    def poll(cls, context):
        # Visible only with a Freestyle-enabled build and a compatible engine.
        scene = context.scene
        with_freestyle = bpy.app.build_options.freestyle
        engine_ok = context.engine in cls.COMPAT_ENGINES
        return scene and with_freestyle and engine_ok
class RENDER_PT_freestyle(RenderFreestyleButtonsPanel, Panel):
    # Top-level Freestyle panel in the render properties.
    bl_label = "Freestyle"
    bl_options = {'DEFAULT_CLOSED'}
    bl_order = 10
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}

    def draw_header(self, context):
        # Header checkbox toggles Freestyle rendering for the scene.
        render = context.scene.render
        self.layout.prop(render, "use_freestyle", text="")

    def draw(self, context):
        layout = self.layout
        layout.use_property_split = True
        layout.use_property_decorate = False  # No animation.
        render = context.scene.render
        layout.active = render.use_freestyle
        layout.prop(render, "line_thickness_mode", expand=True)
        # Only absolute thickness exposes a numeric value.
        if render.line_thickness_mode == 'ABSOLUTE':
            layout.prop(render, "line_thickness")
# Render layer properties
class ViewLayerFreestyleButtonsPanel:
    # Mixin placing subclass panels in Properties > View Layer.
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "view_layer"
    bl_order = 10
    # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here

    @classmethod
    def poll(cls, context):
        # Visible only with a Freestyle-enabled build, Freestyle turned on
        # for the scene, and a compatible engine.
        scene = context.scene
        rd = scene.render
        with_freestyle = bpy.app.build_options.freestyle
        engine_ok = context.engine in cls.COMPAT_ENGINES
        return scene and with_freestyle and rd.use_freestyle and engine_ok
class ViewLayerFreestyleEditorButtonsPanel(ViewLayerFreestyleButtonsPanel):
    # Variant of the view-layer mixin shown only in 'EDITOR' control mode.
    # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here
    @classmethod
    def poll(cls, context):
        if not super().poll(context):
            return False
        view_layer = context.view_layer
        # Only the interactive parameter-editor mode exposes these panels.
        return view_layer and view_layer.freestyle_settings.mode == 'EDITOR'
class VIEWLAYER_UL_linesets(UIList):
    # Template list of the view layer's Freestyle line sets.
    def draw_item(self, _context, layout, _data, item, icon, _active_data, _active_propname, index):
        lineset = item
        if self.layout_type in {'DEFAULT', 'COMPACT'}:
            # Editable name plus a per-lineset render-visibility toggle.
            layout.prop(lineset, "name", text="", emboss=False, icon_value=icon)
            layout.prop(lineset, "show_render", text="", index=index)
        elif self.layout_type == 'GRID':
            layout.alignment = 'CENTER'
            layout.label(text="", icon_value=icon)
class RENDER_MT_lineset_context_menu(Menu):
    # Copy/paste menu for the active Freestyle line set.
    bl_label = "Lineset Specials"

    def draw(self, _context):
        menu = self.layout
        menu.operator("scene.freestyle_lineset_copy", icon='COPYDOWN')
        menu.operator("scene.freestyle_lineset_paste", icon='PASTEDOWN')
class VIEWLAYER_PT_freestyle(ViewLayerFreestyleButtonsPanel, Panel):
    # Main Freestyle settings panel for the active view layer.
    bl_label = "Freestyle"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
    def draw(self, context):
        layout = self.layout
        view_layer = context.view_layer
        freestyle = view_layer.freestyle_settings
        # Grey the whole panel out when Freestyle is off for this layer.
        layout.active = view_layer.use_freestyle
        row = layout.row()
        layout.prop(freestyle, "mode", text="Control Mode")
        layout.prop(freestyle, "use_view_map_cache", text="View Map Cache")
        layout.prop(freestyle, "as_render_pass", text="As Render Pass")
        layout.label(text="Edge Detection Options:")
        split = layout.split()
        col = split.column()
        col.prop(freestyle, "crease_angle")
        col.prop(freestyle, "use_culling")
        col.prop(freestyle, "use_advanced_options")
        col = split.column()
        col.prop(freestyle, "use_smoothness")
        # Material boundaries are only a script-mode option here.
        if freestyle.mode == 'SCRIPT':
            col.prop(freestyle, "use_material_boundaries")
        # Advanced options are hidden by default to warn new users
        if freestyle.use_advanced_options:
            if freestyle.mode == 'SCRIPT':
                row = layout.row()
                row.prop(freestyle, "use_ridges_and_valleys")
                row.prop(freestyle, "use_suggestive_contours")
            row = layout.row()
            row.prop(freestyle, "sphere_radius")
            row.prop(freestyle, "kr_derivative_epsilon")
        # Script mode lists the Python style modules with reorder controls.
        if freestyle.mode == 'SCRIPT':
            row = layout.row()
            row.label(text="Style modules:")
            row.operator("scene.freestyle_module_add", text="Add")
            for module in freestyle.modules:
                box = layout.box()
                box.context_pointer_set("freestyle_module", module)
                row = box.row(align=True)
                row.prop(module, "use", text="")
                row.prop(module, "script", text="")
                row.operator("scene.freestyle_module_open", icon='FILEBROWSER', text="")
                row.operator("scene.freestyle_module_remove", icon='X', text="")
                row.operator("scene.freestyle_module_move", icon='TRIA_UP', text="").direction = 'UP'
                row.operator("scene.freestyle_module_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
class VIEWLAYER_PT_freestyle_lineset(ViewLayerFreestyleEditorButtonsPanel, Panel):
    # Line-set management panel (parameter-editor mode only).
    bl_label = "Freestyle Line Set"
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
    def draw_edge_type_buttons(self, box, lineset, edge_type):
        # property names
        select_edge_type = "select_" + edge_type
        exclude_edge_type = "exclude_" + edge_type
        # draw edge type buttons
        row = box.row(align=True)
        row.prop(lineset, select_edge_type)
        sub = row.column(align=True)
        sub.prop(lineset, exclude_edge_type, text="")
        # Exclusion only makes sense when the type itself is selected.
        sub.active = getattr(lineset, select_edge_type)
    def draw(self, context):
        layout = self.layout
        view_layer = context.view_layer
        freestyle = view_layer.freestyle_settings
        lineset = freestyle.linesets.active
        layout.active = view_layer.use_freestyle
        row = layout.row()
        # More rows when a line set exists (to fit the move buttons).
        rows = 4 if lineset else 2
        row.template_list(
            "VIEWLAYER_UL_linesets",
            "",
            freestyle,
            "linesets",
            freestyle.linesets,
            "active_index",
            rows=rows,
        )
        sub = row.column(align=True)
        sub.operator("scene.freestyle_lineset_add", icon='ADD', text="")
        sub.operator("scene.freestyle_lineset_remove", icon='REMOVE', text="")
        sub.menu("RENDER_MT_lineset_context_menu", icon='DOWNARROW_HLT', text="")
        if lineset:
            sub.separator()
            sub.separator()
            sub.operator("scene.freestyle_lineset_move", icon='TRIA_UP', text="").direction = 'UP'
            sub.operator("scene.freestyle_lineset_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
            # Selection criteria for the active line set.
            col = layout.column()
            col.label(text="Selection By:")
            row = col.row(align=True)
            row.prop(lineset, "select_by_visibility", text="Visibility", toggle=True)
            row.prop(lineset, "select_by_edge_types", text="Edge Types", toggle=True)
            row.prop(lineset, "select_by_face_marks", text="Face Marks", toggle=True)
            row.prop(lineset, "select_by_collection", text="Collection", toggle=True)
            row.prop(lineset, "select_by_image_border", text="Image Border", toggle=True)
            if lineset.select_by_visibility:
                col.label(text="Visibility:")
                row = col.row(align=True)
                row.prop(lineset, "visibility", expand=True)
                # RANGE exposes the quantitative-invisibility bounds.
                if lineset.visibility == 'RANGE':
                    row = col.row(align=True)
                    row.prop(lineset, "qi_start")
                    row.prop(lineset, "qi_end")
            if lineset.select_by_edge_types:
                col.label(text="Edge Types:")
                row = col.row()
                row.prop(lineset, "edge_type_negation", expand=True)
                row.prop(lineset, "edge_type_combination", expand=True)
                split = col.split()
                sub = split.column()
                self.draw_edge_type_buttons(sub, lineset, "silhouette")
                self.draw_edge_type_buttons(sub, lineset, "border")
                self.draw_edge_type_buttons(sub, lineset, "contour")
                self.draw_edge_type_buttons(sub, lineset, "suggestive_contour")
                self.draw_edge_type_buttons(sub, lineset, "ridge_valley")
                sub = split.column()
                self.draw_edge_type_buttons(sub, lineset, "crease")
                self.draw_edge_type_buttons(sub, lineset, "edge_mark")
                self.draw_edge_type_buttons(sub, lineset, "external_contour")
                self.draw_edge_type_buttons(sub, lineset, "material_boundary")
            if lineset.select_by_face_marks:
                col.label(text="Face Marks:")
                row = col.row()
                row.prop(lineset, "face_mark_negation", expand=True)
                row.prop(lineset, "face_mark_condition", expand=True)
            if lineset.select_by_collection:
                col.label(text="Collection:")
                row = col.row()
                row.prop(lineset, "collection", text="")
                row.prop(lineset, "collection_negation", expand=True)
class VIEWLAYER_PT_freestyle_linestyle(ViewLayerFreestyleEditorButtonsPanel, Panel):
bl_label = "Freestyle Line Style"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
    def draw_modifier_box_header(self, box, modifier):
        # Header row shared by all modifier boxes: expand toggle, type
        # label, name, enable toggle and copy/move/remove controls.
        row = box.row()
        row.context_pointer_set("modifier", modifier)
        if modifier.expanded:
            icon = 'TRIA_DOWN'
        else:
            icon = 'TRIA_RIGHT'
        row.prop(modifier, "expanded", text="", icon=icon, emboss=False)
        # TODO: Use icons rather than text label, would save some room!
        row.label(text=modifier.rna_type.name)
        row.prop(modifier, "name", text="")
        if modifier.use:
            icon = 'RESTRICT_RENDER_OFF'
        else:
            icon = 'RESTRICT_RENDER_ON'
        row.prop(modifier, "use", text="", icon=icon)
        sub = row.row(align=True)
        sub.operator("scene.freestyle_modifier_copy", icon='NONE', text="Copy")
        sub.operator("scene.freestyle_modifier_move", icon='TRIA_UP', text="").direction = 'UP'
        sub.operator("scene.freestyle_modifier_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
        sub.operator("scene.freestyle_modifier_remove", icon='X', text="")
def draw_modifier_box_error(self, box, _modifier, message):
row = box.row()
row.label(text=message, icon='ERROR')
def draw_modifier_common(self, box, modifier):
row = box.row()
row.prop(modifier, "blend", text="")
row.prop(modifier, "influence")
def draw_modifier_color_ramp_common(self, box, modifier, has_range):
box.template_color_ramp(modifier, "color_ramp", expand=True)
if has_range:
row = box.row(align=True)
row.prop(modifier, "range_min")
row.prop(modifier, "range_max")
    def draw_modifier_curve_common(self, box, modifier, has_range, has_value):
        # Mapping selector (linear/curve) with invert toggle, plus optional
        # range and value min/max rows.
        row = box.row()
        row.prop(modifier, "mapping", text="")
        sub = row.column()
        sub.prop(modifier, "invert")
        if modifier.mapping == 'CURVE':
            # The curve itself defines the mapping, so invert is disabled.
            sub.active = False
            box.template_curve_mapping(modifier, "curve")
        if has_range:
            row = box.row(align=True)
            row.prop(modifier, "range_min")
            row.prop(modifier, "range_max")
        if has_value:
            row = box.row(align=True)
            row.prop(modifier, "value_min")
            row.prop(modifier, "value_max")
    def draw_color_modifier(self, context, modifier):
        # One collapsible box per line-color modifier; the body depends on
        # the modifier type.
        layout = self.layout
        col = layout.column(align=True)
        self.draw_modifier_box_header(col.box(), modifier)
        if modifier.expanded:
            box = col.box()
            self.draw_modifier_common(box, modifier)
            if modifier.type == 'ALONG_STROKE':
                self.draw_modifier_color_ramp_common(box, modifier, False)
            elif modifier.type == 'DISTANCE_FROM_OBJECT':
                box.prop(modifier, "target")
                self.draw_modifier_color_ramp_common(box, modifier, True)
                # Operator fills range_min/max from the current selection.
                prop = box.operator("scene.freestyle_fill_range_by_selection")
                prop.type = 'COLOR'
                prop.name = modifier.name
            elif modifier.type == 'DISTANCE_FROM_CAMERA':
                self.draw_modifier_color_ramp_common(box, modifier, True)
                prop = box.operator("scene.freestyle_fill_range_by_selection")
                prop.type = 'COLOR'
                prop.name = modifier.name
            elif modifier.type == 'MATERIAL':
                row = box.row()
                row.prop(modifier, "material_attribute", text="")
                sub = row.column()
                sub.prop(modifier, "use_ramp")
                # Color-valued attributes can be used directly; scalar ones
                # always go through the ramp.
                if modifier.material_attribute in {'LINE', 'DIFF', 'SPEC'}:
                    sub.active = True
                    show_ramp = modifier.use_ramp
                else:
                    sub.active = False
                    show_ramp = True
                if show_ramp:
                    self.draw_modifier_color_ramp_common(box, modifier, False)
            elif modifier.type == 'TANGENT':
                self.draw_modifier_color_ramp_common(box, modifier, False)
            elif modifier.type == 'NOISE':
                self.draw_modifier_color_ramp_common(box, modifier, False)
                row = box.row(align=False)
                row.prop(modifier, "amplitude")
                row.prop(modifier, "period")
                row.prop(modifier, "seed")
            elif modifier.type == 'CREASE_ANGLE':
                self.draw_modifier_color_ramp_common(box, modifier, False)
                row = box.row(align=True)
                row.prop(modifier, "angle_min")
                row.prop(modifier, "angle_max")
            elif modifier.type == 'CURVATURE_3D':
                self.draw_modifier_color_ramp_common(box, modifier, False)
                row = box.row(align=True)
                row.prop(modifier, "curvature_min")
                row.prop(modifier, "curvature_max")
                freestyle = context.view_layer.freestyle_settings
                # Curvature data requires face smoothness to be enabled.
                if not freestyle.use_smoothness:
                    message = "Enable Face Smoothness to use this modifier"
                    self.draw_modifier_box_error(col.box(), modifier, message)
    def draw_alpha_modifier(self, context, modifier):
        # One collapsible box per alpha-transparency modifier; the body
        # depends on the modifier type.
        layout = self.layout
        col = layout.column(align=True)
        self.draw_modifier_box_header(col.box(), modifier)
        if modifier.expanded:
            box = col.box()
            self.draw_modifier_common(box, modifier)
            if modifier.type == 'ALONG_STROKE':
                self.draw_modifier_curve_common(box, modifier, False, False)
            elif modifier.type == 'DISTANCE_FROM_OBJECT':
                box.prop(modifier, "target")
                self.draw_modifier_curve_common(box, modifier, True, False)
                # Operator fills range_min/max from the current selection.
                prop = box.operator("scene.freestyle_fill_range_by_selection")
                prop.type = 'ALPHA'
                prop.name = modifier.name
            elif modifier.type == 'DISTANCE_FROM_CAMERA':
                self.draw_modifier_curve_common(box, modifier, True, False)
                prop = box.operator("scene.freestyle_fill_range_by_selection")
                prop.type = 'ALPHA'
                prop.name = modifier.name
            elif modifier.type == 'MATERIAL':
                box.prop(modifier, "material_attribute", text="")
                self.draw_modifier_curve_common(box, modifier, False, False)
            elif modifier.type == 'TANGENT':
                self.draw_modifier_curve_common(box, modifier, False, False)
            elif modifier.type == 'NOISE':
                self.draw_modifier_curve_common(box, modifier, False, False)
                row = box.row(align=False)
                row.prop(modifier, "amplitude")
                row.prop(modifier, "period")
                row.prop(modifier, "seed")
            elif modifier.type == 'CREASE_ANGLE':
                self.draw_modifier_curve_common(box, modifier, False, False)
                row = box.row(align=True)
                row.prop(modifier, "angle_min")
                row.prop(modifier, "angle_max")
            elif modifier.type == 'CURVATURE_3D':
                self.draw_modifier_curve_common(box, modifier, False, False)
                row = box.row(align=True)
                row.prop(modifier, "curvature_min")
                row.prop(modifier, "curvature_max")
                freestyle = context.view_layer.freestyle_settings
                # Curvature data requires face smoothness to be enabled.
                if not freestyle.use_smoothness:
                    message = "Enable Face Smoothness to use this modifier"
                    self.draw_modifier_box_error(col.box(), modifier, message)
def draw_thickness_modifier(self, context, modifier):
layout = self.layout
col = layout.column(align=True)
self.draw_modifier_box_header(col.box(), modifier)
if modifier.expanded:
box = col.box()
self.draw_modifier_common(box, modifier)
if modifier.type == 'ALONG_STROKE':
self.draw_modifier_curve_common(box, modifier, False, True)
elif modifier.type == 'DISTANCE_FROM_OBJECT':
box.prop(modifier, "target")
self.draw_modifier_curve_common(box, modifier, True, True)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'THICKNESS'
prop.name = modifier.name
elif modifier.type == 'DISTANCE_FROM_CAMERA':
self.draw_modifier_curve_common(box, modifier, True, True)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'THICKNESS'
prop.name = modifier.name
elif modifier.type == 'MATERIAL':
box.prop(modifier, "material_attribute", text="")
self.draw_modifier_curve_common(box, modifier, False, True)
elif modifier.type == 'CALLIGRAPHY':
box.prop(modifier, "orientation")
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
elif modifier.type == 'TANGENT':
self.draw_modifier_curve_common(box, modifier, False, False)
self.mapping = 'CURVE'
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
elif modifier.type == 'NOISE':
row = box.row(align=False)
row.prop(modifier, "amplitude")
row.prop(modifier, "period")
row = box.row(align=False)
row.prop(modifier, "seed")
row.prop(modifier, "use_asymmetric")
elif modifier.type == 'CREASE_ANGLE':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
row = box.row(align=True)
row.prop(modifier, "angle_min")
row.prop(modifier, "angle_max")
elif modifier.type == 'CURVATURE_3D':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
row = box.row(align=True)
row.prop(modifier, "curvature_min")
row.prop(modifier, "curvature_max")
freestyle = context.view_layer.freestyle_settings
if not freestyle.use_smoothness:
message = "Enable Face Smoothness to use this modifier"
self.draw_modifier_box_error(col.box(), modifier, message)
    def draw_geometry_modifier(self, _context, modifier):
        """Draw the UI box for one stroke-geometry modifier of the line style.

        Geometry modifiers have no blend/influence settings, so unlike the
        color/alpha/thickness variants this draws only the per-type options.
        """
        layout = self.layout

        col = layout.column(align=True)
        self.draw_modifier_box_header(col.box(), modifier)
        if modifier.expanded:
            box = col.box()

            if modifier.type == 'SAMPLING':
                box.prop(modifier, "sampling")

            elif modifier.type == 'BEZIER_CURVE':
                box.prop(modifier, "error")

            elif modifier.type == 'SINUS_DISPLACEMENT':
                split = box.split()
                col = split.column()
                col.prop(modifier, "wavelength")
                col.prop(modifier, "amplitude")
                col = split.column()
                col.prop(modifier, "phase")

            elif modifier.type == 'SPATIAL_NOISE':
                split = box.split()
                col = split.column()
                col.prop(modifier, "amplitude")
                col.prop(modifier, "scale")
                col.prop(modifier, "octaves")
                col = split.column()
                col.prop(modifier, "smooth")
                col.prop(modifier, "use_pure_random")

            elif modifier.type == 'PERLIN_NOISE_1D':
                split = box.split()
                col = split.column()
                col.prop(modifier, "frequency")
                col.prop(modifier, "amplitude")
                col.prop(modifier, "seed")
                col = split.column()
                col.prop(modifier, "octaves")
                col.prop(modifier, "angle")

            elif modifier.type == 'PERLIN_NOISE_2D':
                split = box.split()
                col = split.column()
                col.prop(modifier, "frequency")
                col.prop(modifier, "amplitude")
                col.prop(modifier, "seed")
                col = split.column()
                col.prop(modifier, "octaves")
                col.prop(modifier, "angle")

            elif modifier.type == 'BACKBONE_STRETCHER':
                box.prop(modifier, "backbone_length")

            elif modifier.type == 'TIP_REMOVER':
                box.prop(modifier, "tip_length")

            elif modifier.type == 'POLYGONIZATION':
                box.prop(modifier, "error")

            elif modifier.type == 'GUIDING_LINES':
                box.prop(modifier, "offset")

            elif modifier.type == 'BLUEPRINT':
                row = box.row()
                row.prop(modifier, "shape", expand=True)
                box.prop(modifier, "rounds")
                # The second options row depends on the chosen blueprint shape.
                row = box.row()
                if modifier.shape in {'CIRCLES', 'ELLIPSES'}:
                    row.prop(modifier, "random_radius")
                    row.prop(modifier, "random_center")
                elif modifier.shape == 'SQUARES':
                    row.prop(modifier, "backbone_length")
                    row.prop(modifier, "random_backbone")

            elif modifier.type == '2D_OFFSET':
                row = box.row(align=True)
                row.prop(modifier, "start")
                row.prop(modifier, "end")
                row = box.row(align=True)
                row.prop(modifier, "x")
                row.prop(modifier, "y")

            elif modifier.type == '2D_TRANSFORM':
                box.prop(modifier, "pivot")
                # Extra pivot inputs only for parametric/absolute pivot modes.
                if modifier.pivot == 'PARAM':
                    box.prop(modifier, "pivot_u")
                elif modifier.pivot == 'ABSOLUTE':
                    row = box.row(align=True)
                    row.prop(modifier, "pivot_x")
                    row.prop(modifier, "pivot_y")
                row = box.row(align=True)
                row.prop(modifier, "scale_x")
                row.prop(modifier, "scale_y")
                box.prop(modifier, "angle")

            elif modifier.type == 'SIMPLIFICATION':
                box.prop(modifier, "tolerance")
    def draw(self, context):
        """Draw the line-style editor for the active line set.

        Bails out early when there is no active line set or no line style;
        otherwise shows the tab selector (``linestyle.panel``) and the
        controls of the selected tab (strokes / color / alpha / thickness /
        geometry / texture / misc).
        """
        layout = self.layout

        view_layer = context.view_layer
        lineset = view_layer.freestyle_settings.linesets.active

        layout.active = view_layer.use_freestyle

        if lineset is None:
            return
        linestyle = lineset.linestyle

        layout.template_ID(lineset, "linestyle", new="scene.freestyle_linestyle_new")
        if linestyle is None:
            return
        row = layout.row(align=True)
        row.prop(linestyle, "panel", expand=True)

        if linestyle.panel == 'STROKES':
            # Chaining
            layout.prop(linestyle, "use_chaining", text="Chaining:")
            split = layout.split(align=True)
            split.active = linestyle.use_chaining
            # First column
            col = split.column()
            col.active = linestyle.use_chaining
            col.prop(linestyle, "chaining", text="")
            if linestyle.chaining == 'SKETCHY':
                col.prop(linestyle, "rounds")
            # Second column
            col = split.column()
            col.prop(linestyle, "use_same_object")

            # Splitting
            layout.label(text="Splitting:")
            split = layout.split(align=True)
            # First column
            col = split.column()
            row = col.row(align=True)
            row.prop(linestyle, "use_angle_min", text="")
            sub = row.row()
            sub.active = linestyle.use_angle_min
            sub.prop(linestyle, "angle_min")
            row = col.row(align=True)
            row.prop(linestyle, "use_angle_max", text="")
            sub = row.row()
            sub.active = linestyle.use_angle_max
            sub.prop(linestyle, "angle_max")
            # Second column
            col = split.column()
            row = col.row(align=True)
            row.prop(linestyle, "use_split_length", text="")
            sub = row.row()
            sub.active = linestyle.use_split_length
            sub.prop(linestyle, "split_length", text="2D Length")
            row = col.row(align=True)
            row.prop(linestyle, "material_boundary")
            # End of columns
            row = layout.row(align=True)
            row.prop(linestyle, "use_split_pattern", text="")
            sub = row.row(align=True)
            sub.active = linestyle.use_split_pattern
            sub.prop(linestyle, "split_dash1", text="D1")
            sub.prop(linestyle, "split_gap1", text="G1")
            sub.prop(linestyle, "split_dash2", text="D2")
            sub.prop(linestyle, "split_gap2", text="G2")
            sub.prop(linestyle, "split_dash3", text="D3")
            sub.prop(linestyle, "split_gap3", text="G3")

            # Sorting
            layout.prop(linestyle, "use_sorting", text="Sorting:")
            col = layout.column()
            col.active = linestyle.use_sorting
            row = col.row(align=True)
            row.prop(linestyle, "sort_key", text="")
            sub = row.row()
            # Integration type only applies to the distance/projection keys.
            sub.active = linestyle.sort_key in {'DISTANCE_FROM_CAMERA',
                                                'PROJECTED_X',
                                                'PROJECTED_Y'}
            sub.prop(linestyle, "integration_type", text="")
            row = col.row(align=True)
            row.prop(linestyle, "sort_order", expand=True)

            # Selection
            layout.label(text="Selection:")
            split = layout.split(align=True)
            # First column
            col = split.column()
            row = col.row(align=True)
            row.prop(linestyle, "use_length_min", text="")
            sub = row.row()
            sub.active = linestyle.use_length_min
            sub.prop(linestyle, "length_min")
            row = col.row(align=True)
            row.prop(linestyle, "use_length_max", text="")
            sub = row.row()
            sub.active = linestyle.use_length_max
            sub.prop(linestyle, "length_max")
            # Second column
            col = split.column()
            row = col.row(align=True)
            row.prop(linestyle, "use_chain_count", text="")
            sub = row.row()
            sub.active = linestyle.use_chain_count
            sub.prop(linestyle, "chain_count")

            # Caps
            layout.label(text="Caps:")
            row = layout.row(align=True)
            row.prop(linestyle, "caps", expand=True)

            # Dashed lines
            layout.prop(linestyle, "use_dashed_line", text="Dashed Line:")
            row = layout.row(align=True)
            row.active = linestyle.use_dashed_line
            row.prop(linestyle, "dash1", text="D1")
            row.prop(linestyle, "gap1", text="G1")
            row.prop(linestyle, "dash2", text="D2")
            row.prop(linestyle, "gap2", text="G2")
            row.prop(linestyle, "dash3", text="D3")
            row.prop(linestyle, "gap3", text="G3")

        elif linestyle.panel == 'COLOR':
            col = layout.column()
            row = col.row()
            row.label(text="Base Color:")
            row.prop(linestyle, "color", text="")
            col.label(text="Modifiers:")
            col.operator_menu_enum("scene.freestyle_color_modifier_add", "type", text="Add Modifier")
            for modifier in linestyle.color_modifiers:
                self.draw_color_modifier(context, modifier)

        elif linestyle.panel == 'ALPHA':
            col = layout.column()
            row = col.row()
            row.label(text="Base Transparency:")
            row.prop(linestyle, "alpha")
            col.label(text="Modifiers:")
            col.operator_menu_enum("scene.freestyle_alpha_modifier_add", "type", text="Add Modifier")
            for modifier in linestyle.alpha_modifiers:
                self.draw_alpha_modifier(context, modifier)

        elif linestyle.panel == 'THICKNESS':
            col = layout.column()
            row = col.row()
            row.label(text="Base Thickness:")
            row.prop(linestyle, "thickness")
            # Thickness position only matters for plain chaining on one object.
            subcol = col.column()
            subcol.active = linestyle.chaining == 'PLAIN' and linestyle.use_same_object
            row = subcol.row()
            row.prop(linestyle, "thickness_position", expand=True)
            row = subcol.row()
            row.prop(linestyle, "thickness_ratio")
            row.active = (linestyle.thickness_position == 'RELATIVE')
            col = layout.column()
            col.label(text="Modifiers:")
            col.operator_menu_enum("scene.freestyle_thickness_modifier_add", "type", text="Add Modifier")
            for modifier in linestyle.thickness_modifiers:
                self.draw_thickness_modifier(context, modifier)

        elif linestyle.panel == 'GEOMETRY':
            col = layout.column()
            col.label(text="Modifiers:")
            col.operator_menu_enum("scene.freestyle_geometry_modifier_add", "type", text="Add Modifier")
            for modifier in linestyle.geometry_modifiers:
                self.draw_geometry_modifier(context, modifier)

        elif linestyle.panel == 'TEXTURE':
            layout.separator()

            row = layout.row()
            row.prop(linestyle, "use_nodes")
            row.prop(linestyle, "texture_spacing", text="Spacing Along Stroke")

            # Shortcut to the texture properties editor for this line style.
            row = layout.row()
            props = row.operator(
                "wm.properties_context_change",
                text="Go to Linestyle Textures Properties",
                icon='TEXTURE',
            )
            props.context = 'TEXTURE'

        elif linestyle.panel == 'MISC':
            pass
# Material properties
class MaterialFreestyleButtonsPanel:
    """Mix-in for material-tab panels that are relevant only with Freestyle.

    Subclasses become visible when this Blender build includes Freestyle,
    a material and scene exist, Freestyle rendering is enabled, and the
    active render engine is listed in the subclass' COMPAT_ENGINES.
    """
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "material"
    # COMPAT_ENGINES must be defined in each subclass, external engines can add themselves here

    @classmethod
    def poll(cls, context):
        # Freestyle support is a compile-time option of Blender itself.
        if not bpy.app.build_options.freestyle:
            return False
        if not (context.material and context.scene):
            return False
        if not context.scene.render.use_freestyle:
            return False
        return context.engine in cls.COMPAT_ENGINES
class MATERIAL_PT_freestyle_line(MaterialFreestyleButtonsPanel, Panel):
    """Material panel for the per-material Freestyle line color/priority."""
    bl_label = "Freestyle Line"
    bl_options = {'DEFAULT_CLOSED'}
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}

    def draw(self, context):
        material = context.material

        # Color swatch and priority side by side on a single row.
        line_row = self.layout.row()
        line_row.prop(material, "line_color", text="")
        line_row.prop(material, "line_priority", text="Priority")
# All UI classes defined in this module, in registration order.
classes = (
    RENDER_PT_freestyle,
    VIEWLAYER_UL_linesets,
    RENDER_MT_lineset_context_menu,
    VIEWLAYER_PT_freestyle,
    VIEWLAYER_PT_freestyle_lineset,
    VIEWLAYER_PT_freestyle_linestyle,
    MATERIAL_PT_freestyle_line,
)

if __name__ == "__main__":  # only for live edit.
    from bpy.utils import register_class
    for cls in classes:
        register_class(cls)
| 40.043376
| 105
| 0.581726
|
rn scene and with_freestyle and(context.engine in cls.COMPAT_ENGINES)
class RENDER_PT_freestyle(RenderFreestyleButtonsPanel, Panel):
bl_label = "Freestyle"
bl_options = {'DEFAULT_CLOSED'}
bl_order = 10
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
def draw_header(self, context):
rd = context.scene.render
self.layout.prop(rd, "use_freestyle", text="")
def draw(self, context):
layout = self.layout
layout.use_property_split = True
layout.use_property_decorate = False
rd = context.scene.render
layout.active = rd.use_freestyle
layout.prop(rd, "line_thickness_mode", expand=True)
if rd.line_thickness_mode == 'ABSOLUTE':
layout.prop(rd, "line_thickness")
class ViewLayerFreestyleButtonsPanel:
bl_space_type = 'PROPERTIES'
bl_region_type = 'WINDOW'
bl_context = "view_layer"
bl_order = 10
@classmethod
def poll(cls, context):
scene = context.scene
rd = scene.render
with_freestyle = bpy.app.build_options.freestyle
return (scene and with_freestyle and rd.use_freestyle and
(context.engine in cls.COMPAT_ENGINES))
class ViewLayerFreestyleEditorButtonsPanel(ViewLayerFreestyleButtonsPanel):
@classmethod
def poll(cls, context):
if not super().poll(context):
return False
view_layer = context.view_layer
return view_layer and view_layer.freestyle_settings.mode == 'EDITOR'
class VIEWLAYER_UL_linesets(UIList):
def draw_item(self, _context, layout, _data, item, icon, _active_data, _active_propname, index):
lineset = item
if self.layout_type in {'DEFAULT', 'COMPACT'}:
layout.prop(lineset, "name", text="", emboss=False, icon_value=icon)
layout.prop(lineset, "show_render", text="", index=index)
elif self.layout_type == 'GRID':
layout.alignment = 'CENTER'
layout.label(text="", icon_value=icon)
class RENDER_MT_lineset_context_menu(Menu):
bl_label = "Lineset Specials"
def draw(self, _context):
layout = self.layout
layout.operator("scene.freestyle_lineset_copy", icon='COPYDOWN')
layout.operator("scene.freestyle_lineset_paste", icon='PASTEDOWN')
class VIEWLAYER_PT_freestyle(ViewLayerFreestyleButtonsPanel, Panel):
bl_label = "Freestyle"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
def draw(self, context):
layout = self.layout
view_layer = context.view_layer
freestyle = view_layer.freestyle_settings
layout.active = view_layer.use_freestyle
row = layout.row()
layout.prop(freestyle, "mode", text="Control Mode")
layout.prop(freestyle, "use_view_map_cache", text="View Map Cache")
layout.prop(freestyle, "as_render_pass", text="As Render Pass")
layout.label(text="Edge Detection Options:")
split = layout.split()
col = split.column()
col.prop(freestyle, "crease_angle")
col.prop(freestyle, "use_culling")
col.prop(freestyle, "use_advanced_options")
col = split.column()
col.prop(freestyle, "use_smoothness")
if freestyle.mode == 'SCRIPT':
col.prop(freestyle, "use_material_boundaries")
if freestyle.use_advanced_options:
if freestyle.mode == 'SCRIPT':
row = layout.row()
row.prop(freestyle, "use_ridges_and_valleys")
row.prop(freestyle, "use_suggestive_contours")
row = layout.row()
row.prop(freestyle, "sphere_radius")
row.prop(freestyle, "kr_derivative_epsilon")
if freestyle.mode == 'SCRIPT':
row = layout.row()
row.label(text="Style modules:")
row.operator("scene.freestyle_module_add", text="Add")
for module in freestyle.modules:
box = layout.box()
box.context_pointer_set("freestyle_module", module)
row = box.row(align=True)
row.prop(module, "use", text="")
row.prop(module, "script", text="")
row.operator("scene.freestyle_module_open", icon='FILEBROWSER', text="")
row.operator("scene.freestyle_module_remove", icon='X', text="")
row.operator("scene.freestyle_module_move", icon='TRIA_UP', text="").direction = 'UP'
row.operator("scene.freestyle_module_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
class VIEWLAYER_PT_freestyle_lineset(ViewLayerFreestyleEditorButtonsPanel, Panel):
bl_label = "Freestyle Line Set"
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
def draw_edge_type_buttons(self, box, lineset, edge_type):
select_edge_type = "select_" + edge_type
exclude_edge_type = "exclude_" + edge_type
row = box.row(align=True)
row.prop(lineset, select_edge_type)
sub = row.column(align=True)
sub.prop(lineset, exclude_edge_type, text="")
sub.active = getattr(lineset, select_edge_type)
def draw(self, context):
layout = self.layout
view_layer = context.view_layer
freestyle = view_layer.freestyle_settings
lineset = freestyle.linesets.active
layout.active = view_layer.use_freestyle
row = layout.row()
rows = 4 if lineset else 2
row.template_list(
"VIEWLAYER_UL_linesets",
"",
freestyle,
"linesets",
freestyle.linesets,
"active_index",
rows=rows,
)
sub = row.column(align=True)
sub.operator("scene.freestyle_lineset_add", icon='ADD', text="")
sub.operator("scene.freestyle_lineset_remove", icon='REMOVE', text="")
sub.menu("RENDER_MT_lineset_context_menu", icon='DOWNARROW_HLT', text="")
if lineset:
sub.separator()
sub.separator()
sub.operator("scene.freestyle_lineset_move", icon='TRIA_UP', text="").direction = 'UP'
sub.operator("scene.freestyle_lineset_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
col = layout.column()
col.label(text="Selection By:")
row = col.row(align=True)
row.prop(lineset, "select_by_visibility", text="Visibility", toggle=True)
row.prop(lineset, "select_by_edge_types", text="Edge Types", toggle=True)
row.prop(lineset, "select_by_face_marks", text="Face Marks", toggle=True)
row.prop(lineset, "select_by_collection", text="Collection", toggle=True)
row.prop(lineset, "select_by_image_border", text="Image Border", toggle=True)
if lineset.select_by_visibility:
col.label(text="Visibility:")
row = col.row(align=True)
row.prop(lineset, "visibility", expand=True)
if lineset.visibility == 'RANGE':
row = col.row(align=True)
row.prop(lineset, "qi_start")
row.prop(lineset, "qi_end")
if lineset.select_by_edge_types:
col.label(text="Edge Types:")
row = col.row()
row.prop(lineset, "edge_type_negation", expand=True)
row.prop(lineset, "edge_type_combination", expand=True)
split = col.split()
sub = split.column()
self.draw_edge_type_buttons(sub, lineset, "silhouette")
self.draw_edge_type_buttons(sub, lineset, "border")
self.draw_edge_type_buttons(sub, lineset, "contour")
self.draw_edge_type_buttons(sub, lineset, "suggestive_contour")
self.draw_edge_type_buttons(sub, lineset, "ridge_valley")
sub = split.column()
self.draw_edge_type_buttons(sub, lineset, "crease")
self.draw_edge_type_buttons(sub, lineset, "edge_mark")
self.draw_edge_type_buttons(sub, lineset, "external_contour")
self.draw_edge_type_buttons(sub, lineset, "material_boundary")
if lineset.select_by_face_marks:
col.label(text="Face Marks:")
row = col.row()
row.prop(lineset, "face_mark_negation", expand=True)
row.prop(lineset, "face_mark_condition", expand=True)
if lineset.select_by_collection:
col.label(text="Collection:")
row = col.row()
row.prop(lineset, "collection", text="")
row.prop(lineset, "collection_negation", expand=True)
class VIEWLAYER_PT_freestyle_linestyle(ViewLayerFreestyleEditorButtonsPanel, Panel):
bl_label = "Freestyle Line Style"
bl_options = {'DEFAULT_CLOSED'}
COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}
def draw_modifier_box_header(self, box, modifier):
row = box.row()
row.context_pointer_set("modifier", modifier)
if modifier.expanded:
icon = 'TRIA_DOWN'
else:
icon = 'TRIA_RIGHT'
row.prop(modifier, "expanded", text="", icon=icon, emboss=False)
row.label(text=modifier.rna_type.name)
row.prop(modifier, "name", text="")
if modifier.use:
icon = 'RESTRICT_RENDER_OFF'
else:
icon = 'RESTRICT_RENDER_ON'
row.prop(modifier, "use", text="", icon=icon)
sub = row.row(align=True)
sub.operator("scene.freestyle_modifier_copy", icon='NONE', text="Copy")
sub.operator("scene.freestyle_modifier_move", icon='TRIA_UP', text="").direction = 'UP'
sub.operator("scene.freestyle_modifier_move", icon='TRIA_DOWN', text="").direction = 'DOWN'
sub.operator("scene.freestyle_modifier_remove", icon='X', text="")
def draw_modifier_box_error(self, box, _modifier, message):
row = box.row()
row.label(text=message, icon='ERROR')
def draw_modifier_common(self, box, modifier):
row = box.row()
row.prop(modifier, "blend", text="")
row.prop(modifier, "influence")
def draw_modifier_color_ramp_common(self, box, modifier, has_range):
box.template_color_ramp(modifier, "color_ramp", expand=True)
if has_range:
row = box.row(align=True)
row.prop(modifier, "range_min")
row.prop(modifier, "range_max")
def draw_modifier_curve_common(self, box, modifier, has_range, has_value):
row = box.row()
row.prop(modifier, "mapping", text="")
sub = row.column()
sub.prop(modifier, "invert")
if modifier.mapping == 'CURVE':
sub.active = False
box.template_curve_mapping(modifier, "curve")
if has_range:
row = box.row(align=True)
row.prop(modifier, "range_min")
row.prop(modifier, "range_max")
if has_value:
row = box.row(align=True)
row.prop(modifier, "value_min")
row.prop(modifier, "value_max")
def draw_color_modifier(self, context, modifier):
layout = self.layout
col = layout.column(align=True)
self.draw_modifier_box_header(col.box(), modifier)
if modifier.expanded:
box = col.box()
self.draw_modifier_common(box, modifier)
if modifier.type == 'ALONG_STROKE':
self.draw_modifier_color_ramp_common(box, modifier, False)
elif modifier.type == 'DISTANCE_FROM_OBJECT':
box.prop(modifier, "target")
self.draw_modifier_color_ramp_common(box, modifier, True)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'COLOR'
prop.name = modifier.name
elif modifier.type == 'DISTANCE_FROM_CAMERA':
self.draw_modifier_color_ramp_common(box, modifier, True)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'COLOR'
prop.name = modifier.name
elif modifier.type == 'MATERIAL':
row = box.row()
row.prop(modifier, "material_attribute", text="")
sub = row.column()
sub.prop(modifier, "use_ramp")
if modifier.material_attribute in {'LINE', 'DIFF', 'SPEC'}:
sub.active = True
show_ramp = modifier.use_ramp
else:
sub.active = False
show_ramp = True
if show_ramp:
self.draw_modifier_color_ramp_common(box, modifier, False)
elif modifier.type == 'TANGENT':
self.draw_modifier_color_ramp_common(box, modifier, False)
elif modifier.type == 'NOISE':
self.draw_modifier_color_ramp_common(box, modifier, False)
row = box.row(align=False)
row.prop(modifier, "amplitude")
row.prop(modifier, "period")
row.prop(modifier, "seed")
elif modifier.type == 'CREASE_ANGLE':
self.draw_modifier_color_ramp_common(box, modifier, False)
row = box.row(align=True)
row.prop(modifier, "angle_min")
row.prop(modifier, "angle_max")
elif modifier.type == 'CURVATURE_3D':
self.draw_modifier_color_ramp_common(box, modifier, False)
row = box.row(align=True)
row.prop(modifier, "curvature_min")
row.prop(modifier, "curvature_max")
freestyle = context.view_layer.freestyle_settings
if not freestyle.use_smoothness:
message = "Enable Face Smoothness to use this modifier"
self.draw_modifier_box_error(col.box(), modifier, message)
def draw_alpha_modifier(self, context, modifier):
layout = self.layout
col = layout.column(align=True)
self.draw_modifier_box_header(col.box(), modifier)
if modifier.expanded:
box = col.box()
self.draw_modifier_common(box, modifier)
if modifier.type == 'ALONG_STROKE':
self.draw_modifier_curve_common(box, modifier, False, False)
elif modifier.type == 'DISTANCE_FROM_OBJECT':
box.prop(modifier, "target")
self.draw_modifier_curve_common(box, modifier, True, False)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'ALPHA'
prop.name = modifier.name
elif modifier.type == 'DISTANCE_FROM_CAMERA':
self.draw_modifier_curve_common(box, modifier, True, False)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'ALPHA'
prop.name = modifier.name
elif modifier.type == 'MATERIAL':
box.prop(modifier, "material_attribute", text="")
self.draw_modifier_curve_common(box, modifier, False, False)
elif modifier.type == 'TANGENT':
self.draw_modifier_curve_common(box, modifier, False, False)
elif modifier.type == 'NOISE':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=False)
row.prop(modifier, "amplitude")
row.prop(modifier, "period")
row.prop(modifier, "seed")
elif modifier.type == 'CREASE_ANGLE':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=True)
row.prop(modifier, "angle_min")
row.prop(modifier, "angle_max")
elif modifier.type == 'CURVATURE_3D':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=True)
row.prop(modifier, "curvature_min")
row.prop(modifier, "curvature_max")
freestyle = context.view_layer.freestyle_settings
if not freestyle.use_smoothness:
message = "Enable Face Smoothness to use this modifier"
self.draw_modifier_box_error(col.box(), modifier, message)
def draw_thickness_modifier(self, context, modifier):
layout = self.layout
col = layout.column(align=True)
self.draw_modifier_box_header(col.box(), modifier)
if modifier.expanded:
box = col.box()
self.draw_modifier_common(box, modifier)
if modifier.type == 'ALONG_STROKE':
self.draw_modifier_curve_common(box, modifier, False, True)
elif modifier.type == 'DISTANCE_FROM_OBJECT':
box.prop(modifier, "target")
self.draw_modifier_curve_common(box, modifier, True, True)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'THICKNESS'
prop.name = modifier.name
elif modifier.type == 'DISTANCE_FROM_CAMERA':
self.draw_modifier_curve_common(box, modifier, True, True)
prop = box.operator("scene.freestyle_fill_range_by_selection")
prop.type = 'THICKNESS'
prop.name = modifier.name
elif modifier.type == 'MATERIAL':
box.prop(modifier, "material_attribute", text="")
self.draw_modifier_curve_common(box, modifier, False, True)
elif modifier.type == 'CALLIGRAPHY':
box.prop(modifier, "orientation")
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
elif modifier.type == 'TANGENT':
self.draw_modifier_curve_common(box, modifier, False, False)
self.mapping = 'CURVE'
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
elif modifier.type == 'NOISE':
row = box.row(align=False)
row.prop(modifier, "amplitude")
row.prop(modifier, "period")
row = box.row(align=False)
row.prop(modifier, "seed")
row.prop(modifier, "use_asymmetric")
elif modifier.type == 'CREASE_ANGLE':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
row = box.row(align=True)
row.prop(modifier, "angle_min")
row.prop(modifier, "angle_max")
elif modifier.type == 'CURVATURE_3D':
self.draw_modifier_curve_common(box, modifier, False, False)
row = box.row(align=True)
row.prop(modifier, "thickness_min")
row.prop(modifier, "thickness_max")
row = box.row(align=True)
row.prop(modifier, "curvature_min")
row.prop(modifier, "curvature_max")
freestyle = context.view_layer.freestyle_settings
if not freestyle.use_smoothness:
message = "Enable Face Smoothness to use this modifier"
self.draw_modifier_box_error(col.box(), modifier, message)
def draw_geometry_modifier(self, _context, modifier):
layout = self.layout
col = layout.column(align=True)
self.draw_modifier_box_header(col.box(), modifier)
if modifier.expanded:
box = col.box()
if modifier.type == 'SAMPLING':
box.prop(modifier, "sampling")
elif modifier.type == 'BEZIER_CURVE':
box.prop(modifier, "error")
elif modifier.type == 'SINUS_DISPLACEMENT':
split = box.split()
col = split.column()
col.prop(modifier, "wavelength")
col.prop(modifier, "amplitude")
col = split.column()
col.prop(modifier, "phase")
elif modifier.type == 'SPATIAL_NOISE':
split = box.split()
col = split.column()
col.prop(modifier, "amplitude")
col.prop(modifier, "scale")
col.prop(modifier, "octaves")
col = split.column()
col.prop(modifier, "smooth")
col.prop(modifier, "use_pure_random")
elif modifier.type == 'PERLIN_NOISE_1D':
split = box.split()
col = split.column()
col.prop(modifier, "frequency")
col.prop(modifier, "amplitude")
col.prop(modifier, "seed")
col = split.column()
col.prop(modifier, "octaves")
col.prop(modifier, "angle")
elif modifier.type == 'PERLIN_NOISE_2D':
split = box.split()
col = split.column()
col.prop(modifier, "frequency")
col.prop(modifier, "amplitude")
col.prop(modifier, "seed")
col = split.column()
col.prop(modifier, "octaves")
col.prop(modifier, "angle")
elif modifier.type == 'BACKBONE_STRETCHER':
box.prop(modifier, "backbone_length")
elif modifier.type == 'TIP_REMOVER':
box.prop(modifier, "tip_length")
elif modifier.type == 'POLYGONIZATION':
box.prop(modifier, "error")
elif modifier.type == 'GUIDING_LINES':
box.prop(modifier, "offset")
elif modifier.type == 'BLUEPRINT':
row = box.row()
row.prop(modifier, "shape", expand=True)
box.prop(modifier, "rounds")
row = box.row()
if modifier.shape in {'CIRCLES', 'ELLIPSES'}:
row.prop(modifier, "random_radius")
row.prop(modifier, "random_center")
elif modifier.shape == 'SQUARES':
row.prop(modifier, "backbone_length")
row.prop(modifier, "random_backbone")
elif modifier.type == '2D_OFFSET':
row = box.row(align=True)
row.prop(modifier, "start")
row.prop(modifier, "end")
row = box.row(align=True)
row.prop(modifier, "x")
row.prop(modifier, "y")
elif modifier.type == '2D_TRANSFORM':
box.prop(modifier, "pivot")
if modifier.pivot == 'PARAM':
box.prop(modifier, "pivot_u")
elif modifier.pivot == 'ABSOLUTE':
row = box.row(align=True)
row.prop(modifier, "pivot_x")
row.prop(modifier, "pivot_y")
row = box.row(align=True)
row.prop(modifier, "scale_x")
row.prop(modifier, "scale_y")
box.prop(modifier, "angle")
elif modifier.type == 'SIMPLIFICATION':
box.prop(modifier, "tolerance")
def draw(self, context):
layout = self.layout
view_layer = context.view_layer
lineset = view_layer.freestyle_settings.linesets.active
layout.active = view_layer.use_freestyle
if lineset is None:
return
linestyle = lineset.linestyle
layout.template_ID(lineset, "linestyle", new="scene.freestyle_linestyle_new")
if linestyle is None:
return
row = layout.row(align=True)
row.prop(linestyle, "panel", expand=True)
if linestyle.panel == 'STROKES':
layout.prop(linestyle, "use_chaining", text="Chaining:")
split = layout.split(align=True)
split.active = linestyle.use_chaining
col = split.column()
col.active = linestyle.use_chaining
col.prop(linestyle, "chaining", text="")
if linestyle.chaining == 'SKETCHY':
col.prop(linestyle, "rounds")
col = split.column()
col.prop(linestyle, "use_same_object")
layout.label(text="Splitting:")
split = layout.split(align=True)
col = split.column()
row = col.row(align=True)
row.prop(linestyle, "use_angle_min", text="")
sub = row.row()
sub.active = linestyle.use_angle_min
sub.prop(linestyle, "angle_min")
row = col.row(align=True)
row.prop(linestyle, "use_angle_max", text="")
sub = row.row()
sub.active = linestyle.use_angle_max
sub.prop(linestyle, "angle_max")
col = split.column()
row = col.row(align=True)
row.prop(linestyle, "use_split_length", text="")
sub = row.row()
sub.active = linestyle.use_split_length
sub.prop(linestyle, "split_length", text="2D Length")
row = col.row(align=True)
row.prop(linestyle, "material_boundary")
row = layout.row(align=True)
row.prop(linestyle, "use_split_pattern", text="")
sub = row.row(align=True)
sub.active = linestyle.use_split_pattern
sub.prop(linestyle, "split_dash1", text="D1")
sub.prop(linestyle, "split_gap1", text="G1")
sub.prop(linestyle, "split_dash2", text="D2")
sub.prop(linestyle, "split_gap2", text="G2")
sub.prop(linestyle, "split_dash3", text="D3")
sub.prop(linestyle, "split_gap3", text="G3")
layout.prop(linestyle, "use_sorting", text="Sorting:")
col = layout.column()
col.active = linestyle.use_sorting
row = col.row(align=True)
row.prop(linestyle, "sort_key", text="")
sub = row.row()
sub.active = linestyle.sort_key in {'DISTANCE_FROM_CAMERA',
'PROJECTED_X',
'PROJECTED_Y'}
sub.prop(linestyle, "integration_type", text="")
row = col.row(align=True)
row.prop(linestyle, "sort_order", expand=True)
layout.label(text="Selection:")
split = layout.split(align=True)
col = split.column()
row = col.row(align=True)
row.prop(linestyle, "use_length_min", text="")
sub = row.row()
sub.active = linestyle.use_length_min
sub.prop(linestyle, "length_min")
row = col.row(align=True)
row.prop(linestyle, "use_length_max", text="")
sub = row.row()
sub.active = linestyle.use_length_max
sub.prop(linestyle, "length_max")
col = split.column()
row = col.row(align=True)
row.prop(linestyle, "use_chain_count", text="")
sub = row.row()
sub.active = linestyle.use_chain_count
sub.prop(linestyle, "chain_count")
layout.label(text="Caps:")
row = layout.row(align=True)
row.prop(linestyle, "caps", expand=True)
layout.prop(linestyle, "use_dashed_line", text="Dashed Line:")
row = layout.row(align=True)
row.active = linestyle.use_dashed_line
row.prop(linestyle, "dash1", text="D1")
row.prop(linestyle, "gap1", text="G1")
row.prop(linestyle, "dash2", text="D2")
row.prop(linestyle, "gap2", text="G2")
row.prop(linestyle, "dash3", text="D3")
row.prop(linestyle, "gap3", text="G3")
elif linestyle.panel == 'COLOR':
col = layout.column()
row = col.row()
row.label(text="Base Color:")
row.prop(linestyle, "color", text="")
col.label(text="Modifiers:")
col.operator_menu_enum("scene.freestyle_color_modifier_add", "type", text="Add Modifier")
for modifier in linestyle.color_modifiers:
self.draw_color_modifier(context, modifier)
elif linestyle.panel == 'ALPHA':
col = layout.column()
row = col.row()
row.label(text="Base Transparency:")
row.prop(linestyle, "alpha")
col.label(text="Modifiers:")
col.operator_menu_enum("scene.freestyle_alpha_modifier_add", "type", text="Add Modifier")
for modifier in linestyle.alpha_modifiers:
self.draw_alpha_modifier(context, modifier)
elif linestyle.panel == 'THICKNESS':
col = layout.column()
row = col.row()
row.label(text="Base Thickness:")
row.prop(linestyle, "thickness")
subcol = col.column()
subcol.active = linestyle.chaining == 'PLAIN' and linestyle.use_same_object
row = subcol.row()
row.prop(linestyle, "thickness_position", expand=True)
row = subcol.row()
row.prop(linestyle, "thickness_ratio")
row.active = (linestyle.thickness_position == 'RELATIVE')
col = layout.column()
col.label(text="Modifiers:")
col.operator_menu_enum("scene.freestyle_thickness_modifier_add", "type", text="Add Modifier")
for modifier in linestyle.thickness_modifiers:
self.draw_thickness_modifier(context, modifier)
elif linestyle.panel == 'GEOMETRY':
col = layout.column()
col.label(text="Modifiers:")
col.operator_menu_enum("scene.freestyle_geometry_modifier_add", "type", text="Add Modifier")
for modifier in linestyle.geometry_modifiers:
self.draw_geometry_modifier(context, modifier)
elif linestyle.panel == 'TEXTURE':
layout.separator()
row = layout.row()
row.prop(linestyle, "use_nodes")
row.prop(linestyle, "texture_spacing", text="Spacing Along Stroke")
row = layout.row()
props = row.operator(
"wm.properties_context_change",
text="Go to Linestyle Textures Properties",
icon='TEXTURE',
)
props.context = 'TEXTURE'
elif linestyle.panel == 'MISC':
pass
class MaterialFreestyleButtonsPanel:
    # Mixin base for material-context panels that must only appear when
    # Freestyle rendering is available and enabled.  Subclasses supply
    # COMPAT_ENGINES (see the poll() check below).
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "material"

    @classmethod
    def poll(cls, context):
        """Show the panel only when Blender was built with Freestyle,
        a material and scene are active, Freestyle is enabled for the
        render, and the current engine is declared compatible."""
        scene = context.scene
        material = context.material
        # build_options.freestyle is False when Freestyle was compiled out.
        with_freestyle = bpy.app.build_options.freestyle
        return (
            with_freestyle and material and scene and scene.render.use_freestyle and
            (context.engine in cls.COMPAT_ENGINES)
        )
class MATERIAL_PT_freestyle_line(MaterialFreestyleButtonsPanel, Panel):
    """Material properties panel exposing the Freestyle line color and
    priority of the active material."""
    bl_label = "Freestyle Line"
    bl_options = {'DEFAULT_CLOSED'}  # collapsed by default in the UI
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_EEVEE'}

    def draw(self, context):
        layout = self.layout
        mat = context.material
        # Single row: color swatch (no label) followed by the priority field.
        row = layout.row()
        row.prop(mat, "line_color", text="")
        row.prop(mat, "line_priority", text="Priority")
# All UI classes defined by this module, in the order they are registered.
classes = (
    RENDER_PT_freestyle,
    VIEWLAYER_UL_linesets,
    RENDER_MT_lineset_context_menu,
    VIEWLAYER_PT_freestyle,
    VIEWLAYER_PT_freestyle_lineset,
    VIEWLAYER_PT_freestyle_linestyle,
    MATERIAL_PT_freestyle_line,
)

# Direct execution (e.g. from Blender's text editor) registers the classes
# manually; normal startup registration is handled by the bl_ui package.
if __name__ == "__main__":  # only for live edit.
    from bpy.utils import register_class
    for cls in classes:
        register_class(cls)
| true
| true
|
f7078b2efc7f2c5bfe394f2cd6f4e657fe16fb16
| 50
|
py
|
Python
|
mini_blockchain/__init__.py
|
aliciawyy/financial_machine_learning
|
00c20c250976104dd8ea1484697064272c8231b7
|
[
"MIT"
] | null | null | null |
mini_blockchain/__init__.py
|
aliciawyy/financial_machine_learning
|
00c20c250976104dd8ea1484697064272c8231b7
|
[
"MIT"
] | null | null | null |
mini_blockchain/__init__.py
|
aliciawyy/financial_machine_learning
|
00c20c250976104dd8ea1484697064272c8231b7
|
[
"MIT"
] | null | null | null |
from .base import Block, GenesisBlock, BlockChain
| 25
| 49
| 0.82
|
from .base import Block, GenesisBlock, BlockChain
| true
| true
|
f7078c0be67a7343ea85a08294eaad322d6610d2
| 35,863
|
py
|
Python
|
sympy/solvers/pde.py
|
qcgm1978/sympy
|
cc46047f4449b525b7b0edd4c634bf93d6e7c83d
|
[
"BSD-3-Clause"
] | 1
|
2021-06-24T09:01:18.000Z
|
2021-06-24T09:01:18.000Z
|
sympy/solvers/pde.py
|
qcgm1978/sympy
|
cc46047f4449b525b7b0edd4c634bf93d6e7c83d
|
[
"BSD-3-Clause"
] | 3
|
2021-02-28T03:58:40.000Z
|
2021-03-07T06:12:47.000Z
|
sympy/solvers/pde.py
|
qcgm1978/sympy
|
cc46047f4449b525b7b0edd4c634bf93d6e7c83d
|
[
"BSD-3-Clause"
] | 1
|
2020-08-12T10:51:20.000Z
|
2020-08-12T10:51:20.000Z
|
"""
This module contains pdsolve() and different helper functions that it
uses. It is heavily inspired by the ode module and hence the basic
infrastructure remains the same.
**Functions in this module**
These are the user functions in this module:
- pdsolve() - Solves PDE's
- classify_pde() - Classifies PDEs into possible hints for dsolve().
- pde_separate() - Separate variables in partial differential equation either by
additive or multiplicative separation approach.
These are the helper functions in this module:
- pde_separate_add() - Helper function for searching additive separable solutions.
- pde_separate_mul() - Helper function for searching multiplicative
separable solutions.
**Currently implemented solver methods**
The following methods are implemented for solving partial differential
equations. See the docstrings of the various pde_hint() functions for
more information on each (run help(pde)):
- 1st order linear homogeneous partial differential equations
with constant coefficients.
- 1st order linear general partial differential equations
with constant coefficients.
- 1st order linear partial differential equations with
variable coefficients.
"""
from itertools import combinations_with_replacement
from sympy.simplify import simplify # type: ignore
from sympy.core import Add, S
from sympy.core.compatibility import reduce, is_sequence
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
# All implemented solver hints, in the priority order used by
# classify_pde()/pdsolve(): the first matching hint becomes the default.
allhints = (
    "1st_linear_constant_coeff_homogeneous",
    "1st_linear_constant_coeff",
    "1st_linear_constant_coeff_Integral",
    "1st_linear_variable_coeff"
    )
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
    """
    Solves any (supported) kind of partial differential equation.

    **Usage**

        pdsolve(eq, f(x,y), hint) -> Solve partial differential equation
        eq for function f(x,y), using method hint.

    **Details**

        ``eq`` can be any supported partial differential equation (see
            the pde docstring for supported methods). This can either
            be an Equality, or an expression, which is assumed to be
            equal to 0.

        ``f(x,y)`` is a function of two variables whose derivatives in that
            variable make up the partial differential equation. In many
            cases it is not necessary to provide this; it will be autodetected
            (and an error raised if it couldn't be detected).

        ``hint`` is the solving method that you want pdsolve to use.  Use
            classify_pde(eq, f(x,y)) to get all of the possible hints for
            a PDE.  The default hint, 'default', will use whatever hint
            is returned first by classify_pde().  See Hints below for
            more options that you can use for hint.

        ``solvefun`` is the convention used for arbitrary functions returned
            by the PDE solver. If not set by the user, it is set by default
            to be F.

    **Hints**

        Aside from the various solving methods, there are also some
        meta-hints that you can pass to pdsolve():

        "default":
                This uses whatever hint is returned first by
                classify_pde(). This is the default argument to
                pdsolve().

        "all":
                To make pdsolve apply all relevant classification hints,
                use pdsolve(PDE, func, hint="all").  This will return a
                dictionary of hint:solution terms.  If a hint causes
                pdsolve to raise the NotImplementedError, value of that
                hint's key will be the exception object raised.  The
                dictionary will also include some special keys:

                - order: The order of the PDE.  See also ode_order() in
                  deutils.py

                - default: The solution that would be returned by
                  default.  This is the one produced by the hint that
                  appears first in the tuple returned by classify_pde().

        "all_Integral":
                This is the same as "all", except if a hint also has a
                corresponding "_Integral" hint, it only returns the
                "_Integral" hint.  This is useful if "all" causes
                pdsolve() to hang because of a difficult or impossible
                integral.  This meta-hint will also be much faster than
                "all", because integrate() is an expensive routine.

        See also the classify_pde() docstring for more info on hints,
        and the pde docstring for a list of all supported hints.

    **Tips**
        - You can declare the derivative of an unknown function this way:

            >>> from sympy import Function, Derivative
            >>> from sympy.abc import x, y # x and y are the independent variables
            >>> f = Function("f")(x, y) # f is a function of x and y
            >>> # fx will be the partial derivative of f with respect to x
            >>> fx = Derivative(f, x)
            >>> # fy will be the partial derivative of f with respect to y
            >>> fy = Derivative(f, y)

        - See test_pde.py for many tests, which serves also as a set of
          examples for how to use pdsolve().
        - pdsolve always returns an Equality class (except for the case
          when the hint is "all" or "all_Integral"). Note that it is not possible
          to get an explicit solution for f(x, y) as in the case of ODE's
        - Do help(pde.pde_hintname) to get help more information on a
          specific hint


    Examples
    ========

    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy import Function, Eq
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> u = f(x, y)
    >>> ux = u.diff(x)
    >>> uy = u.diff(y)
    >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
    >>> pdsolve(eq)
    Eq(f(x, y), F(3*x - 2*y)*exp(-2*x/13 - 3*y/13))

    """
    # F is the conventional name for the arbitrary function in solutions.
    if not solvefun:
        solvefun = Function('F')

    # See the docstring of _desolve for more details.
    hints = _desolve(eq, func=func, hint=hint, simplify=True,
                     type='pde', **kwargs)
    eq = hints.pop('eq', False)
    all_ = hints.pop('all', False)
    if all_:
        # TODO : 'best' hint should be implemented when adequate
        # number of hints are added.
        pdedict = {}
        failed_hints = {}
        gethints = classify_pde(eq, dict=True)
        pdedict.update({'order': gethints['order'],
                        'default': gethints['default']})
        # Try every matched hint; failures are recorded (not raised) so the
        # caller can inspect which methods worked.
        for hint in hints:
            try:
                rv = _helper_simplify(eq, hint, hints[hint]['func'],
                    hints[hint]['order'], hints[hint][hint], solvefun)
            except NotImplementedError as detail:
                failed_hints[hint] = detail
            else:
                pdedict[hint] = rv
        pdedict.update(failed_hints)
        return pdedict
    else:
        return _helper_simplify(eq, hints['hint'], hints['func'],
                               hints['order'], hints[hints['hint']], solvefun)
def _helper_simplify(eq, hint, func, order, match, solvefun):
    """Dispatch ``eq`` to the matching ``pde_<hint>`` solver and post-process.

    Resolving the solver here (rather than inside pdsolve) avoids calling
    _desolve repeatedly when several hints are tried.
    """
    suffix = "_Integral"
    # "_Integral" variants share the implementation of their base hint.
    base = hint[:-len(suffix)] if hint.endswith(suffix) else hint
    solvefunc = globals()["pde_" + base]
    raw_solution = solvefunc(eq, func, order, match, solvefun)
    return _handle_Integral(raw_solution, func, order, hint)
def _handle_Integral(expr, func, order, hint):
r"""
Converts a solution with integrals in it into an actual solution.
Simplifies the integral mainly using doit()
"""
if hint.endswith("_Integral"):
return expr
elif hint == "1st_linear_constant_coeff":
return simplify(expr.doit())
else:
return expr
def classify_pde(eq, func=None, dict=False, *, prep=True, **kwargs):
    """
    Returns a tuple of possible pdsolve() classifications for a PDE.

    The tuple is ordered so that first item is the classification that
    pdsolve() uses to solve the PDE by default. In general,
    classifications near the beginning of the list will produce
    better solutions faster than those near the end, though there are
    always exceptions. To make pdsolve use a different classification,
    use pdsolve(PDE, func, hint=<classification>). See also the pdsolve()
    docstring for different meta-hints you can use.

    If ``dict`` is true, classify_pde() will return a dictionary of
    hint:match expression terms. This is intended for internal use by
    pdsolve(). Note that because dictionaries are ordered arbitrarily,
    this will most likely not be in the same order as the tuple.

    You can get help on different hints by doing help(pde.pde_hintname),
    where hintname is the name of the hint without "_Integral".

    See sympy.pde.allhints or the sympy.pde docstring for a list of all
    supported hints that can be returned from classify_pde.


    Examples
    ========

    >>> from sympy.solvers.pde import classify_pde
    >>> from sympy import Function, Eq
    >>> from sympy.abc import x, y
    >>> f = Function('f')
    >>> u = f(x, y)
    >>> ux = u.diff(x)
    >>> uy = u.diff(y)
    >>> eq = Eq(1 + (2*(ux/u)) + (3*(uy/u)), 0)
    >>> classify_pde(eq)
    ('1st_linear_constant_coeff_homogeneous',)
    """
    if func and len(func.args) != 2:
        raise NotImplementedError("Right now only partial "
            "differential equations of two variables are supported")

    if prep or func is None:
        prep, func_ = _preprocess(eq, func)
        if func is None:
            func = func_

    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return classify_pde(eq.lhs - eq.rhs, func)
        eq = eq.lhs

    f = func.func
    x = func.args[0]
    y = func.args[1]
    fx = f(x,y).diff(x)
    fy = f(x,y).diff(y)

    # TODO : For now pde.py uses support offered by the ode_order function
    # to find the order with respect to a multi-variable function. An
    # improvement could be to classify the order of the PDE on the basis of
    # individual variables.
    order = ode_order(eq, f(x,y))

    # hint:matchdict or hint:(tuple of matchdicts)
    # Also will contain "default":<default hint> and "order":order items.
    matching_hints = {'order': order}

    if not order:
        if dict:
            matching_hints["default"] = None
            return matching_hints
        else:
            return ()

    eq = expand(eq)

    # Wild patterns for matching the linear form b*fx + c*fy + d*f + e.
    # b, c, d exclude x and y so a match implies constant coefficients.
    a = Wild('a', exclude = [f(x,y)])
    b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
    c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
    d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
    e = Wild('e', exclude = [f(x,y), fx, fy])
    n = Wild('n', exclude = [x, y])

    # Try removing the smallest power of f(x,y)
    # from the highest partial derivatives of f(x,y)
    reduced_eq = None
    if eq.is_Add:
        var = set(combinations_with_replacement((x,y), order))
        dummyvar = var.copy()
        power = None
        for i in var:
            coeff = eq.coeff(f(x,y).diff(*i))
            if coeff != 1:
                match = coeff.match(a*f(x,y)**n)
                if match and match[a]:
                    power = match[n]
                    dummyvar.remove(i)
                    break
            dummyvar.remove(i)
        for i in dummyvar:
            coeff = eq.coeff(f(x,y).diff(*i))
            if coeff != 1:
                match = coeff.match(a*f(x,y)**n)
                if match and match[a] and match[n] < power:
                    power = match[n]
        if power:
            # Divide through by the smallest common power of f(x,y).
            den = f(x,y)**power
            reduced_eq = Add(*[arg/den for arg in eq.args])
    if not reduced_eq:
        reduced_eq = eq

    if order == 1:
        reduced_eq = collect(reduced_eq, f(x, y))
        r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
        if r:
            if not r[e]:
                ## Linear first-order homogeneous partial-differential
                ## equation with constant coefficients
                r.update({'b': b, 'c': c, 'd': d})
                matching_hints["1st_linear_constant_coeff_homogeneous"] = r
            else:
                if r[b]**2 + r[c]**2 != 0:
                    ## Linear first-order general partial-differential
                    ## equation with constant coefficients
                    r.update({'b': b, 'c': c, 'd': d, 'e': e})
                    matching_hints["1st_linear_constant_coeff"] = r
                    matching_hints[
                        "1st_linear_constant_coeff_Integral"] = r

        else:
            # Retry with looser patterns: coefficients may now depend on x, y.
            b = Wild('b', exclude=[f(x, y), fx, fy])
            c = Wild('c', exclude=[f(x, y), fx, fy])
            d = Wild('d', exclude=[f(x, y), fx, fy])
            r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
            if r:
                r.update({'b': b, 'c': c, 'd': d, 'e': e})
                matching_hints["1st_linear_variable_coeff"] = r

    # Order keys based on allhints.
    retlist = []
    for i in allhints:
        if i in matching_hints:
            retlist.append(i)

    if dict:
        # Dictionaries are ordered arbitrarily, so make note of which
        # hint would come first for pdsolve().  Use an ordered dict in Py 3.
        matching_hints["default"] = None
        matching_hints["ordered_hints"] = tuple(retlist)
        for i in allhints:
            if i in matching_hints:
                matching_hints["default"] = i
                break
        return matching_hints
    else:
        return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
    """
    Checks if the given solution satisfies the partial differential
    equation.

    pde is the partial differential equation which can be given in the
    form of an equation or an expression. sol is the solution for which
    the pde is to be checked. This can also be given in an equation or
    an expression form. If the function is not provided, the helper
    function _preprocess from deutils is used to identify the function.

    If a sequence of solutions is passed, the same sort of container will be
    used to return the result for each solution.

    The following methods are currently being implemented to check if the
    solution satisfies the PDE:

        1. Directly substitute the solution in the PDE and check. If the
           solution hasn't been solved for f, then it will solve for f
           provided solve_for_func hasn't been set to False.

    If the solution satisfies the PDE, then a tuple (True, 0) is returned.
    Otherwise a tuple (False, expr) where expr is the value obtained
    after substituting the solution in the PDE. However if a known solution
    returns False, it may be due to the inability of doit() to simplify it to zero.

    Examples
    ========

    >>> from sympy import Function, symbols
    >>> from sympy.solvers.pde import checkpdesol, pdsolve
    >>> x, y = symbols('x y')
    >>> f = Function('f')
    >>> eq = 2*f(x,y) + 3*f(x,y).diff(x) + 4*f(x,y).diff(y)
    >>> sol = pdsolve(eq)
    >>> assert checkpdesol(eq, sol)[0]
    >>> eq = x*f(x,y) + f(x,y).diff(x)
    >>> checkpdesol(eq, sol)
    (False, (x*F(4*x - 3*y) - 6*F(4*x - 3*y)/25 + 4*Subs(Derivative(F(_xi_1), _xi_1), _xi_1, 4*x - 3*y))*exp(-6*x/25 - 8*y/25))
    """
    # Converting the pde into an equation
    if not isinstance(pde, Equality):
        pde = Eq(pde, 0)

    # If no function is given, try finding the function present.
    if func is None:
        try:
            _, func = _preprocess(pde.lhs)
        except ValueError:
            funcs = [s.atoms(AppliedUndef) for s in (
                sol if is_sequence(sol, set) else [sol])]
            # funcs is a list of sets; unpack them so union() merges their
            # *elements* (passing the list itself raises TypeError because
            # sets are unhashable and cannot be members of a set).
            funcs = set().union(*funcs)
            if len(funcs) != 1:
                raise ValueError(
                    'must pass func arg to checkpdesol for this case.')
            func = funcs.pop()

    # If the given solution is in the form of a list or a set
    # then return a list or set of tuples.
    if is_sequence(sol, set):
        return type(sol)([checkpdesol(
            pde, i, func=func,
            solve_for_func=solve_for_func) for i in sol])

    # Convert solution into an equation
    if not isinstance(sol, Equality):
        sol = Eq(func, sol)
    elif sol.rhs == func:
        sol = sol.reversed

    # Try solving for the function
    solved = sol.lhs == func and not sol.rhs.has(func)
    if solve_for_func and not solved:
        solved = solve(sol, func)
        if solved:
            if len(solved) == 1:
                return checkpdesol(pde, Eq(func, solved[0]),
                    func=func, solve_for_func=False)
            else:
                return checkpdesol(pde, [Eq(func, t) for t in solved],
                    func=func, solve_for_func=False)

    # try direct substitution of the solution into the PDE and simplify
    if sol.lhs == func:
        pde = pde.lhs - pde.rhs
        s = simplify(pde.subs(func, sol.rhs).doit())
        return s is S.Zero, s

    raise NotImplementedError(filldedent('''
        Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
    r"""
    Solves a first order linear homogeneous
    partial differential equation with constant coefficients.

    The general form of this partial differential equation is

    .. math:: a \frac{\partial f(x,y)}{\partial x}
              + b \frac{\partial f(x,y)}{\partial y} + c f(x,y) = 0

    where `a`, `b` and `c` are constants.

    The general solution is of the form:

    .. math::
        f(x, y) = F(- a y + b x ) e^{- \frac{c (a x + b y)}{a^2 + b^2}}

    and can be found in SymPy with ``pdsolve``::

        >>> from sympy.solvers import pdsolve
        >>> from sympy.abc import x, y, a, b, c
        >>> from sympy import Function, pprint
        >>> f = Function('f')
        >>> u = f(x,y)
        >>> ux = u.diff(x)
        >>> uy = u.diff(y)
        >>> genform = a*ux + b*uy + c*u
        >>> pprint(genform)
          d               d
        a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y)
          dx              dy

        >>> pprint(pdsolve(genform))
                                 -c*(a*x + b*y)
                                 ---------------
                                      2    2
                                     a  + b
        f(x, y) = F(-a*y + b*x)*e

    Examples
    ========

    >>> from sympy import pdsolve
    >>> from sympy import Function, pprint
    >>> from sympy.abc import x,y
    >>> f = Function('f')
    >>> pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y))
    Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
    >>> pprint(pdsolve(f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)))
                          x   y
                        - - - -
                          2   2
    f(x, y) = F(x - y)*e

    References
    ==========

    - Viktor Grigoryan, "Partial Differential Equations"
      Math 124A - Fall 2010, pp.7

    """
    # TODO : For now homogeneous first order linear PDE's having
    # two variables are implemented. Once there is support for
    # solving systems of ODE's, this can be extended to n variables.
    f = func.func
    x = func.args[0]
    y = func.args[1]
    # Coefficients matched by classify_pde: b*f_x + c*f_y + d*f = 0.
    b = match[match['b']]
    c = match[match['c']]
    d = match[match['d']]
    # General solution f = F(c*x - b*y) * exp(-d*(b*x + c*y)/(b**2 + c**2)).
    return Eq(f(x,y), exp(-S(d)/(b**2 + c**2)*(b*x + c*y))*solvefun(c*x - b*y))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
    r"""
    Solves a first order linear partial differential equation
    with constant coefficients.

    The general form of this partial differential equation is

    .. math:: a \frac{\partial f(x,y)}{\partial x}
              + b \frac{\partial f(x,y)}{\partial y}
              + c f(x,y) = G(x,y)

    where `a`, `b` and `c` are constants and `G(x, y)` can be an arbitrary
    function in `x` and `y`.

    The general solution of the PDE is:

    .. math::
        f(x, y) = \left. \left[F(\eta) + \frac{1}{a^2 + b^2}
        \int\limits^{a x + b y} G\left(\frac{a \xi + b \eta}{a^2 + b^2},
        \frac{- a \eta + b \xi}{a^2 + b^2} \right)
        e^{\frac{c \xi}{a^2 + b^2}}\, d\xi\right]
        e^{- \frac{c \xi}{a^2 + b^2}}
        \right|_{\substack{\eta=- a y + b x\\ \xi=a x + b y }}\, ,

    where `F(\eta)` is an arbitrary single-valued function. The solution
    can be found in SymPy with ``pdsolve``::

        >>> from sympy.solvers import pdsolve
        >>> from sympy.abc import x, y, a, b, c
        >>> from sympy import Function, pprint
        >>> f = Function('f')
        >>> G = Function('G')
        >>> u = f(x,y)
        >>> ux = u.diff(x)
        >>> uy = u.diff(y)
        >>> genform = a*ux + b*uy + c*u - G(x,y)
        >>> pprint(genform)
          d               d
        a*--(f(x, y)) + b*--(f(x, y)) + c*f(x, y) - G(x, y)
          dx              dy
        >>> pprint(pdsolve(genform, hint='1st_linear_constant_coeff_Integral'))
                  //          a*x + b*y                                             \
                  ||              /                                                 |
                  ||             |                                                  |
                  ||             |                                       c*xi       |
                  ||             |                                     -------      |
                  ||             |                                      2    2      |
                  ||             |      /a*xi + b*eta  -a*eta + b*xi\  a  + b       |
                  ||             |     G|------------, -------------|*e        d(xi)|
                  ||             |      |   2    2         2    2   |               |
                  ||             |      \  a  + b         a  + b    /               |
                  ||             |                                                  |
                  ||             /                                                  |
                  ||                                                                |
        f(x, y) = ||F(eta) + -------------------------------------------------------|*
                  ||                                  2    2                        |
                  \\                                 a  + b                         /
        <BLANKLINE>
                \|
                ||
                ||
                ||
                ||
                ||
                ||
                ||
                ||
          -c*xi ||
         -------||
          2    2||
         a  + b ||
        e       ||
                ||
                /|eta=-a*y + b*x, xi=a*x + b*y

    Examples
    ========

    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy import Function, pprint, exp
    >>> from sympy.abc import x,y
    >>> f = Function('f')
    >>> eq = -2*f(x,y).diff(x) + 4*f(x,y).diff(y) + 5*f(x,y) - exp(x + 3*y)
    >>> pdsolve(eq)
    Eq(f(x, y), (F(4*x + 2*y) + exp(x/2 + 4*y)/15)*exp(x/2 - y))

    References
    ==========

    - Viktor Grigoryan, "Partial Differential Equations"
      Math 124A - Fall 2010, pp.7

    """
    # TODO : For now homogeneous first order linear PDE's having
    # two variables are implemented. Once there is support for
    # solving systems of ODE's, this can be extended to n variables.
    # Characteristic coordinates: xi along (b, c), eta along (c, -b).
    xi, eta = symbols("xi eta")
    f = func.func
    x = func.args[0]
    y = func.args[1]
    b = match[match['b']]
    c = match[match['c']]
    d = match[match['d']]
    e = -match[match['e']]  # e was matched on the LHS; G(x, y) = -e
    expterm = exp(-S(d)/(b**2 + c**2)*xi)
    functerm = solvefun(eta)
    # Express x and y in terms of the characteristic variables.
    solvedict = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)
    # Integral should remain as it is in terms of xi,
    # doit() should be done in _handle_Integral.
    genterm = (1/S(b**2 + c**2))*Integral(
        (1/expterm*e).subs(solvedict), (xi, b*x + c*y))
    return Eq(f(x,y), Subs(expterm*(functerm + genterm),
        (eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
    r"""
    Solves a first order linear partial differential equation
    with variable coefficients. The general form of this partial
    differential equation is

    .. math:: a(x, y) \frac{\partial f(x, y)}{\partial x}
              + b(x, y) \frac{\partial f(x, y)}{\partial y}
              + c(x, y) f(x, y) = G(x, y)

    where `a(x, y)`, `b(x, y)`, `c(x, y)` and `G(x, y)` are arbitrary
    functions in `x` and `y`. This PDE is converted into an ODE by
    making the following transformation:

    1. `\xi` as `x`

    2. `\eta` as the constant in the solution to the differential
       equation `\frac{dy}{dx} = -\frac{b}{a}`

    Making the previous substitutions reduces it to the linear ODE

    .. math:: a(\xi, \eta)\frac{du}{d\xi} + c(\xi, \eta)u - G(\xi, \eta) = 0

    which can be solved using ``dsolve``.

    >>> from sympy.abc import x, y
    >>> from sympy import Function, pprint
    >>> a, b, c, G, f= [Function(i) for i in ['a', 'b', 'c', 'G', 'f']]
    >>> u = f(x,y)
    >>> ux = u.diff(x)
    >>> uy = u.diff(y)
    >>> genform = a(x, y)*u + b(x, y)*ux + c(x, y)*uy - G(x,y)
    >>> pprint(genform)
                                         d                     d
    -G(x, y) + a(x, y)*f(x, y) + b(x, y)*--(f(x, y)) + c(x, y)*--(f(x, y))
                                         dx                    dy

    Examples
    ========

    >>> from sympy.solvers.pde import pdsolve
    >>> from sympy import Function, pprint
    >>> from sympy.abc import x,y
    >>> f = Function('f')
    >>> u = f(x, y)
    >>> eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
    >>> pdsolve(eq)
    Eq(f(x, y), F(x*y)*exp(y**2/2) + 1)

    References
    ==========

    - Viktor Grigoryan, "Partial Differential Equations"
      Math 124A - Fall 2010, pp.7

    """
    from sympy.integrals.integrals import integrate
    from sympy.solvers.ode import dsolve

    xi, eta = symbols("xi eta")
    f = func.func
    x = func.args[0]
    y = func.args[1]
    # Matched form: b*f_x + c*f_y + d*f = e (with e moved to the RHS below).
    b = match[match['b']]
    c = match[match['c']]
    d = match[match['d']]
    e = -match[match['e']]


    if not d:
        # To deal with cases like b*ux = e or c*uy = e
        if not (b and c):
            if c:
                try:
                    tsol = integrate(e/c, y)
                except NotImplementedError:
                    raise NotImplementedError("Unable to find a solution"
                        " due to inability of integrate")
                else:
                    return Eq(f(x,y), solvefun(x) + tsol)
            if b:
                try:
                    tsol = integrate(e/b, x)
                except NotImplementedError:
                    raise NotImplementedError("Unable to find a solution"
                        " due to inability of integrate")
                else:
                    return Eq(f(x,y), solvefun(y) + tsol)

    if not c:
        # To deal with cases when c is 0, a simpler method is used.
        # The PDE reduces to b*(u.diff(x)) + d*u = e, which is a linear ODE in x
        plode = f(x).diff(x)*b + d*f(x) - e
        sol = dsolve(plode, f(x))
        # Arbitrary constants that dsolve introduced become arbitrary
        # functions of the other variable.
        syms = sol.free_symbols - plode.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
        return Eq(f(x, y), rhs)

    if not b:
        # To deal with cases when b is 0, a simpler method is used.
        # The PDE reduces to c*(u.diff(y)) + d*u = e, which is a linear ODE in y
        plode = f(y).diff(y)*c + d*f(y) - e
        sol = dsolve(plode, f(y))
        syms = sol.free_symbols - plode.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
        return Eq(f(x, y), rhs)

    # General case: solve the characteristic ODE dy/dx = c/b to find the
    # invariant eta(x, y), then reduce the PDE to a linear ODE along xi = x.
    dummy = Function('d')
    h = (c/b).subs(y, dummy(x))
    sol = dsolve(dummy(x).diff(x) - h, dummy(x))
    if isinstance(sol, list):
        sol = sol[0]
    solsym = sol.free_symbols - h.free_symbols - {x, y}
    if len(solsym) == 1:
        solsym = solsym.pop()
        # The integration constant, solved for, gives the characteristic eta.
        etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
        ysub = solve(eta - etat, y)[0]
        deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
        final = (dsolve(deq, f(x), hint='1st_linear')).rhs
        if isinstance(final, list):
            final = final[0]
        finsyms = final.free_symbols - deq.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
        return Eq(f(x, y), rhs)

    else:
        raise NotImplementedError("Cannot solve the partial differential equation due"
            " to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
    r"""
    Helper function to replace constants by functions in 1st_linear_variable_coeff

    Each arbitrary constant in ``syms`` (introduced by ``dsolve``) is
    replaced by the arbitrary function ``func`` evaluated at ``funcarg``;
    the placeholder ``eta`` is then substituted by ``funcarg`` as well.
    """
    eta = Symbol("eta")
    if len(syms) == 1:
        sym = syms.pop()
        final = sol.subs(sym, func(funcarg))

    else:
        # Substitute cumulatively so that *every* constant is replaced.
        # The previous code substituted into the original ``sol`` on each
        # iteration, discarding all but the last replacement.
        final = sol
        for sym in syms:
            final = final.subs(sym, func(funcarg))

    return simplify(final.subs(eta, funcarg))
def pde_separate(eq, fun, sep, strategy='mul'):
    """Separate variables in partial differential equation either by additive
    or multiplicative separation approach. It tries to rewrite an equation so
    that one of the specified variables occurs on a different side of the
    equation than the others.

    :param eq: Partial differential equation

    :param fun: Original function F(x, y, z)

    :param sep: List of separated functions [X(x), u(y, z)]

    :param strategy: Separation strategy. You can choose between additive
        separation ('add') and multiplicative separation ('mul') which is
        default.

    Examples
    ========

    >>> from sympy import E, Eq, Function, pde_separate, Derivative as D
    >>> from sympy.abc import x, t
    >>> u, X, T = map(Function, 'uXT')

    >>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
    >>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='add')
    [exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]

    >>> eq = Eq(D(u(x, t), x, 2), D(u(x, t), t, 2))
    >>> pde_separate(eq, u(x, t), [X(x), T(t)], strategy='mul')
    [Derivative(X(x), (x, 2))/X(x), Derivative(T(t), (t, 2))/T(t)]

    See Also
    ========

    pde_separate_add, pde_separate_mul
    """

    do_add = False
    if strategy == 'add':
        do_add = True
    elif strategy == 'mul':
        do_add = False
    else:
        raise ValueError('Unknown strategy: %s' % strategy)

    # Normalize the input so that we always work with Eq(lhs, 0).
    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return pde_separate(Eq(eq.lhs - eq.rhs, 0), fun, sep, strategy)
    else:
        return pde_separate(Eq(eq, 0), fun, sep, strategy)

    if eq.rhs != 0:
        raise ValueError("Value should be 0")

    # Handle arguments
    orig_args = list(fun.args)
    subs_args = []
    for s in sep:
        for j in range(0, len(s.args)):
            subs_args.append(s.args[j])

    # The separation ansatz: sum or product of the separated functions.
    if do_add:
        functions = reduce(operator.add, sep)
    else:
        functions = reduce(operator.mul, sep)

    # Check whether variables match
    if len(subs_args) != len(orig_args):
        raise ValueError("Variable counts do not match")
    # Check for duplicate arguments like  [X(x), u(x, y)]
    if has_dups(subs_args):
        raise ValueError("Duplicate substitution arguments detected")
    # Check whether the variables match
    if set(orig_args) != set(subs_args):
        raise ValueError("Arguments do not match")

    # Substitute original function with separated...
    result = eq.lhs.subs(fun, functions).doit()

    # Divide by terms when doing multiplicative separation
    if not do_add:
        eq = 0
        for i in result.args:
            eq += i/functions
        result = eq

    # The first separated function's variable goes to the LHS; the rest
    # must end up on the RHS.
    svar = subs_args[0]
    dvar = subs_args[1:]
    return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
    """
    Helper function for searching additive separable solutions.

    Consider an equation of two independent variables x, y and a dependent
    variable w, we look for the sum of two functions depending on different
    arguments:

    `w(x, y, z) = X(x) + y(y, z)`

    Examples
    ========

    >>> from sympy import E, Eq, Function, pde_separate_add, Derivative as D
    >>> from sympy.abc import x, t
    >>> u, X, T = map(Function, 'uXT')

    >>> eq = Eq(D(u(x, t), x), E**(u(x, t))*D(u(x, t), t))
    >>> pde_separate_add(eq, u(x, t), [X(x), T(t)])
    [exp(-X(x))*Derivative(X(x), x), exp(T(t))*Derivative(T(t), t)]

    """
    return pde_separate(eq, fun, sep, strategy='add')
def pde_separate_mul(eq, fun, sep):
    """
    Helper function for searching multiplicative separable solutions.

    Consider an equation of two independent variables x, y and a dependent
    variable w, we look for the product of two functions depending on different
    arguments:

    `w(x, y, z) = X(x)*u(y, z)`

    Examples
    ========

    >>> from sympy import Function, Eq, pde_separate_mul, Derivative as D
    >>> from sympy.abc import x, y
    >>> u, X, Y = map(Function, 'uXY')

    >>> eq = Eq(D(u(x, y), x, 2), D(u(x, y), y, 2))
    >>> pde_separate_mul(eq, u(x, y), [X(x), Y(y)])
    [Derivative(X(x), (x, 2))/X(x), Derivative(Y(y), (y, 2))/Y(y)]

    """
    return pde_separate(eq, fun, sep, strategy='mul')
def _separate(eq, dep, others):
    """Separate expression into two parts based on dependencies of variables.

    Returns ``[lhs, rhs]`` where ``lhs`` depends only on ``dep`` and ``rhs``
    only on ``others``, or ``None`` when no such split exists.
    """

    # FIRST PASS
    # Extract derivatives depending our separable variable...
    terms = set()
    for term in eq.args:
        if term.is_Mul:
            for i in term.args:
                if i.is_Derivative and not i.has(*others):
                    terms.add(term)
                    continue
        elif term.is_Derivative and not term.has(*others):
            terms.add(term)
    # Find the factor that we need to divide by
    div = set()
    for term in terms:
        ext, sep = term.expand().as_independent(dep)
        # Failed?
        if sep.has(*others):
            return None
        div.add(ext)
    # FIXME: Find lcm() of all the divisors and divide with it, instead of
    # current hack :(
    # https://github.com/sympy/sympy/issues/4597
    if len(div) > 0:
        final = 0
        for term in eq.args:
            eqn = 0
            for i in div:
                eqn += term / i
            final += simplify(eqn)
        eq = final

    # SECOND PASS - separate the derivatives
    div = set()
    lhs = rhs = 0
    for term in eq.args:
        # Check, whether we have already term with independent variable...
        if not term.has(*others):
            lhs += term
            continue
        # ...otherwise, try to separate
        temp, sep = term.expand().as_independent(dep)
        # Failed?
        if sep.has(*others):
            return None
        # Extract the divisors
        div.add(sep)
        rhs -= term.expand()
    # Do the division
    fulldiv = reduce(operator.add, div)
    lhs = simplify(lhs/fulldiv).expand()
    rhs = simplify(rhs/fulldiv).expand()
    # ...and check whether we were successful :)
    if lhs.has(*others) or rhs.has(dep):
        return None
    return [lhs, rhs]
| 35.26352
| 127
| 0.547249
|
from itertools import combinations_with_replacement
from sympy.simplify import simplify
from sympy.core import Add, S
from sympy.core.compatibility import reduce, is_sequence
from sympy.core.function import Function, expand, AppliedUndef, Subs
from sympy.core.relational import Equality, Eq
from sympy.core.symbol import Symbol, Wild, symbols
from sympy.functions import exp
from sympy.integrals.integrals import Integral
from sympy.utilities.iterables import has_dups
from sympy.utilities.misc import filldedent
from sympy.solvers.deutils import _preprocess, ode_order, _desolve
from sympy.solvers.solvers import solve
from sympy.simplify.radsimp import collect
import operator
# All implemented solver hints, in the priority order used by
# classify_pde()/pdsolve(): the first matching hint becomes the default.
allhints = (
    "1st_linear_constant_coeff_homogeneous",
    "1st_linear_constant_coeff",
    "1st_linear_constant_coeff_Integral",
    "1st_linear_variable_coeff"
    )
def pdsolve(eq, func=None, hint='default', dict=False, solvefun=None, **kwargs):
    """Solve the partial differential equation ``eq`` for ``func``.

    ``hint`` selects the solving method (one of ``allhints``); ``'default'``
    lets the classifier pick the first matching hint, and ``'all'`` tries
    every matching hint, returning a dict keyed by hint name.  ``solvefun``
    is the symbol used for the arbitrary function in the general solution
    (defaults to ``F``).
    """
    if not solvefun:
        solvefun = Function('F')
    # _desolve classifies the equation and returns the match data needed by
    # the individual pde_* solver functions.
    hints = _desolve(eq, func=func, hint=hint, simplify=True,
                     type='pde', **kwargs)
    eq = hints.pop('eq', False)
    all_ = hints.pop('all', False)
    if all_:
        # hint='all': run every matching solver, collecting failures
        # (NotImplementedError) alongside successful solutions.
        pdedict = {}
        failed_hints = {}
        gethints = classify_pde(eq, dict=True)
        pdedict.update({'order': gethints['order'],
                        'default': gethints['default']})
        for hint in hints:
            try:
                rv = _helper_simplify(eq, hint, hints[hint]['func'],
                                      hints[hint]['order'], hints[hint][hint], solvefun)
            except NotImplementedError as detail:
                failed_hints[hint] = detail
            else:
                pdedict[hint] = rv
        pdedict.update(failed_hints)
        return pdedict
    else:
        return _helper_simplify(eq, hints['hint'], hints['func'],
                                hints['order'], hints[hints['hint']], solvefun)
def _helper_simplify(eq, hint, func, order, match, solvefun):
    """Look up the ``pde_*`` solver for *hint*, run it, and post-process
    the result through :func:`_handle_Integral`."""
    if hint.endswith("_Integral"):
        base_hint = hint[:-len("_Integral")]
    else:
        base_hint = hint
    solver = globals()["pde_" + base_hint]
    raw_solution = solver(eq, func, order, match, solvefun)
    return _handle_Integral(raw_solution, func, order, hint)
def _handle_Integral(expr, func, order, hint):
    """Evaluate unevaluated Integrals in *expr*, unless the caller asked
    for them by using an ``_Integral`` hint."""
    wants_unevaluated = hint.endswith("_Integral")
    if not wants_unevaluated and hint == "1st_linear_constant_coeff":
        return simplify(expr.doit())
    return expr
def classify_pde(eq, func=None, dict=False, *, prep=True, **kwargs):
    """Return a tuple of PDE solver hints that may solve ``eq`` for ``func``.

    The hints are ordered as in ``allhints``.  With ``dict=True`` a dict of
    the match data for each hint is returned instead, including ``'order'``,
    ``'default'`` (first matching hint or None) and ``'ordered_hints'``.

    Only first-order linear PDEs in two independent variables are
    recognized at present.
    """
    if func and len(func.args) != 2:
        raise NotImplementedError("Right now only partial "
                                  "differential equations of two variables are supported")
    if prep or func is None:
        prep, func_ = _preprocess(eq, func)
        if func is None:
            func = func_
    # Normalize Eq(lhs, rhs) to an expression equal to zero.
    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return classify_pde(eq.lhs - eq.rhs, func)
        eq = eq.lhs
    f = func.func
    x = func.args[0]
    y = func.args[1]
    fx = f(x,y).diff(x)
    fy = f(x,y).diff(y)
    order = ode_order(eq, f(x,y))
    matching_hints = {'order': order}
    if not order:
        if dict:
            matching_hints["default"] = None
            return matching_hints
        else:
            return ()
    eq = expand(eq)
    a = Wild('a', exclude = [f(x,y)])
    b = Wild('b', exclude = [f(x,y), fx, fy, x, y])
    c = Wild('c', exclude = [f(x,y), fx, fy, x, y])
    d = Wild('d', exclude = [f(x,y), fx, fy, x, y])
    e = Wild('e', exclude = [f(x,y), fx, fy])
    n = Wild('n', exclude = [x, y])
    reduced_eq = None
    if eq.is_Add:
        # If every derivative term carries a common power of f(x,y), divide
        # it out so the linear matcher below can recognize the equation.
        var = set(combinations_with_replacement((x,y), order))
        dummyvar = var.copy()
        power = None
        for i in var:
            coeff = eq.coeff(f(x,y).diff(*i))
            if coeff != 1:
                match = coeff.match(a*f(x,y)**n)
                if match and match[a]:
                    power = match[n]
                    dummyvar.remove(i)
                    break
            dummyvar.remove(i)
        for i in dummyvar:
            coeff = eq.coeff(f(x,y).diff(*i))
            if coeff != 1:
                match = coeff.match(a*f(x,y)**n)
                if match and match[a] and match[n] < power:
                    power = match[n]
        if power:
            den = f(x,y)**power
            reduced_eq = Add(*[arg/den for arg in eq.args])
    if not reduced_eq:
        reduced_eq = eq
    if order == 1:
        reduced_eq = collect(reduced_eq, f(x, y))
        r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
        if r:
            if not r[e]:
                # BUG FIX: this previously assigned to the undefined name
                # "ing_hints", raising NameError for every homogeneous
                # constant-coefficient equation.
                matching_hints["1st_linear_constant_coeff_homogeneous"] = r
            else:
                if r[b]**2 + r[c]**2 != 0:
                    matching_hints["1st_linear_constant_coeff"] = r
                    matching_hints[
                        "1st_linear_constant_coeff_Integral"] = r
        else:
            # Retry with variable (x- and y-dependent) coefficients allowed.
            b = Wild('b', exclude=[f(x, y), fx, fy])
            c = Wild('c', exclude=[f(x, y), fx, fy])
            d = Wild('d', exclude=[f(x, y), fx, fy])
            r = reduced_eq.match(b*fx + c*fy + d*f(x,y) + e)
            if r:
                r.update({'b': b, 'c': c, 'd': d, 'e': e})
                matching_hints["1st_linear_variable_coeff"] = r
    # Order the matched hints according to allhints.
    retlist = []
    for i in allhints:
        if i in matching_hints:
            retlist.append(i)
    if dict:
        matching_hints["default"] = None
        matching_hints["ordered_hints"] = tuple(retlist)
        for i in allhints:
            if i in matching_hints:
                matching_hints["default"] = i
                break
        return matching_hints
    else:
        return tuple(retlist)
def checkpdesol(pde, sol, func=None, solve_for_func=True):
    """Check whether *sol* satisfies the partial differential equation *pde*.

    Returns ``(True, 0)`` when substitution simplifies to zero, otherwise
    ``(False, residual)``.  *sol* may be an expression, an ``Eq``, or a
    sequence of candidate solutions (checked element-wise).  When *func* is
    not given it is inferred from *pde* or from the atoms of *sol*.
    """
    # Convert solution into an equation to zero.
    if not isinstance(pde, Equality):
        pde = Eq(pde, 0)
    # Try to infer the solved-for function when not supplied.
    if func is None:
        try:
            _, func = _preprocess(pde.lhs)
        except ValueError:
            funcs = [s.atoms(AppliedUndef) for s in (
                sol if is_sequence(sol, set) else [sol])]
            # BUG FIX: the per-solution sets must be unpacked into union();
            # union(funcs) tries to union with a list whose elements are
            # (unhashable) sets and raises TypeError.
            funcs = set().union(*funcs)
            if len(funcs) != 1:
                raise ValueError(
                    'must pass func arg to checkpdesol for this case.')
            func = funcs.pop()
    # If a list of solutions is passed, check each one.
    if is_sequence(sol, set):
        return type(sol)([checkpdesol(
            pde, i, func=func,
            solve_for_func=solve_for_func) for i in sol])
    # Normalize sol into the form Eq(func, rhs).
    if not isinstance(sol, Equality):
        sol = Eq(func, sol)
    elif sol.rhs == func:
        sol = sol.reversed
    # Try to solve for func if the solution is not yet explicit.
    solved = sol.lhs == func and not sol.rhs.has(func)
    if solve_for_func and not solved:
        solved = solve(sol, func)
        if solved:
            if len(solved) == 1:
                return checkpdesol(pde, Eq(func, solved[0]),
                                   func=func, solve_for_func=False)
            else:
                return checkpdesol(pde, [Eq(func, t) for t in solved],
                                   func=func, solve_for_func=False)
    # Substitute into the PDE and see whether the result vanishes.
    if sol.lhs == func:
        pde = pde.lhs - pde.rhs
        s = simplify(pde.subs(func, sol.rhs).doit())
        return s is S.Zero, s
    raise NotImplementedError(filldedent('''
        Unable to test if %s is a solution to %s.''' % (sol, pde)))
def pde_1st_linear_constant_coeff_homogeneous(eq, func, order, match, solvefun):
    """General solution of ``b*u_x + c*u_y + d*u = 0`` with constant b, c, d:
    ``u = exp(-d*(b*x + c*y)/(b**2 + c**2)) * F(c*x - b*y)``.

    Currently only two independent variables are supported; once systems of
    ODEs are solvable this can be extended to n variables.
    """
    f = func.func
    x, y = func.args[0], func.args[1]
    b = match[match['b']]
    c = match[match['c']]
    d = match[match['d']]
    decay = exp(-S(d)/(b**2 + c**2)*(b*x + c*y))
    return Eq(f(x, y), decay*solvefun(c*x - b*y))
def pde_1st_linear_constant_coeff(eq, func, order, match, solvefun):
    """General solution of ``b*u_x + c*u_y + d*u = e(x, y)`` with constant
    b, c, d.

    The equation is rewritten in characteristic coordinates
    ``xi = b*x + c*y``, ``eta = c*x - b*y``, where it reduces to a linear
    first-order ODE in ``xi``; the answer is returned as an unevaluated
    ``Subs`` containing an ``Integral``.  Only two independent variables
    are supported at present.
    """
    xi, eta = symbols("xi eta")
    f = func.func
    x, y = func.args[0], func.args[1]
    b = match[match['b']]
    c = match[match['c']]
    d = match[match['d']]
    e = -match[match['e']]
    integrating_factor = exp(-S(d)/(b**2 + c**2)*xi)
    arbitrary_part = solvefun(eta)
    # Express x, y through the characteristic coordinates.
    coords = solve((b*x + c*y - xi, c*x - b*y - eta), x, y)
    particular_part = (1/S(b**2 + c**2))*Integral(
        (1/integrating_factor*e).subs(coords), (xi, b*x + c*y))
    return Eq(f(x,y), Subs(integrating_factor*(arbitrary_part + particular_part),
        (eta, xi), (c*x - b*y, b*x + c*y)))
def pde_1st_linear_variable_coeff(eq, func, order, match, solvefun):
    """Solve ``b(x, y)*u_x + c(x, y)*u_y + d(x, y)*u = e(x, y)`` by the
    method of characteristics.  Degenerate cases (d == 0 with one of b, c
    missing, or b == 0 / c == 0) reduce to direct integration or a single
    ODE; the general case changes variables along the characteristic curve
    found from dy/dx = c/b.
    """
    from sympy.integrals.integrals import integrate
    from sympy.solvers.ode import dsolve
    xi, eta = symbols("xi eta")
    f = func.func
    x = func.args[0]
    y = func.args[1]
    b = match[match['b']]
    c = match[match['c']]
    d = match[match['d']]
    e = -match[match['e']]
    if not d:
        # Degenerate cases like b*u_x = e or c*u_y = e: solve by direct
        # integration in the single variable that appears.
        if not (b and c):
            if c:
                try:
                    tsol = integrate(e/c, y)
                except NotImplementedError:
                    raise NotImplementedError("Unable to find a solution"
                                              " due to inability of integrate")
                else:
                    return Eq(f(x,y), solvefun(x) + tsol)
            if b:
                try:
                    tsol = integrate(e/b, x)
                except NotImplementedError:
                    raise NotImplementedError("Unable to find a solution"
                                              " due to inability of integrate")
                else:
                    return Eq(f(x,y), solvefun(y) + tsol)
    if not c:
        # The PDE is an ODE in x with y acting as a parameter.
        plode = f(x).diff(x)*b + d*f(x) - e
        sol = dsolve(plode, f(x))
        syms = sol.free_symbols - plode.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, y)
        return Eq(f(x, y), rhs)
    if not b:
        # The PDE is an ODE in y with x acting as a parameter.
        plode = f(y).diff(y)*c + d*f(y) - e
        sol = dsolve(plode, f(y))
        syms = sol.free_symbols - plode.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(sol.rhs, syms, solvefun, x)
        return Eq(f(x, y), rhs)
    # General case: find the characteristic curve from dy/dx = c/b.
    dummy = Function('d')
    h = (c/b).subs(y, dummy(x))
    sol = dsolve(dummy(x).diff(x) - h, dummy(x))
    if isinstance(sol, list):
        sol = sol[0]
    solsym = sol.free_symbols - h.free_symbols - {x, y}
    if len(solsym) == 1:
        solsym = solsym.pop()
        # eta is constant along a characteristic; substitute it out and
        # solve the remaining first-order linear ODE in x.
        etat = (solve(sol, solsym)[0]).subs(dummy(x), y)
        ysub = solve(eta - etat, y)[0]
        deq = (b*(f(x).diff(x)) + d*f(x) - e).subs(y, ysub)
        final = (dsolve(deq, f(x), hint='1st_linear')).rhs
        if isinstance(final, list):
            final = final[0]
        finsyms = final.free_symbols - deq.free_symbols - {x, y}
        rhs = _simplify_variable_coeff(final, finsyms, solvefun, etat)
        return Eq(f(x, y), rhs)
    else:
        raise NotImplementedError("Cannot solve the partial differential equation due"
                                  " to inability of constantsimp")
def _simplify_variable_coeff(sol, syms, func, funcarg):
    """Replace the arbitrary constants *syms* in *sol* with ``func(funcarg)``
    and substitute the characteristic symbol ``eta`` back to *funcarg*."""
    eta = Symbol("eta")
    if len(syms) == 1:
        sym = syms.pop()
        final = sol.subs(sym, func(funcarg))
    else:
        for key, sym in enumerate(syms):
            # NOTE(review): each iteration substitutes into the original
            # ``sol``, so only the last symbol's substitution survives —
            # presumably this was meant to be cumulative; verify upstream.
            final = sol.subs(sym, func(funcarg))
    return simplify(final.subs(eta, funcarg))
def pde_separate(eq, fun, sep, strategy='mul'):
    """Separate variables in the PDE *eq* for the function *fun*, using the
    trial functions *sep* combined additively (``strategy='add'``) or
    multiplicatively (``strategy='mul'``).  Returns ``[lhs, rhs]`` of the
    separated equation, or None if separation fails.
    """
    do_add = False
    if strategy == 'add':
        do_add = True
    elif strategy == 'mul':
        do_add = False
    else:
        raise ValueError('Unknown strategy: %s' % strategy)
    # Normalize the input to an Eq(..., 0).
    if isinstance(eq, Equality):
        if eq.rhs != 0:
            return pde_separate(Eq(eq.lhs - eq.rhs, 0), fun, sep, strategy)
    else:
        return pde_separate(Eq(eq, 0), fun, sep, strategy)
    if eq.rhs != 0:
        raise ValueError("Value should be 0")
    # Collect the arguments of the separation trial functions.
    orig_args = list(fun.args)
    subs_args = []
    for s in sep:
        for j in range(0, len(s.args)):
            subs_args.append(s.args[j])
    if do_add:
        functions = reduce(operator.add, sep)
    else:
        functions = reduce(operator.mul, sep)
    # The trial functions together must use exactly the original variables.
    if len(subs_args) != len(orig_args):
        raise ValueError("Variable counts do not match")
    # Check for duplicate arguments like [X(x), u(x, y)].
    if has_dups(subs_args):
        raise ValueError("Duplicate substitution arguments detected")
    if set(orig_args) != set(subs_args):
        raise ValueError("Arguments do not match")
    # Substitute the original function with the separated product/sum.
    result = eq.lhs.subs(fun, functions).doit()
    # For multiplicative separation, divide every term by the product.
    if not do_add:
        eq = 0
        for i in result.args:
            eq += i/functions
        result = eq
    svar = subs_args[0]
    dvar = subs_args[1:]
    return _separate(result, svar, dvar)
def pde_separate_add(eq, fun, sep):
    """Search for an additively separable solution; shorthand for
    ``pde_separate(eq, fun, sep, strategy='add')``."""
    return pde_separate(eq, fun, sep, 'add')
def pde_separate_mul(eq, fun, sep):
    """Search for a multiplicatively separable solution; shorthand for
    ``pde_separate(eq, fun, sep, strategy='mul')``."""
    return pde_separate(eq, fun, sep, 'mul')
def _separate(eq, dep, others):
    """Separate *eq* into a part depending only on *dep* (lhs) and a part
    depending only on *others* (rhs); return ``[lhs, rhs]`` or None."""
    # FIRST PASS
    # Extract derivatives depending on our separable variable...
    terms = set()
    for term in eq.args:
        if term.is_Mul:
            for i in term.args:
                if i.is_Derivative and not i.has(*others):
                    terms.add(term)
                    continue
        elif term.is_Derivative and not term.has(*others):
            terms.add(term)
    # Find the factor that we need to divide by
    div = set()
    for term in terms:
        ext, sep = term.expand().as_independent(dep)
        # Failed?
        if sep.has(*others):
            return None
        div.add(ext)
    # FIXME: Find lcm() of all the divisors and divide with it, instead of
    # current hack :(
    # https://github.com/sympy/sympy/issues/4597
    if len(div) > 0:
        final = 0
        for term in eq.args:
            eqn = 0
            for i in div:
                eqn += term / i
            final += simplify(eqn)
        eq = final
    # SECOND PASS - separate the derivatives
    div = set()
    lhs = rhs = 0
    for term in eq.args:
        # Check, whether we have already term with independent variable...
        if not term.has(*others):
            lhs += term
            continue
        # ...otherwise, try to separate
        temp, sep = term.expand().as_independent(dep)
        # Failed?
        if sep.has(*others):
            return None
        # Extract the divisors
        div.add(sep)
        rhs -= term.expand()
    # Do the division
    fulldiv = reduce(operator.add, div)
    lhs = simplify(lhs/fulldiv).expand()
    rhs = simplify(rhs/fulldiv).expand()
    # ...and check whether we were successful :)
    if lhs.has(*others) or rhs.has(dep):
        return None
    return [lhs, rhs]
| true
| true
|
f7078c443de8ebce4f491fccd4921a22ea0062f2
| 36,421
|
py
|
Python
|
Packs/GroupIB_ThreatIntelligenceAttribution/Integrations/GroupIB_TIA_Feed/GroupIB_TIA_Feed.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/GroupIB_ThreatIntelligenceAttribution/Integrations/GroupIB_TIA_Feed/GroupIB_TIA_Feed.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/GroupIB_ThreatIntelligenceAttribution/Integrations/GroupIB_TIA_Feed/GroupIB_TIA_Feed.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
""" IMPORTS """
from typing import Dict, Generator, List, Optional, Tuple, Union
import dateparser
import urllib3
# Disable insecure warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
''' CONSTANTS '''
# Timestamp format used for all date fields written to Demisto indicators.
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Fields shared by every indicator type, copied from user-supplied params.
# TODO: add all necessary field types
COMMON_FIELD_TYPES = ['trafficlightprotocol']
# Demisto field names that hold dates and must be normalized to DATE_FORMAT.
DATE_FIELDS_LIST = ["creationdate", "firstseenbysource", "lastseenbysource", "gibdatecompromised"]
# MAPPING describes, per GIB collection, how to turn a raw feed into Demisto
# indicators: "main_field" is a dotted path to the indicator value inside the
# feed, "main_field_type" is the Demisto indicator type, and "add_fields" /
# "add_fields_types" are parallel lists pairing dotted source paths with the
# Demisto field names they populate (see find_iocs_in_feed / unpack_iocs).
MAPPING: dict = {
    "compromised/mule": {
        "indicators":
            [
                {
                    "main_field": 'account', "main_field_type": 'GIB Compromised Mule',
                    "add_fields": [
                        'dateAdd', 'sourceType', 'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types": [
                        'creationdate', 'source', 'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                },
                {
                    "main_field": 'cnc.url', "main_field_type": 'URL',
                    "add_fields": [
                        'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types": [
                        'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                },
                {
                    "main_field": 'cnc.domain', "main_field_type": 'Domain',
                    "add_fields": [
                        'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types": [
                        'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                },
                {
                    "main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
                    "add_fields": [
                        'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region', 'malware.name',
                        'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
                    ],
                    "add_fields_types": [
                        'asn', 'geocountry', 'geolocation', 'gibmalwarename',
                        'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                }
            ]
    },
    "compromised/imei": {
        "indicators":
            [
                {
                    "main_field": 'cnc.url', "main_field_type": 'URL',
                    "add_fields": [
                        'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types": [
                        'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                },
                {
                    "main_field": 'cnc.domain', "main_field_type": 'Domain',
                    "add_fields": [
                        'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types": [
                        'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                },
                {
                    "main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
                    "add_fields": [
                        'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region',
                        'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types": [
                        'asn', 'geocountry', 'geolocation',
                        'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                },
                {
                    "main_field": 'device.imei', "main_field_type": 'GIB Compromised IMEI',
                    "add_fields": [
                        'dateDetected', 'dateCompromised', 'device.model',
                        'client.ipv4.asn', 'client.ipv4.countryName',
                        'client.ipv4.region', 'client.ipv4.ip',
                        'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types":[
                        'creationdate', 'gibdatecompromised', 'devicemodel',
                        'asn', 'geocountry', 'geolocation', 'ipaddress',
                        'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                }
            ]
    },
    "attacks/ddos": {
        "indicators":
            [
                {
                    "main_field": 'cnc.url', "main_field_type": 'URL',
                    "add_fields": [
                        'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types": [
                        'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                },
                {
                    "main_field": 'cnc.domain', "main_field_type": 'Domain',
                    "add_fields": [
                        'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types": [
                        'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                },
                {
                    "main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
                    "add_fields": [
                        'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region',
                        'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types": [
                        'asn', 'geocountry', 'geolocation',
                        'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                },
                {
                    "main_field": 'target.ipv4.ip', "main_field_type": 'GIB Victim IP',
                    "add_fields": [
                        'target.ipv4.asn', 'target.ipv4.countryName', 'target.ipv4.region',
                        'malware.name', 'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id',
                        'dateBegin', 'dateEnd'
                    ],
                    "add_fields_types": [
                        'asn', 'geocountry', 'geolocation',
                        'gibmalwarename', 'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                }
            ]
    },
    "attacks/deface": {
        "indicators":
            [
                {
                    "main_field": 'url', "main_field_type": 'URL',
                    "add_fields": ['threatActor.name', 'threatActor.isAPT', 'threatActor.id'],
                    "add_fields_types": ['gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid']
                },
                {
                    "main_field": 'targetDomain', "main_field_type": 'Domain',
                    "add_fields": ['threatActor.name', 'threatActor.isAPT', 'threatActor.id'],
                    "add_fields_types": ['gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid']
                },
                {
                    "main_field": 'targetIp.ip', "main_field_type": 'IP',
                    "add_fields": [
                        'targetIp.asn', 'targetIp.countryName', 'targetIp.region',
                        'threatActor.name', 'threatActor.isAPT', 'threatActor.id'
                    ],
                    "add_fields_types": [
                        'asn', 'geocountry', 'geolocation',
                        'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid'
                    ]
                }
            ]
    },
    "attacks/phishing": {
        "indicators":
            [
                {
                    "main_field": 'url', "main_field_type": 'URL',
                },
                {
                    "main_field": 'phishingDomain.domain', "main_field_type": 'Domain',
                    "add_fields":
                        [
                            'phishingDomain.dateRegistered', 'dateDetected',
                            'phishingDomain.registrar',
                            'phishingDomain.title', 'targetBrand',
                            'targetCategory', 'targetDomain'
                        ],
                    "add_fields_types":
                        [
                            'creationdate', 'firstseenbysource',
                            'registrarname',
                            'gibphishingtitle', 'gibtargetbrand',
                            'gibtargetcategory', 'gibtargetdomain'
                        ]
                },
                {
                    "main_field": 'ipv4.ip', "main_field_type": 'IP',
                    "add_fields": ['ipv4.asn', 'ipv4.countryName', 'ipv4.region'],
                    "add_fields_types": ['asn', 'geocountry', 'geolocation']
                }
            ]
    },
    "attacks/phishing_kit": {
        "indicators":
            [
                {
                    "main_field": 'emails', "main_field_type": 'Email',
                    "add_fields": ['dateFirstSeen', 'dateLastSeen'],
                    "add_fields_types": ['firstseenbysource', 'lastseenbysource']
                }
            ]
    },
    "apt/threat": {
        "indicators":
            [
                {
                    "main_field": 'indicators.params.ipv4', "main_field_type": 'IP',
                    "add_fields": [
                        'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id',
                        'indicators.dateFirstSeen', 'indicators.dateLastSeen'
                    ],
                    "add_fields_types": [
                        'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                },
                {
                    "main_field": 'indicators.params.domain', "main_field_type": 'Domain',
                    "add_fields": [
                        'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id',
                        'indicators.dateFirstSeen', 'indicators.dateLastSeen'
                    ],
                    "add_fields_types": [
                        'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                },
                {
                    "main_field": 'indicators.params.url', "main_field_type": 'URL',
                    "add_fields": [
                        'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id',
                        'indicators.dateFirstSeen', 'indicators.dateLastSeen'
                    ],
                    "add_fields_types": [
                        'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                },
                {
                    "main_field": 'indicators.params.hashes.md5', "main_field_type": 'File',
                    "add_fields": [
                        'indicators.params.name', 'indicators.params.hashes.md5',
                        'indicators.params.hashes.sha1',
                        'indicators.params.hashes.sha256', 'indicators.params.size',
                        'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
                        'indicators.dateFirstSeen', 'indicators.dateLastSeen'
                    ],
                    "add_fields_types": [
                        'gibfilename', 'md5', 'sha1', 'sha256', 'size',
                        'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                }
            ]
    },
    "hi/threat": {
        "indicators":
            [
                {
                    "main_field": 'indicators.params.ipv4', "main_field_type": 'IP',
                    "add_fields": [
                        'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id',
                        'indicators.dateFirstSeen', 'indicators.dateLastSeen'
                    ],
                    "add_fields_types": [
                        'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                },
                {
                    "main_field": 'indicators.params.domain', "main_field_type": 'Domain',
                    "add_fields": [
                        'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id',
                        'indicators.dateFirstSeen', 'indicators.dateLastSeen'
                    ],
                    "add_fields_types": [
                        'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                },
                {
                    "main_field": 'indicators.params.url', "main_field_type": 'URL',
                    "add_fields": [
                        'threatActor.name',
                        'threatActor.isAPT', 'threatActor.id',
                        'indicators.dateFirstSeen', 'indicators.dateLastSeen'
                    ],
                    "add_fields_types": [
                        'gibthreatactorname',
                        'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                },
                {
                    "main_field": 'indicators.params.hashes.md5', "main_field_type": 'File',
                    "add_fields": [
                        'indicators.params.name', 'indicators.params.hashes.md5',
                        'indicators.params.hashes.sha1',
                        'indicators.params.hashes.sha256', 'indicators.params.size',
                        'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
                        'indicators.dateFirstSeen', 'indicators.dateLastSeen'
                    ],
                    "add_fields_types": [
                        'gibfilename', 'md5', 'sha1', 'sha256', 'size',
                        'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                }
            ]
    },
    "suspicious_ip/tor_node": {
        'indicators':
            [
                {
                    "main_field": 'ipv4.ip', "main_field_type": 'IP',
                    "add_fields": ['ipv4.asn', 'ipv4.countryName', 'ipv4.region', 'dateFirstSeen', 'dateLastSeen'],
                    "add_fields_types": ['asn', 'geocountry', 'geolocation', 'firstseenbysource', 'lastseenbysource']
                }
            ]
    },
    "suspicious_ip/open_proxy": {
        'indicators':
            [
                {
                    "main_field": 'ipv4.ip', "main_field_type": 'IP',
                    "add_fields":
                        [
                            'ipv4.asn', 'ipv4.countryName', 'ipv4.region',
                            'port', 'anonymous', 'source',
                            'dateFirstSeen', 'dateDetected'
                        ],
                    "add_fields_types":
                        [
                            'asn', 'geocountry', 'geolocation',
                            'gibproxyport', 'gibproxyanonymous', 'source',
                            'firstseenbysource', 'lastseenbysource'
                        ]
                }
            ]
    },
    "suspicious_ip/socks_proxy": {
        'indicators':
            [
                {
                    "main_field": 'ipv4.ip', "main_field_type": 'IP',
                    "add_fields": ['ipv4.asn', 'ipv4.countryName', 'ipv4.region', 'dateFirstSeen', 'dateLastSeen'],
                    "add_fields_types": ['asn', 'geocountry', 'geolocation', 'firstseenbysource', 'lastseenbysource']
                }
            ]
    },
    "malware/cnc": {
        'indicators':
            [
                {
                    'main_field': 'url', "main_field_type": 'URL',
                    "add_fields": [
                        'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
                        'dateDetected', 'dateLastSeen'
                    ],
                    "add_fields_types": [
                        'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                },
                {
                    'main_field': 'domain', "main_field_type": 'Domain',
                    "add_fields": [
                        'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
                        'dateDetected', 'dateLastSeen'
                    ],
                    "add_fields_types": [
                        'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                },
                {
                    "main_field": 'ipv4.ip', "main_field_type": 'IP',
                    "add_fields": [
                        'ipv4.asn', 'ipv4.countryName', 'ipv4.region',
                        'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
                        'dateDetected', 'dateLastSeen'
                    ],
                    "add_fields_types": [
                        'asn', 'geocountry', 'geolocation',
                        'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
                        'firstseenbysource', 'lastseenbysource'
                    ]
                }
            ]
    },
    "osi/vulnerability": {
        'indicators':
            [
                {
                    'main_field': 'id', "main_field_type": 'CVE',
                    "add_fields":
                        [
                            'cvss.score', 'cvss.vector', 'softwareMixed',
                            'description', 'dateModified', 'datePublished'
                        ],
                    "add_fields_types":
                        [
                            'cvss', 'gibcvssvector', 'gibsoftwaremixed',
                            'cvedescription', 'cvemodified', 'published'
                        ]
                }
            ]
    },
}
class Client(BaseClient):
    """
    Thin wrapper over the GIB TI&A REST API.

    Only performs HTTP requests and returns raw data; all Demisto-specific
    logic lives outside this class.
    """

    def create_update_generator(self, collection_name: str, date_from: Optional[str] = None,
                                seq_update: Union[int, str] = None, limit: int = 200) -> Generator:
        """
        Yields portions (lists) of feeds from an "update" session for
        *collection_name*, sorted in ascending order.

        The server-side ``seqUpdate`` cursor is a single, monotonically
        increasing number shared by all collections, so resuming from the
        last seen ``seqUpdate`` returns every feed added or re-activated
        (e.g. a tor node detected as active again) since then.

        :param collection_name: collection to update.
        :param date_from: start date of update session.
        :param seq_update: identification number from which to start the session.
        :param limit: size of portion in iteration.
        """
        while True:
            query = {'df': date_from, 'limit': limit, 'seqUpdate': seq_update}
            # Drop unset/falsy parameters before sending the request.
            query = {name: value for name, value in query.items() if value}
            portion = self._http_request(method="GET", url_suffix=collection_name + '/updated',
                                         params=query, timeout=60.,
                                         retries=4, status_list_to_retry=[429, 500])
            if portion.get("count") == 0:
                return
            # Continue from the server-provided cursor; the date filter is
            # only meaningful for the very first request.
            seq_update = portion.get("seqUpdate")
            date_from = None
            yield portion.get('items')

    def create_search_generator(self, collection_name: str, date_from: str = None,
                                limit: int = 200) -> Generator:
        """
        Yields portions (lists) of feeds from a "search" session for
        *collection_name*, sorted in descending order.

        :param collection_name: collection to search.
        :param date_from: start date of search session.
        :param limit: size of portion in iteration.
        """
        result_id = None
        while True:
            query = {'df': date_from, 'limit': limit, 'resultId': result_id}
            query = {name: value for name, value in query.items() if value}
            portion = self._http_request(method="GET", url_suffix=collection_name,
                                         params=query, timeout=60.,
                                         retries=4, status_list_to_retry=[429, 500])
            if len(portion.get('items')) == 0:
                return
            # resultId continues the same search session on the server.
            result_id = portion.get("resultId")
            date_from = None
            yield portion.get('items')

    def search_feed_by_id(self, collection_name: str, feed_id: str) -> Dict:
        """
        Fetches the single feed with *feed_id* from *collection_name*.
        """
        return self._http_request(method="GET", url_suffix=collection_name + '/' + feed_id, timeout=60.,
                                  retries=4, status_list_to_retry=[429, 500])
def test_module(client: Client) -> str:
    """
    Connectivity check: pull one small portion from a known collection.

    :param client: GIB_TI&A_Feed client.
    :return: 'ok' if the request succeeded; any exception fails the test.
    """
    probe = client.create_update_generator(collection_name='compromised/mule', limit=10)
    next(probe)
    return 'ok'
""" Support functions """
def find_element_by_key(obj, key):
    """
    Resolves a dotted *key* path (e.g. ``"cnc.ipv4.ip"``) inside nested
    dicts/lists.  A list at any level maps the remaining path over its
    elements; a non-dict/non-list leaf is returned unchanged.
    """
    parts = key.split(".", 1)
    if len(parts) == 1:
        # Leaf segment: fetch directly (or map over a list of dicts).
        if isinstance(obj, list):
            return [item.get(parts[0]) for item in obj]
        if isinstance(obj, dict):
            return obj.get(parts[0])
        return obj
    head, rest = parts
    if isinstance(obj, list):
        return [find_element_by_key(item.get(head), rest) for item in obj]
    if isinstance(obj, dict):
        return find_element_by_key(obj.get(head), rest)
    return obj
def unpack_iocs(iocs, ioc_type, fields, fields_names, collection_name):
    """
    Recursively ties together and transforms indicator data.

    :param iocs: a single IOC value or a (possibly nested) list of values.
    :param ioc_type: Demisto indicator type for the values.
    :param fields: additional field values parallel to ``fields_names``;
        a list element is treated as per-IOC and indexed alongside ``iocs``.
    :param fields_names: Demisto field names for ``fields``.
    :param collection_name: GIB collection the IOC came from.
    :return: list of indicator dicts ready for ingestion.
    """
    unpacked = []
    if isinstance(iocs, list):
        # Recurse element-wise, picking the matching element of every
        # per-IOC (list-valued) additional field.
        for i, ioc in enumerate(iocs):
            buf_fields = []
            for field in fields:
                if isinstance(field, list):
                    buf_fields.append(field[i])
                else:
                    buf_fields.append(field)
            unpacked.extend(unpack_iocs(ioc, ioc_type, buf_fields, fields_names, collection_name))
        return unpacked
    # Skip placeholder / empty values.
    if iocs in ['255.255.255.255', '0.0.0.0', '', None]:
        return unpacked
    fields_dict = {fields_names[i]: fields[i] for i in range(len(fields_names)) if fields[i] is not None}
    # Transforming one certain field into a markdown table.
    # Use .get(): "gibsoftwaremixed" may be absent here because None values
    # are filtered out of fields_dict above (the old subscript raised KeyError).
    if ioc_type == "CVE" and fields_dict.get("gibsoftwaremixed"):
        soft_mixed = fields_dict.get("gibsoftwaremixed", {})
        buffer = ''
        for chunk in soft_mixed:
            software_name = ', '.join(chunk.get('softwareName'))
            software_type = ', '.join(chunk.get('softwareType'))
            software_version = ', '.join(chunk.get('softwareVersion'))
            if len(software_name) != 0 or len(software_type) != 0 or len(software_version) != 0:
                buffer += '| {0} | {1} | {2} |\n'.format(software_name, software_type,
                                                         software_version.replace('||', ', '))
        if len(buffer) != 0:
            buffer = "| Software Name | Software Type | Software Version |\n" \
                     "| ------------- | ------------- | ---------------- |\n" + buffer
            fields_dict["gibsoftwaremixed"] = buffer
        else:
            del fields_dict["gibsoftwaremixed"]
    # Transforming into correct date format (module-level DATE_FORMAT).
    for date_field in DATE_FIELDS_LIST:
        raw_date = fields_dict.get(date_field)
        if raw_date:
            parsed_date = dateparser.parse(raw_date)
            # dateparser returns None for unparseable input; keep the raw
            # value in that case instead of raising AttributeError.
            if parsed_date is not None:
                fields_dict[date_field] = parsed_date.strftime(DATE_FORMAT)
    fields_dict.update({'gibcollection': collection_name})
    unpacked.append({'value': iocs, 'type': ioc_type,
                     'rawJSON': {'value': iocs, 'type': ioc_type, **fields_dict}, 'fields': fields_dict})
    return unpacked
def find_iocs_in_feed(feed: Dict, collection_name: str, common_fields: Dict) -> List:
    """
    Extracts every IOC from one feed and converts them into the structure
    Demisto expects, driven by the MAPPING rules for the collection.

    :param feed: feed from GIB TI&A.
    :param collection_name: which collection this feed belongs to.
    :param common_fields: fields defined by user.
    """
    indicators: List = []
    for rule in MAPPING.get(collection_name, {}).get('indicators', []):
        value = find_element_by_key(feed, rule['main_field'])
        value_type = rule['main_field_type']
        # Gather the additional field values; 'id'/'gibid' is always attached.
        extra_values = [find_element_by_key(feed, path)
                        for path in rule.get('add_fields', []) + ['id']]
        extra_names = rule.get('add_fields_types', []) + ['gibid']
        # User-defined fields shared by every indicator type.
        for field_type in COMMON_FIELD_TYPES:
            if common_fields.get(field_type):
                extra_values.append(common_fields.get(field_type))
                extra_names.append(field_type)
        # Threat collections additionally carry the associated malware names.
        if collection_name in ['apt/threat', 'hi/threat', 'malware/cnc']:
            extra_values.append(', '.join(find_element_by_key(feed, "malwareList.name")))
            extra_names = extra_names + ['gibmalwarename']
        indicators.extend(unpack_iocs(value, value_type, extra_values,
                                      extra_names, collection_name))
    return indicators
def get_human_readable_feed(indicators: List, type_: str, collection_name: str) -> str:
    """Render *indicators* of one type as a markdown table, with columns
    taken from the MAPPING rule matching that type."""
    headers = ['value', 'type']
    for rule in MAPPING.get(collection_name, {}).get('indicators', {}):
        if rule.get('main_field_type') == type_:
            headers.extend(rule['add_fields_types'])
            break
    # Threat collections always carry the extra malware-name column.
    if collection_name in ['apt/threat', 'hi/threat', 'malware/cnc']:
        headers.append('gibmalwarename')
    return tableToMarkdown("{0} indicators".format(type_), indicators,
                           removeNull=True, headers=headers)
def format_result_for_manual(indicators: List) -> Dict:
    """
    Groups indicator ``rawJSON`` payloads by indicator type for manual
    (war-room) output.

    For CVE indicators the bulky ``gibsoftwaremixed`` markdown table is
    removed before display.
    """
    formatted_indicators: Dict[str, Any] = {}
    for indicator in indicators:
        indicator = indicator.get('rawJSON')
        type_ = indicator.get('type')
        if type_ == 'CVE':
            # BUG FIX: use pop() with a default instead of del —
            # unpack_iocs drops "gibsoftwaremixed" when the software table
            # is empty, so an unconditional del raised KeyError here.
            indicator.pop("gibsoftwaremixed", None)
        if formatted_indicators.get(type_) is None:
            formatted_indicators[type_] = [indicator]
        else:
            formatted_indicators[type_].append(indicator)
    return formatted_indicators
""" Commands """
def fetch_indicators_command(client: Client, last_run: Dict, first_fetch_time: str,
                             indicator_collections: List, requests_count: int,
                             common_fields: Dict) -> Tuple[Dict, List]:
    """
    This function will execute each interval (default is 1 minute).
    :param client: GIB_TI&A_Feed client.
    :param last_run: the greatest sequpdate we fetched from last fetch.
    :param first_fetch_time: if last_run is None then fetch all incidents since first_fetch_time.
    :param indicator_collections: list of collections enabled by client.
    :param requests_count: count of requests to API per collection.
    :param common_fields: fields defined by user.
    :return: next_run will be last_run in the next fetch-indicators; indicators will be created in Demisto.
    """
    indicators = []
    next_run: Dict[str, Dict[str, Union[int, Any]]] = {"last_fetch": {}}
    tags = common_fields.pop("tags", [])
    for collection_name in indicator_collections:
        # Per-collection cursor saved by the previous run (seqUpdate value).
        last_fetch = last_run.get('last_fetch', {}).get(collection_name)
        # Handle first time fetch
        date_from = None
        seq_update = None
        if not last_fetch:
            date_from = dateparser.parse(first_fetch_time)
            if date_from is None:
                raise DemistoException('Inappropriate indicators_first_fetch format, '
                                       'please use something like this: 2020-01-01 or January 1 2020 or 3 days')
            date_from = date_from.strftime('%Y-%m-%d')
        else:
            seq_update = last_fetch
        portions = client.create_update_generator(collection_name=collection_name,
                                                  date_from=date_from, seq_update=seq_update)
        # Cap the number of API requests per collection per run.
        k = 0
        for portion in portions:
            for feed in portion:
                # Remember the newest cursor seen so the next run resumes here.
                seq_update = feed.get('seqUpdate')
                indicators.extend(find_iocs_in_feed(feed, collection_name, common_fields))
            k += 1
            if k >= requests_count:
                break
        if tags:
            # Apply user-supplied tags to both searchable fields and rawJSON.
            for indicator in indicators:
                indicator["fields"].update({"tags": tags})
                indicator["rawJSON"].update({"tags": tags})
        next_run['last_fetch'][collection_name] = seq_update
    return next_run, indicators
def get_indicators_command(client: Client, args: Dict[str, str]):
    """
    Returns a limited portion of indicators to the War Room.

    :param client: GIB_TI&A_Feed client.
    :param args: arguments, provided by client.
    """
    feed_id = args.get('id')
    collection_name = args.get('collection', '')
    try:
        limit = int(args.get('limit', '50'))
    except ValueError:
        raise Exception('A limit should be a number, not a string.')
    if limit > 50:
        raise Exception('A limit should be lower than 50.')
    if collection_name not in MAPPING.keys():
        raise Exception('Incorrect collection name. Please, choose one of the displayed options.')

    raw_json = None
    indicators: List = []
    if feed_id:
        # Direct lookup of a single feed record by its identifier.
        raw_json = client.search_feed_by_id(collection_name=collection_name, feed_id=feed_id)
        indicators.extend(find_iocs_in_feed(raw_json, collection_name, {}))
    else:
        # Page through the collection until enough indicators are gathered.
        reached_limit = False
        for portion in client.create_search_generator(collection_name=collection_name, limit=limit):
            for feed in portion:
                indicators.extend(find_iocs_in_feed(feed, collection_name, {}))
                if len(indicators) >= limit:
                    reached_limit = True
                    break
            if reached_limit:
                break
    # Truncation is a no-op when fewer than `limit` indicators were found.
    indicators = indicators[:limit]

    results = []
    for type_, grouped in format_result_for_manual(indicators).items():
        results.append(CommandResults(
            readable_output=get_human_readable_feed(grouped, type_, collection_name),
            raw_response=raw_json,
            ignore_auto_extract=True
        ))
    return results
def main():
    """
    Parse and validate integration params, then dispatch the invoked command.
    """
    params = demisto.params()
    username = params.get('credentials').get('identifier')
    password = params.get('credentials').get('password')
    proxy = params.get('proxy', False)
    verify_certificate = not params.get('insecure', False)
    base_url = str(params.get("url"))
    indicator_collections = params.get('indicator_collections', [])
    indicators_first_fetch = params.get('indicators_first_fetch', '3 days').strip()
    requests_count = int(params.get('requests_count', 2))
    args = demisto.args()
    command = demisto.command()
    LOG(f'Command being called is {command}')
    try:
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            auth=(username, password),
            proxy=proxy,
            headers={"Accept": "*/*"})
        # Manual commands dispatched through this table; built-ins handled below.
        commands = {'gibtia-get-indicators': get_indicators_command}
        if command == 'test-module':
            # This is the call made when pressing the integration Test button.
            result = test_module(client)
            demisto.results(result)
        elif command == 'fetch-indicators':
            # Set and define the fetch incidents command to run after activated via integration settings.
            common_fields = {
                'trafficlightprotocol': params.get("tlp_color"),
                'tags': argToList(params.get("feedTags")),
            }
            next_run, indicators = fetch_indicators_command(client=client, last_run=get_integration_context(),
                                                            first_fetch_time=indicators_first_fetch,
                                                            indicator_collections=indicator_collections,
                                                            requests_count=requests_count,
                                                            common_fields=common_fields)
            # Persist pagination state, then push indicators in bounded batches.
            set_integration_context(next_run)
            for b in batch(indicators, batch_size=2000):
                demisto.createIndicators(b)
        else:
            return_results(commands[command](client, args))
    # Log exceptions
    except Exception as e:
        return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')


# Demisto integrations may be executed as __main__ or under builtins.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| 42.949292
| 120
| 0.503775
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from typing import Dict, Generator, List, Optional, Tuple, Union
import dateparser
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# ISO-8601-style timestamp layout for feed dates (matches the literal used in unpack_iocs).
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
# Instance-wide fields (from integration params) copied onto every indicator.
COMMON_FIELD_TYPES = ['trafficlightprotocol']
# Indicator fields whose values get parsed and re-serialized into DATE_FORMAT.
DATE_FIELDS_LIST = ["creationdate", "firstseenbysource", "lastseenbysource", "gibdatecompromised"]
MAPPING: dict = {
"compromised/mule": {
"indicators":
[
{
"main_field": 'account', "main_field_type": 'GIB Compromised Mule',
"add_fields": [
'dateAdd', 'sourceType', 'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'creationdate', 'source', 'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.url', "main_field_type": 'URL',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.domain', "main_field_type": 'Domain',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region', 'malware.name',
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation', 'gibmalwarename',
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid'
]
}
]
},
"compromised/imei": {
"indicators":
[
{
"main_field": 'cnc.url', "main_field_type": 'URL',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.domain', "main_field_type": 'Domain',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region',
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation',
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'device.imei', "main_field_type": 'GIB Compromised IMEI',
"add_fields": [
'dateDetected', 'dateCompromised', 'device.model',
'client.ipv4.asn', 'client.ipv4.countryName',
'client.ipv4.region', 'client.ipv4.ip',
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types":[
'creationdate', 'gibdatecompromised', 'devicemodel',
'asn', 'geocountry', 'geolocation', 'ipaddress',
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
}
]
},
"attacks/ddos": {
"indicators":
[
{
"main_field": 'cnc.url', "main_field_type": 'URL',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.domain', "main_field_type": 'Domain',
"add_fields": [
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'cnc.ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'cnc.ipv4.asn', 'cnc.ipv4.countryName', 'cnc.ipv4.region',
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation',
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid'
]
},
{
"main_field": 'target.ipv4.ip', "main_field_type": 'GIB Victim IP',
"add_fields": [
'target.ipv4.asn', 'target.ipv4.countryName', 'target.ipv4.region',
'malware.name', 'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'dateBegin', 'dateEnd'
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation',
'gibmalwarename', 'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
}
]
},
"attacks/deface": {
"indicators":
[
{
"main_field": 'url', "main_field_type": 'URL',
"add_fields": ['threatActor.name', 'threatActor.isAPT', 'threatActor.id'],
"add_fields_types": ['gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid']
},
{
"main_field": 'targetDomain', "main_field_type": 'Domain',
"add_fields": ['threatActor.name', 'threatActor.isAPT', 'threatActor.id'],
"add_fields_types": ['gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid']
},
{
"main_field": 'targetIp.ip', "main_field_type": 'IP',
"add_fields": [
'targetIp.asn', 'targetIp.countryName', 'targetIp.region',
'threatActor.name', 'threatActor.isAPT', 'threatActor.id'
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation',
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid'
]
}
]
},
"attacks/phishing": {
"indicators":
[
{
"main_field": 'url', "main_field_type": 'URL',
},
{
"main_field": 'phishingDomain.domain', "main_field_type": 'Domain',
"add_fields":
[
'phishingDomain.dateRegistered', 'dateDetected',
'phishingDomain.registrar',
'phishingDomain.title', 'targetBrand',
'targetCategory', 'targetDomain'
],
"add_fields_types":
[
'creationdate', 'firstseenbysource',
'registrarname',
'gibphishingtitle', 'gibtargetbrand',
'gibtargetcategory', 'gibtargetdomain'
]
},
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": ['ipv4.asn', 'ipv4.countryName', 'ipv4.region'],
"add_fields_types": ['asn', 'geocountry', 'geolocation']
}
]
},
"attacks/phishing_kit": {
"indicators":
[
{
"main_field": 'emails', "main_field_type": 'Email',
"add_fields": ['dateFirstSeen', 'dateLastSeen'],
"add_fields_types": ['firstseenbysource', 'lastseenbysource']
}
]
},
"apt/threat": {
"indicators":
[
{
"main_field": 'indicators.params.ipv4', "main_field_type": 'IP',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.domain', "main_field_type": 'Domain',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.url', "main_field_type": 'URL',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.hashes.md5', "main_field_type": 'File',
"add_fields": [
'indicators.params.name', 'indicators.params.hashes.md5',
'indicators.params.hashes.sha1',
'indicators.params.hashes.sha256', 'indicators.params.size',
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibfilename', 'md5', 'sha1', 'sha256', 'size',
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
}
]
},
"hi/threat": {
"indicators":
[
{
"main_field": 'indicators.params.ipv4', "main_field_type": 'IP',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.domain', "main_field_type": 'Domain',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.url', "main_field_type": 'URL',
"add_fields": [
'threatActor.name',
'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname',
'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'indicators.params.hashes.md5', "main_field_type": 'File',
"add_fields": [
'indicators.params.name', 'indicators.params.hashes.md5',
'indicators.params.hashes.sha1',
'indicators.params.hashes.sha256', 'indicators.params.size',
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
'indicators.dateFirstSeen', 'indicators.dateLastSeen'
],
"add_fields_types": [
'gibfilename', 'md5', 'sha1', 'sha256', 'size',
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
}
]
},
"suspicious_ip/tor_node": {
'indicators':
[
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": ['ipv4.asn', 'ipv4.countryName', 'ipv4.region', 'dateFirstSeen', 'dateLastSeen'],
"add_fields_types": ['asn', 'geocountry', 'geolocation', 'firstseenbysource', 'lastseenbysource']
}
]
},
"suspicious_ip/open_proxy": {
'indicators':
[
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields":
[
'ipv4.asn', 'ipv4.countryName', 'ipv4.region',
'port', 'anonymous', 'source',
'dateFirstSeen', 'dateDetected'
],
"add_fields_types":
[
'asn', 'geocountry', 'geolocation',
'gibproxyport', 'gibproxyanonymous', 'source',
'firstseenbysource', 'lastseenbysource'
]
}
]
},
"suspicious_ip/socks_proxy": {
'indicators':
[
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": ['ipv4.asn', 'ipv4.countryName', 'ipv4.region', 'dateFirstSeen', 'dateLastSeen'],
"add_fields_types": ['asn', 'geocountry', 'geolocation', 'firstseenbysource', 'lastseenbysource']
}
]
},
"malware/cnc": {
'indicators':
[
{
'main_field': 'url', "main_field_type": 'URL',
"add_fields": [
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
'dateDetected', 'dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
'main_field': 'domain', "main_field_type": 'Domain',
"add_fields": [
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
'dateDetected', 'dateLastSeen'
],
"add_fields_types": [
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
},
{
"main_field": 'ipv4.ip', "main_field_type": 'IP',
"add_fields": [
'ipv4.asn', 'ipv4.countryName', 'ipv4.region',
'threatActor.name', 'threatActor.isAPT', 'threatActor.id',
'dateDetected', 'dateLastSeen'
],
"add_fields_types": [
'asn', 'geocountry', 'geolocation',
'gibthreatactorname', 'gibthreatactorisapt', 'gibthreatactorid',
'firstseenbysource', 'lastseenbysource'
]
}
]
},
"osi/vulnerability": {
'indicators':
[
{
'main_field': 'id', "main_field_type": 'CVE',
"add_fields":
[
'cvss.score', 'cvss.vector', 'softwareMixed',
'description', 'dateModified', 'datePublished'
],
"add_fields_types":
[
'cvss', 'gibcvssvector', 'gibsoftwaremixed',
'cvedescription', 'cvemodified', 'published'
]
}
]
},
}
class Client(BaseClient):
    """HTTP client for the Group-IB TI&A API: pagination helpers over BaseClient."""

    def create_update_generator(self, collection_name: str, date_from: Optional[str] = None,
                                seq_update: Union[int, str] = None, limit: int = 200) -> Generator:
        """Yield item portions from a collection's `/updated` endpoint, page by page.

        The first request is keyed by *date_from* (first fetch) or *seq_update*
        (resumed fetch); iteration stops when the server reports count == 0.
        """
        while True:
            params = {'df': date_from, 'limit': limit, 'seqUpdate': seq_update}
            # Drop unset (falsy) parameters so they are not sent at all.
            params = {key: value for key, value in params.items() if value}
            portion = self._http_request(method="GET", url_suffix=collection_name + '/updated',
                                         params=params, timeout=60.,
                                         retries=4, status_list_to_retry=[429, 500])
            if portion.get("count") == 0:
                break
            # Subsequent pages are addressed purely by the server-issued seqUpdate.
            seq_update = portion.get("seqUpdate")
            date_from = None
            yield portion.get('items')

    def create_search_generator(self, collection_name: str, date_from: str = None,
                                limit: int = 200) -> Generator:
        """Yield item portions from a plain collection search, page by page.

        Pagination is driven by the server-issued resultId; iteration stops
        when a page comes back empty.
        """
        result_id = None
        while True:
            params = {'df': date_from, 'limit': limit, 'resultId': result_id}
            params = {key: value for key, value in params.items() if value}
            portion = self._http_request(method="GET", url_suffix=collection_name,
                                         params=params, timeout=60.,
                                         retries=4, status_list_to_retry=[429, 500])
            if len(portion.get('items')) == 0:
                break
            result_id = portion.get("resultId")
            date_from = None
            yield portion.get('items')

    def search_feed_by_id(self, collection_name: str, feed_id: str) -> Dict:
        """Fetch a single feed record of *collection_name* by its identifier."""
        portion = self._http_request(method="GET", url_suffix=collection_name + '/' + feed_id, timeout=60.,
                                     retries=4, status_list_to_retry=[429, 500])
        return portion
def test_module(client: Client) -> str:
    """Validate connectivity by pulling one small portion from a known collection.

    Propagates any HTTP/auth error raised by the client; returns 'ok' on success.
    """
    probe = client.create_update_generator(collection_name='compromised/mule', limit=10)
    next(probe)
    return 'ok'
def find_element_by_key(obj, key):
    """Resolve a dotted *key* path inside nested dicts/lists.

    Lists are mapped element-wise at each level; any value that is neither a
    dict nor a list is returned unchanged (the remaining path is ignored).
    """
    head, *tail = key.split(".", 1)
    if not tail:
        # Last path segment: extract directly.
        if isinstance(obj, list):
            return [item.get(head) for item in obj]
        if isinstance(obj, dict):
            return obj.get(head)
        return obj
    # Descend one level and recurse with the rest of the path.
    if isinstance(obj, list):
        return [find_element_by_key(item.get(head), tail[0]) for item in obj]
    if isinstance(obj, dict):
        return find_element_by_key(obj.get(head), tail[0])
    return obj
def unpack_iocs(iocs, ioc_type, fields, fields_names, collection_name):
    """
    Flatten one (possibly list-valued) IOC field into XSOAR indicator dicts.

    :param iocs: raw main-field value; lists are unpacked element-wise (recursively).
    :param ioc_type: XSOAR indicator type (e.g. 'IP', 'URL', 'CVE').
    :param fields: additional field values parallel to *fields_names*; a list entry
        supplies one value per element of a list-valued *iocs*.
    :param fields_names: XSOAR field names matching *fields*.
    :param collection_name: GIB collection the IOC came from.
    :return: list of {'value', 'type', 'rawJSON', 'fields'} indicator dicts.
    """
    unpacked = []
    if isinstance(iocs, list):
        for i, ioc in enumerate(iocs):
            # Pick the i-th value out of every list-valued additional field.
            buf_fields = []
            for field in fields:
                if isinstance(field, list):
                    buf_fields.append(field[i])
                else:
                    buf_fields.append(field)
            unpacked.extend(unpack_iocs(ioc, ioc_type, buf_fields, fields_names, collection_name))
    else:
        # Placeholder or empty values never become indicators.
        if iocs in ['255.255.255.255', '0.0.0.0', '', None]:
            return unpacked
        fields_dict = {fields_names[i]: fields[i] for i in range(len(fields_names)) if fields[i] is not None}
        # CVE records carry affected-software data; render it as a Markdown table.
        # Use .get() with a default: the key is absent when softwareMixed was None
        # (None-valued fields are filtered out of fields_dict above).
        if ioc_type == "CVE" and len(fields_dict.get("gibsoftwaremixed", [])) != 0:
            soft_mixed = fields_dict.get("gibsoftwaremixed", {})
            buffer = ''
            for chunk in soft_mixed:
                software_name = ', '.join(chunk.get('softwareName'))
                software_type = ', '.join(chunk.get('softwareType'))
                software_version = ', '.join(chunk.get('softwareVersion'))
                if len(software_name) != 0 or len(software_type) != 0 or len(software_version) != 0:
                    buffer += '| {0} | {1} | {2} |\n'.format(software_name, software_type,
                                                            software_version.replace('||', ', '))
            if len(buffer) != 0:
                buffer = "| Software Name | Software Type | Software Version |\n" \
                         "| ------------- | ------------- | ---------------- |\n" + buffer
                fields_dict["gibsoftwaremixed"] = buffer
            else:
                # Nothing renderable: drop the field entirely.
                del fields_dict["gibsoftwaremixed"]
        for date_field in DATE_FIELDS_LIST:
            if fields_dict.get(date_field):
                # Normalize any human-readable date to the module-wide DATE_FORMAT
                # (previously an inlined duplicate of the same literal).
                fields_dict[date_field] = dateparser.parse(fields_dict.get(date_field)).strftime(DATE_FORMAT)
        fields_dict.update({'gibcollection': collection_name})
        unpacked.append({'value': iocs, 'type': ioc_type,
                         'rawJSON': {'value': iocs, 'type': ioc_type, **fields_dict}, 'fields': fields_dict})
    return unpacked
def find_iocs_in_feed(feed: Dict, collection_name: str, common_fields: Dict) -> List:
    """Extract every indicator described by MAPPING for this collection from one feed record."""
    iocs: List = []
    for spec in MAPPING.get(collection_name, {}).get('indicators', []):
        main_value = find_element_by_key(feed, spec['main_field'])
        # Each indicator always carries the feed id alongside its configured extras.
        extra_paths = spec.get('add_fields', []) + ['id']
        extra_values = [find_element_by_key(feed, path) for path in extra_paths]
        extra_types = spec.get('add_fields_types', []) + ['gibid']
        # Attach instance-wide fields (e.g. TLP) when the user configured them.
        for field_type in COMMON_FIELD_TYPES:
            if common_fields.get(field_type):
                extra_values.append(common_fields.get(field_type))
                extra_types.append(field_type)
        if collection_name in ['apt/threat', 'hi/threat', 'malware/cnc']:
            # These collections expose a malware list at the feed (not spec) level.
            extra_values.append(', '.join(find_element_by_key(feed, "malwareList.name")))
            extra_types = extra_types + ['gibmalwarename']
        iocs.extend(unpack_iocs(main_value, spec['main_field_type'], extra_values,
                                extra_types, collection_name))
    return iocs
def get_human_readable_feed(indicators: List, type_: str, collection_name: str) -> str:
    """
    Build the War Room Markdown table for indicators of one type.

    :param indicators: rawJSON payloads of a single indicator type.
    :param type_: the indicator type being rendered.
    :param collection_name: GIB collection the indicators came from.
    :return: Markdown table produced by tableToMarkdown.
    """
    headers = ['value', 'type']
    for fields in MAPPING.get(collection_name, {}).get('indicators', {}):
        if fields.get('main_field_type') == type_:
            # Some specs (e.g. the plain URL entry of attacks/phishing) define no
            # extra fields at all — .get() avoids a KeyError on those.
            headers.extend(fields.get('add_fields_types', []))
            break
    if collection_name in ['apt/threat', 'hi/threat', 'malware/cnc']:
        # These collections always add the feed-level malware name column.
        headers.append('gibmalwarename')
    return tableToMarkdown("{0} indicators".format(type_), indicators,
                           removeNull=True, headers=headers)
def format_result_for_manual(indicators: List) -> Dict:
    """
    Group raw indicator payloads by indicator type for manual (War Room) output.

    :param indicators: unpacked indicators as produced by unpack_iocs
        (each carries a 'rawJSON' payload with a 'type' key).
    :return: mapping of indicator type -> list of rawJSON payloads.
    """
    formatted_indicators: Dict[str, Any] = {}
    for indicator in indicators:
        raw = indicator.get('rawJSON')
        type_ = raw.get('type')
        if type_ == 'CVE':
            # The software table is rendered separately for CVEs; pop() with a
            # default because unpack_iocs omits the key when no software info
            # existed (a bare `del` would raise KeyError there).
            raw.pop("gibsoftwaremixed", None)
        formatted_indicators.setdefault(type_, []).append(raw)
    return formatted_indicators
def fetch_indicators_command(client: Client, last_run: Dict, first_fetch_time: str,
                             indicator_collections: List, requests_count: int,
                             common_fields: Dict) -> Tuple[Dict, List]:
    """
    Runs on every fetch interval and pulls new indicators per enabled collection.

    :param client: GIB_TI&A_Feed client.
    :param last_run: maps each collection name to the greatest seqUpdate already fetched.
    :param first_fetch_time: when a collection has no last_run entry, fetch since this time.
    :param indicator_collections: list of collections enabled by the user.
    :param requests_count: maximum API requests (portions) per collection.
    :param common_fields: user-defined fields copied onto every indicator.
    :return: (next_run for the next fetch cycle, indicators to create in Demisto).
    """
    indicators = []
    next_run: Dict[str, Dict[str, Union[int, Any]]] = {"last_fetch": {}}
    # Tags are applied in bulk below rather than through per-indicator common fields.
    tags = common_fields.pop("tags", [])
    for collection_name in indicator_collections:
        last_fetch = last_run.get('last_fetch', {}).get(collection_name)
        # Handle first-time fetch for this collection.
        date_from = None
        seq_update = None
        if not last_fetch:
            date_from = dateparser.parse(first_fetch_time)
            if date_from is None:
                raise DemistoException('Inappropriate indicators_first_fetch format, '
                                       'please use something like this: 2020-01-01 or January 1 2020 or 3 days')
            date_from = date_from.strftime('%Y-%m-%d')
        else:
            # Subsequent fetches resume from the persisted sequence number.
            seq_update = last_fetch
        portions = client.create_update_generator(collection_name=collection_name,
                                                  date_from=date_from, seq_update=seq_update)
        k = 0  # portions (API requests) consumed for this collection
        for portion in portions:
            for feed in portion:
                seq_update = feed.get('seqUpdate')
                indicators.extend(find_iocs_in_feed(feed, collection_name, common_fields))
            k += 1
            if k >= requests_count:
                break
        if tags:
            # NOTE(review): re-applies tags to every indicator gathered so far, including
            # earlier collections' — idempotent, but O(total_indicators * collections).
            for indicator in indicators:
                indicator["fields"].update({"tags": tags})
                indicator["rawJSON"].update({"tags": tags})
        next_run['last_fetch'][collection_name] = seq_update
    return next_run, indicators
def get_indicators_command(client: Client, args: Dict[str, str]):
    """
    Returns a limited portion of indicators to the War Room.

    :param client: GIB_TI&A_Feed client.
    :param args: arguments, provided by client ('id', 'collection', 'limit').
    """
    id_, collection_name = args.get('id'), args.get('collection', '')
    indicators = []
    raw_json = None
    try:
        limit = int(args.get('limit', '50'))
        if limit > 50:
            # Not a ValueError, so this propagates past the except below.
            raise Exception('A limit should be lower than 50.')
    except ValueError:
        raise Exception('A limit should be a number, not a string.')
    if collection_name not in MAPPING.keys():
        raise Exception('Incorrect collection name. Please, choose one of the displayed options.')
    if not id_:
        # No id given: page through the collection until `limit` indicators are found.
        portions = client.create_search_generator(collection_name=collection_name, limit=limit)
        for portion in portions:
            for feed in portion:
                indicators.extend(find_iocs_in_feed(feed, collection_name, {}))
                if len(indicators) >= limit:
                    indicators = indicators[:limit]
                    break
            if len(indicators) >= limit:
                break
    else:
        # Direct lookup of a single feed record by its identifier.
        raw_json = client.search_feed_by_id(collection_name=collection_name, feed_id=id_)
        indicators.extend(find_iocs_in_feed(raw_json, collection_name, {}))
        if len(indicators) >= limit:
            indicators = indicators[:limit]
    # One CommandResults (one Markdown table) per indicator type.
    formatted_indicators = format_result_for_manual(indicators)
    results = []
    for type_, indicator in formatted_indicators.items():
        results.append(CommandResults(
            readable_output=get_human_readable_feed(indicator, type_, collection_name),
            raw_response=raw_json,
            ignore_auto_extract=True
        ))
    return results
def main():
    """
    Parse and validate integration params, then dispatch the invoked command.
    """
    params = demisto.params()
    username = params.get('credentials').get('identifier')
    password = params.get('credentials').get('password')
    proxy = params.get('proxy', False)
    verify_certificate = not params.get('insecure', False)
    base_url = str(params.get("url"))
    indicator_collections = params.get('indicator_collections', [])
    indicators_first_fetch = params.get('indicators_first_fetch', '3 days').strip()
    requests_count = int(params.get('requests_count', 2))
    args = demisto.args()
    command = demisto.command()
    LOG(f'Command being called is {command}')
    try:
        client = Client(
            base_url=base_url,
            verify=verify_certificate,
            auth=(username, password),
            proxy=proxy,
            headers={"Accept": "*/*"})
        # Manual commands dispatched through this table; built-ins handled below.
        commands = {'gibtia-get-indicators': get_indicators_command}
        if command == 'test-module':
            # Call made when pressing the integration Test button.
            result = test_module(client)
            demisto.results(result)
        elif command == 'fetch-indicators':
            # Periodic indicator fetch triggered by the integration settings.
            common_fields = {
                'trafficlightprotocol': params.get("tlp_color"),
                'tags': argToList(params.get("feedTags")),
            }
            next_run, indicators = fetch_indicators_command(client=client, last_run=get_integration_context(),
                                                            first_fetch_time=indicators_first_fetch,
                                                            indicator_collections=indicator_collections,
                                                            requests_count=requests_count,
                                                            common_fields=common_fields)
            # Persist pagination state, then push indicators in bounded batches.
            set_integration_context(next_run)
            for b in batch(indicators, batch_size=2000):
                demisto.createIndicators(b)
        else:
            return_results(commands[command](client, args))
    # Surface any failure to the platform instead of crashing the container.
    except Exception as e:
        return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')


# Demisto integrations may be executed as __main__ or under builtins.
if __name__ in ('__main__', '__builtin__', 'builtins'):
    main()
| true
| true
|
f7078ca12c2ce382a8408308ee089ed0282cb5c2
| 2,632
|
py
|
Python
|
corner/input/plot_field.py
|
bderembl/mitgcm_configs
|
8aa0343fc56e9da831e7a8b857838c4f4a76aa9a
|
[
"MIT"
] | 1
|
2020-01-13T05:18:38.000Z
|
2020-01-13T05:18:38.000Z
|
corner/input/plot_field.py
|
bderembl/mitgcm_configs
|
8aa0343fc56e9da831e7a8b857838c4f4a76aa9a
|
[
"MIT"
] | null | null | null |
corner/input/plot_field.py
|
bderembl/mitgcm_configs
|
8aa0343fc56e9da831e7a8b857838c4f4a76aa9a
|
[
"MIT"
] | 5
|
2018-04-10T15:18:39.000Z
|
2020-12-01T02:05:37.000Z
|
#!/usr/bin/env python
"""Plot relative-vorticity snapshots (or movie frames) from MITgcm output."""

import numpy as np
import matplotlib.pyplot as plt
import scipy.io.netcdf as netcdf  # NOTE(review): removed in recent SciPy; consider netCDF4 — confirm target env

plt.ion()

flag_mov = 0   # -1: single snapshot, 0: 2x2 panel figure (default), 1: movie frames
flag_traj = 0  # 1: overlay the vorticity-minimum (vortex core) trajectory

dir0 = '../run/'

file1 = 'diags.0000000000.t001.nc'
file2 = 'grid.t001.nc'

f1 = netcdf.netcdf_file(dir0 + file1)
f2 = netcdf.netcdf_file(dir0 + file2)

# Grid: cell centers (X, Y), cell faces (Xp1, Yp1); T is the time axis.
x = f2.variables['X'][:].copy()
y = f2.variables['Y'][:].copy()
xp1 = f2.variables['Xp1'][:].copy()
yp1 = f2.variables['Yp1'][:].copy()
T = f1.variables['T'][:].copy()

si_x = len(x)
si_y = len(y)
si_t = len(T)

h_mit = f2.variables['Depth'][:,:].copy()

# Symmetric contour levels around zero, based on the first snapshot.
vort = f1.variables['momVort3'][0,:,:].copy()
vmin = np.min(vort)
vmax = -vmin
vcont = np.linspace(vmin,vmax,20)

xunit = 1000.0  # 1:m -- 1000:km

posxy = np.zeros((2,si_t),dtype='int')
if flag_traj == 1:
  # Locate the vorticity minimum (vortex core) at every time step.
  for nt in range(0,si_t):
    vort = f1.variables['momVort3'][nt,:,:].copy()
    posxy[0,nt],posxy[1,nt] = np.unravel_index(np.argmin(vort),vort.shape)

plt.figure()
if flag_mov == -1:
  nt = 0
  mytime = [49]
  vort = f1.variables['momVort3'][mytime[nt],:,:].copy()
  # FIX: floor division — si_x/2 is a float on Python 3 and not a valid slice index.
  plt.contour(xp1[:si_x//2]/xunit,yp1/xunit,vort[:,:si_x//2],vcont,colors='k')
  plt.title('Day ' + str(mytime[nt]+1))
  plt.xlabel('x (km)')
  plt.ylabel('y (km)')
  myci = "CI: {:.1e}".format(vcont[1]-vcont[0])
  plt.text(x[120]/xunit,y[5]/xunit,myci)
  if flag_traj:
    # Solid line up to the plotted day, dashed for the remaining trajectory.
    plt.plot(xp1[posxy[1,:mytime[nt]]]/xunit,yp1[posxy[0,:mytime[nt]]]/xunit,'b')
    plt.plot(xp1[posxy[1,mytime[nt]:]]/xunit,yp1[posxy[0,mytime[nt]:]]/xunit,'b--')

elif flag_mov == 0:
  mytime = [0,9,19,29]
  for nt in range(0,len(mytime)):
    plt.subplot(2,2,nt+1, aspect='equal')
    vort = f1.variables['momVort3'][mytime[nt],:,:].copy()
    plt.contour(xp1/xunit,yp1/xunit,vort.squeeze(),vcont,colors='k')
    plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')  # shade the land/topography mask
    plt.title('Day ' + str(mytime[nt]+1))
    if nt == 2 or nt == 3:
      plt.xlabel('x (km)')
    if nt == 0 or nt == 2:
      plt.ylabel('y (km)')
  myci = "CI: {:.1e}".format(vcont[1]-vcont[0])
  plt.text(x[-170]/xunit,y[5]/xunit,myci)
  plt.savefig('corner_10mit.eps')

elif flag_mov == 1:
  # Recompute the contour range over the whole record for a stable colormap.
  vort = f1.variables['momVort3'][:,:,:].copy()
  vmin = np.min(vort)
  vmax = -vmin
  vcont = np.linspace(vmin,vmax,20)
  for nt in range(0,si_t):
    vort = f1.variables['momVort3'][nt,:,:].copy()
    vort = vort.squeeze()
    # Pin two corner cells so every frame spans the same color range.
    vort[0,0] = vmin
    vort[0,1] = vmax
    plt.contourf(xp1/xunit,yp1/xunit,vort,vcont,cmap = plt.cm.bwr)
    plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')
    ext = '0' if nt <= 9 else ''  # zero-pad single-digit frame numbers
    plt.savefig('movie/ewall_'+ ext + str(nt) + 'mit.png')
    plt.clf()

f1.close()
f2.close()
| 23.711712
| 83
| 0.599164
|
# Plot relative-vorticity snapshots (or movie frames) from MITgcm output.
import numpy as np
import matplotlib.pyplot as plt
import scipy.io.netcdf as netcdf  # NOTE(review): removed in recent SciPy; consider netCDF4 — confirm target env

plt.ion()

flag_mov = 0   # -1: single snapshot, 0: 2x2 panel figure (default), 1: movie frames
flag_traj = 0  # 1: overlay the vorticity-minimum (vortex core) trajectory

dir0 = '../run/'

file1 = 'diags.0000000000.t001.nc'
file2 = 'grid.t001.nc'

f1 = netcdf.netcdf_file(dir0 + file1)
f2 = netcdf.netcdf_file(dir0 + file2)

# Grid: cell centers (X, Y), cell faces (Xp1, Yp1); T is the time axis.
x = f2.variables['X'][:].copy()
y = f2.variables['Y'][:].copy()
xp1 = f2.variables['Xp1'][:].copy()
yp1 = f2.variables['Yp1'][:].copy()
T = f1.variables['T'][:].copy()

si_x = len(x)
si_y = len(y)
si_t = len(T)

h_mit = f2.variables['Depth'][:,:].copy()

# Symmetric contour levels around zero, based on the first snapshot.
vort = f1.variables['momVort3'][0,:,:].copy()
vmin = np.min(vort)
vmax = -vmin
vcont = np.linspace(vmin,vmax,20)

xunit = 1000.0  # x/y are meters; divide by 1000 to plot kilometers

posxy = np.zeros((2,si_t),dtype='int')
if flag_traj == 1:
  # Locate the vorticity minimum (vortex core) at every time step.
  for nt in range(0,si_t):
    vort = f1.variables['momVort3'][nt,:,:].copy()
    posxy[0,nt],posxy[1,nt] = np.unravel_index(np.argmin(vort),vort.shape)

plt.figure()
if flag_mov == -1:
  nt = 0
  mytime = [49]
  vort = f1.variables['momVort3'][mytime[nt],:,:].copy()
  # FIX: floor division — si_x/2 is a float on Python 3 and not a valid slice index.
  plt.contour(xp1[:si_x//2]/xunit,yp1/xunit,vort[:,:si_x//2],vcont,colors='k')
  plt.title('Day ' + str(mytime[nt]+1))
  plt.xlabel('x (km)')
  plt.ylabel('y (km)')
  myci = "CI: {:.1e}".format(vcont[1]-vcont[0])
  plt.text(x[120]/xunit,y[5]/xunit,myci)
  if flag_traj:
    # Solid line up to the plotted day, dashed for the remaining trajectory.
    plt.plot(xp1[posxy[1,:mytime[nt]]]/xunit,yp1[posxy[0,:mytime[nt]]]/xunit,'b')
    plt.plot(xp1[posxy[1,mytime[nt]:]]/xunit,yp1[posxy[0,mytime[nt]:]]/xunit,'b--')

elif flag_mov == 0:
  mytime = [0,9,19,29]
  for nt in range(0,len(mytime)):
    plt.subplot(2,2,nt+1, aspect='equal')
    vort = f1.variables['momVort3'][mytime[nt],:,:].copy()
    plt.contour(xp1/xunit,yp1/xunit,vort.squeeze(),vcont,colors='k')
    plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')  # shade the land/topography mask
    plt.title('Day ' + str(mytime[nt]+1))
    if nt == 2 or nt == 3:
      plt.xlabel('x (km)')
    if nt == 0 or nt == 2:
      plt.ylabel('y (km)')
  myci = "CI: {:.1e}".format(vcont[1]-vcont[0])
  plt.text(x[-170]/xunit,y[5]/xunit,myci)
  plt.savefig('corner_10mit.eps')

elif flag_mov == 1:
  # Recompute the contour range over the whole record for a stable colormap.
  vort = f1.variables['momVort3'][:,:,:].copy()
  vmin = np.min(vort)
  vmax = -vmin
  vcont = np.linspace(vmin,vmax,20)
  for nt in range(0,si_t):
    vort = f1.variables['momVort3'][nt,:,:].copy()
    vort = vort.squeeze()
    # Pin two corner cells so every frame spans the same color range.
    vort[0,0] = vmin
    vort[0,1] = vmax
    plt.contourf(xp1/xunit,yp1/xunit,vort,vcont,cmap = plt.cm.bwr)
    plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')
    ext = '0' if nt <= 9 else ''  # zero-pad single-digit frame numbers
    plt.savefig('movie/ewall_'+ ext + str(nt) + 'mit.png')
    plt.clf()

f1.close()
f2.close()
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.