content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import re
from django.core.exceptions import ValidationError
| [
11748,
302,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
3254,
24765,
12331,
628,
198
] | 3.705882 | 17 |
from apiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials

# OAuth scope granting read-only access to the Google Analytics reporting API.
SCOPES = ['https://www.googleapis.com/auth/analytics.readonly']
| [
6738,
2471,
291,
75,
1153,
13,
67,
40821,
1330,
1382,
198,
6738,
267,
18439,
17,
16366,
13,
15271,
62,
23317,
1330,
4809,
30116,
34,
445,
14817,
198,
198,
6173,
3185,
1546,
796,
37250,
5450,
1378,
2503,
13,
13297,
499,
271,
13,
785,
... | 3.269231 | 52 |
import cv2
import os

# Frontal-face detector built from OpenCV's bundled Haar cascade data files.
face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')

# Capture from the default webcam (device 0).
cam = cv2.VideoCapture(0)

# The entered name doubles as the output folder name and the filename prefix.
# (Shadows the `id` builtin; kept so saved filenames stay unchanged.)
id = input('enter Name: ')
path = './' + str(id) + '/'
# BUG FIX: the original called an undefined createFolder(); create the
# destination directory with the standard library instead.
os.makedirs(path, exist_ok=True)

FaceCount = 1
while True:
    ret, img = cam.read()
    if not ret:
        # Camera read failed; stop instead of crashing inside cvtColor.
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors=5)
    for (x, y, w, h) in faces:
        FaceCount = FaceCount + 1
        print(FaceCount)
        # Save the grayscale face crop as <name>.<count>.jpg
        cv2.imwrite(os.path.join(path, str(id) + "." + str(FaceCount) + ".jpg"),
                    gray[y:y + h, x:x + w])
        # Draw the detection box on the colour frame for display.
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 10)
        cv2.imshow("Face", img)
        # BUG FIX: imshow never renders without pumping the GUI event loop.
        cv2.waitKey(1)
    # Stop once enough face samples have been collected.
    if FaceCount > 500:
        break

cam.release()
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
198,
11748,
28686,
198,
198,
2550,
62,
66,
28966,
796,
269,
85,
17,
13,
34,
28966,
9487,
7483,
7,
33967,
17,
13,
7890,
13,
3099,
5605,
3372,
2367,
1343,
705,
3099,
5605,
28966,
62,
8534,
1604,
558,
62,
12286,
1... | 1.959641 | 446 |
"""Main entrypoint for DRS processing."""
import os
import argparse
from datetime import datetime
from uuid import uuid4
import xarray as xr
import axiom.utilities as au
import axiom.drs.utilities as adu
from axiom.drs.domain import Domain
import axiom_schemas as axs
import json
import sys
from distributed import Client, LocalCluster
from axiom.config import load_config
from axiom import __version__ as axiom_version
from axiom.exceptions import NoFilesToProcessException, DRSContextInterpolationException
import shutil
from dask.distributed import progress, wait
def consume(json_filepath):
    """Consume a JSON payload (for message passing).

    The payload is a dict of keyword arguments forwarded to process_multi.
    A sibling ``.consumed`` marker file records completion, and a lock file
    guards against concurrent consumption by another process.

    Args:
        json_filepath (str): Path to the JSON file.
    """
    logger = au.get_logger(__name__)

    # Check if the file has already been consumed
    consumed_filepath = json_filepath.replace('.json', '.consumed')
    if os.path.isfile(consumed_filepath):
        logger.info(
            f'{json_filepath} has already been consumed and needs to be cleaned up by another process. Terminating.')
        sys.exit()

    # Check if the file is locked
    if au.is_locked(json_filepath):
        logger.info(
            f'{json_filepath} is locked, possibly by another process. Terminating.')
        sys.exit()

    # Lock the file
    au.lock(json_filepath)

    # Convert to dict.
    # BUG FIX: the original leaked the file handle (open().read() with no
    # close); use a context manager and json.load.
    with open(json_filepath, 'r') as fh:
        payload = json.load(fh)

    # Process
    process_multi(**payload)

    # Mark consumed by touching another file.
    au.touch(consumed_filepath)

    # Unlock
    au.unlock(json_filepath)
def process(
    input_files,
    output_directory,
    variable,
    project,
    model,
    domain,
    start_year, end_year,
    output_frequency,
    level=None,
    input_resolution=None,
    overwrite=True,
    preprocessor=None,
    postprocessor=None,
    **kwargs
):
    """Method to process a single variable/domain/resolution combination.

    Args:
        input_files (str or list): Globbable string or list of filepaths.
        output_directory (str) : Path from which to build DRS structure.
        variable (str): Variable to process.
        level (numeric or list) : Vertical levels to process.
        project (str): Project metadata to apply (loaded from user config).
        model (str): Model metadata to apply (loaded from user config).
        start_year (int): Start year.
        end_year (int): End year.
        output_frequency (str): Output frequency to process.
        input_resolution (float, optional): Input resolution in km. Leave blank to auto-detect from filepaths.
        overwrite (bool): Overwrite the data at the destination. Defaults to True.
        preprocessor (str): Data preprocessor to activate on input data. Defaults to None.
        postprocessor (str): Data postprocessor to activate before writing data. Defaults to None.
        **kwargs: Additional keyword arguments used in metadata interpolation.
    """
    # Start the clock
    timer = au.Timer()
    timer.start()

    # Capture what was passed into this method for interpolation context later.
    local_args = locals()

    # Load the logger and configuration
    logger = au.get_logger(__name__)
    config = load_config('drs')

    # Dump the job id if available
    if 'PBS_JOBID' in os.environ.keys():
        jobid = os.getenv('PBS_JOBID')
        logger.info(f'My PBS_JOBID is {jobid}')

    # Get a list of the potential filepaths
    input_files = au.auto_glob(input_files)
    num_files = len(input_files)
    logger.debug(f'{num_files} to consider before filtering.')

    # Filter by those that actually have the variable in the filename.
    if config.filename_filtering['variable']:
        input_files = [
            f for f in input_files if f'{variable}_' in os.path.basename(f)]
        num_files = len(input_files)
        logger.debug(
            f'{num_files} to consider after filename variable filtering.')

    # Filter by those that actually have the year in the filename.
    if config.filename_filtering['year']:
        input_files = [
            f for f in input_files if f'{start_year}' in os.path.basename(f)]
        num_files = len(input_files)
        logger.debug(f'{num_files} to consider after filename year filtering.')

    # Is there anything left to process?
    if len(input_files) == 0:
        raise NoFilesToProcessException()

    # Detect the input resolution if it is not supplied
    if input_resolution is None:
        logger.debug('No input resolution supplied, auto-detecting')
        input_resolution = adu.detect_resolution(input_files)
        logger.debug(f'Input resolution detected as {input_resolution} km')

    # Load project config
    logger.info(f'Loading project config ({project})')
    project = load_config('projects')[project]

    # Load model config
    logger.info(f'Loading model config ({model})')
    model = load_config('models')[model]

    logger.debug(
        'Loading files into distributed memory, this may take some time.')

    # TODO: Remove!!!! This is just to make CCAM work in the short term
    if preprocessor is None and 'ccam' in input_files[0]:
        # logger.warning: `warn` is the deprecated alias.
        logger.warning('CCAM preprocessor override used')
        preprocessor = 'ccam'
        postprocessor = 'ccam'

    # Load a preprocessor, if one exists.
    preprocessor = adu.load_preprocessor(preprocessor)

    # Account for fixed variables, if defined
    if 'variables_fixed' in project.keys() and variable in project['variables_fixed']:
        # Load just the first file
        ds = xr.open_dataset(input_files[0], engine='h5netcdf')
        # BUG FIX: the loaded callable is `preprocessor`; the original
        # referenced an undefined name `preprocess` (NameError).
        ds = preprocessor(ds, variable=variable)
    else:
        ds = xr.open_mfdataset(input_files, chunks=dict(
            time=100), preprocess=preprocessor, engine='h5netcdf')

    # Persist now, get it on the cluster
    ds = ds.persist()

    # Determine time-invariance
    time_invariant = 'time' not in list(ds.coords.keys())

    # Assemble the context object (order dependent!)
    logger.debug('Assembling interpolation context.')
    context = config.metadata_defaults.copy()

    # Add metadata from the input data
    context.update(ds.attrs)

    # Add user-supplied metadata
    context.update(kwargs)

    # Add project and model metadata
    context.update(project)
    context.update(model)

    # Add additional args
    context.update(local_args)
    context['res_km'] = input_resolution

    # Select the variable from the dataset
    if level:
        # Select each of the levels requested into a new variable.
        for _level in au.pluralise(level):
            ds[f'{variable}{_level}'] = ds[variable].sel(lev=_level, drop=True)
        # Drop the original variable
        ds = ds.drop(variable)

    # Sort the dimensions (fixes domain subsetting)
    logger.debug('Sorting data')
    sort_coords = list()
    for coord in 'time,lev,lat,lon'.split(','):
        if coord in ds.coords.keys():
            sort_coords.append(coord)
    ds = ds.sortby(sort_coords)

    logger.debug('Applying metadata schema')
    schema = axs.load_schema(config['schema'])
    ds = au.apply_schema(ds, schema)

    logger.info(f'Parsing domain {domain}')
    if isinstance(domain, str):
        # Registered domain
        if adu.is_registered_domain(domain):
            domain = adu.get_domain(domain)
        # Attempt to parse
        else:
            domain = Domain.from_directive(domain)
    # We will only otherwise accept a domain object.
    elif not isinstance(domain, Domain):
        raise Exception(f'Unable to parse domain {domain}.')

    logger.debug('Domain: ' + domain.to_directive())

    # Subset the geographical domain
    logger.debug('Subsetting geographical domain.')
    ds = domain.subset_xarray(ds, drop=True)

    # TODO: Need to find a less manual way to do this.
    for year in generate_years_list(start_year, end_year):

        logger.info(f'Processing {year}')

        # Subset the data into just this year
        if not time_invariant:
            _ds = ds.where(ds['time.year'] == year, drop=True)
        else:
            _ds = ds.copy()

        # Historical cutoff is defined in $HOME/.axiom/drs.json
        context['experiment'] = 'historical' if year < config.historical_cutoff else context['rcp']

        native_frequency = adu.detect_input_frequency(_ds)
        logger.info(f'Native frequency of data detected as {native_frequency}')

        # Automatically detect the output_frequency from the input data, this will not require resampling
        if output_frequency == 'from_input' or output_frequency == native_frequency:
            output_frequency = adu.detect_input_frequency(_ds)
            logger.info(
                f'output_frequency detected from inputs ({output_frequency})')
            logger.info('No need to resample.')
            # Map the frequency to something DRS-compliant
            context['frequency_mapping'] = config['frequency_mapping'][output_frequency]

        # Fixed variables, just change the frequency_mapping
        elif adu.is_time_invariant(_ds):
            output_frequency = 'fx'
            logger.info(
                'Data is time-invariant (fixed variable), overriding frequency_mapping to fx')
            context['frequency_mapping'] = 'fx'

        # Actually perform the resample
        else:
            logger.debug(f'Resampling to {output_frequency} mean.')
            context['frequency_mapping'] = config['frequency_mapping'][output_frequency]
            _ds = _ds.resample(time=output_frequency).mean()

        # Start persisting the computation now
        _ds = _ds.persist()

        # TODO: Add cell methods?

        # Monthly data should have the days truncated
        context['start_date'] = f'{year}0101' if output_frequency[-1] != 'M' else f'{year}01'
        context['end_date'] = f'{year}1231' if output_frequency[-1] != 'M' else f'{year}12'

        # Tracking info
        context['created'] = datetime.utcnow()
        context['uuid'] = uuid4()

        # Interpolate context
        logger.info('Interpolating context.')
        context = adu.interpolate_context(context)

        # Assemble the global meta, add axiom details
        logger.debug('Assembling global metadata.')
        global_attrs = dict(
            axiom_version=axiom_version,
            axiom_schemas_version=axs.__version__,
            axiom_schema=config.schema
        )

        for key, value in config.metadata_defaults.items():
            global_attrs[key] = str(value) % context

        # Strip and reapply metadata
        logger.debug('Applying metadata')
        _ds.attrs = global_attrs

        # Add in the variable to the context
        context['variable'] = variable

        # Reapply the schema
        logger.info('Reapplying schema')
        _ds = au.apply_schema(_ds, schema)

        # Copy coordinate attributes straight off the inputs
        if config.copy_coordinates_from_inputs:
            for coord in list(_ds.coords.keys()):
                _ds[coord].attrs = ds[coord].attrs

        # Get the full output filepath with string interpolation
        logger.debug('Working out output paths')
        drs_path = adu.get_template(config, 'drs_path') % context
        output_filename = adu.get_template(config, 'filename') % context
        output_filepath = os.path.join(
            output_directory, drs_path, output_filename)
        logger.debug(f'output_filepath = {output_filepath}')

        # Skip if already there and overwrite is not set, otherwise continue
        if os.path.isfile(output_filepath) and not overwrite:
            logger.debug(
                f'{output_filepath} exists and overwrite is set to False, skipping.')
            continue

        # Check for uninterpolated keys in the output path, which should fail at this point.
        uninterpolated_keys = adu.get_uninterpolated_placeholders(
            output_filepath)

        if len(uninterpolated_keys) > 0:
            logger.error('Uninterpolated keys remain in the output filepath.')
            logger.error(f'output_filepath = {output_filepath}')
            raise DRSContextInterpolationException(uninterpolated_keys)

        # Create the output directory
        output_dir = os.path.dirname(output_filepath)
        logger.debug(f'Creating {output_dir}')
        os.makedirs(output_dir, exist_ok=True)

        # Assemble the encoding dictionaries (to ensure time units work!)
        logger.debug('Applying encoding')
        encoding = dict()
        for coord in list(_ds.coords.keys()):
            if coord not in config.encoding.keys():
                logger.warning(
                    f'Coordinate {coord} is not specified in drs.json file, omitting encoding.')
                continue
            encoding[coord] = config.encoding[coord]

        # Apply a blanket variable encoding.
        encoding[variable] = config.encoding['variables']

        # Postprocess data if required.
        # BUG FIX: the original called an undefined name `postprocess`
        # (NameError); the loaded callable is the postprocessor. Bind it to
        # a fresh local so later iterations don't re-load an already-loaded
        # callable.
        _postprocessor = adu.load_postprocessor(postprocessor)
        _ds = _postprocessor(_ds)

        # Write to temp file in memory, then move (performance)
        # if config.
        # TODO: To be implemented later
        # tmp_filepath = os.path.join(
        #     os.getenv('PBS_JOBFS'),
        #     os.path.basename(output_filepath)
        # )

        # logger.info('Rechunking data for speed')
        # _ds[variable] = _ds[variable].chunk(dict(time=100))

        logger.info('Waiting for computations to finish.')
        progress(_ds)

        logger.debug(f'Writing {output_filepath}')
        _ds.to_netcdf(
            output_filepath,
            format='NETCDF4',
            encoding=encoding,
            unlimited_dims=['time']
        )

    elapsed_time = timer.stop()
    logger.info(f'DRS processing task took {elapsed_time} seconds.')
def generate_years_list(start_year, end_year):
    """Generate the years (decade starts) to process.

    Args:
        start_year (int): Start year.
        end_year (int): End year (inclusive).

    Returns:
        iterator: Years to process, stepping one decade at a time.
    """
    decade = 10
    # end_year is inclusive, hence the +1 on the range bound.
    return range(start_year, end_year + 1, decade)
def load_variable_config(project_config):
    """Extract the variable configuration out of the project configuration.

    Args:
        project_config (dict-like): Project configuration.

    Returns:
        dict: Variable dictionary with name: [levels] (single level will have a list containing None.)
    """
    # 2D variables carry no vertical levels, so each maps to [None];
    # 3D variables keep their configured level lists.
    variables = {name: [None] for name in project_config['variables_2d']}
    variables.update(project_config['variables_3d'])
    return variables
| [
37811,
13383,
5726,
4122,
329,
360,
6998,
7587,
526,
15931,
198,
11748,
28686,
198,
11748,
1822,
29572,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
334,
27112,
1330,
334,
27112,
19,
198,
11748,
2124,
18747,
355,
2124,
81,
198,
1... | 2.571825 | 5,764 |
from argparse import ArgumentParser
from typing import Any
from django.core.management.base import CommandError
from zerver.actions.users import do_delete_user
from zerver.lib.management import ZulipBaseCommand
from zerver.lib.users import get_active_bots_owned_by_user
| [
6738,
1822,
29572,
1330,
45751,
46677,
198,
6738,
19720,
1330,
4377,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
9455,
12331,
198,
198,
6738,
1976,
18497,
13,
4658,
13,
18417,
1330,
466,
62,
33678,
62,
7220,
198,
... | 3.64 | 75 |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/containerregistry/v1/repository_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from yandex.cloud.api import operation_pb2 as yandex_dot_cloud_dot_api_dot_operation__pb2
from yandex.cloud.access import access_pb2 as yandex_dot_cloud_dot_access_dot_access__pb2
from yandex.cloud.containerregistry.v1 import repository_pb2 as yandex_dot_cloud_dot_containerregistry_dot_v1_dot_repository__pb2
from yandex.cloud.operation import operation_pb2 as yandex_dot_cloud_dot_operation_dot_operation__pb2
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/containerregistry/v1/repository_service.proto',
package='yandex.cloud.containerregistry.v1',
syntax='proto3',
serialized_options=_b('\n%yandex.cloud.api.containerregistry.v1ZWgithub.com/yandex-cloud/go-genproto/yandex/cloud/containerregistry/v1;containerregistry'),
serialized_pb=_b('\n:yandex/cloud/containerregistry/v1/repository_service.proto\x12!yandex.cloud.containerregistry.v1\x1a yandex/cloud/api/operation.proto\x1a yandex/cloud/access/access.proto\x1a\x32yandex/cloud/containerregistry/v1/repository.proto\x1a&yandex/cloud/operation/operation.proto\x1a\x1dyandex/cloud/validation.proto\x1a\x1cgoogle/api/annotations.proto\";\n\x14GetRepositoryRequest\x12#\n\rrepository_id\x18\x01 \x01(\tB\x0c\xe8\xc7\x31\x01\x8a\xc8\x31\x04<=50\"}\n\x1aGetRepositoryByNameRequest\x12_\n\x0frepository_name\x18\x01 \x01(\tBF\xe8\xc7\x31\x01\xf2\xc7\x31>[a-z0-9]+(?:[._-][a-z0-9]+)*(/([a-z0-9]+(?:[._-][a-z0-9]+)*))*\"\xcc\x01\n\x17ListRepositoriesRequest\x12\x1d\n\x0bregistry_id\x18\x01 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x1b\n\tfolder_id\x18\x06 \x01(\tB\x08\x8a\xc8\x31\x04<=50\x12\x1d\n\tpage_size\x18\x02 \x01(\x03\x42\n\xfa\xc7\x31\x06<=1000\x12\x1d\n\npage_token\x18\x03 \x01(\tB\t\x8a\xc8\x31\x05<=100\x12\x1a\n\x06\x66ilter\x18\x04 \x01(\tB\n\x8a\xc8\x31\x06<=1000\x12\x1b\n\x08order_by\x18\x05 \x01(\tB\t\x8a\xc8\x31\x05<=100\"x\n\x18ListRepositoriesResponse\x12\x43\n\x0crepositories\x18\x01 \x03(\x0b\x32-.yandex.cloud.containerregistry.v1.Repository\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\xa8\t\n\x11RepositoryService\x12\xaa\x01\n\x03Get\x12\x37.yandex.cloud.containerregistry.v1.GetRepositoryRequest\x1a-.yandex.cloud.containerregistry.v1.Repository\";\x82\xd3\xe4\x93\x02\x35\x12\x33/container-registry/v1/repositories/{repository_id}\x12y\n\tGetByName\x12=.yandex.cloud.containerregistry.v1.GetRepositoryByNameRequest\x1a-.yandex.cloud.containerregistry.v1.Repository\x12\xac\x01\n\x04List\x12:.yandex.cloud.containerregistry.v1.ListRepositoriesRequest\x1a;.yandex.cloud.containerregistry.v1.ListRepositoriesResponse\"+\x82\xd3\xe4\x93\x02%\x12#/container-registry/v1/repositories\x12\xc3\x01\n\x12ListAccessBindings\x12..yandex.cloud.access.ListAccessBindingsRequest\x1a/.yandex.cloud.access.ListAccessBindingsResponse\"L\x82\xd3\xe4\x93\x02\x46\x12\x44/container-registry/v1/repositories/{resource_id}:listAccessBindings\x12\xf3\x01\n\x11SetAccessBindings\x12-.yandex.cloud.access.SetAccessBindingsRequest\x1a!.yandex.cloud.operation.Operation\"\x8b\x01\x82\xd3\xe4\x93\x02H\"C/container-registry/v1/repositories/{resource_id}:setAccessBindings:\x01*\xb2\xd2*9\n access.SetAccessBindingsMetadata\x12\x15google.protobuf.Empty\x12\xff\x01\n\x14UpdateAccessBindings\x12\x30.yandex.cloud.access.UpdateAccessBindingsRequest\x1a!.yandex.cloud.operation.Operation\"\x91\x01\x82\xd3\xe4\x93\x02K\"F/container-registry/v1/repositories/{resource_id}:updateAccessBindings:\x01*\xb2\xd2*<\n#access.UpdateAccessBindingsMetadata\x12\x15google.protobuf.EmptyB\x80\x01\n%yandex.cloud.api.containerregistry.v1ZWgithub.com/yandex-cloud/go-genproto/yandex/cloud/containerregistry/v1;containerregistryb\x06proto3')
,
dependencies=[yandex_dot_cloud_dot_api_dot_operation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_access_dot_access__pb2.DESCRIPTOR,yandex_dot_cloud_dot_containerregistry_dot_v1_dot_repository__pb2.DESCRIPTOR,yandex_dot_cloud_dot_operation_dot_operation__pb2.DESCRIPTOR,yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_GETREPOSITORYREQUEST = _descriptor.Descriptor(
name='GetRepositoryRequest',
full_name='yandex.cloud.containerregistry.v1.GetRepositoryRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='repository_id', full_name='yandex.cloud.containerregistry.v1.GetRepositoryRequest.repository_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001\212\3101\004<=50'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=318,
serialized_end=377,
)
_GETREPOSITORYBYNAMEREQUEST = _descriptor.Descriptor(
name='GetRepositoryByNameRequest',
full_name='yandex.cloud.containerregistry.v1.GetRepositoryByNameRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='repository_name', full_name='yandex.cloud.containerregistry.v1.GetRepositoryByNameRequest.repository_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\350\3071\001\362\3071>[a-z0-9]+(?:[._-][a-z0-9]+)*(/([a-z0-9]+(?:[._-][a-z0-9]+)*))*'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=379,
serialized_end=504,
)
_LISTREPOSITORIESREQUEST = _descriptor.Descriptor(
name='ListRepositoriesRequest',
full_name='yandex.cloud.containerregistry.v1.ListRepositoriesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='registry_id', full_name='yandex.cloud.containerregistry.v1.ListRepositoriesRequest.registry_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='folder_id', full_name='yandex.cloud.containerregistry.v1.ListRepositoriesRequest.folder_id', index=1,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\004<=50'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='yandex.cloud.containerregistry.v1.ListRepositoriesRequest.page_size', index=2,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\372\3071\006<=1000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_token', full_name='yandex.cloud.containerregistry.v1.ListRepositoriesRequest.page_token', index=3,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\005<=100'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter', full_name='yandex.cloud.containerregistry.v1.ListRepositoriesRequest.filter', index=4,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\006<=1000'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='order_by', full_name='yandex.cloud.containerregistry.v1.ListRepositoriesRequest.order_by', index=5,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\212\3101\005<=100'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=507,
serialized_end=711,
)
_LISTREPOSITORIESRESPONSE = _descriptor.Descriptor(
name='ListRepositoriesResponse',
full_name='yandex.cloud.containerregistry.v1.ListRepositoriesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='repositories', full_name='yandex.cloud.containerregistry.v1.ListRepositoriesResponse.repositories', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='yandex.cloud.containerregistry.v1.ListRepositoriesResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=713,
serialized_end=833,
)
_LISTREPOSITORIESRESPONSE.fields_by_name['repositories'].message_type = yandex_dot_cloud_dot_containerregistry_dot_v1_dot_repository__pb2._REPOSITORY
DESCRIPTOR.message_types_by_name['GetRepositoryRequest'] = _GETREPOSITORYREQUEST
DESCRIPTOR.message_types_by_name['GetRepositoryByNameRequest'] = _GETREPOSITORYBYNAMEREQUEST
DESCRIPTOR.message_types_by_name['ListRepositoriesRequest'] = _LISTREPOSITORIESREQUEST
DESCRIPTOR.message_types_by_name['ListRepositoriesResponse'] = _LISTREPOSITORIESRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetRepositoryRequest = _reflection.GeneratedProtocolMessageType('GetRepositoryRequest', (_message.Message,), {
'DESCRIPTOR' : _GETREPOSITORYREQUEST,
'__module__' : 'yandex.cloud.containerregistry.v1.repository_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.containerregistry.v1.GetRepositoryRequest)
})
_sym_db.RegisterMessage(GetRepositoryRequest)
GetRepositoryByNameRequest = _reflection.GeneratedProtocolMessageType('GetRepositoryByNameRequest', (_message.Message,), {
'DESCRIPTOR' : _GETREPOSITORYBYNAMEREQUEST,
'__module__' : 'yandex.cloud.containerregistry.v1.repository_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.containerregistry.v1.GetRepositoryByNameRequest)
})
_sym_db.RegisterMessage(GetRepositoryByNameRequest)
ListRepositoriesRequest = _reflection.GeneratedProtocolMessageType('ListRepositoriesRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTREPOSITORIESREQUEST,
'__module__' : 'yandex.cloud.containerregistry.v1.repository_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.containerregistry.v1.ListRepositoriesRequest)
})
_sym_db.RegisterMessage(ListRepositoriesRequest)
ListRepositoriesResponse = _reflection.GeneratedProtocolMessageType('ListRepositoriesResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTREPOSITORIESRESPONSE,
'__module__' : 'yandex.cloud.containerregistry.v1.repository_service_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.containerregistry.v1.ListRepositoriesResponse)
})
_sym_db.RegisterMessage(ListRepositoriesResponse)
DESCRIPTOR._options = None
_GETREPOSITORYREQUEST.fields_by_name['repository_id']._options = None
_GETREPOSITORYBYNAMEREQUEST.fields_by_name['repository_name']._options = None
_LISTREPOSITORIESREQUEST.fields_by_name['registry_id']._options = None
_LISTREPOSITORIESREQUEST.fields_by_name['folder_id']._options = None
_LISTREPOSITORIESREQUEST.fields_by_name['page_size']._options = None
_LISTREPOSITORIESREQUEST.fields_by_name['page_token']._options = None
_LISTREPOSITORIESREQUEST.fields_by_name['filter']._options = None
_LISTREPOSITORIESREQUEST.fields_by_name['order_by']._options = None
_REPOSITORYSERVICE = _descriptor.ServiceDescriptor(
name='RepositoryService',
full_name='yandex.cloud.containerregistry.v1.RepositoryService',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=836,
serialized_end=2028,
methods=[
_descriptor.MethodDescriptor(
name='Get',
full_name='yandex.cloud.containerregistry.v1.RepositoryService.Get',
index=0,
containing_service=None,
input_type=_GETREPOSITORYREQUEST,
output_type=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_repository__pb2._REPOSITORY,
serialized_options=_b('\202\323\344\223\0025\0223/container-registry/v1/repositories/{repository_id}'),
),
_descriptor.MethodDescriptor(
name='GetByName',
full_name='yandex.cloud.containerregistry.v1.RepositoryService.GetByName',
index=1,
containing_service=None,
input_type=_GETREPOSITORYBYNAMEREQUEST,
output_type=yandex_dot_cloud_dot_containerregistry_dot_v1_dot_repository__pb2._REPOSITORY,
serialized_options=None,
),
_descriptor.MethodDescriptor(
name='List',
full_name='yandex.cloud.containerregistry.v1.RepositoryService.List',
index=2,
containing_service=None,
input_type=_LISTREPOSITORIESREQUEST,
output_type=_LISTREPOSITORIESRESPONSE,
serialized_options=_b('\202\323\344\223\002%\022#/container-registry/v1/repositories'),
),
_descriptor.MethodDescriptor(
name='ListAccessBindings',
full_name='yandex.cloud.containerregistry.v1.RepositoryService.ListAccessBindings',
index=3,
containing_service=None,
input_type=yandex_dot_cloud_dot_access_dot_access__pb2._LISTACCESSBINDINGSREQUEST,
output_type=yandex_dot_cloud_dot_access_dot_access__pb2._LISTACCESSBINDINGSRESPONSE,
serialized_options=_b('\202\323\344\223\002F\022D/container-registry/v1/repositories/{resource_id}:listAccessBindings'),
),
_descriptor.MethodDescriptor(
name='SetAccessBindings',
full_name='yandex.cloud.containerregistry.v1.RepositoryService.SetAccessBindings',
index=4,
containing_service=None,
input_type=yandex_dot_cloud_dot_access_dot_access__pb2._SETACCESSBINDINGSREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=_b('\202\323\344\223\002H\"C/container-registry/v1/repositories/{resource_id}:setAccessBindings:\001*\262\322*9\n access.SetAccessBindingsMetadata\022\025google.protobuf.Empty'),
),
_descriptor.MethodDescriptor(
name='UpdateAccessBindings',
full_name='yandex.cloud.containerregistry.v1.RepositoryService.UpdateAccessBindings',
index=5,
containing_service=None,
input_type=yandex_dot_cloud_dot_access_dot_access__pb2._UPDATEACCESSBINDINGSREQUEST,
output_type=yandex_dot_cloud_dot_operation_dot_operation__pb2._OPERATION,
serialized_options=_b('\202\323\344\223\002K\"F/container-registry/v1/repositories/{resource_id}:updateAccessBindings:\001*\262\322*<\n#access.UpdateAccessBindingsMetadata\022\025google.protobuf.Empty'),
),
])
_sym_db.RegisterServiceDescriptor(_REPOSITORYSERVICE)
DESCRIPTOR.services_by_name['RepositoryService'] = _REPOSITORYSERVICE
# @@protoc_insertion_point(module_scope)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
331,
392,
1069,
14,
17721,
14,
34924,
2301,
4592,
14,
85,
16,
14,
260,
... | 2.456487 | 6,837 |
# coding: utf8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import json
import os
import os.path as osp
import numpy as np
import PIL.Image
from gray2pseudo_color import get_color_map_list
from labelme2seg import shape2label
if __name__ == '__main__':
args = parse_args()
main(args)
| [
2,
19617,
25,
3384,
69,
23,
198,
2,
15069,
357,
66,
8,
13130,
350,
37382,
47,
37382,
46665,
13,
1439,
6923,
12224,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
... | 3.458955 | 268 |
import os
from os.path import join
from django import forms
from django.conf import settings
from farm_food_project.product.models import Product
| [
11748,
28686,
198,
6738,
28686,
13,
6978,
1330,
4654,
198,
198,
6738,
42625,
14208,
1330,
5107,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
5318,
62,
19425,
62,
16302,
13,
11167,
13,
27530,
1330,
8721,
628
] | 3.820513 | 39 |
"""Module describing the planemo ``profile_create`` command."""
from __future__ import print_function
import click
from planemo import options
from planemo.cli import command_function
from planemo.galaxy import profiles
@click.command('profile_create')
@options.profile_name_argument()
@options.profile_database_options()
@options.serve_engine_option()
@options.docker_config_options()
@options.galaxy_url_option()
@options.galaxy_user_key_option()
@options.galaxy_admin_key_option()
@command_function
def cli(ctx, profile_name, **kwds):
"""Create a profile."""
profiles.create_profile(ctx, profile_name, **kwds)
print("Profile [%s] created." % profile_name)
| [
37811,
26796,
12059,
262,
1410,
41903,
7559,
13317,
62,
17953,
15506,
3141,
526,
15931,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
11748,
3904,
198,
198,
6738,
1410,
41903,
1330,
3689,
198,
6738,
1410,
41903,
13,
44506,... | 3.199052 | 211 |
n=int(input());a=set(int(i) for i in input().split());x=int(input());b=[int(i) for i in input().split()];r=""
for i in range(x):
if b[i] in a:
r+="1\n"
else:
r+="0\n"
print(r,end="")
| [
77,
28,
600,
7,
15414,
35430,
64,
28,
2617,
7,
600,
7,
72,
8,
329,
1312,
287,
5128,
22446,
35312,
35430,
87,
28,
600,
7,
15414,
35430,
65,
41888,
600,
7,
72,
8,
329,
1312,
287,
5128,
22446,
35312,
3419,
11208,
81,
33151,
198,
16... | 1.916667 | 108 |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: name.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from . import common_pb2 as common__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='name.proto',
package='edg.name',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\nname.proto\x12\x08\x65\x64g.name\x1a\x0c\x63ommon.proto\"M\n\tNamespace\x12\x0f\n\x05\x62\x61sic\x18\x01 \x01(\tH\x00\x12\"\n\x04meta\x18\x7f \x01(\x0b\x32\x14.edg.common.MetadataB\x0b\n\tnamespace\"?\n\x0bLibraryName\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\"\n\x04meta\x18\x7f \x01(\x0b\x32\x14.edg.common.Metadatab\x06proto3')
,
dependencies=[common__pb2.DESCRIPTOR,])
_NAMESPACE = _descriptor.Descriptor(
name='Namespace',
full_name='edg.name.Namespace',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='basic', full_name='edg.name.Namespace.basic', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='meta', full_name='edg.name.Namespace.meta', index=1,
number=127, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='namespace', full_name='edg.name.Namespace.namespace',
index=0, containing_type=None, fields=[]),
],
serialized_start=38,
serialized_end=115,
)
_LIBRARYNAME = _descriptor.Descriptor(
name='LibraryName',
full_name='edg.name.LibraryName',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='edg.name.LibraryName.name', index=0,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='meta', full_name='edg.name.LibraryName.meta', index=1,
number=127, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=117,
serialized_end=180,
)
_NAMESPACE.fields_by_name['meta'].message_type = common__pb2._METADATA
_NAMESPACE.oneofs_by_name['namespace'].fields.append(
_NAMESPACE.fields_by_name['basic'])
_NAMESPACE.fields_by_name['basic'].containing_oneof = _NAMESPACE.oneofs_by_name['namespace']
_LIBRARYNAME.fields_by_name['meta'].message_type = common__pb2._METADATA
DESCRIPTOR.message_types_by_name['Namespace'] = _NAMESPACE
DESCRIPTOR.message_types_by_name['LibraryName'] = _LIBRARYNAME
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Namespace = _reflection.GeneratedProtocolMessageType('Namespace', (_message.Message,), dict(
DESCRIPTOR = _NAMESPACE,
__module__ = 'name_pb2'
# @@protoc_insertion_point(class_scope:edg.name.Namespace)
))
_sym_db.RegisterMessage(Namespace)
LibraryName = _reflection.GeneratedProtocolMessageType('LibraryName', (_message.Message,), dict(
DESCRIPTOR = _LIBRARYNAME,
__module__ = 'name_pb2'
# @@protoc_insertion_point(class_scope:edg.name.LibraryName)
))
_sym_db.RegisterMessage(LibraryName)
# @@protoc_insertion_point(module_scope)
| [
2,
2980,
515,
416,
262,
8435,
11876,
17050,
13,
220,
8410,
5626,
48483,
0,
198,
2,
2723,
25,
1438,
13,
1676,
1462,
198,
198,
11748,
25064,
198,
62,
65,
28,
17597,
13,
9641,
62,
10951,
58,
15,
60,
27,
18,
290,
357,
50033,
2124,
2... | 2.445235 | 1,826 |
from __future__ import print_function
import FWCore.ParameterSet.Config as cms
import sys
import os
# arguments
if (len(sys.argv)>1) :
key=str(sys.argv[2])
else :
key='Default'
print("Generating LUT files for GCT key %s" % (key))
if (not ("TNS_ADMIN" in os.environ.keys())):
print("Please set TNS_ADMIN using :")
print("export TNS_ADMIN=/nfshome0/popcondev/conddb")
# CMSSW config
process = cms.Process("GctLUTGen")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.MessageLogger.cout.enable = cms.untracked.bool(True)
process.MessageLogger.cout.threshold = cms.untracked.string('DEBUG')
process.MessageLogger.debugModules = cms.untracked.vstring('l1GctConfigDump')
# Generate dummy L1TriggerKeyList
process.load("CondTools.L1Trigger.L1TriggerKeyListDummy_cff")
# get
process.load("CondTools.L1Trigger.L1TriggerKeyDummy_cff")
process.L1TriggerKeyDummy.objectKeys = cms.VPSet()
process.L1TriggerKeyDummy.label = cms.string('SubsystemKeysOnly')
# xxxKey = csctfKey, dttfKey, rpcKey, gmtKey, rctKey, gctKey, gtKey, or tsp0Key
process.L1TriggerKeyDummy.gctKey = cms.string(key)
# Subclass of L1ObjectKeysOnlineProdBase.
process.load("L1TriggerConfig.GctConfigProducers.L1GctTSCObjectKeysOnline_cfi")
process.L1GctTSCObjectKeysOnline.subsystemLabel = cms.string('')
# Get configuration data from OMDS. This is the subclass of L1ConfigOnlineProdBase.
process.load("L1TriggerConfig.GctConfigProducers.L1GctJetFinderParamsOnline_cfi")
process.load("L1TriggerConfig.L1ScalesProducers.L1JetEtScaleOnline_cfi")
process.load("L1TriggerConfig.L1ScalesProducers.L1HfRingEtScaleOnline_cfi")
process.load("L1TriggerConfig.L1ScalesProducers.L1HtMissScaleOnline_cfi")
#process.load("L1TriggerConfig.GctConfigProducers.L1GctChannelMaskOnline_cfi")
from CondTools.L1Trigger.L1CondDBPayloadWriter_cff import initPayloadWriter
initPayloadWriter( process )
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.source = cms.Source("EmptyIOVSource",
timetype = cms.string('runnumber'),
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
interval = cms.uint64(1)
)
# LUT printer
process.load("L1Trigger.GlobalCaloTrigger.l1GctPrintLuts_cfi")
process.l1GctPrintLuts.jetRanksFilename = cms.untracked.string("GctJetLUT_"+key+".txt")
process.l1GctPrintLuts.hfSumLutFilename = cms.untracked.string("GctHFSumLUT_"+key+".txt")
process.l1GctPrintLuts.htMissLutFilename = cms.untracked.string("GctHtMissLUT_"+key+".txt")
process.p = cms.Path(
process.l1GctPrintLuts
)
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
11748,
25064,
198,
11748,
28686,
198,
198,
2,
7159,
198,
361,
357,
11925,
7,
17597,
13,
853,
85,
8,
29,
16... | 2.544 | 1,000 |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from . import _utilities, _tables
from . import outputs
from ._enums import *
__all__ = [
'DeviceIpAddress',
'DeviceNetwork',
'DevicePort',
'ProjectBgpConfig',
'SpotMarketRequestInstanceParameters',
'VolumeAttachment',
'VolumeSnapshotPolicy',
'GetDeviceBgpNeighborsBgpNeighborResult',
'GetDeviceBgpNeighborsBgpNeighborRoutesInResult',
'GetDeviceBgpNeighborsBgpNeighborRoutesOutResult',
'GetDeviceNetworkResult',
'GetDevicePortResult',
'GetProjectBgpConfigResult',
'GetVolumeSnapshotPolicyResult',
]
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
@pulumi.output_type
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
17202,
39410,
25,
428,
2393,
373,
7560,
416,
262,
21624,
12994,
24118,
687,
10290,
357,
27110,
5235,
8,
16984,
13,
17202,
198,
2,
17202,
2141,
407,
4370,
416,
1021,
4556,
345,
821,
1728,
345,
760... | 2.764286 | 420 |
from ..storage import source_utils
from ..storage.caching import cache
from .. import config
from .storage import FeatureConstructor
from ..storage import dataframe
from IPython.display import display
from glob import glob
import os
def preview(df, sizes=(2, 4, 6)):
"""
Applies function to heads of particular dataframe.
Example:
``` python
@preview(df, sizes=[5, 15])
def make_ohe_pclass(df):
...
```
"""
return __preview
def register(*args, cache_default=True):
"""
Registers function for further caching its calls and restoring source.
Example:
``` python
@register
def make_ohe_pclass(df):
...
```
"""
if args:
function = args[0]
return __register(function)
else:
return __register
def deregister(name, force=False):
"""
Deletes sources and cached calls of a certain function.
Usage:
``` python
deregister('make_new_features')
deregister('make_new_features', force=True)
```
"""
confirmation = ''
fc_name = name + '_fc'
df_names = [df_name for df_name in cache.cached_dfs() if df_name.startswith(name + '__')]
if not force:
print("Are you sure you want to delete all these cached files?")
if fc_name in cache.cached_objs():
print(fc_name)
for df_name in df_names:
print(df_name)
print("To confirm please print full name of the function:")
confirmation = input()
if not force and confirmation != name:
print("Doesn't match")
return
if fc_name in cache.cached_objs():
print(f'removing {fc_name}')
cache.remove_obj(fc_name)
for df_name in df_names:
print(f'removing {df_name}')
cache.remove_df(df_name)
def dropper(function):
"""
Registers function that won't be cached.
Is recommended to be used only with functions which actually drop columns or rows and don't produce any new data.
Example:
``` python
@dropper
def drop_pclass(df):
return stl.column_dropper(['Pclass'])(df)
```
"""
# TODO:
# if cache.is_cached(function.__name__):
# print('Dropper is already registered. Deregistering: ')
# deregister(function.__name__, force=True)
deregister(function.__name__, force=True)
return register(function, cache_default=False)
def selector(function):
"""
Registers function that won't be cached.
Is recommended to be used only with functions which actually select columns or rows and don't produce any new data.
Example:
``` python
@selector
def select_pclass_cabin(df):
return stl.column_selector(['Pclass', 'Cabin'])(df)
```
"""
deregister(function.__name__, force=True)
return register(function, cache_default=False)
def helper(func):
"""
Save function as helper to store its source
and be able to define it in any notebook with kts.helpers.define_in_scope()
:param func: function
:return: function with .source method
"""
assert '__name__' in dir(func), 'Helper should have a name'
func.source = source_utils.get_source(func)
if func.__name__ + '_helper' in cache.cached_objs():
cache.remove_obj(func.__name__ + '_helper')
cache.cache_obj(func, func.__name__ + '_helper')
return func
| [
6738,
11485,
35350,
1330,
2723,
62,
26791,
198,
6738,
11485,
35350,
13,
66,
8103,
1330,
12940,
198,
6738,
11485,
1330,
4566,
198,
6738,
764,
35350,
1330,
27018,
42316,
273,
198,
6738,
11485,
35350,
1330,
1366,
14535,
198,
6738,
6101,
7535... | 2.54437 | 1,341 |
import FWCore.ParameterSet.Config as cms
process = cms.Process("Test")
process.load("Configuration.StandardSequences.Services_cff")
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.hcalPulseContainmentTest = cms.EDAnalyzer("HcalPulseContainmentTest")
process.p1 = cms.Path(process.hcalPulseContainmentTest)
| [
11748,
48849,
14055,
13,
36301,
7248,
13,
16934,
355,
269,
907,
198,
198,
14681,
796,
269,
907,
13,
18709,
7203,
14402,
4943,
198,
198,
14681,
13,
2220,
7203,
38149,
13,
23615,
44015,
3007,
13,
31007,
62,
66,
487,
4943,
198,
14681,
13... | 2.919753 | 162 |
import logging
import random
from common.timing_wrapper import TimingWrapper
from locust import User, task, between
from common.workload_config import workload_config
""" A simple workload that selects single rows
""" | [
11748,
18931,
198,
11748,
4738,
198,
6738,
2219,
13,
16514,
278,
62,
48553,
1330,
5045,
278,
36918,
2848,
198,
6738,
1179,
436,
1330,
11787,
11,
4876,
11,
1022,
198,
6738,
2219,
13,
1818,
2220,
62,
11250,
1330,
26211,
62,
11250,
198,
... | 4.192308 | 52 |
#!/usr/bin/env python
import math
import numpy as np
from xform import *
if __name__ == '__main__':
test_axis_angle_conversion()
test_quat_conversion()
test_euler_conversion()
test_dcm_conversion()
test_rot_shift_axis_angle()
test_rot_shift_quat()
test_rot_shift_euler()
test_intra_euler()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
2124,
687,
1330,
1635,
628,
628,
628,
628,
628,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
19... | 2.342657 | 143 |
from rest_framework import serializers
from .models import Photo
| [
6738,
1334,
62,
30604,
1330,
11389,
11341,
198,
198,
6738,
764,
27530,
1330,
5555,
628,
628,
628
] | 4.176471 | 17 |
import saopy.model
from saopy.model import ssn___Accuracy as Accuracy
from saopy.model import ssn___BatteryLifetime as BatteryLifetime
from saopy.model import ssn___Condition as Condition
from saopy.model import ssn___Deployment as Deployment
from saopy.model import ssn___DeploymentRelatedProcess as DeploymentRelatedProcess
from saopy.model import ssn___DetectionLimit as DetectionLimit
from saopy.model import ssn___Device as Device
from saopy.model import ssn___Drift as Drift
from saopy.model import ssn___FeatureOfInterest as FeatureOfInterest
from saopy.model import ssn___Frequency as Frequency
from saopy.model import ssn___Input as Input
from saopy.model import ssn___Latency as Latency
from saopy.model import ssn___MaintenanceSchedule as MaintenanceSchedule
from saopy.model import ssn___MeasurementCapability as MeasurementCapability
from saopy.model import ssn___MeasurementProperty as MeasurementProperty
from saopy.model import ssn___MeasurementRange as MeasurementRange
from saopy.model import ssn___Observation as Observation
from saopy.model import ssn___ObservationValue as ObservationValue
from saopy.model import ssn___OperatingPowerRange as OperatingPowerRange
from saopy.model import ssn___OperatingProperty as OperatingProperty
from saopy.model import ssn___OperatingRange as OperatingRange
from saopy.model import ssn___Output as Output
from saopy.model import ssn___Platform as Platform
from saopy.model import ssn___Precision as Precision
from saopy.model import ssn___Process as Process
from saopy.model import ssn___Property as Property
from saopy.model import ssn___Resolution as Resolution
from saopy.model import ssn___ResponseTime as ResponseTime
from saopy.model import ssn___Selectivity as Selectivity
from saopy.model import ssn___Sensing as Sensing
from saopy.model import ssn___SensingDevice as SensingDevice
from saopy.model import ssn___Sensitivity as Sensitivity
from saopy.model import ssn___Sensor as Sensor
from saopy.model import ssn___SensorDataSheet as SensorDataSheet
from saopy.model import ssn___SensorInput as SensorInput
from saopy.model import ssn___SensorOutput as SensorOutput
from saopy.model import ssn___Stimulus as Stimulus
from saopy.model import ssn___SurvivalProperty as SurvivalProperty
from saopy.model import ssn___SurvivalRange as SurvivalRange
from saopy.model import ssn___System as System
from saopy.model import ssn___SystemLifetime as SystemLifetime
| [
11748,
473,
11081,
13,
19849,
198,
198,
6738,
473,
11081,
13,
19849,
1330,
264,
16184,
17569,
17320,
23843,
355,
33222,
198,
6738,
473,
11081,
13,
19849,
1330,
264,
16184,
17569,
47006,
43,
361,
8079,
355,
23490,
43,
361,
8079,
198,
673... | 3.780031 | 641 |
from datetime import timedelta
from django.core.validators import MinValueValidator
from django.db import models
from django.contrib.auth.models import User
| [
6738,
4818,
8079,
1330,
28805,
12514,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
12102,
2024,
1330,
1855,
11395,
47139,
1352,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
... | 3.586957 | 46 |
from functools import reduce
from typing import Callable, Dict, List, Tuple
constraints, my_ticket, nearby_tickets = parse('16.in')
valid_nearby_tickets = []
invalid_number_per_ticket = []
for nearby_ticket in nearby_tickets:
invalid_ticket = False
for number in nearby_ticket:
at_least_one_is_valid = False
for constraint in constraints:
res = evaluate_constraint(number, constraints[constraint])
at_least_one_is_valid = at_least_one_is_valid or res
if not at_least_one_is_valid:
invalid_ticket = True
invalid_number_per_ticket.append(number)
break
if not invalid_ticket:
valid_nearby_tickets.append(nearby_ticket)
print(str(sum(invalid_number_per_ticket)))
candidates = find_candidates(valid_nearby_tickets, constraints)
assignment = find_assignment(candidates)
print(str(reduce(lambda x,y: x*y, map(lambda i: my_ticket[i], indices_for_departure_fields(assignment))))) | [
6738,
1257,
310,
10141,
1330,
4646,
198,
6738,
19720,
1330,
4889,
540,
11,
360,
713,
11,
7343,
11,
309,
29291,
628,
628,
628,
628,
628,
628,
198,
198,
1102,
2536,
6003,
11,
616,
62,
43350,
11,
6716,
62,
83,
15970,
796,
21136,
10786,... | 2.458333 | 408 |
import warnings
from typing import Iterable, Optional, Union, Tuple, List
ArraysetsRef = Union['ArraysetDataReader', Iterable['ArraysetDataReader']]
class GroupedAsets(object):
"""Groups hangar arraysets and validate suitability for usage in dataloaders.
It can choose a subset of samples in the hangar arraysets by checking the
list of keys or an index range. :class:`GroupedAsets` does not expect all
the input hangar arraysets to have same length and same keys. It takes a
`set.union` of sample names from all the arraysets and `keys` argument if
passed and hence discard non-common keys while fetching. Based on `keys` or
`index_range` (ignore `index_range` if `keys` is present) it makes a subset
of sample names which is then used to fetch the data from hangar arraysets.
"""
def get_types(self, converter=None):
"""
Get dtypes of the all the arraysets in the `GroupedAsets`.
Parameters
----------
converter : Callable
A function that takes default dtype (numpy) and convert it to another
format
Returns
-------
A tuple of types
"""
types = []
for aset in self.arrayset_array:
if converter:
print(aset)
types.append(converter(aset.dtype))
else:
types.append(aset.dtype)
return tuple(types)
def get_shapes(self, converter=None):
"""
Get shapes of the all the arraysets in the `GroupedAsets`.
Parameters
----------
converter : Callable
A function that takes default shape (numpy) and convert it to another
format
Returns
-------
A tuple of arrayset shapes
"""
if self.arrayset_array[0].variable_shape:
return None
shapes = []
for aset in self.arrayset_array:
if converter:
shapes.append(converter(aset.shape))
else:
shapes.append(aset.shape)
return tuple(shapes)
@property
| [
11748,
14601,
198,
6738,
19720,
1330,
40806,
540,
11,
32233,
11,
4479,
11,
309,
29291,
11,
7343,
628,
198,
3163,
20477,
1039,
8134,
796,
4479,
17816,
3163,
20477,
316,
6601,
33634,
3256,
40806,
540,
17816,
3163,
20477,
316,
6601,
33634,
... | 2.420273 | 878 |
import matplotlib.pyplot as plt
import torch
import torchvision
import numpy as np
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from torch.utils.data.sampler import SubsetRandomSampler
import warnings
warnings.filterwarnings('ignore')
DEBUG = False
BATCH_SIZE = 64
NUM_CLASSES = 39
data_dir = './images'
trainloader, testloader = load_split_train_test(data_dir, .2)
print(trainloader.dataset.classes)
print(len(trainloader.dataset.classes))
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
cnn = SimpleCNN()
cnn = cnn.to(device)
print(len(trainloader.dataset.classes))
loss, optimizer = createLossAndOptimizer(cnn)
train_network(trainloader, loss, optimizer, device, cnn, 35)
correct = 0
total = 0
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(device), data[1].to(device)
output = cnn(images)
_, predicted = torch.max(output.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network is %d %%' % (100 * correct / total))
class_correct = list(0. for i in range(NUM_CLASSES))
class_total = list(0. for i in range(NUM_CLASSES))
with torch.no_grad():
for data in testloader:
images, labels = data[0].to(device), data[1].to(device)
outputs = cnn(images)
_, predicted = torch.max(outputs, 1)
c = (predicted == labels).squeeze()
for i in range(len(labels)):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
for i in range(NUM_CLASSES):
print('Accuracy of %10s : %2d %%' % (
trainloader.dataset.classes[i], 100 * class_correct[i] / class_total[i]))
torch.save(cnn.state_dict(), 'last_cnn_dict.pt')
torch.save(cnn, 'last_cnn.pt')
example = torch.rand(1, 3, 256, 256)
ex = example.to(device)
traced_script_module = torch.jit.trace(cnn, ex)
traced_script_module.save('last_jit_model.pt')
| [
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
11748,
28034,
198,
11748,
28034,
10178,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
28034,
1330,
299,
77,
198,
6738,
28034,
1330,
6436,
198,
11748,
28034,
13,
20471,
13,
4... | 2.546914 | 810 |
from typing import List | [
6738,
19720,
1330,
7343
] | 5.75 | 4 |
from pathlib import Path
import os
from PIL import Image, ImageFont
import numpy as np
| [
6738,
3108,
8019,
1330,
10644,
198,
11748,
28686,
198,
6738,
350,
4146,
1330,
7412,
11,
7412,
23252,
198,
11748,
299,
32152,
355,
45941,
628,
198
] | 3.56 | 25 |
from SortBase import SortBase
| [
6738,
33947,
14881,
1330,
33947,
14881,
628
] | 4.428571 | 7 |
from pytest_check import check
import numpy as np
import firedrake
import firedrake_adjoint
import ufl
import fdm
from fecr import evaluate_primal, evaluate_pullback, evaluate_pushforward
mesh = firedrake.UnitSquareMesh(3, 2)
V = firedrake.FunctionSpace(mesh, "P", 1)
templates = (firedrake.Function(V), firedrake.Constant(0.0), firedrake.Constant(0.0))
inputs = (np.ones(V.dim()), np.ones(1) * 0.5, np.ones(1) * 0.6)
ff = lambda *args: evaluate_primal(assemble_firedrake, templates, *args)[ # noqa: E731
0
]
ff0 = lambda x: ff(x, inputs[1], inputs[2]) # noqa: E731
ff1 = lambda y: ff(inputs[0], y, inputs[2]) # noqa: E731
ff2 = lambda z: ff(inputs[0], inputs[1], z) # noqa: E731
| [
6738,
12972,
9288,
62,
9122,
1330,
2198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
11748,
6294,
33788,
198,
11748,
6294,
33788,
62,
41255,
1563,
198,
11748,
334,
2704,
198,
198,
11748,
277,
36020,
198,
198,
6738,
27685,
81,
1330,
13... | 2.496429 | 280 |
# coding: utf-8
from PIL import Image
import sys
import pyocr
import pyocr.builders
import os
from tqdm import trange
# Set Netflix subtitle image directory.
img_dir = '../data/planetes/PLANETES.S01E02.WEBRip.Netflix/'
#img_dir = '../data/'
# Get a tool.
tools = pyocr.get_available_tools()
if len(tools) == 0:
print("No OCR tool found")
sys.exit(1)
tool = tools[0]
print("Will use tool '%s'" % (tool.get_name()))
langs = tool.get_available_languages()
print("Available languages: %s" % ", ".join(langs))
# Image to string.
txts = []
img_names = [f for f in os.listdir(img_dir) if f.split('.')[-1].lower() in ('png')]
img_names = sorted(img_names)
print(img_names)
for i in trange(len(img_names), desc='img 2 str'):
txt = tool.image_to_string(
Image.open(img_dir+img_names[i]),
lang='jpn',
builder=pyocr.builders.TextBuilder()
)
print(txt)
txts.append(txt)
# Save the subtitle.
subs = open(img_dir+'subs.txt', 'w')
subs.write('\n'.join(txts))
subs.close()
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
6738,
350,
4146,
1330,
7412,
198,
11748,
25064,
198,
11748,
12972,
1696,
198,
11748,
12972,
1696,
13,
50034,
198,
11748,
28686,
198,
6738,
256,
80,
36020,
1330,
491,
858,
198,
198,
2,
5345,
12074,
... | 2.494872 | 390 |
from unittest import TestCase
import numpy as np
from pandas_ml_utils.utils.functions import unfold_parameter_space
from pandas_ml_utils.utils.classes import ReScaler
| [
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
19798,
292,
62,
4029,
62,
26791,
13,
26791,
13,
12543,
2733,
1330,
16631,
62,
17143,
2357,
62,
13200,
198,
6738,
19798,
292,
62,
4029,
62... | 3.269231 | 52 |
from invenio_db import db
from scoap3.utils.nations import find_country
from scoap3.modules.analysis.models import CountryCache
| [
6738,
287,
574,
952,
62,
9945,
1330,
20613,
198,
198,
6738,
629,
78,
499,
18,
13,
26791,
13,
77,
602,
1330,
1064,
62,
19315,
198,
6738,
629,
78,
499,
18,
13,
18170,
13,
20930,
13,
27530,
1330,
12946,
30562,
628,
198
] | 3.195122 | 41 |
"""
Created on 22.09.2009
@author: alen
"""
import uuid
# from oauth import oauth # not used
from django.conf import settings
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.utils.translation import gettext as _
# from django.utils.hashcompat import md5_constructor # not used
from django.http import HttpResponseRedirect#, HttpResponse
from django.contrib.auth.models import User
from django.contrib.auth import login, authenticate, logout as auth_logout
from django.contrib.sites.models import Site
from socialregistration.forms import UserForm
from socialregistration.utils import (OAuthClient, OAuthTwitter, #OAuthFriendFeed,
OpenID)
from socialregistration.models import FacebookProfile, TwitterProfile, OpenIDProfile
FB_ERROR = _('We couldn\'t validate your Facebook credentials')
def _get_next(request):
"""
Returns a url to redirect to after the login
"""
if 'next' in request.session:
next = request.session['next']
del request.session['next']
return next
elif 'next' in request.GET:
return request.GET.get('next')
elif 'next' in request.POST:
return request.POST.get('next')
else:
return getattr(settings, 'LOGIN_REDIRECT_URL', '/')
def setup(request, template='socialregistration/setup.html',
form_class=UserForm, extra_context=dict()):
"""
Setup view to create a username & set email address after authentication
"""
if not getattr(settings, 'SOCIAL_GENERATE_USERNAME', False):
# User can pick own username
if not request.method == "POST":
form = form_class(
request.session['socialregistration_user'],
request.session['socialregistration_profile'],
)
else:
form = form_class(
request.session['socialregistration_user'],
request.session['socialregistration_profile'],
request.POST
)
if form.is_valid():
form.save()
user = form.profile.authenticate()
login(request, user)
del request.session['socialregistration_user']
del request.session['socialregistration_profile']
return HttpResponseRedirect(_get_next(request))
extra_context.update(dict(form=form))
return render_to_response(
template,
extra_context,
context_instance=RequestContext(request)
)
else:
# Generate user and profile
user = request.session['socialregistration_user']
user.username = str(uuid.uuid4())[:30]
user.save()
profile = request.session['socialregistration_profile']
profile.user = user
profile.save()
# Authenticate and login
user = profile.authenticate()
login(request, user)
# Clear & Redirect
del request.session['socialregistration_user']
del request.session['socialregistration_profile']
return HttpResponseRedirect(getattr(settings, 'LOGIN_REDIRECT_URL', _get_next(request)))
def facebook_login(request, template='socialregistration/facebook.html',
extra_context=dict()):
"""
View to handle the Facebook login
"""
if not request.facebook.check_session(request):
extra_context.update(
dict(error=FB_ERROR)
)
return render_to_response(
template, extra_context, context_instance=RequestContext(request)
)
user = authenticate(uid=request.facebook.uid)
if user is None:
request.session['socialregistration_user'] = User()
fb_profile = request.facebook.users.getInfo([request.facebook.uid], ['name', 'pic_square'])[0]
request.session['socialregistration_profile'] = FacebookProfile(
uid=request.facebook.uid,
)
request.session['next'] = _get_next(request)
return HttpResponseRedirect(reverse('socialregistration_setup'))
login(request, user)
return HttpResponseRedirect(getattr(settings, 'LOGIN_REDIRECT_URL', _get_next(request)))
def facebook_connect(request, template='socialregistration/facebook.html',
extra_context=dict()):
"""
View to handle connecting existing accounts with facebook
"""
if not request.facebook.check_session(request) \
or not request.user.is_authenticated():
extra_context.update(
dict(error=FB_ERROR)
)
return render_to_response(
template,
extra_context,
context_dict=RequestContext(request)
)
profile, created = FacebookProfile.objects.get_or_create(
user=request.user, uid=request.facebook.uid
)
return HttpResponseRedirect(_get_next(request))
def logout(request, redirect_url=None):
"""
Logs the user out of facebook and django.
"""
auth_logout(request)
if getattr(request,'facebook',False):
request.facebook.session_key = None
request.facebook.uid = None
url = getattr(settings,'LOGOUT_REDIRECT_URL',redirect_url) or '/'
return HttpResponseRedirect(url)
def twitter(request):
    """
    Actually setup/login an account relating to a twitter user after the oauth
    process is finished successfully
    """
    client = OAuthTwitter(
        request, settings.TWITTER_CONSUMER_KEY,
        settings.TWITTER_CONSUMER_SECRET_KEY,
        settings.TWITTER_REQUEST_TOKEN_URL,
    )
    user_info = client.get_user_info()
    # Known Twitter id -> authenticate and log straight in.
    user = authenticate(twitter_id=user_info['id'])
    if user is None:
        # Unknown Twitter id: stash an *unsaved* profile/user pair in the
        # session and let the setup view create the account.
        profile = TwitterProfile(twitter_id=user_info['id'],
        )
        user = User()
        request.session['socialregistration_profile'] = profile
        request.session['socialregistration_user'] = user
        request.session['next'] = _get_next(request)
        return HttpResponseRedirect(reverse('socialregistration_setup'))
    login(request, user)
    return HttpResponseRedirect(getattr(settings, 'LOGIN_REDIRECT_URL', _get_next(request)))
def friendfeed(request):
    """
    Actually setup an account relating to a friendfeed user after the oauth process
    is finished successfully
    """
    # FriendFeed support was never implemented; fail loudly if routed here.
    raise NotImplementedError()
def oauth_redirect(request, consumer_key=None, secret_key=None,
    request_token_url=None, access_token_url=None, authorization_url=None,
    callback_url=None, parameters=None):
    """
    View to handle the OAuth based authentication redirect to the service provider

    Remembers the post-login destination in the session, then hands off
    to the provider's authorization URL via the OAuth client.
    """
    request.session['next'] = _get_next(request)
    client = OAuthClient(request, consumer_key, secret_key,
        request_token_url, access_token_url, authorization_url, callback_url, parameters)
    return client.get_redirect()
def oauth_callback(request, consumer_key=None, secret_key=None,
    request_token_url=None, access_token_url=None, authorization_url=None,
    callback_url=None, template='socialregistration/oauthcallback.html',
    extra_context=None, parameters=None):
    """
    View to handle final steps of OAuth based authentication where the user
    gets redirected back to from the service provider.

    On an invalid response the template is re-rendered with the client in
    the context; otherwise control passes to the service-specific setup view.
    """
    # Bug fix: ``extra_context`` used to default to a shared mutable dict,
    # so the ``update`` below leaked state between requests.
    if extra_context is None:
        extra_context = {}
    client = OAuthClient(request, consumer_key, secret_key, request_token_url,
        access_token_url, authorization_url, callback_url, parameters)
    extra_context.update(dict(oauth_client=client))
    if not client.is_valid():
        return render_to_response(
            template, extra_context, context_instance=RequestContext(request)
        )
    # We're redirecting to the setup view for this oauth service
    return HttpResponseRedirect(reverse(client.callback_url))
def openid_redirect(request):
    """
    Redirect the user to the openid provider

    Stores the destination and the chosen provider (from the query string)
    in the session so that `openid_callback` can finish the handshake.
    """
    request.session['next'] = _get_next(request)
    request.session['openid_provider'] = request.GET.get('openid_provider')
    client = OpenID(
        request,
        'http://%s%s' % (
            Site.objects.get_current().domain,
            reverse('openid_callback')
        ),
        request.GET.get('openid_provider')
    )
    return client.get_redirect()
def openid_callback(request, template='socialregistration/openid.html',
    extra_context=None):
    """
    Catches the user when he's redirected back from the provider to our site.

    On a valid response, either log in the user matching the claimed id or
    stash a fresh ``OpenIDProfile`` in the session and redirect to the setup
    view.  On an invalid response, re-render the template.
    """
    # Bug fix: ``extra_context`` used to default to a shared mutable dict.
    # NOTE(review): it is currently unused by this view -- confirm whether
    # it should be passed to ``render_to_response`` instead of ``dict()``.
    if extra_context is None:
        extra_context = {}
    client = OpenID(
        request,
        'http://%s%s' % (
            Site.objects.get_current().domain,
            reverse('openid_callback')
        ),
        request.session.get('openid_provider')
    )
    if client.is_valid():
        user = authenticate(identity=request.GET.get('openid.claimed_id'))
        if user is None:
            request.session['socialregistration_user'] = User()
            request.session['socialregistration_profile'] = OpenIDProfile(
                identity=request.GET.get('openid.claimed_id')
            )
            return HttpResponseRedirect(reverse('socialregistration_setup'))
        else:
            login(request, user)
            return HttpResponseRedirect(_get_next(request))
    return render_to_response(
        template,
        dict(),
        context_instance=RequestContext(request)
    )
| [
37811,
198,
41972,
319,
2534,
13,
2931,
13,
10531,
198,
198,
31,
9800,
25,
435,
268,
198,
37811,
198,
11748,
334,
27112,
198,
2,
422,
267,
18439,
1330,
267,
18439,
1303,
407,
973,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
... | 2.571429 | 3,612 |
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pyplot as plt
import glob, os
# Input directories with preprocessed motion/video distributions.
LOAD_MOTION_DIR = './data/preprocessed/motion/'
LOAD_PROB_DIST = 'prob_dist/'
LOAD_CUMUL_DIST = 'cumul_dist/'
LOAD_COMPACT = 'compact/'
LOAD_VIDEO_DIR = './data/preprocessed/video/'
# Output directory layout: SAVE_DIR + (table|graph) + measure + source.
SAVE_DIR = './data/processed/'
SAVE_TABLE_OPTION = 'table/'
SAVE_GRAPH_OPTION = 'graph/'
SAVE_OPT_DIF = 'difference/'
SAVE_OPT_KLD = 'kullback_leibler_divergence/'
SAVE_OPT_ENT = 'entropy/'
SAVE_OPT_OPT = 'optical_flow/'
SAVE_OPT_MOT = 'motion/'
SAVE_OPT_PROB = 'probability/'
SAVE_OPT_CUMUL = 'cumulative/'
SAVE_OPT_20S = '20s/'
SAVE_OPT_AFTER = '20-60s/'
EXT_PNG = '.png'
EXT_JSON = '.json'
EXT_EXCEL = '.xlsx'
# NOTE(review): the helpers used below (`load_video_data_dir`,
# `load_motion_data_dir`, `apply_on_dir`, `save_dir`, `save_fig_dir`,
# `to_entropy`, `kullback_leibler_divergence`, `to_difference_of_entropy`,
# `save_2d`) are not defined in this file -- confirm they are provided
# by an accompanying module (e.g. via a star import that was removed).
optflow_dist_list = load_video_data_dir()
motion_prob_dist_list = load_motion_data_dir(is_compact = True)
motion_cumul_dist_list = load_motion_data_dir(is_cumulative = True, is_compact = True)
# KL divergence between optical-flow and motion distributions
# (probability and cumulative variants).
kld_prob_list = apply_on_dir(optflow_dist_list, motion_prob_dist_list,
                             kullback_leibler_divergence,
                             lambda x, y: (x, y))
kld_cumul_list = apply_on_dir(optflow_dist_list, motion_cumul_dist_list,
                              kullback_leibler_divergence,
                              lambda x, y: (x, y))
# Save optical-flow entropy as tables and as graphs (0-60 s window).
directory = SAVE_DIR + SAVE_TABLE_OPTION + SAVE_OPT_ENT + SAVE_OPT_OPT
save_dir(optflow_dist_list, to_entropy, directory)
directory = SAVE_DIR + SAVE_GRAPH_OPTION + SAVE_OPT_ENT + SAVE_OPT_OPT
save_fig_dir(optflow_dist_list,
             to_entropy,
             directory,
             'sec',
             'entropy',
             [0,60])
save_graph = SAVE_DIR + SAVE_GRAPH_OPTION + SAVE_OPT_ENT + SAVE_OPT_MOT + SAVE_OPT_PROB
save_fig_dir(kld_prob_list,
             lambda x: x,
             save_graph,
             'sec',
             'entropy')
# Per-video: plot the frame-to-frame difference of entropy.
for optflow in list(zip(*optflow_dist_list)):
    plt.cla()
    name = optflow[0]
    dif = to_difference_of_entropy(optflow[1])
    save_2d(dif, name)
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
8019,
198,
6759,
29487,
8019,
13,
1904,
7203,
51,
74,
46384,
4943,
198,
6738,
2603,
29487,
8019,
1330,
12972,
29487,
355,
458,
83,
198,
11748,... | 1.927606 | 1,036 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#__author__ = 'Medici.Yan'
#Rsync弱口令检测
import socket,struct,hashlib,base64,time
def initialisation(host,port):
    '''
    Initialise an rsync session and read the version banner; the banner
    must be exchanged before every session.

    Returns ``(True, magic, version, modulelist)`` when the port answers
    with a valid rsync greeting, else ``(False, "port not open")``.
    NOTE(review): written for Python 2 -- ``struct.pack("!8s5ss", ...)``
    is given ``str`` values, which Python 3 rejects; confirm the target
    runtime before porting.
    '''
    flag=False
    s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    rsync={"MagicHeader":"@RSYNCD:","HeaderVersion":" 30.0"}
    payload=struct.pack("!8s5ss",rsync["MagicHeader"],rsync["HeaderVersion"],"\n")#initial greeting
    try:
        socket.setdefaulttimeout(20)#global connect/recv timeout, seconds
        s.connect((host,port))#connect to the target host and port
        s.send(payload)
        data=s.recv(1024)
        reply=struct.unpack('!8s5ss',data)
        if len(reply)==3:
            flag=True #port is open and speaks the rsync protocol
            rsynclist=ClientQuery(s) #enumerate exported module names
    except Exception :
        pass  # best-effort probe: any socket/parse error means "not open"
    finally:
        s.close()
    if flag:
        return True,reply[0],reply[1],rsynclist
    return False,"port not open"
def ClientQuery(socket_pre):
    '''
    Query all module names exported by the rsync daemon.

    @return list of module names (may be empty on error)
    '''
    s=socket_pre
    payload=struct.pack("!s","\n")#empty command = "list modules"
    modulelist=[]
    try:
        s.send(payload)
        while True:
            data=s.recv(1024)#module list arrives in newline-separated chunks
            moduletemp=struct.unpack("!"+str(len(data))+"s",data)
            modulename=moduletemp[0].replace(" ","").split("\n")
            for i in range(len(modulename)):
                realname=modulename[i].split("\t")
                if realname[0] != "":
                    modulelist.append(realname[0])
            if modulename[-2]=="@RSYNCD:EXIT":
                break
    except Exception :
        # NOTE(review): an IndexError from `modulename[-2]` on a short
        # chunk is swallowed here too -- confirm this is intended.
        pass
    return modulelist
if __name__ == '__main__':
    # `dummy` supplies the scanner harness (`audit`, `assign`) when this
    # plugin is run standalone.
    from dummy import *
    audit(assign('rsync', ('172.18.19.90',873))[1])
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
834,
9800,
834,
796,
705,
39112,
72,
13,
49664,
6,
201,
198,
2,
49,
27261,
28156,
109,
20998,
96,
2001... | 1.862374 | 792 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#- Author : (DEK) Devendra Kavthekar

# program065:
#     Write a program to compute:
#     f(n)=f(n-1)+100 when n>0
#     and f(0)=0
#     with a given n input by console (n>0).
#     Example:
#     If the following n is given as input to the program:
#     5
#     Then, the output of the program should be:
#     500
#     In case of input data being supplied to the question, it should be
#     assumed to be a console input.
#     Hints:
#     We can define recursive function in Python.
# import timeit


def f(n):
    """Return f(n), where f(0) = 0 and f(n) = f(n - 1) + 100 for n > 0."""
    if n == 0:
        return 0
    # Recurrence as specified; each step adds 100 to the previous value.
    return f(n - 1) + 100


def main():
    """Read n from the console and print f(n)."""
    # Bug fix: `main` (and `f`) were referenced by the guard below but
    # never defined, so running the script raised NameError.
    n = int(input())
    print(f(n))


if __name__ == '__main__':
    main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
201,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
2,
12,
6434,
1058,
357,
7206,
42,
8,
6245,
48286,
509,
615,
1169,
21070,
201,
198,
201,
198,
2,
1430,
15,
2996,
... | 2.519651 | 229 |
# coding=utf-8
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
from msrest.serialization import Model
class StorageAccountProperties(Model):
"""StorageAccountProperties.
:param provisioning_state: Gets the status of the storage account at the
time the operation was called. Possible values include: 'Creating',
'ResolvingDNS', 'Succeeded'
:type provisioning_state: str or :class:`ProvisioningState
<Petstore.models.ProvisioningState>`
:param account_type: Gets the type of the storage account. Possible
values include: 'Standard_LRS', 'Standard_ZRS', 'Standard_GRS',
'Standard_RAGRS', 'Premium_LRS'
:type account_type: str or :class:`AccountType
<Petstore.models.AccountType>`
:param primary_endpoints: Gets the URLs that are used to perform a
retrieval of a public blob, queue or table object.Note that StandardZRS
and PremiumLRS accounts only return the blob endpoint.
:type primary_endpoints: :class:`Endpoints <Petstore.models.Endpoints>`
:param primary_location: Gets the location of the primary for the storage
account.
:type primary_location: str
:param status_of_primary: Gets the status indicating whether the primary
location of the storage account is available or unavailable. Possible
values include: 'Available', 'Unavailable'
:type status_of_primary: str or :class:`AccountStatus
<Petstore.models.AccountStatus>`
:param last_geo_failover_time: Gets the timestamp of the most recent
instance of a failover to the secondary location. Only the most recent
timestamp is retained. This element is not returned if there has never
been a failover instance. Only available if the accountType is
StandardGRS or StandardRAGRS.
:type last_geo_failover_time: datetime
:param secondary_location: Gets the location of the geo replicated
secondary for the storage account. Only available if the accountType is
StandardGRS or StandardRAGRS.
:type secondary_location: str
:param status_of_secondary: Gets the status indicating whether the
secondary location of the storage account is available or unavailable.
Only available if the accountType is StandardGRS or StandardRAGRS.
Possible values include: 'Available', 'Unavailable'
:type status_of_secondary: str or :class:`AccountStatus
<Petstore.models.AccountStatus>`
:param creation_time: Gets the creation date and time of the storage
account in UTC.
:type creation_time: datetime
:param custom_domain: Gets the user assigned custom domain assigned to
this storage account.
:type custom_domain: :class:`CustomDomain <Petstore.models.CustomDomain>`
:param secondary_endpoints: Gets the URLs that are used to perform a
retrieval of a public blob, queue or table object from the secondary
location of the storage account. Only available if the accountType is
StandardRAGRS.
:type secondary_endpoints: :class:`Endpoints <Petstore.models.Endpoints>`
"""
_attribute_map = {
'provisioning_state': {'key': 'provisioningState', 'type': 'ProvisioningState'},
'account_type': {'key': 'accountType', 'type': 'AccountType'},
'primary_endpoints': {'key': 'primaryEndpoints', 'type': 'Endpoints'},
'primary_location': {'key': 'primaryLocation', 'type': 'str'},
'status_of_primary': {'key': 'statusOfPrimary', 'type': 'AccountStatus'},
'last_geo_failover_time': {'key': 'lastGeoFailoverTime', 'type': 'iso-8601'},
'secondary_location': {'key': 'secondaryLocation', 'type': 'str'},
'status_of_secondary': {'key': 'statusOfSecondary', 'type': 'AccountStatus'},
'creation_time': {'key': 'creationTime', 'type': 'iso-8601'},
'custom_domain': {'key': 'customDomain', 'type': 'CustomDomain'},
'secondary_endpoints': {'key': 'secondaryEndpoints', 'type': 'Endpoints'},
}
| [
2,
19617,
28,
40477,
12,
23,
198,
2,
16529,
35937,
198,
2,
16529,
35937,
198,
198,
6738,
13845,
2118,
13,
46911,
1634,
1330,
9104,
628,
198,
4871,
20514,
30116,
2964,
18200,
7,
17633,
2599,
198,
220,
220,
220,
37227,
31425,
30116,
296... | 3.187994 | 1,266 |
from quicksort_first_pivot import read_input
# NOTE(review): `quicksort_last_pivot` is called below but neither defined
# here nor imported -- confirm it is provided elsewhere (it should probably
# be imported next to `read_input`).
if __name__ == "__main__":
    input_arr = read_input('quicksort.txt')
    # Count comparisons made by quicksort with the last element as pivot.
    cmp_cnt = quicksort_last_pivot(input_arr, 0, len(input_arr))
    # print(input_arr)
    print(cmp_cnt)
| [
6738,
627,
3378,
419,
62,
11085,
62,
79,
45785,
1330,
1100,
62,
15414,
628,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
5128,
62,
3258,
796,
1100,
62,
15414,
10786,
421,
3378,
419,
13,
1411... | 2.306122 | 98 |
# from copy import deepcopy
# from inicheck.tools import get_user_config, check_config, cast_all_variables
#
# from smrf.framework.model_framework import can_i_run_smrf
# from tests.test_configurations import SMRFTestCase
# class TestSpatialMethods(SMRFTestCase):
#
# def test_station_spatial_config(self):
# """
# Test the config for different spatial methods
# """
#
# config = deepcopy(self.base_config)
#
#
# config.raw_cfg['air_temp']['distribution'] = 'dk'
# config.raw_cfg['precip']['distribution'] = 'idw'
#
# # kriging doesn't work with 2 stations, so this will fail
# config.raw_cfg['vapor_pressure']['distribution'] = 'kriging'
# config.raw_cfg['vapor_pressure']['nlags'] = 1
# config.raw_cfg['system']['threading'] = False
#
# # apply the new recipies
# config.apply_recipes()
# config = cast_all_variables(config, config.mcfg)
#
# # test the base run with the config file
# result = can_i_run_smrf(config)
# self.assertFalse(result)
# def test_grid_config(self):
# """
# Test the config for the grid
# """
#
# config = deepcopy(self.base_config)
#
#
# config.raw_cfg['air_temp']['distribution'] = 'grid'
# config.raw_cfg['precip']['distribution'] = 'idw'
#
# # apply the new recipies
# config.apply_recipes()
# config = cast_all_variables(config, config.mcfg)
#
# # test the base run with the config file
# result = can_i_run_smrf(config)
# self.assertTrue(result)
| [
2,
422,
4866,
1330,
2769,
30073,
198,
2,
422,
287,
14234,
694,
13,
31391,
1330,
651,
62,
7220,
62,
11250,
11,
2198,
62,
11250,
11,
3350,
62,
439,
62,
25641,
2977,
198,
2,
198,
2,
422,
895,
41871,
13,
30604,
13,
19849,
62,
30604,
... | 2.226207 | 725 |
"""
Django accounts management made easy.
"""
VERSION = (1, 0, 2)
__version__ = '.'.join((str(each) for each in VERSION[:4]))
def get_version():
"""
Returns string with digit parts only as version.
"""
return '.'.join((str(each) for each in VERSION[:3]))
| [
37811,
198,
35,
73,
14208,
5504,
4542,
925,
2562,
13,
198,
198,
37811,
198,
43717,
796,
357,
16,
11,
657,
11,
362,
8,
198,
198,
834,
9641,
834,
796,
705,
2637,
13,
22179,
19510,
2536,
7,
27379,
8,
329,
1123,
287,
44156,
2849,
58,
... | 2.669903 | 103 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Database bootstrap for the invoice store: opens a pooled MySQL
# connection and a cursor at import time.
import mysql.connector
import config
# Connection settings for the invoice database.
# NOTE(review): credentials are hard-coded here -- consider moving them
# into `config` alongside `mysql_pool_size`.
dbconfig = {
    'host': '127.0.0.1',
    'port': 3306,
    'database': 'invoicerepo2',
    'user': 'invoicerepo',
    'password': 'invoicerepo',
    'charset': 'utf8',
    'use_unicode': True,
    'get_warnings': True
}
# Importing this module requires a reachable MySQL server.
cnx = mysql.connector.connect(pool_name = 'mypool',
                              pool_size = config.config['mysql_pool_size'],
                              **dbconfig)
cur = cnx.cursor()
# Get the largest invoice ID
# Save an invoice
# Fetch an invoice by file hash
# Insert an invoice
# Update an invoice by ID
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
48761,
13,
8443,
273,
198,
198,
11748,
4566,
198,
198,
9945,
11250,
796,
1391,
198,
220,
220,
220,
705,
4... | 1.636364 | 341 |
from __future__ import absolute_import
from __future__ import unicode_literals
# Package metadata consumed by setup tooling.
__author__ = """Ayush Pallav"""
__email__ = 'ayushpallav@gmail.com'
__version__ = '1.0.3'
| [
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
834,
9800,
834,
796,
37227,
42012,
1530,
39651,
615,
37811,
198,
834,
12888,
834,
796,
705,
323,
1530,
79,
... | 2.803279 | 61 |
# https://www.hackerrank.com/challenges/non-divisible-subset/problem

#!/bin/python3

import math
import os
import random
import re
import sys

#
# Complete the 'nonDivisibleSubset' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
#  1. INTEGER k
#  2. INTEGER_ARRAY s
#

def nonDivisibleSubset(k, s):
    """Return the size of the largest subset of `s` in which no pair of
    elements sums to a multiple of `k`.

    Bug fix: this function was referenced by the main guard below but
    never defined, so the script raised NameError.
    """
    # Only the remainder mod k matters; bucket the values.
    remainders = [0] * k
    for value in s:
        remainders[value % k] += 1
    # At most one multiple of k can be kept (two would sum to 0 mod k).
    size = min(remainders[0], 1)
    for r in range(1, k // 2 + 1):
        if r == k - r:
            # Two values with remainder k/2 sum to k; keep at most one.
            size += min(remainders[r], 1)
        else:
            # Buckets r and k-r conflict pairwise; keep the larger one.
            size += max(remainders[r], remainders[k - r])
    return size

if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    first_multiple_input = input().rstrip().split()
    n = int(first_multiple_input[0])
    k = int(first_multiple_input[1])
    s = list(map(int, input().rstrip().split()))
    result = nonDivisibleSubset(k, s)
    fptr.write(str(result) + '\n')
    fptr.close()
2,
3740,
1378,
2503,
13,
31153,
8056,
962,
13,
785,
14,
36747,
34120,
14,
13159,
12,
7146,
12843,
12,
7266,
2617,
14,
45573,
198,
198,
2,
48443,
8800,
14,
29412,
18,
198,
198,
11748,
10688,
198,
11748,
28686,
198,
11748,
4738,
198,
... | 2.639535 | 258 |
from operator import itemgetter

# Rebuild the mapping with entries in ascending order of value
# (dicts preserve insertion order in Python 3.7+).
original = {1: 2, 2: 3, 3: 1}
ordered = {key: value for key, value in sorted(original.items(), key=itemgetter(1))}
print(ordered)
| [
6738,
10088,
1330,
2378,
1136,
353,
198,
14986,
796,
1391,
16,
25,
362,
11,
362,
25,
513,
11,
513,
25,
352,
92,
198,
24071,
796,
8633,
7,
82,
9741,
7,
14986,
13,
23814,
22784,
1994,
28,
9186,
1136,
353,
7,
16,
22305,
198,
4798,
... | 2.854167 | 48 |
from enum import Enum
| [
6738,
33829,
1330,
2039,
388,
628,
628,
628,
198
] | 3.111111 | 9 |
# -*- coding: utf-8 -*-
"""
Create Time: 6/14/2021 4:16 PM
Author: Zhou

Generate the officials_list from officials.txt
"""
with open("officials.txt", encoding="utf-8") as file:
    officials_raw = file.readlines()

print(officials_raw)

# Expand lines that pack several names separated by '#'.
expanded_names = []
for raw_line in officials_raw:
    if "#" in raw_line:
        expanded_names.extend(raw_line.split("#"))
    else:
        expanded_names.append(raw_line)

# Trim whitespace, drop empty entries, then de-duplicate.
trimmed_names = [name.strip() for name in expanded_names]
non_empty_names = [name for name in trimmed_names if name]
officials_renew4 = list(set(non_empty_names))
print(officials_renew4)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
16447,
3862,
25,
718,
14,
1415,
14,
1238,
2481,
604,
25,
1433,
3122,
201,
198,
13838,
25,
32222,
201,
198,
201,
198,
8645,
378,
262,
2828,
62,
... | 2.373665 | 281 |
class Observer(object):
"""docstring for Observer"""
| [
4871,
27058,
7,
15252,
2599,
198,
220,
220,
220,
37227,
15390,
8841,
329,
27058,
37811,
198
] | 3.5625 | 16 |
"""Boxes with integer size."""
# Copyright 2016-2018 by California Institute of Technology
# All rights reserved. Licensed under 3-clause BSD.
#
import logging
import natsort
from omega.logic import syntax as stx
from omega.symbolic.prime import support_issubset
from omega.symbolic.prime import joint_support
from omega.symbolic import _type_hints as tyh
log = logging.getLogger(__name__)
def partial_order(px, fol):
    """Return BDDs for `u <= p` and `p <= u`.

    A primed copy (`ux`) of the interval parameters in `px` is created,
    and the orthotope subset relation is encoded in both directions
    between the primed and unprimed parameterizations.
    """
    # primed copies of each variable's interval parameters a, b
    ux = {
        x: dict(
            a=stx._prime_like(d['a']),
            b=stx._prime_like(d['b']))
        for x, d in px.items()}
    varmap = parameter_varmap(ux, px)
    u_leq_p = subseteq(varmap, fol)
    varmap = parameter_varmap(px, ux)
    p_leq_u = subseteq(varmap, fol)
    return u_leq_p, p_leq_u
def essential_orthotopes(f, prm, fol):
    """Return essential prime orthotopes of `f`.

    A prime is essential if some point of `f` inside it is covered by
    no other prime; the formula below quantifies over all primes `q`
    distinct from `p`.
    """
    log.info('---- essential orthotopes ----')
    p_is_prime = prime_implicants(f, prm, fol)
    q_is_prime = fol.let(prm.p_to_q, p_is_prime)
    # add to quantify u, v, so that we can rename
    x_in_p = x_in_implicant(prm, fol)
    x_in_q = fol.let(prm.p_to_q, x_in_p)
    # del x_in_p
    x = ', '.join(prm._px)
    q = ', '.join(prm.q_vars)
    s = (
        '{p_is_prime} /\ '
        r'\E {x}: ( '
        ' {f} /\ '
        r' \A {q}: ( '
        ' ( '
        ' {q_is_prime} /\ '
        ' ~ {p_eq_q} '
        ' ) '
        ' => ~ {x_in_q}'
        ' )'
        ')').format(
            p_is_prime=p_is_prime,
            q_is_prime=q_is_prime,
            p_eq_q=prm.p_eq_q,
            x_in_q=x_in_q,
            f=f, x=x, q=q)
    r = fol.add_expr(s)
    log.info('==== essential orthotopes ====')
    return r
def prime_implicants(f, prm, fol):
    """Return dominators of implicants.

    An implicant `p` is prime when every implicant `q` that contains it
    (`p_leq_q`) equals it -- i.e., `p` is maximal w.r.t. the subset order.
    """
    log.info('----- prime orthotopes ----')
    assert support_issubset(f, prm.x_vars, fol)
    p_is_implicant = _implicant_orthotopes(f, prm, fol)
    q_is_implicant = fol.let(prm.p_to_q, p_is_implicant)
    r = q_is_implicant & prm.p_leq_q
    r = prm.p_eq_q | ~ r
    r = fol.forall(prm.q_vars, r)
    r &= p_is_implicant
    # The string below is dead code: an equivalent parser-based
    # formulation kept for reference.
    '''
    q = ', '.join(prm.q_vars)
    s = (
        '{p_is_implicant} /\ '
        r'\A {q}: ( '
        ' ({q_is_implicant} /\ {p_leq_q})'
        ' => {p_eq_q}'
        ')').format(
        p_is_implicant=p_is_implicant,
        q_is_implicant=q_is_implicant,
        p_leq_q=prm.p_leq_q,
        p_eq_q=prm.p_eq_q,
        q=prm.q_vars)
    r = fol.add_expr(s)
    '''
    log.info('==== prime orthotopes ====')
    return r
def _implicant_orthotopes(f, prm, fol):
    """Return orthotopes that imply `f`.

    An assignment to the parameters is an implicant when the orthotope
    is non-empty and every point inside it satisfies `f`.

    Caution: `fol` type hints are ignored.
    """
    log.info('---- implicant orthotopes ----')
    x_vars = prm.x_vars
    assert support_issubset(f, x_vars, fol)
    x = ', '.join(x_vars)
    h = x_in_implicant(prm, fol)
    nonempty = _orthotope_nonempty(prm._px, fol)
    s = (
        '{nonempty} /\ '
        '\A {x}: {h} => {f} ').format(
            x=x, h=h, f=f, nonempty=nonempty)
    r = fol.add_expr(s)
    log.info('==== implicant orthotopes ====')
    return r
def _embed_as_implicants_naive(f, px, fol):
    """Return product representation of minterms.

    Each minterm of `f` is embedded as the degenerate orthotope with
    `a = b = x` in every dimension.
    """
    x_as_ab = {x: dict(a=x, b=x) for x in px}
    varmap = parameter_varmap(px, x_as_ab)
    r = eq(varmap, fol)
    return fol.exist(x_as_ab, r & f)
# slower than `_orthotope_singleton`
# needs `from omega.symbolic import fol as _fol`
def _orthotope_singleton_wo_parsing(px, fol):
    """Return BDD that orthotope contains single point.

    Bit-level variant of `_orthotope_singleton` that avoids parsing:
    it constrains each pair of corresponding parameter bits of `a`
    and `b` to be equivalent.
    """
    a_b = {d['a']: d['b'] for d in px.values()}
    bitmap = _fol._refine_renaming(a_b, fol.vars)
    bdd = fol.bdd
    r = bdd.true
    for b1, b2 in bitmap.items():
        u = bdd.var(b1)
        v = bdd.var(b2)
        u = bdd.apply('<=>', u, v)
        r &= u
    # Bug fix: this called `_orthotope_signleton` (typo), which raised
    # `NameError`; the cross-check targets `_orthotope_singleton`.
    r_ = _orthotope_singleton(px, fol)
    assert r == r_, (r, r_)
    return r
def _orthotope_singleton(px, fol):
    """Return BDD that orthotope contains single point.

    True exactly when `a = b` in every dimension.
    """
    s = stx.conj(
        '({a} = {b})'.format(
            a=d['a'], b=d['b'])
        for x, d in px.items())
    r = fol.add_expr(s)
    return r
def _orthotope_nonempty(abx, fol):
    """Return condition that orthotope be non-empty.

    True exactly when `a <= b` in every dimension.
    """
    s = stx.conj(
        '({a} <= {b})'.format(
            a=d['a'], b=d['b'])
        for x, d in abx.items())
    r = fol.add_expr(s)
    return r
def x_in_implicant(prm, fol):
    r"""Return `x \in concretization(prm)`.

    Membership of point `x` in the orthotope: `a <= x <= b`
    in every dimension.
    """
    px = prm._px
    s = stx.conj('''
        ({a} <= {x})
        /\ ({x} <= {b})
        '''.format(
            x=x, a=d['a'], b=d['b'])
        for x, d in px.items())
    r = fol.add_expr(s)
    return r
def subseteq(varmap, fol):
    r"""Return `ab \subseteq uv`.

    This is the partial order defined by the subset relation.
    In the general formulation `\sqsubseteq`.
    Interval containment per dimension: `u <= a` and `b <= v`.
    """
    s = stx.conj('''
        ({u} <= {a})
        /\ ({b} <= {v})
        '''.format(a=a, b=b, u=u, v=v)
        for (a, b), (u, v) in varmap.items())
    r = fol.add_expr(s)
    return r
def eq(varmap, fol):
    """Return `ab = uv`.

    The parameterization defines an injective mapping from
    parameter assignments to orthotopes. This is why equality
    of orthotopes is equivalent to equality of parameter values.
    """
    s = stx.conj('''
        ({a} = {u})
        /\ ({b} = {v})
        '''.format(a=a, b=b, u=u, v=v)
        for (a, b), (u, v) in varmap.items())
    r = fol.add_expr(s)
    return r
def implicants_intersect(prm, fol):
    r"""Return `ab \cap uv \neq \emptyset`.

    Equivalent to

        \E x:  /\ x \in concretization(ab)
               /\ x \in concretization(uv)

    The representation of orthotopes as products of
    intervals allows for a direct construction that
    avoids quantification over `x`.
    (Docstring made raw: `\c`, `\e` are invalid escapes otherwise.)
    """
    # disjoint intervals in at least one dimension
    s = stx.disj('''
        ({b} < {u})
        \/ ({v} < {a})
        '''.format(a=a, b=b, u=u, v=v)
        for (a, b), (u, v) in prm._varmap.items())
    r = fol.add_expr(s)
    return ~ r
def plot_orthotopes(u, abx, axvars, fol, ax):
    """Plot a polytope for each orthotope in `u`.

    @param axvars: `list` that defines which variable
        spans each dimension of the plot.
    """
    # Bug fix: `cycle` was used below but never imported -- the module
    # imports (logging, natsort, omega.*) do not provide it, so this
    # function raised `NameError` on first call.
    from itertools import cycle
    try:
        import polytope as poly
    except ImportError:
        raise ImportError(
            '`orthotopes` failed to import `polytope`.\n'
            'No plotting of orthotopes.')
    c = _orthotopes_iter(u, fol)
    eps = 0.1
    cycol = cycle('bgrcmk')
    for product in c:
        x, y = axvars
        a_x = abx[x]['a']
        b_x = abx[x]['b']
        a_y = abx[y]['a']
        b_y = abx[y]['b']
        xmin = product[a_x]
        xmax = product[b_x]
        ymin = product[a_y]
        ymax = product[b_y]
        # if a = b add a small amount
        if xmin == xmax:
            xmin -= eps
            xmax += eps
        if ymin == ymax:
            ymin -= eps
            ymax += eps
        size = [[xmin, xmax], [ymin, ymax]]
        p = poly.box2poly(size)
        color = next(cycol)
        p.plot(ax=ax, color=color, alpha=0.5)
def list_expr(
        cover, prm, fol,
        simple=False,
        use_dom=False, latex=False):
    """Return `list` of `str`, each an orthotope in `cover`.

    @param simple: if `True`, then return expression
        that can be parsed by `fol.add_expr`.
    @param use_dom: omit conjuncts that contain dom of var
        assumes that `|= care => type_hints`
    """
    px = prm._px
    xp = _map_parameters_to_vars(px)
    support = fol.support(cover)
    keys = {xp[k] for k in support}
    keys = natsort.natsorted(keys)
    c = _orthotopes_iter(cover, fol)
    r = list()
    for product in c:
        w = list()
        for x in keys:
            a = px[x]['a']
            b = px[x]['b']
            a = product[a]
            b = product[b]
            tyh._check_type_hint(a, b, fol.vars[x], x)
            # can `x` be ignored ?
            if use_dom:
                dom = fol.vars[x]['dom']
                a, b = tyh._clip_subrange((a, b), dom, x)
            if a is None and b is None:
                continue
            if a == b:
                s = '({x} = {a})'
            elif simple:
                s = '({a} <= {x}) /\ ({x} <= {b})'
            else:
                # precise even in absence of limits/dom
                s = '({x} \in {a} .. {b})'
            s = s.format(x=x, a=a, b=b)
            w.append(s)
        # conjoin as one triplet per line
        lines = w
        n_tail = len(lines) % 3
        # Bug fix: when `len(lines) % 3 == 0` the old code computed
        # `lines[-0:]` (the WHOLE list) and `lines[:-0]` (empty), so no
        # triplets were formed and everything was joined on one line.
        if n_tail:
            tail = lines[-n_tail:]
            lines = lines[:-n_tail]
        else:
            tail = list()
        i = iter(lines)
        triplets = list(zip(i, i, i))
        lines = [' /\ '.join(t) for t in triplets]
        if tail:
            lines.append(' /\ '.join(tail))
        s = stx.vertical_op(lines, latex=latex, op='and')
        r.append(s)
    r = natsort.natsorted(r)  # reproducible vertical listing
    return r
def _orthotopes_iter(u, fol):
    """Return iterator over orthotopes.

    Each item is an assignment (`dict`) to the interval parameters,
    picked from the BDD `u`.
    """
    if u == fol.false:
        log.info('empty set')
    c = fol.pick_iter(u)
    return c
def setup_aux_vars(f, care, fol):
    """Add and return auxiliary variables.

    No BDD operations other than `support` are invoked.

    Returns:
    - `x_vars`: set of variable names in
      `support(f) \/ support(care)`
    - `px`: map var name to `dict` of indexed parameters
    - `qx`: similar for var copies
    - `p_to_q`: `dict` that maps parameters to their copies

    For example:

        x_vars = {'x', 'y'}
        px = dict(
            x=dict(a='a_x', b='b_x'),
            y=dict(a='a_y', b='b_y'))
        qx = dict(
            x=dict(a='u_x', b='v_x'),
            y=dict(a='u_y', b='v_y'))
        p_to_q = dict(
            a_x='u_x', b_x='v_x',
            a_y='u_y', b_y='v_y')

    @return `Parameters` instance packaging the above
    """
    assert f != fol.false
    assert care != fol.false
    assert not (f == fol.true and care == fol.true)
    x_vars = joint_support([f, care], fol)
    assert x_vars, x_vars
    # aux vars for orthotope representation
    params = dict(pa='a', pb='b', qa='u', qb='v')
    p_dom = _parameter_table(
        x_vars, fol.vars,
        a_name=params['pa'], b_name=params['pb'])
    q_dom = _parameter_table(
        x_vars, fol.vars,
        a_name=params['qa'], b_name=params['qb'])
    p_dom = stx._add_prime_like_too(p_dom)
    q_dom = stx._add_prime_like_too(q_dom)
    # parameter names must not clash with the problem variables
    common = x_vars.intersection(p_dom)
    assert not common, common
    common = x_vars.intersection(q_dom)
    assert not common, common
    # works for primed variables too
    fol.declare(**p_dom)
    fol.declare(**q_dom)
    px = _map_vars_to_parameters(
        x_vars, a_name=params['pa'], b_name=params['pb'])
    qx = _map_vars_to_parameters(
        x_vars, a_name=params['qa'], b_name=params['qb'])
    assert set(px) == set(qx), (px, qx)
    p_to_q = _renaming_between_parameters(px, qx)
    q_to_p = {v: k for k, v in p_to_q.items()}
    p_to_u = {p: stx._prime_like(p) for p in p_to_q}
    p_vars = set(p_to_q)
    q_vars = set(p_to_q.values())
    u_vars = set(p_to_u.values())
    log.debug('x vars: {x_vars}'.format(x_vars=x_vars))
    # the three parameter families are pairwise disjoint
    assert not (p_vars & q_vars), (p_vars, q_vars)
    assert not (p_vars & u_vars), (p_vars, u_vars)
    assert not (u_vars & q_vars), (u_vars, q_vars)
    varmap = parameter_varmap(px, qx)
    # package
    prm = Parameters()
    prm._px = px
    prm._qx = qx
    prm._varmap = varmap
    prm.x_vars = x_vars
    prm.p_vars = p_vars
    prm.q_vars = q_vars
    prm.u_vars = u_vars
    prm.p_to_q = p_to_q
    prm.q_to_p = q_to_p
    prm.p_to_u = p_to_u
    return prm
def setup_lattice(prm, fol):
    """Store the lattice BDDs in `prm`.

    Populates `u_leq_p`, `p_leq_u`, `p_leq_q`, `p_eq_q`, which encode
    the subset partial order and equality between parameterized
    orthotopes.
    """
    log.info('partial order')
    u_leq_p, p_leq_u = partial_order(prm._px, fol)
    log.info('subseteq')
    p_leq_q = subseteq(prm._varmap, fol)
    log.info('eq')
    p_eq_q = eq(prm._varmap, fol)
    pq_vars = prm.p_vars.union(prm.q_vars)
    assert support_issubset(p_leq_q, pq_vars, fol)
    prm.u_leq_p = u_leq_p
    prm.p_leq_u = p_leq_u
    prm.p_leq_q = p_leq_q
    prm.p_eq_q = p_eq_q
def _parameter_table(x, table, a_name, b_name):
    """Return symbol table that defines parameters.

    Supports integer-valued variables only.
    Represent Boolean-valued as 0..1-valued variables.
    Each variable `xj` yields two parameters (`a_xj`, `b_xj`)
    with the same domain as `xj`.
    """
    assert x, x
    d = dict()
    for xj in x:
        dtype = table[xj]['type']
        assert dtype in ('int', 'saturating'), dtype
        dom = table[xj]['dom']
        # strip any prime so parameter names stay valid identifiers
        name = stx._replace_prime(xj)
        aj = '{a}_{v}'.format(a=a_name, v=name)
        bj = '{b}_{v}'.format(b=b_name, v=name)
        d[aj] = tuple(dom)
        d[bj] = tuple(dom)
        assert "'" not in aj, aj
        assert "'" not in bj, bj
    assert len(d) == 2 * len(x), d
    return d
def _map_vars_to_parameters(x_vars, a_name, b_name):
    """Return `dict` that maps each var x to a_x, b_x.

    The result has the shape `{x: dict(a='a_x', b='b_x'), ...}`.
    """
    d = dict()
    for x in x_vars:
        # strip any prime so parameter names stay valid identifiers
        name = stx._replace_prime(x)
        a_x = '{a}_{v}'.format(a=a_name, v=name)
        b_x = '{b}_{v}'.format(b=b_name, v=name)
        d[x] = dict(a=a_x, b=b_x)
    return d
def _map_parameters_to_vars(px):
    """Return map `{a: x, b: x, ...}`."""
    # Invert `px`: both interval parameters point back to their variable.
    mapping = dict()
    for var, bounds in px.items():
        mapping[bounds['a']] = var
        mapping[bounds['b']] = var
    return mapping
def collect_parameters(px):
    """Return `set` of parameters from `px`."""
    params = {d['a'] for d in px.values()}
    params |= {d['b'] for d in px.values()}
    # each variable contributes two distinct parameters
    assert len(params) == 2 * len(px), (params, px)
    return params
def parameter_varmap(px, qx):
    """Return map `{(a, b): (u, v), ... }`."""
    assert set(px) == set(qx), (px, qx)
    # Pair each variable's (a, b) parameters with its (u, v) copies.
    return {
        (bounds['a'], bounds['b']): (qx[var]['a'], qx[var]['b'])
        for var, bounds in px.items()}
def _renaming_between_parameters(px, qx):
    """Return map `{a: u, b: v, ... }`."""
    assert set(px) == set(qx), (px, qx)
    renaming = dict()
    for var, bounds in px.items():
        a, b = bounds['a'], bounds['b']
        u, v = qx[var]['a'], qx[var]['b']
        # parameters must be pairwise distinct within and across copies
        assert a != b, (a, b)
        assert u != v, (u, v)
        assert a != u, (a, u)
        renaming[a] = u
        renaming[b] = v
    return renaming
class Parameters(object):
    """Stores parameters values and lattice definition.

    Plain attribute container populated by `setup_aux_vars`
    (`x_vars`, `p_vars`, `q_vars`, `u_vars`, `p_to_q`, ...) and
    `setup_lattice` (`u_leq_p`, `p_leq_u`, `p_leq_q`, `p_eq_q`).
    """
| [
37811,
14253,
274,
351,
18253,
2546,
526,
15931,
198,
2,
15069,
1584,
12,
7908,
416,
3442,
5136,
286,
8987,
198,
2,
1439,
2489,
10395,
13,
49962,
739,
513,
12,
565,
682,
347,
10305,
13,
198,
2,
198,
11748,
18931,
198,
198,
11748,
29... | 1.895293 | 7,564 |
# Prints which 3rd party libraries are desired for the given configuration.
from components import requiredLibrariesFor
from configurations import getConfiguration
from libraries import allDependencies, librariesByName
from packages import iterDownloadablePackages
# NOTE(review): `main` is called below but is neither defined nor imported
# in this chunk -- confirm it is defined elsewhere in this file.
if __name__ == '__main__':
    import sys
    if len(sys.argv) == 3:
        try:
            # Expects exactly two arguments: TARGET_OS and LINK_MODE.
            main(*sys.argv[1 : ])
        except ValueError as ex:
            print(ex, file=sys.stderr)
            sys.exit(2)
    else:
        print(
            'Usage: python3 3rdparty_libraries.py TARGET_OS LINK_MODE',
            file=sys.stderr
        )
        sys.exit(2)
| [
2,
12578,
82,
543,
513,
4372,
2151,
12782,
389,
10348,
329,
262,
1813,
8398,
13,
198,
198,
6738,
6805,
1330,
2672,
43,
11127,
1890,
198,
6738,
25412,
1330,
651,
38149,
198,
6738,
12782,
1330,
477,
35,
2690,
3976,
11,
12782,
3886,
5376... | 2.840206 | 194 |
import typing
from abc import abstractmethod
from typing import TYPE_CHECKING
from bxcommon.connections.connection_type import ConnectionType
from bxcommon.services.transaction_service import TransactionService
from bxcommon.utils.object_hash import Sha256Hash
from bxgateway.messages.ont.block_ont_message import BlockOntMessage
from bxgateway.messages.ont.get_data_ont_message import GetDataOntMessage
from bxgateway.messages.ont.inventory_ont_message import InventoryOntType
from bxgateway.services.abstract_block_cleanup_service import AbstractBlockCleanupService
from bxgateway.services.ont.ont_block_queuing_service import OntBlockQueuingService
if TYPE_CHECKING:
from bxgateway.connections.ont.ont_gateway_node import OntGatewayNode
from bxutils import logging
logger = logging.get_logger(__name__)
class AbstractOntBlockCleanupService(AbstractBlockCleanupService):
"""
Service for managing block cleanup.
"""
    def __init__(self, node: "OntGatewayNode", network_num: int) -> None:
        """
        Constructor

        :param node: reference to node object
        :param network_num: network number
        """
        # All state lives in the shared base class; this override exists
        # only to narrow the ``node`` type for Ontology gateway nodes.
        super(AbstractOntBlockCleanupService, self).__init__(node=node, network_num=network_num)
@abstractmethod
| [
11748,
19720,
198,
6738,
450,
66,
1330,
12531,
24396,
198,
6738,
19720,
1330,
41876,
62,
50084,
2751,
198,
198,
6738,
275,
87,
11321,
13,
8443,
507,
13,
38659,
62,
4906,
1330,
26923,
6030,
198,
6738,
275,
87,
11321,
13,
30416,
13,
764... | 3.128395 | 405 |
#----------------------------------------------------------------------------
# Created By : Leonardo Citraro leonardo.citraro@epfl.ch
# Date: 2021
# ---------------------------------------------------------------------------
import os
import sys
import json
import yaml
import re
import os
import ast
import glob
import pickle
import numpy as np
__all__ = ["json_read", "json_write", "yaml_read", "yaml_write",
"pickle_read", "pickle_write",
"mkdir", "sort_nicely", "find_files", "dict_keys_to_string",
"dict_keys_from_literal_string"]
def sort_nicely(l):
""" Sort the given list in the way that humans expect.
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]
return sorted(l, key=alphanum_key) | [
2,
10097,
10541,
198,
2,
15622,
2750,
220,
1058,
38083,
15792,
81,
12022,
443,
261,
13109,
13,
47992,
81,
12022,
31,
538,
2704,
13,
354,
198,
2,
7536,
25,
33448,
198,
2,
16529,
32284,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
... | 2.968421 | 285 |
from flask import render_template, request
from . import main
@main.errorhandler(404) | [
6738,
42903,
1330,
8543,
62,
28243,
11,
2581,
198,
6738,
764,
1330,
1388,
198,
198,
31,
12417,
13,
18224,
30281,
7,
26429,
8
] | 3.73913 | 23 |
'''
In this module, we determine if two given strings are anagrams
'''
def is_anagram_sort(string_1, string_2):
'''
Return True if the two given strings are anagrams using sorting
'''
return sorted(string_1) == sorted(string_2)
def is_anagram_counter(string_1, string_2):
'''
Return True if the two given strings are anagrams using Counter
'''
from collections import Counter
return Counter(string_1) == Counter(string_2)
def is_anagram(string_1, string_2):
'''
Return True if the two given strings are anagrams using dictonaries
'''
from collections import defaultdict
if len(string_1) != len(string_2):
return False
char_count = defaultdict(int)
for char in string_1:
char_count[char] += 1
for char in string_2:
char_count[char] -= 1
if char_count[char] < 0:
return False
return True
| [
7061,
6,
198,
818,
428,
8265,
11,
356,
5004,
611,
734,
1813,
13042,
389,
281,
6713,
82,
198,
198,
7061,
6,
198,
198,
4299,
318,
62,
272,
6713,
62,
30619,
7,
8841,
62,
16,
11,
4731,
62,
17,
2599,
198,
197,
7061,
6,
198,
197,
13... | 2.735669 | 314 |
#
# wayne_django_rest copyright © 2020 - all rights reserved
# Created at: 28/10/2020
# By: mauromarini
# License: MIT
# Repository: https://github.com/marinimau/wayne_django_rest
# Credits: @marinimau (https://github.com/marinimau)
#
from rest_framework import serializers
from contents.messages.get_messages import get_messages
from django.conf import settings
messages = get_messages(package=settings.CONTENT_PACKAGES[3])
# ----------------------------------------------------------------------------------------------------------------------
# validate_user
# ----------------------------------------------------------------------------------------------------------------------
| [
2,
198,
2,
220,
220,
835,
710,
62,
28241,
14208,
62,
2118,
6634,
10673,
12131,
532,
477,
2489,
10395,
198,
2,
220,
220,
15622,
379,
25,
2579,
14,
940,
14,
42334,
198,
2,
220,
220,
2750,
25,
285,
2899,
296,
283,
5362,
198,
2,
220... | 4.011429 | 175 |
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread("imori.jpg").astype(np.float)
H,W,C=img.shape
# Nearest Neighbor
a = 1.5
aH = int(a * H)
aW = int(a * W)
y = np.arange(aH).repeat(aW).reshape(aW,aH)
y = (y / a)
x = np.tile(np.arange(aW),(aH,1))
x = (x / a)
fy = np.floor(y).astype(np.int)
fx = np.floor(x).astype(np.int)
dx1 = fx - (x-1)
dx2 = fx - x
dx3 = (x+1) - fx
dx4 = (x+2) - fx
dy1 = fy - (y-1)
dy2 = fy - y
dy3 = (y+1) - fy
dy4 = (y+2) - fy
dxs = [dx1, dx2, dx3, dx4]
dys = [dy1, dy2, dy3, dy4] #ここまではわかる
w_sum = np.zeros((aH, aW, C), dtype=np.float32)
out = np.zeros((aH, aW, C), dtype=np.float32)
for j in range(-1, 3):
for i in range(-1, 3):
ind_x = np.minimum(np.maximum(fx + i, 0), W-1)
ind_y = np.minimum(np.maximum(fy + j, 0), H-1)
#wx=weight((dxs[i+1]**2+dys[j+1]**2)**(1/2))
#wy=weight((dxs[i+1]**2+dys[j+1]**2)**(1/2))
wx = weight(dxs[i+1]) #dxs
wy = weight(dys[j+1])
wx = np.repeat(np.expand_dims(wx, axis=-1), 3, axis=-1)
wy = np.repeat(np.expand_dims(wy, axis=-1), 3, axis=-1)
w_sum += wx * wy
out += wx * wy * img[ind_y, ind_x]
out /= w_sum
out[out>255] = 255
out = out.astype(np.uint8)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.imwrite("question27.jpg", out)
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
9600,
796,
269,
85,
17,
13,
320,
961,
7203,
320,
10145,
13,
9479,
11074,
459,
2981,
7,
37659,
13,
22468,
8,
... | 1.745333 | 750 |
from django.conf import settings
from afriproperty.property.models import City, Property, PropertyCompare
from afriproperty.tips.models import Tip
def settings_context(_request):
"""Settings available by default to the templates context."""
# Note: we intentionally do NOT expose the entire settings
# to prevent accidental leaking of sensitive information
return {
"DEBUG": settings.DEBUG,
"API_KEY": settings.GOOGLE_API_KEY,
"featured_tips": Tip.objects.filter(approved=True, featured=True).exclude(published=False).order_by("-created")[:3],
"lagos": City.objects.get(title__icontains="Lagos"),
"abuja": City.objects.get(title__icontains="Abuja"),
"rivers": City.objects.get(title__icontains="Port Harcourt"),
"ibadan": City.objects.get(title__icontains="Ibadan"),
"featured_properties": Property.objects.filter(featured=True),
"recent_properties": Property.objects.filter(approved=True).order_by('-created')[:10],
"all_properties": Property.objects.filter(approved=True).exclude(property_status=Property.SOLD).order_by("-created"),
"compared_properties": PropertyCompare.objects.all().order_by("-created")[:3],
}
| [
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
198,
6738,
6580,
380,
26745,
13,
26745,
13,
27530,
1330,
2254,
11,
14161,
11,
14161,
41488,
198,
6738,
6580,
380,
26745,
13,
41315,
13,
27530,
1330,
23095,
628,
198,
4299,
6460,
62,
22866,... | 2.866822 | 428 |
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
Qubit Transformations (:mod:`qiskit.chemistry.transformations`)
===============================================================
.. currentmodule:: qiskit.chemistry.transformations
Transformations for both Fermionic and Bosonic operators to qubit operators. Transformation
includes specification of qubit mapping type for example, as well as other options. As part of
the transformation of the main operator other, so called auxiliary operators, may be created to
enable other properties of the result state with the main operator to also be evaluated, such as
total spin for example.
Base Transformation
===================
.. autosummary::
:toctree: ../stubs/
:nosignatures:
Transformation
Fermionic Transformation
========================
.. autosummary::
:toctree: ../stubs/
:nosignatures:
FermionicTransformation
FermionicQubitMappingType
FermionicTransformationType
Bosonic Transformation
======================
.. autosummary::
:toctree: ../stubs/
:nosignatures:
BosonicTransformation
BosonicQubitMappingType
BosonicTransformationType
"""
from .fermionic_transformation import (FermionicTransformation,
FermionicQubitMappingType,
FermionicTransformationType)
from .bosonic_transformation import (BosonicTransformation,
BosonicQubitMappingType,
BosonicTransformationType)
from .transformation import Transformation
__all__ = [
'FermionicTransformation',
'FermionicQubitMappingType',
'FermionicTransformationType',
'BosonicTransformation',
'BosonicQubitMappingType',
'BosonicTransformationType',
'Transformation'
]
| [
2,
770,
2438,
318,
636,
286,
1195,
1984,
270,
13,
198,
2,
198,
2,
357,
34,
8,
15069,
19764,
12131,
13,
198,
2,
198,
2,
770,
2438,
318,
11971,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
13,
921,
743,
198,
2,
7330,
257,
486... | 3.005384 | 743 |
import os
from pathlib import Path
scripts_dir = Path(__file__).parent.absolute()
repo_dir = scripts_dir.parent.absolute()
python_exe = os.path.join(repo_dir, "msanalyzer_venv", "Scripts", "python.exe")
dist_folder = scripts_dir / "dist"
main_py = os.path.join(repo_dir, "local_api.py")
# mpl data
matplotlibrc = scripts_dir / "matplotlibrc"
mpl_data_dir = os.path.join(
repo_dir, "msanalyzer_venv", "Lib", "site-packages", "matplotlib", "mpl-data"
)
mpl_destination = os.path.join(
repo_dir, "scripts", "dist", "local_api", "matplotlib", "mpl-data"
)
# UI
ui_folder = os.path.join(repo_dir, "UI")
ui_release_folder = os.path.join(ui_folder, "release")
ui_exe_folder = os.path.join(ui_release_folder, "win-unpacked")
| [
11748,
28686,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
46521,
62,
15908,
796,
10644,
7,
834,
7753,
834,
737,
8000,
13,
48546,
3419,
198,
260,
7501,
62,
15908,
796,
14750,
62,
15908,
13,
8000,
13,
48546,
3419,
198,
29412,
62,
1349... | 2.477816 | 293 |
"""Training wrapper: script to be executed when running Training Step."""
import json
from logging import INFO, Formatter, StreamHandler, getLogger
from pathlib import Path, PurePath
import click
import joblib
import pandas as pd
from azureml.core import Run
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
def run(input_dir: str, output_dir: str):
"""Run Function.
Args:
input_dir (str): [description]
output_dir (str): [description]
"""
logger.info("TRAINING")
logger.info(f"input dir path: {input_dir}")
logger.info(f"output dir path: {output_dir}")
Path(output_dir).mkdir(parents=True, exist_ok=True)
data = []
input_file = str(PurePath(input_dir, "extraction_output.txt"))
with open(input_file) as i_file:
for line in i_file.readlines():
data.append(json.loads(line))
df = pd.DataFrame.from_records(data)
logger.info(df.describe())
X = df.drop('price', axis=1)
y = df['price']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42)
lin_reg_model = LinearRegression()
lin_reg_model.fit(X_train, y_train)
predict = lin_reg_model.predict(X_test)
logger.info(f'Predicted Value :{predict[0]}')
logger.info(f'Actual Value :{y_test.values[0]}')
model_path = str(PurePath(output_dir, "LinRegModel"))
joblib.dump(value=lin_reg_model, filename=model_path)
run = Run.get_context()
run.upload_file("models", model_path)
run.register_model(model_name="California_Housing_Price_Prediction_Model", model_path="models", description='Generated model in Azure ML')
@click.command()
@click.option("--input_dir", type=str, help="File path of the input", default="/tmp/training_input")
@click.option("--output_dir", type=str, help="File path of the output", default="/tmp/training_output")
def main(input_dir: str, output_dir: str):
"""Execuete run function.
Args:
input_dir (str): File path of the input
output_dir (str):File path of the output
"""
run(input_dir, output_dir)
if __name__ == "__main__":
logger = getLogger(__name__)
logger.setLevel(INFO)
logger.propagate = False
sh = StreamHandler()
sh.setFormatter(Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
logger.addHandler(sh)
main()
| [
37811,
44357,
29908,
25,
4226,
284,
307,
10945,
618,
2491,
13614,
5012,
526,
15931,
198,
198,
11748,
33918,
198,
6738,
18931,
1330,
24890,
11,
5178,
1436,
11,
13860,
25060,
11,
651,
11187,
1362,
198,
6738,
3108,
8019,
1330,
10644,
11,
1... | 2.625137 | 915 |
""" sockets server """
import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as socket_server:
socket_server.bind( ('127.0.0.1', 5025) )
socket_server.listen()
print("server is listening on 127.0.0.1:5025")
conn, addr = socket_server.accept() # accept is the blocking call
print(f"client at {addr[0]}:{addr[1]} connected")
conn.sendall(b"Welcome to 127.0.0.1:5025!")
# conn.sendall("Welcome to 127.0.0.1:5025!".encode("UTF-8"))
while True:
message = conn.recv(2048).decode('UTF-8')
print("recv: " + message)
conn.sendall(message.encode('UTF-8')) | [
37811,
37037,
4382,
37227,
198,
198,
11748,
17802,
198,
198,
4480,
17802,
13,
44971,
7,
44971,
13,
8579,
62,
1268,
2767,
11,
17802,
13,
50,
11290,
62,
2257,
32235,
8,
355,
17802,
62,
15388,
25,
198,
220,
220,
220,
220,
198,
220,
220... | 2.358209 | 268 |
# -*- coding: utf-8 -*-
################################################################################
# #
# Copyright 1997 - 2019 by IXIA Keysight #
# All Rights Reserved. #
# #
################################################################################
################################################################################
# #
# LEGAL NOTICE: #
# ============== #
# The following code and documentation (hereinafter "the script") is an #
# example script for demonstration purposes only. #
# The script is not a standard commercial product offered by Ixia and have #
# been developed and is being provided for use only as indicated herein. The #
# script [and all modifications enhancements and updates thereto (whether #
# made by Ixia and/or by the user and/or by a third party)] shall at all times #
# remain the property of Ixia. #
# #
# Ixia does not warrant (i) that the functions contained in the script will #
# meet the users requirements or (ii) that the script will be without #
# omissions or error-free. #
# THE SCRIPT IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND AND IXIA #
# DISCLAIMS ALL WARRANTIES EXPRESS IMPLIED STATUTORY OR OTHERWISE #
# INCLUDING BUT NOT LIMITED TO ANY WARRANTY OF MERCHANTABILITY AND FITNESS FOR #
# A PARTICULAR PURPOSE OR OF NON-INFRINGEMENT. #
# THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE SCRIPT IS WITH THE #
# USER. #
# IN NO EVENT SHALL IXIA BE LIABLE FOR ANY DAMAGES RESULTING FROM OR ARISING #
# OUT OF THE USE OF OR THE INABILITY TO USE THE SCRIPT OR ANY PART THEREOF #
# INCLUDING BUT NOT LIMITED TO ANY LOST PROFITS LOST BUSINESS LOST OR #
# DAMAGED DATA OR SOFTWARE OR ANY INDIRECT INCIDENTAL PUNITIVE OR #
# CONSEQUENTIAL DAMAGES EVEN IF IXIA HAS BEEN ADVISED OF THE POSSIBILITY OF #
# SUCH DAMAGES IN ADVANCE. #
# Ixia will not be required to provide any software maintenance or support #
# services of any kind (e.g. any error corrections) in connection with the #
# script or any part thereof. The user acknowledges that although Ixia may #
# from time to time and in its sole discretion provide maintenance or support #
# services for the script any such services are subject to the warranty and #
# damages limitations set forth herein and will not obligate Ixia to provide #
# any additional maintenance or support services. #
# #
################################################################################
################################################################################
# #
# Description: #
# This script intends to demonstrate how to use NGPF BGP6 API #
# It will create 2 BGP6 topologies, it will start the emulation and #
# than it will retrieve and display few statistics and Modify the Flow-Spec #
# fields. #
################################################################################
################################################################################
# Utilities #
################################################################################
from pprint import pprint
import sys, os
import time, re
from ixiatcl import IxiaTcl
from ixiahlt import IxiaHlt
from ixiangpf import IxiaNgpf
from ixiaerror import IxiaError
if os.name == 'nt':
# If the Python version is greater than 3.4 call IxiaTcl with
# the Tcl 8.6 path.
# Example: tcl_dependencies = ['/path/to/tcl8.6'];
# ixiatcl = IxiaTcl(tcl_autopath=tcl_dependencies)
ixiatcl = IxiaTcl()
else:
# unix dependencies this may change accoring to your system. This is
# required to make following packages available to ixiatcl object.
# 1. Tclx --> mandatory
# 2. msgcat --> mandatory
# 3. mpexpr --> optional
tcl_dependencies = [
'/usr/local/lib/',
'/usr/lib/',
'/usr/share/tcl8.5',
'/usr/lib/tcl8.5',
'/usr/lib/tk8.5',
'/usr/share/tk8.5',
]
ixiatcl = IxiaTcl(tcl_autopath=tcl_dependencies)
# endif
ixiahlt = IxiaHlt(ixiatcl)
ixiangpf = IxiaNgpf(ixiahlt)
try:
ErrorHandler('', {})
except (NameError,):
###############################################################################
# Specify your chassis/card port and IxNetwork client here
###############################################################################
chassis_ip = "10.39.50.122"
tcl_server = "10.39.50.122"
ixnetwork_tcl_server = "10.39.43.12:8009"
port_list = "1/7 1/8"
cfgErrors = 0
print("Connecting to chassis and client")
connect_result = ixiangpf.connect(
ixnetwork_tcl_server = ixnetwork_tcl_server,
tcl_server = tcl_server,
device = chassis_ip,
port_list = port_list,
break_locks = 1,
reset = 1,
)
if connect_result['status'] != '1':
ErrorHandler('connect', connect_result)
#Retrieving the port handles, in a list
ports = connect_result['vport_list'].split()
################################################################################
# Creating topology and device group #
################################################################################
# Creating a topology on first port
print("Adding topology 1 on port 1")
topology_1_status = ixiangpf.topology_config(
topology_name = """BGP6 Topology 1""",
port_handle = ports[0],
)
if topology_1_status['status'] != IxiaHlt.SUCCESS:
ErrorHandler('topology_config',topology_1_status)
topology_1_handle = topology_1_status['topology_handle']
# Creating a device group in BGP topology1
print("Creating device group 1 in topology 1")
device_group_1_status = ixiangpf.topology_config(
topology_handle = topology_1_handle,
device_group_name = """BGP6 Topology 1 Router""",
device_group_multiplier = "1",
device_group_enabled = "1",
)
if device_group_1_status['status'] != IxiaHlt.SUCCESS:
ErrorHandler('topology_config', device_group_1_status)
deviceGroup_1_handle = device_group_1_status['device_group_handle']
# Creating a topology on second port
print("Adding topology 2 on port 2")
topology_2_status = ixiangpf.topology_config(
topology_name = """BGP6 Topology 2""",
port_handle = ports[1],
)
if topology_2_status['status'] != IxiaHlt.SUCCESS:
ErrorHandler('topology_config', topology_2_status)
topology_2_handle = topology_2_status['topology_handle']
# Creating a device group in BGP topology2
print("Creating device group 2 in topology 2")
device_group_2_status = ixiangpf.topology_config(
topology_handle = topology_2_handle,
device_group_name = """BGP6 Topology 2 Router""",
device_group_multiplier = "1",
device_group_enabled = "1",
)
if device_group_2_status['status'] != IxiaHlt.SUCCESS:
ErrorHandler('topology_config', device_group_2_status)
deviceGroup_2_handle = device_group_2_status['device_group_handle']
################################################################################
# Configure protocol interfaces #
################################################################################
# Creating Ethernet stack for the first Device Group
print("Creating Ethernet stack for the first Device Group")
ethernet_1_status= ixiangpf.interface_config(
protocol_name = """Ethernet 1""",
protocol_handle = deviceGroup_1_handle,
mtu = "1500",
src_mac_addr = "18.03.73.c7.6c.b1",
src_mac_addr_step = "00.00.00.00.00.00",
vlan = "0",
vlan_id_count = '%s' % ("0"),
use_vpn_parameters = "0",
site_id = "0",
)
if ethernet_1_status['status'] != IxiaHlt.SUCCESS:
ErrorHandler('interface_config', ethernet_1_status)
ethernet_1_handle = ethernet_1_status['ethernet_handle']
# Creating Ethernet stack for the second Device Group
print("Creating Ethernet for the second Device Group")
ethernet_2_status = ixiangpf.interface_config(
protocol_name = """Ethernet 2""",
protocol_handle = deviceGroup_2_handle,
mtu = "1500",
src_mac_addr = "18.03.73.c7.6c.01",
src_mac_addr_step = "00.00.00.00.00.00",
vlan = "0",
vlan_id_count = '%s' % ("0"),
use_vpn_parameters = "0",
site_id = "0",
)
if ethernet_2_status['status'] != IxiaHlt.SUCCESS:
ErrorHandler('interface_config', ethernet_2_status)
ethernet_2_handle = ethernet_2_status['ethernet_handle']
# Creating IPv6 Stack on top of Ethernet Stack for the first Device Group
print("Creating IPv6 Stack on top of Ethernet 1 Stack for the first Device Group")
ipv6_1_status = ixiangpf.interface_config(
protocol_name = """IPv6 1""",
protocol_handle = ethernet_1_handle,
ipv6_multiplier = "1",
ipv6_resolve_gateway = "1",
ipv6_manual_gateway_mac = "00.00.00.00.00.01",
ipv6_manual_gateway_mac_step = "00.00.00.00.00.00",
ipv6_gateway = "2000:0:0:1:0:0:0:1",
ipv6_gateway_step = "::0",
ipv6_intf_addr = "2000:0:0:1:0:0:0:2",
ipv6_intf_addr_step = "::0",
ipv6_prefix_length = "64",
)
if ipv6_1_status['status'] != IxiaHlt.SUCCESS:
ErrorHandler('interface_config', ipv6_1_status)
ipv6_1_handle = ipv6_1_status['ipv6_handle']
# Creating IPv6 Stack on top of Ethernet 1 Stack for the second Device Group
print("Creating IPv6 2 stack on Ethernet 2 stack for the second Device Group")
ipv6_2_status = ixiangpf.interface_config(
protocol_name = """IPv6 2""",
protocol_handle = ethernet_2_handle,
ipv6_multiplier = "1",
ipv6_resolve_gateway = "1",
ipv6_manual_gateway_mac = "00.00.00.00.00.01",
ipv6_manual_gateway_mac_step = "00.00.00.00.00.00",
ipv6_gateway = "2000:0:0:1:0:0:0:2",
ipv6_gateway_step = "::0",
ipv6_intf_addr = "2000:0:0:1:0:0:0:1",
ipv6_intf_addr_step = "::0",
ipv6_prefix_length = "64",
)
if ipv6_2_status['status'] != IxiaHlt.SUCCESS:
ErrorHandler('interface_config', ipv6_2_status)
ipv6_2_handle = ipv6_2_status['ipv6_handle']
################################################################################
# BGP6 protocol configurations On Top Of IPv6 Stack at Peer1 side #
################################################################################
# This will Create BGP6 Stack on top of IPv6 Stack of Topology1
print("Creating BGP6 Stack on top of IPv6 1 stack on Topology 1")
bgp_v6_interface_1_status = ixiangpf.emulation_bgp_config(
mode = "enable",
active = "1",
md5_enable = "0",
md5_key = "Ixia",
handle = ipv6_1_handle,
ip_version = "6",
remote_ipv6_addr = "2000:0:0:1:0:0:0:1",
next_hop_enable = "0",
next_hop_ip = "0.0.0.0",
enable_4_byte_as = "0",
local_as = "0",
local_as4 = "0",
update_interval = "0",
count = "1",
local_router_id = "192.0.0.1",
local_router_id_step = "0.0.0.0",
hold_time = "90",
neighbor_type = "internal",
graceful_restart_enable = "0",
restart_time = "45",
stale_time = "0",
tcp_window_size = "8192",
local_router_id_enable = "1",
ipv4_capability_mdt_nlri = "0",
ipv4_capability_unicast_nlri = "1",
ipv4_filter_unicast_nlri = "1",
ipv4_capability_multicast_nlri = "1",
ipv4_filter_multicast_nlri = "0",
ipv4_capability_mpls_nlri = "1",
ipv4_filter_mpls_nlri = "0",
ipv4_capability_mpls_vpn_nlri = "1",
ipv4_filter_mpls_vpn_nlri = "0",
ipv6_capability_unicast_nlri = "1",
ipv6_filter_unicast_nlri = "1",
ipv6_capability_multicast_nlri = "1",
ipv6_filter_multicast_nlri = "0",
ipv6_capability_mpls_nlri = "1",
ipv6_filter_mpls_nlri = "0",
ipv6_capability_mpls_vpn_nlri = "1",
ipv6_filter_mpls_vpn_nlri = "0",
capability_route_refresh = "1",
capability_route_constraint = "0",
ttl_value = "64",
updates_per_iteration = "1",
bfd_registration = "0",
bfd_registration_mode = "multi_hop",
vpls_capability_nlri = "1",
vpls_filter_nlri = "0",
act_as_restarted = "0",
discard_ixia_generated_routes = "0",
flap_down_time = "0",
local_router_id_type = "same",
enable_flap = "0",
send_ixia_signature_with_routes = "0",
flap_up_time = "0",
ipv4_capability_multicast_vpn_nlri = "0",
ipv4_filter_multicast_vpn_nlri = "0",
ipv6_capability_multicast_vpn_nlri = "0",
ipv6_filter_multicast_vpn_nlri = "0",
advertise_end_of_rib = "0",
configure_keepalive_timer = "0",
keepalive_timer = "30",
as_path_set_mode = "no_include",
router_id = "192.0.0.1",
filter_link_state = "0",
capability_linkstate_nonvpn = "0",
bgp_ls_id = "0",
instance_id = "0",
number_of_communities = "1",
enable_community = "0",
number_of_ext_communities = "1",
enable_ext_community = "0",
enable_override_peer_as_set_mode = "0",
bgp_ls_as_set_mode = "include_as_seq",
number_of_as_path_segments = "1",
enable_as_path_segments = "1",
number_of_clusters = "1",
enable_cluster = "0",
ethernet_segments_count = "0",
filter_evpn = "0",
evpn = "0",
operational_model = "symmetric",
routers_mac_or_irb_mac_address = "00:01:03:00:00:01",
capability_ipv4_unicast_add_path = "0",
capability_ipv6_unicast_add_path = "0",
ipv4_mpls_add_path_mode = "both",
ipv6_mpls_add_path_mode = "both",
ipv4_unicast_add_path_mode = "both",
ipv6_unicast_add_path_mode = "both",
ipv4_mpls_capability = "0",
ipv6_mpls_capability = "0",
capability_ipv4_mpls_add_path = "0",
capability_ipv6_mpls_add_path = "0",
custom_sid_type = "40",
srgb_count = "1",
start_sid = ["16000"],
sid_count = ["8000"],
ipv4_multiple_mpls_labels_capability = "0",
ipv6_multiple_mpls_labels_capability = "0",
mpls_labels_count_for_ipv4_mpls_route = "1",
mpls_labels_count_for_ipv6_mpls_route = "1",
noOfUserDefinedAfiSafi = "0",
capability_ipv4_unicast_flowSpec = "1",
filter_ipv4_unicast_flowSpec = "1",
capability_ipv6_unicast_flowSpec = "1",
filter_ipv6_unicast_flowSpec = "1",
always_include_tunnel_enc_ext_community = "false",
ip_vrf_to_ip_vrf_type = "interfacefullWithUnnumberedCorefacingIRB",
irb_interface_label = "16",
irb_ipv6_address = "10:0:0:0:0:0:0:1",
)
if bgp_v6_interface_1_status['status'] != IxiaHlt.SUCCESS:
ErrorHandler('emulation_bgp_config', bgp_v6_interface_1_status)
bgpInterface_1_handle = bgp_v6_interface_1_status['bgp_handle']
################################################################################
# BGP FLOW SPEC configurations AT PEER1 Side on Top of BGP6 Stack #
################################################################################
# This will Create BGP IPv6 Flow spec on top of BGP6 Stack of Topology1
print("Creating BGP IPv6 Flow Spec on top of BGP6 stack on Topology 1")
bgpFlowSpecRangeList_v6_interface_1_status = ixiangpf.emulation_bgp_flow_spec_config(
mode = "enable",
fs_mode = "fsv6",
handle = bgpInterface_1_handle,
no_of_flowSpecRangeV6 = "1",
active = "1",
flowSpecName = """BGP Flow Spec 11-1""",
fsv6_enableDestPrefix = "1",
fsv6_destPrefix = "1:0:0:0:0:0:1:1",
fsv6_destPrefixLength = "64",
fsv6_destPrefixOffset = "34",
fsv6_enableSrcPrefix = "1",
fsv6_srcPrefix = "1:1:0:0:0:0:0:1",
fsv6_srcPrefixLength = "80",
fsv6_srcPrefixOffset = "48",
fsv6_nextHeader = "120",
portMatch = "10",
destPortMatch = "40",
srcPortMatch = "50",
icmpTypeMatch = "80",
icmpCodeMatch = "90",
tcpFlagsMatch = "(cwr)",
ipPacketMatch = "110",
dscpMatch = "10",
fsv6_fragmentMatch = "(lf)",
fsv6_flowLabel = "40",
enable_traffic_rate = "1",
trafficRate = "1000",
enable_trafficAction = "1",
terminalAction = "1",
trafficActionSample = "1",
enable_redirect = "1",
redirect_ext_communities_type = "rdIPv4",
as_2_bytes = "100",
as_4_bytes = "400",
fsv6_ipv6 = "1:1:0:0:0:0:0:1",
assigned_number_2_octets = "500",
assigned_number_4_octets = "800",
Cbit = "1",
nextHop = "1.1.1.1",
enable_trafficMarking = "1",
dscp = "10",
fsv6_enable_redirectIPv6 = "1",
fsv6_redirectIPv6 = "1:1:0:0:0:0:0:1",
enable_next_hop = "1",
set_next_hop = "sameaslocalip",
set_next_hop_ip_type = "ipv4",
ipv4_next_hop = "10.10.10.10",
ipv6_next_hop = "a:0:0:0:0:0:0:b",
enable_origin = "1",
origin = "igp",
enable_local_preference = "1",
local_preference = "100",
enable_multi_exit_discriminator = "1",
multi_exit_discriminator = "300",
enable_atomic_aggregate = "1",
enable_aggregator_id = "1",
aggregator_id = "2.2.2.2",
aggregator_as = "200",
enable_originator_id = "1",
originator_id = "6.6.6.6",
enable_community = "1",
number_of_communities = "1",
community_type = ["no_export"],
community_as_number = ["123"],
community_last_two_octets = ["234"],
enable_ext_community = "1",
number_of_ext_communities = "1",
ext_communities_type = ["admin_as_two_octet"],
ext_communities_subtype = ["route_target"],
ext_community_as_number = ["123"],
ext_community_target_assigned_number_4_octets = ["1"],
ext_community_ip = ["1.1.1.1"],
ext_community_as_4_bytes = ["1"],
ext_community_target_assigned_number_2_octets = ["1"],
ext_community_opaque_data = ["aa"],
ext_community_colorCObits = ["00"],
ext_community_colorReservedBits = ["123"],
ext_community_colorValue = ["1234"],
ext_community_linkBandwidth = ["1000"],
enable_override_peer_as_set_mode = "1",
as_path_set_mode = "include_as_seq",
enable_as_path_segments = "1",
no_of_as_path_segments = "1",
enable_as_path_segment = ["1"],
as_path_segment_type = ["as_set"],
number_of_as_number_in_segment = ["1"],
as_path_segment_enable_as_number = ["1"],
as_path_segment_as_number = ["100"],
enable_cluster = "1",
no_of_clusters = "1",
cluster_id = ["1.2.3.4"],
)
if bgpFlowSpecRangeList_v6_interface_1_status['status'] != IxiaHlt.SUCCESS:
ixnHLT_errorHandler('emulation_bgp_flow_spec_config', bgpFlowSpecRangeList_v6_interface_1_status)
bgpFlowSpecRangesListV6_1_handle = bgpFlowSpecRangeList_v6_interface_1_status['bgp_flowSpecV6_handle']
################################################################################
# BGP protocol configurations On Top Of IPv6 Stack at Peer2 side #
################################################################################
# This will Create BGP6 Stack on top of IPv6 Stack of Topology2
print("Creating BGP6 Stack on top of IPv6 1 stack on Topology 2")
bgp_v6_interface_2_status = ixiangpf.emulation_bgp_config(
mode = "enable",
active = "1",
md5_enable = "0",
md5_key = "Ixia",
handle = ipv6_2_handle,
ip_version = "6",
remote_ipv6_addr = "2000:0:0:1:0:0:0:2",
next_hop_enable = "0",
next_hop_ip = "0.0.0.0",
enable_4_byte_as = "0",
local_as = "0",
local_as4 = "0",
update_interval = "0",
count = "1",
local_router_id = "193.0.0.1",
local_router_id_step = "0.0.0.0",
hold_time = "90",
neighbor_type = "internal",
graceful_restart_enable = "0",
restart_time = "45",
stale_time = "0",
tcp_window_size = "8192",
local_router_id_enable = "1",
ipv4_capability_mdt_nlri = "0",
ipv4_capability_unicast_nlri = "1",
ipv4_filter_unicast_nlri = "1",
ipv4_capability_multicast_nlri = "1",
ipv4_filter_multicast_nlri = "0",
ipv4_capability_mpls_nlri = "1",
ipv4_filter_mpls_nlri = "0",
ipv4_capability_mpls_vpn_nlri = "1",
ipv4_filter_mpls_vpn_nlri = "0",
ipv6_capability_unicast_nlri = "1",
ipv6_filter_unicast_nlri = "1",
ipv6_capability_multicast_nlri = "1",
ipv6_filter_multicast_nlri = "0",
ipv6_capability_mpls_nlri = "1",
ipv6_filter_mpls_nlri = "0",
ipv6_capability_mpls_vpn_nlri = "1",
ipv6_filter_mpls_vpn_nlri = "0",
capability_route_refresh = "1",
capability_route_constraint = "0",
ttl_value = "64",
updates_per_iteration = "1",
bfd_registration = "0",
bfd_registration_mode = "multi_hop",
vpls_capability_nlri = "1",
vpls_filter_nlri = "0",
act_as_restarted = "0",
discard_ixia_generated_routes = "0",
flap_down_time = "0",
local_router_id_type = "same",
enable_flap = "0",
send_ixia_signature_with_routes = "0",
flap_up_time = "0",
ipv4_capability_multicast_vpn_nlri = "0",
ipv4_filter_multicast_vpn_nlri = "0",
ipv6_capability_multicast_vpn_nlri = "0",
ipv6_filter_multicast_vpn_nlri = "0",
advertise_end_of_rib = "0",
configure_keepalive_timer = "0",
keepalive_timer = "30",
as_path_set_mode = "no_include",
router_id = "193.0.0.1",
filter_link_state = "0",
capability_linkstate_nonvpn = "0",
bgp_ls_id = "0",
instance_id = "0",
number_of_communities = "1",
enable_community = "0",
number_of_ext_communities = "1",
enable_ext_community = "0",
enable_override_peer_as_set_mode = "0",
bgp_ls_as_set_mode = "include_as_seq",
number_of_as_path_segments = "1",
enable_as_path_segments = "1",
number_of_clusters = "1",
enable_cluster = "0",
ethernet_segments_count = "0",
filter_evpn = "0",
evpn = "0",
operational_model = "symmetric",
routers_mac_or_irb_mac_address = "00:01:04:00:00:01",
capability_ipv4_unicast_add_path = "0",
capability_ipv6_unicast_add_path = "0",
ipv4_mpls_add_path_mode = "both",
ipv6_mpls_add_path_mode = "both",
ipv4_unicast_add_path_mode = "both",
ipv6_unicast_add_path_mode = "both",
ipv4_mpls_capability = "0",
ipv6_mpls_capability = "0",
capability_ipv4_mpls_add_path = "0",
capability_ipv6_mpls_add_path = "0",
custom_sid_type = "40",
srgb_count = "1",
start_sid = ["16000"],
sid_count = ["8000"],
ipv4_multiple_mpls_labels_capability = "0",
ipv6_multiple_mpls_labels_capability = "0",
mpls_labels_count_for_ipv4_mpls_route = "1",
mpls_labels_count_for_ipv6_mpls_route = "1",
noOfUserDefinedAfiSafi = "0",
capability_ipv4_unicast_flowSpec = "1",
filter_ipv4_unicast_flowSpec = "1",
capability_ipv6_unicast_flowSpec = "1",
filter_ipv6_unicast_flowSpec = "1",
always_include_tunnel_enc_ext_community = "false",
ip_vrf_to_ip_vrf_type = "interfacefullWithUnnumberedCorefacingIRB",
irb_interface_label = "16",
irb_ipv6_address = "10:0:0:0:0:0:0:1",
)
if bgp_v6_interface_2_status['status'] != IxiaHlt.SUCCESS:
ErrorHandler('emulation_bgp_config', bgp_v6_interface_2_status)
bgpInterface_2_handle = bgp_v6_interface_2_status['bgp_handle']
################################################################################
# BGP IPv6 FLOW SPEC configurations AT PEER1 Side on Top of BGP6 Stack #
################################################################################
# This will Create BGP IPv6 Flow spec on top of BGP6 Stack of Topology2
print("Creating BGP IPv6 Flow Spec on top of BGP6 stack on Topology 2")
bgpFlowSpecRangeList_v6_interface_2_status = ixiangpf.emulation_bgp_flow_spec_config(
mode = "enable",
fs_mode = "fsv6",
handle = bgpInterface_2_handle,
no_of_flowSpecRangeV6 = "1",
active = "1",
flowSpecName = """BGP Flow Spec 11-1""",
fsv6_enableDestPrefix = "1",
fsv6_destPrefix = "1:0:0:0:0:0:1:1",
fsv6_destPrefixLength = "64",
fsv6_destPrefixOffset = "34",
fsv6_enableSrcPrefix = "1",
fsv6_srcPrefix = "1:1:0:0:0:0:0:1",
fsv6_srcPrefixLength = "96",
fsv6_srcPrefixOffset = "64",
fsv6_nextHeader = "120",
portMatch = "20",
destPortMatch = "30",
srcPortMatch = "60",
icmpTypeMatch = "70",
icmpCodeMatch = "100",
tcpFlagsMatch = "(fin)",
ipPacketMatch = "120",
dscpMatch = "20",
fsv6_fragmentMatch = "(ff)",
fsv6_flowLabel = "30",
enable_traffic_rate = "1",
trafficRate = "2000",
enable_trafficAction = "1",
terminalAction = "1",
trafficActionSample = "1",
enable_redirect = "1",
redirect_ext_communities_type = "rdIPv4",
as_2_bytes = "200",
as_4_bytes = "300",
fsv6_ipv6 = "1:1:0:0:0:0:0:1",
assigned_number_2_octets = "600",
assigned_number_4_octets = "700",
Cbit = "1",
nextHop = "1.1.1.1",
enable_trafficMarking = "1",
dscp = "20",
fsv6_enable_redirectIPv6 = "1",
fsv6_redirectIPv6 = "1:1:0:0:0:0:0:1",
enable_next_hop = "1",
set_next_hop = "manually",
set_next_hop_ip_type = "ipv6",
ipv4_next_hop = "11.11.11.11",
ipv6_next_hop = "c:0:0:0:0:0:0:d",
enable_origin = "1",
origin = "igp",
enable_local_preference = "1",
local_preference = "200",
enable_multi_exit_discriminator = "1",
multi_exit_discriminator = "400",
enable_atomic_aggregate = "1",
enable_aggregator_id = "1",
aggregator_id = "3.3.3.3",
aggregator_as = "300",
enable_originator_id = "1",
originator_id = "7.7.7.7",
enable_community = "1",
number_of_communities = "1",
community_type = ["no_export"],
community_as_number = ["321"],
community_last_two_octets = ["432"],
enable_ext_community = "1",
number_of_ext_communities = "1",
ext_communities_type = ["admin_as_two_octet"],
ext_communities_subtype = ["route_target"],
ext_community_as_number = ["1"],
ext_community_target_assigned_number_4_octets = ["1"],
ext_community_ip = ["1.1.1.1"],
ext_community_as_4_bytes = ["1"],
ext_community_target_assigned_number_2_octets = ["1"],
ext_community_opaque_data = ["bb"],
ext_community_colorCObits = ["00"],
ext_community_colorReservedBits = ["214"],
ext_community_colorValue = ["567"],
ext_community_linkBandwidth = ["2000"],
enable_override_peer_as_set_mode = "1",
as_path_set_mode = "include_as_seq",
enable_as_path_segments = "1",
no_of_as_path_segments = "1",
enable_as_path_segment = ["1"],
as_path_segment_type = ["as_set"],
number_of_as_number_in_segment = ["1"],
as_path_segment_enable_as_number = ["1"],
as_path_segment_as_number = ["200"],
enable_cluster = "1",
no_of_clusters = "1",
cluster_id = ["5.6.7.8"],
)
if bgpFlowSpecRangeList_v6_interface_2_status['status'] != IxiaHlt.SUCCESS:
ixnHLT_errorHandler('emulation_bgp_flow_spec_config', bgpFlowSpecRangeList_v6_interface_2_status)
bgpFlowSpecRangesListV6_2_handle = bgpFlowSpecRangeList_v6_interface_2_status['bgp_flowSpecV6_handle']
#####################################################################################
#Modifying the value of Flow Spec Field of BGP6 PEER1 and PEER2
#####################################################################################
time.sleep(10)
print "wait for 10Sec"
print "After 5 secs Modify the value of Flow-Spec fields of BGP IPv6 Flow SPec Range of BGP6 PEER1"
time.sleep(5)
print "Modifying the value of Flow-Spec fields of BGP IPv6 FLOW SPEC RANGE of BGP6 PEER1"
bgpFlowSpecRangeList_v6_interface_1_status = ixiangpf.emulation_bgp_flow_spec_config(
mode = "modify",
fs_mode = "fsv6",
handle = bgpFlowSpecRangesListV6_1_handle,
no_of_flowSpecRangeV6 = "1",
active = "1",
flowSpecName = """BGP Flow Spec 11-1""",
fsv6_enableDestPrefix = "1",
fsv6_destPrefix = "1a:b0:c0:d0:ef0:120:134:1",
fsv6_destPrefixLength = "64",
fsv6_destPrefixOffset = "34",
fsv6_enableSrcPrefix = "1",
fsv6_srcPrefix = "123:145:0675:0876:0abc:0def:0:1",
fsv6_srcPrefixLength = "80",
fsv6_srcPrefixOffset = "48",
fsv6_nextHeader = "120",
portMatch = "10",
destPortMatch = "40",
srcPortMatch = "50",
icmpTypeMatch = "80",
icmpCodeMatch = "90",
tcpFlagsMatch = "(syn)",
ipPacketMatch = "110",
dscpMatch = "10",
fsv6_fragmentMatch = "(ff)",
fsv6_flowLabel = "40",
enable_traffic_rate = "1",
trafficRate = "1000",
enable_trafficAction = "1",
terminalAction = "1",
trafficActionSample = "1",
enable_redirect = "1",
redirect_ext_communities_type = "rdIPv4",
as_2_bytes = "100",
as_4_bytes = "400",
fsv6_ipv6 = "1bc:1de:0f3:120:340:560:0:1",
assigned_number_2_octets = "500",
assigned_number_4_octets = "800",
Cbit = "1",
nextHop = "1.1.1.1",
enable_trafficMarking = "1",
dscp = "10",
fsv6_enable_redirectIPv6 = "1",
fsv6_redirectIPv6 = "1a:1b:0cd:0ef:230:450:670:1",
enable_next_hop = "1",
set_next_hop = "sameaslocalip",
set_next_hop_ip_type = "ipv4",
ipv4_next_hop = "150.160.170.180",
ipv6_next_hop = "a:a:b:c:d:e:f:b",
enable_origin = "1",
origin = "igp",
enable_local_preference = "1",
local_preference = "100",
enable_multi_exit_discriminator = "1",
multi_exit_discriminator = "300",
enable_atomic_aggregate = "1",
enable_aggregator_id = "1",
aggregator_id = "2.2.2.2",
aggregator_as = "200",
enable_originator_id = "1",
originator_id = "66.67.68.69",
enable_community = "1",
number_of_communities = "1",
community_type = ["no_export"],
community_as_number = ["123"],
community_last_two_octets = ["234"],
enable_ext_community = "1",
number_of_ext_communities = "1",
ext_communities_type = ["admin_as_two_octet"],
ext_communities_subtype = ["route_target"],
ext_community_as_number = ["123"],
ext_community_target_assigned_number_4_octets = ["1"],
ext_community_ip = ["1.1.1.1"],
ext_community_as_4_bytes = ["1"],
ext_community_target_assigned_number_2_octets = ["1"],
ext_community_opaque_data = ["aa"],
ext_community_colorCObits = ["00"],
ext_community_colorReservedBits = ["123"],
ext_community_colorValue = ["1234"],
ext_community_linkBandwidth = ["1000"],
enable_override_peer_as_set_mode = "1",
as_path_set_mode = "include_as_seq",
enable_as_path_segments = "1",
no_of_as_path_segments = "1",
enable_as_path_segment = ["1"],
as_path_segment_type = ["as_set"],
number_of_as_number_in_segment = ["1"],
as_path_segment_enable_as_number = ["1"],
as_path_segment_as_number = ["100"],
enable_cluster = "1",
no_of_clusters = "1",
cluster_id = ["11.22.33.45"],
)
if bgpFlowSpecRangeList_v6_interface_1_status['status'] != IxiaHlt.SUCCESS:
ixnHLT_errorHandler('emulation_bgp_flow_spec_config', bgpFlowSpecRangeList_v6_interface_1_status)
time.sleep(5)
print "After 5 secs Modify the value of Flow-Spec fields of BGP IPv6 Flow SPec Range of BGP6 PEER2"
time.sleep(5)
print "Modifying the value of Flow-Spec fields of BGP IPv6 FLOW SPEC RANGE of BGP6 PEER2"
bgpFlowSpecRangeList_v6_interface_2_status = ixiangpf.emulation_bgp_flow_spec_config(
mode = "modify",
fs_mode = "fsv6",
handle = bgpFlowSpecRangesListV6_2_handle,
no_of_flowSpecRangeV6 = "1",
active = "1",
flowSpecName = """BGP Flow Spec 11-1""",
fsv6_enableDestPrefix = "1",
fsv6_destPrefix = "1:a0:b0:c0:d0:e0:1f:1",
fsv6_destPrefixLength = "64",
fsv6_destPrefixOffset = "34",
fsv6_enableSrcPrefix = "1",
fsv6_srcPrefix = "1:1:a0:4540:560:5650:780:1",
fsv6_srcPrefixLength = "96",
fsv6_srcPrefixOffset = "64",
fsv6_nextHeader = "120",
portMatch = "20",
destPortMatch = "30",
srcPortMatch = "60",
icmpTypeMatch = "70",
icmpCodeMatch = "100",
tcpFlagsMatch = "(rst)",
ipPacketMatch = "120",
dscpMatch = "20",
fsv6_fragmentMatch = "(lf)",
fsv6_flowLabel = "30",
enable_traffic_rate = "1",
trafficRate = "2220",
enable_trafficAction = "1",
terminalAction = "1",
trafficActionSample = "1",
enable_redirect = "1",
redirect_ext_communities_type = "rdIPv4",
as_2_bytes = "200",
as_4_bytes = "300",
fsv6_ipv6 = "1:1:0:0:0:0:0:1",
assigned_number_2_octets = "600",
assigned_number_4_octets = "700",
Cbit = "1",
nextHop = "13.14.15.17",
enable_trafficMarking = "1",
dscp = "20",
fsv6_enable_redirectIPv6 = "1",
fsv6_redirectIPv6 = "1:1:0a:0b:0c:0d:0e:1",
enable_next_hop = "1",
set_next_hop = "manually",
set_next_hop_ip_type = "ipv6",
ipv4_next_hop = "11.11.11.11",
ipv6_next_hop = "c:0:0:0:0:0:0:d",
enable_origin = "1",
origin = "igp",
enable_local_preference = "1",
local_preference = "200",
enable_multi_exit_discriminator = "1",
multi_exit_discriminator = "400",
enable_atomic_aggregate = "1",
enable_aggregator_id = "1",
aggregator_id = "3.3.3.3",
aggregator_as = "300",
enable_originator_id = "1",
originator_id = "7.7.7.7",
enable_community = "1",
number_of_communities = "1",
community_type = ["no_export"],
community_as_number = ["321"],
community_last_two_octets = ["432"],
enable_ext_community = "1",
number_of_ext_communities = "1",
ext_communities_type = ["admin_as_two_octet"],
ext_communities_subtype = ["route_target"],
ext_community_as_number = ["1"],
ext_community_target_assigned_number_4_octets = ["1"],
ext_community_ip = ["1.1.1.1"],
ext_community_as_4_bytes = ["1"],
ext_community_target_assigned_number_2_octets = ["1"],
ext_community_opaque_data = ["bb"],
ext_community_colorCObits = ["00"],
ext_community_colorReservedBits = ["214"],
ext_community_colorValue = ["567"],
ext_community_linkBandwidth = ["2000"],
enable_override_peer_as_set_mode = "1",
as_path_set_mode = "include_as_seq",
enable_as_path_segments = "1",
no_of_as_path_segments = "1",
enable_as_path_segment = ["1"],
as_path_segment_type = ["as_set"],
number_of_as_number_in_segment = ["1"],
as_path_segment_enable_as_number = ["1"],
as_path_segment_as_number = ["200"],
enable_cluster = "1",
no_of_clusters = "1",
cluster_id = ["55.66.77.89"],
)
if bgpFlowSpecRangeList_v6_interface_2_status['status'] != IxiaHlt.SUCCESS:
ixnHLT_errorHandler('emulation_bgp_flow_spec_config', bgpFlowSpecRangeList_v6_interface_2_status)
############################################################################
# Start BGP6 protocol #
############################################################################
print("Waiting 5 seconds before starting protocol(s) ...")
time.sleep(5)
_result_ = ixiangpf.test_control(action='start_all_protocols')
if _result_['status'] != IxiaHlt.SUCCESS:
ErrorHandler('test_control', _result_)
print("Waiting for 60 seconds")
time.sleep(60)
############################################################################
# Retrieve protocol statistics #
############################################################################
print("Fetching BGP aggregated statistics")
protostats = ixiangpf.emulation_bgp_info(\
handle = bgpInterface_1_handle,
mode = 'stats_per_device_group')
if protostats['status'] != IxiaHlt.SUCCESS:
ErrorHandler('emulation_bgp_info', protostats)
pprint(protostats)
############################################################################
# Retrieve Learned Info #
############################################################################
print("Fetching BGP Learned Info")
learned_info = ixiangpf.emulation_bgp_info(\
handle = bgpInterface_1_handle,
mode = 'learned_info');
if learned_info['status'] != IxiaHlt.SUCCESS:
ErrorHandler('emulation_bgp_info', learned_info)
pprint(learned_info)
############################################################################
# Stop all protocols #
############################################################################
print("Stopping all protocols")
_result_ = ixiangpf.test_control(action='stop_all_protocols')
if _result_['status'] != IxiaHlt.SUCCESS:
ErrorHandler('test_control', _result_)
time.sleep(2)
print("!!! Test Script Ends !!!")
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
29113,
29113,
14468,
201,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.595089 | 35,025 |
from .models import ReviewRating, Brand
| [
6738,
764,
27530,
1330,
6602,
29321,
11,
13512,
628,
198
] | 4.2 | 10 |
from datetime import datetime, timedelta
from typing import Callable
from aiohttp import web
from neuro_sdk import Bucket, Client, Cluster
from tests import _TestServerFactory
_MakeClient = Callable[..., Client]
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
19720,
1330,
4889,
540,
198,
198,
6738,
257,
952,
4023,
1330,
3992,
198,
198,
6738,
7669,
62,
21282,
74,
1330,
48353,
11,
20985,
11,
38279,
198,
198,
6738,
5254,
1330,
4... | 3.580645 | 62 |
import sys
backoff_models = { "UD_Breton-KEB": "ga_idt",
"UD_Czech-PUD": "cs_pdt",
"UD_English-PUD": "en_ewt",
"UD_Faroese-OFT": "nn_nynorsk",
"UD_Finnish-PUD": "fi_tdt",
"UD_Japanese-Modern": "ja_gsd",
"UD_Naija-NSC": "en_ewt",
"UD_Swedish-PUD": "sv_talbanken"
}
print(backoff_models[sys.argv[1]])
| [
11748,
25064,
198,
198,
1891,
2364,
62,
27530,
796,
1391,
366,
8322,
62,
12679,
1122,
12,
7336,
33,
1298,
366,
4908,
62,
312,
83,
1600,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,... | 1.488449 | 303 |
from os import getenv, environ
from os.path import exists, join, expanduser
from random import seed, sample, randint, uniform
from subprocess import run
from tqdm.notebook import tqdm as log_progress
import torch
from torch import optim
from naeval.ner.datasets import (
load_factru,
load_ne5,
)
from slovnet.s3 import S3
from slovnet.io import (
format_jl,
parse_jl,
load_gz_lines,
dump_gz_lines
)
from slovnet.board import (
TensorBoard,
LogBoard,
MultiBoard
)
from slovnet.const import (
TRAIN, TEST,
PER, LOC, ORG,
CUDA0,
)
from slovnet.token import tokenize
from slovnet.model.bert import (
RuBERTConfig,
BERTEmbedding,
BERTEncoder,
BERTNERHead,
BERTNER
)
from slovnet.markup import (
SpanMarkup,
show_span_markup
)
from slovnet.vocab import BERTVocab, BIOTagsVocab
from slovnet.encoders.bert import BERTNERTrainEncoder
from slovnet.score import (
NERBatchScore,
NERScoreMeter,
score_ner_batch
)
from slovnet.mask import (
Masked,
split_masked,
pad_masked
)
DATA_DIR = 'data'
MODEL_DIR = 'model'
BERT_DIR = 'bert'
RAW_DIR = join(DATA_DIR, 'raw')
CORUS_NE5 = join(RAW_DIR, 'Collection5')
CORUS_FACTRU = join(RAW_DIR, 'factRuEval-2016-master')
NE5 = join(DATA_DIR, 'ne5.jl.gz')
FACTRU = join(DATA_DIR, 'factru.jl.gz')
S3_DIR = '02_bert_ner'
S3_NE5 = join(S3_DIR, NE5)
S3_FACTRU = join(S3_DIR, FACTRU)
VOCAB = 'vocab.txt'
EMB = 'emb.pt'
ENCODER = 'encoder.pt'
NER = 'ner.pt'
BERT_VOCAB = join(BERT_DIR, VOCAB)
BERT_EMB = join(BERT_DIR, EMB)
BERT_ENCODER = join(BERT_DIR, ENCODER)
S3_RUBERT_DIR = '01_bert_news/rubert'
S3_MLM_DIR = '01_bert_news/model'
S3_BERT_VOCAB = join(S3_RUBERT_DIR, VOCAB)
S3_BERT_EMB = join(S3_MLM_DIR, EMB)
S3_BERT_ENCODER = join(S3_MLM_DIR, ENCODER)
MODEL_ENCODER = join(MODEL_DIR, ENCODER)
MODEL_NER = join(MODEL_DIR, NER)
S3_MODEL_ENCODER = join(S3_DIR, MODEL_ENCODER)
S3_MODEL_NER = join(S3_DIR, MODEL_NER)
BOARD_NAME = getenv('board_name', '02_bert_ner')
RUNS_DIR = 'runs'
TRAIN_BOARD = '01_train'
TEST_BOARD = '02_test'
SEED = int(getenv('seed', 72))
DEVICE = getenv('device', CUDA0)
BERT_LR = float(getenv('bert_lr', 0.000045))
LR = float(getenv('lr', 0.0075))
LR_GAMMA = float(getenv('lr_gamma', 0.45))
EPOCHS = int(getenv('epochs', 5))
| [
198,
6738,
28686,
1330,
651,
24330,
11,
551,
2268,
198,
6738,
28686,
13,
6978,
1330,
7160,
11,
4654,
11,
4292,
7220,
198,
6738,
4738,
1330,
9403,
11,
6291,
11,
43720,
600,
11,
8187,
198,
6738,
850,
14681,
1330,
1057,
198,
198,
6738,
... | 2.148079 | 1,067 |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import networkx as nx
from .exceptions import UnweightedGraphError, InvalidGraphError
def assert_is_graph(graph: nx.Graph):
"""
Asserts that an object is a networkx graph
:param graph: Graph to check
:raises TypeError: If graph is not a graph object
"""
if not isinstance(graph, nx.Graph):
raise TypeError('graph must be a networkx.Graph')
def assert_is_weighted(
graph: nx.Graph,
weight_column: str = 'weight'
):
"""
Asserts that a graph object is a weighted graph
:param graph: A graph to check
:param weight_column: Weight column
:raises UnweightedGraphError: Graph is not weighted by the requested weight column
"""
if not nx.is_weighted(graph, weight=weight_column):
raise UnweightedGraphError("Weight column [{0}] not found in every graph edge attribute".format(weight_column))
def assert_is_weighted_graph(
graph: nx.Graph,
weight_column: str = 'weight'
):
"""
Asserts that an object is a networkx graph and that the graph object is a weighted graph.
:param graph: A graph to check
:param weight_column: Weight column
:raises TypeError: If graph is not a networkx graph object
:raises UnweightedGraphError: Graph is not weighted by the requested weight column
"""
assert_is_graph(graph)
assert_is_weighted(graph, weight_column)
def assert_single_connected_components(
graph: nx.Graph,
extended_message: str = ""
):
"""
Asserts that there is only a single connected component in the graph.
:param graph: A graph object
:param extended_message: An optional message that, if specified, will be appended to the raises exception.
"""
assert_is_graph(graph)
if not nx.is_directed(graph):
if nx.number_connected_components(graph) > 1:
raise InvalidGraphError(
"The graph provided has more than one connected component. {0}".format(extended_message)
)
else:
if nx.number_weakly_connected_components(graph) > 1:
raise InvalidGraphError(
"The graph provided has more than one weakly connected component. {0}".format(extended_message)
)
def validate_minimal_graph(
graph: nx.Graph,
weight_attribute: str = "weight"
):
"""
Validates that every edge is weighted, contains nodes, and contains edges.
:param networkx.Graph graph: A networkx graph object
:param str weight_attribute: The attribute containing the weight of the edge.
:raises ValueError: If the graph is not fully weighted, has no nodes, or has no edges.
"""
if len(graph) == 0:
raise ValueError("The graph provided has no nodes")
if len(graph.edges()) == 0:
raise ValueError("The graph provided has no edges")
if not nx.is_weighted(graph, weight=weight_attribute):
raise ValueError("The graph provided is not fully weighted")
def assert_is_undirected(graph: nx.Graph):
"""
Asserts that an object is an undirected graph
:param graph: Graph to check
:raises ValueError: If a graph is not an undirected graph
"""
if graph.is_directed():
raise ValueError("graph must be an undirected graph")
| [
2,
15069,
357,
66,
8,
5413,
10501,
13,
198,
2,
49962,
739,
262,
17168,
5964,
13,
198,
198,
11748,
3127,
87,
355,
299,
87,
198,
6738,
764,
1069,
11755,
1330,
791,
6551,
276,
37065,
12331,
11,
17665,
37065,
12331,
628,
198,
4299,
6818... | 2.832765 | 1,172 |
from typing import List
| [
6738,
19720,
1330,
7343,
198
] | 4.8 | 5 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import os
from ooi_data_explorations.common import inputs, m2m_collect, m2m_request, get_deployment_dates, \
get_vocabulary, dt64_epoch, update_dataset, ENCODINGS
from gsw.conversions import SP_from_C
def metbk_hourly(ds):
"""
Takes METBK hourly averaged bulk flux estimates from the CGSN/EA moorings
and cleans up the data set to make it more user-friendly. Primary task is
renaming parameters and dropping some that are of limited use.
Additionally, re-organize some of the variables to permit better
assessments of the data.
:param ds: initial metbk hourly averaged data set downloaded from OOI via
the M2M system
:return ds: cleaned up data set
"""
# drop some of the variables:
# met_timeflx == time, redundant,
# ### Data products from upstream processing used to calculate hourly flux measurements. Remove from here to
# ### keep this data set clean. Will obtain the 1 minute source data from a separate stream.
# eastward_velocity
# northward_velocity
# longwave_irradiance
# air_temperature
# barometric_pressure
# precipitation
# sea_surface_temperature
# relative_humidity
# shortwave_irradiance
ds = ds.drop(['met_timeflx', 'eastward_velocity', 'northward_velocity', 'longwave_irradiance', 'air_temperature',
'barometric_pressure', 'precipitation', 'sea_surface_temperature', 'relative_humidity',
'shortwave_irradiance'])
# reset incorrectly formatted temperature units
temp_vars = ['met_tempa2m', 'met_tempskn']
for var in temp_vars:
ds[var].attrs['units'] = 'degree_Celsius'
return ds
def metbk_datalogger(ds, burst=False):
"""
Takes METBK data recorded by the data loggers used in the CGSN/EA moorings
and cleans up the data set to make it more user-friendly. Primary task is
renaming parameters and dropping some that are of limited use.
Additionally, re-organize some of the variables to permit better
assessments of the data.
:param ds: initial metbk data set downloaded from OOI via the M2M system
:return ds: cleaned up data set
"""
# drop some of the variables:
# date_time_string == internal_timestamp, redundant so can remove
# dcl_controller_timestamp == time, redundant so can remove
# internal_timestamp == doesn't exist, always empty so can remove
# ### Data products from downstream processing used to calculate hourly flux measurements. Remove from here to
# ### keep this data set clean. Will obtain hourly flux data from a different stream.
# met_barpres
# met_windavg_mag_corr_east
# met_windavg_mag_corr_north
# met_netsirr
# met_salsurf
# met_spechum
# ct_depth
# met_current_direction
# met_current_speed
# met_relwind_direction
# met_relwind_speed
# met_heatflx_minute
# met_latnflx_minute
# met_netlirr_minute
# met_sensflx_minute
ds = ds.drop(['dcl_controller_timestamp', 'internal_timestamp', 'met_barpres',
'met_windavg_mag_corr_east', 'met_windavg_mag_corr_north', 'met_netsirr', 'met_salsurf',
'met_spechum', 'ct_depth', 'met_current_direction', 'met_current_speed', 'met_relwind_direction',
'met_relwind_speed', 'met_heatflx_minute', 'met_latnflx_minute', 'met_netlirr_minute',
'met_sensflx_minute', 'met_barpres_qc_executed', 'met_barpres_qc_results',
'met_current_direction_qc_executed', 'met_current_direction_qc_results',
'met_current_speed_qc_executed', 'met_current_speed_qc_results', 'met_relwind_direction_qc_executed',
'met_relwind_direction_qc_results', 'met_relwind_speed_qc_executed', 'met_relwind_speed_qc_results',
'met_netsirr_qc_executed', 'met_netsirr_qc_results', 'met_salsurf_qc_executed',
'met_salsurf_qc_results', 'met_spechum_qc_executed', 'met_spechum_qc_results'])
# drop the QC test applied to the L0 values (not supposed to happen)
ds = ds.drop(['precipitation_qc_executed', 'precipitation_qc_results'])
# reset incorrectly formatted temperature and relative humidity units
ds['relative_humidity'].attrs['units'] = 'percent'
temp_vars = ['air_temperature', 'sea_surface_temperature']
for var in temp_vars:
ds[var].attrs['units'] = 'degree_Celsius'
# calculate the near surface salinity
ds['sea_surface_salinity'] = ('time', SP_from_C(ds['sea_surface_conductivity'] * 10, ds['sea_surface_temperature'],
1.0))
ds['sea_surface_salinity'].attrs = {
'long_name': 'Sea Surface Practical Salinity',
'standard_name': 'sea_surface_salinity',
'units': '1e-3',
'comment': ('Salinity is generally defined as the concentration of dissolved salt in a parcel of seawater. ' +
'Practical Salinity is a more specific unitless quantity calculated from the conductivity of ' +
'seawater and adjusted for temperature and pressure. It is approximately equivalent to Absolute ' +
'Salinity (the mass fraction of dissolved salt in seawater), but they are not interchangeable.'),
'data_product_identifier': 'SALSURF_L2',
'instrument': (ds.attrs['subsite'] + '-SBD11-06-METBKA000'),
'stream': ds.attrs['stream'],
'ancillary_variables': 'sea_surface_conductivity sea_surface_temperature'
}
if burst: # re-sample the data to a 15 minute interval using a median average
burst = ds
burst['time'] = burst['time'] - np.timedelta64(450, 's') # center time windows for 15 minute bursts
burst = burst.resample(time='15Min', keep_attrs=True, skipna=True).median()
burst = burst.where(~np.isnan(burst.deployment), drop=True)
# reset the attributes...which keep_attrs should do...
burst.attrs = ds.attrs
for v in burst.variables:
burst[v].attrs = ds[v].attrs
# save the newly average data
ds = burst
return ds
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
28686,
198,
198,
6738,
267,
23013,
62,
7890,
62,
20676,
273,
602,
13,
11321,... | 2.478363 | 2,542 |
# Ingresa el texto a encriptar
texto= input("Ingresa un mensaje: ")
# Ingresa un valor de cambio válido (repitelo hasta que tengas éxito)
shift = 0
while shift == 0:
try:
shift = int(input("Ingresa el valor de cambio del cifrado (1..25): "))
if shift not in range(1,26):
raise ValueError
except ValueError:
shift = 0
if shift == 0:
print("Valor de cambio inválido!")
cifrado = ''
for char in texto:
# ¿Es un letra?
if char.isalpha():
# cambia su código
code = ord(char) + shift
# encuentra el código de la primera letra (mayúscula o minúscula)
if char.isupper():
first = ord('A')
else:
first = ord('a')
# hacer corrección
code -= first
code %= 26
# agrega caracter codificado al mensaje
cifrado += chr(first + code)
else:
# agregar caracter original al mensaje
cifrado += char
print(cifrado) | [
171,
119,
123,
2,
17589,
14625,
1288,
2420,
78,
257,
2207,
1968,
283,
198,
5239,
78,
28,
5128,
7203,
27682,
14625,
555,
285,
641,
1228,
68,
25,
366,
8,
198,
198,
2,
17589,
14625,
555,
1188,
273,
390,
269,
4131,
952,
410,
6557,
75,... | 2.008147 | 491 |
from slugify import slugify
from .schedules import * # noqa
from .generator_base import GeneratorBase
from .utilities import steps
| [
6738,
31065,
1958,
1330,
31065,
1958,
198,
198,
6738,
764,
1416,
704,
5028,
1330,
1635,
1303,
645,
20402,
198,
6738,
764,
8612,
1352,
62,
8692,
1330,
35986,
14881,
198,
6738,
764,
315,
2410,
1330,
4831,
198
] | 3.666667 | 36 |
'''
>>> from exception_translator_ext import *
>>> try:
... throw_error();
... except RuntimeError, x:
... print x
... else:
... print 'Expected a RuntimeError!'
!!!error!!!
'''
if __name__ == '__main__':
print "running..."
import sys
sys.exit(run()[0])
| [
7061,
6,
201,
198,
33409,
422,
6631,
62,
7645,
41880,
62,
2302,
1330,
1635,
201,
198,
33409,
1949,
25,
201,
198,
986,
220,
220,
220,
220,
3714,
62,
18224,
9783,
201,
198,
986,
2845,
43160,
12331,
11,
2124,
25,
201,
198,
986,
220,
... | 2.346457 | 127 |
#!/usr/bin/env python
"""
api/rest_organization.py
Copyright 2021 Triple Dot Engineering LLC
Defines the RestOrg class used to interact with organizations via the API.
"""
import json
from .. import util
from ._abc_rest_obj import RestObject
from .api import TriviumApi
class RestOrg(RestObject):
"""Class for interacting with Orgs via REST api"""
##
# RestOrg constructor
##
##
# Returns the string representation of an organization
##
##
# Returns the string representation of an organization
##
##
# Takes a list of organizations as input and prints them in tabular format.
##
@staticmethod
def print_table(orgs):
"""Prints in tabular format."""
fmt = '{id:20s} {name:32s}'
labels = {
'id': 'ID',
'name': 'Name'
}
header_fmt = util.Colors.CYAN + util.Colors.BOLD
print(header_fmt + fmt.format(**labels) + util.Colors.ENDC)
for org in orgs:
print(fmt.format(**org))
##
# Gets a single org is the org id is provided, otherwise gets all orgs
# that the user has access to.
##
@staticmethod
def get(org=None):
"""Gets one or more orgs"""
url = '/orgs' if org is None else '/orgs/{}'.format(org)
r = TriviumApi().make_request(url)
if r.status_code == 200:
return r.json()
# If not 200, raise exception
raise Exception('TriviumApiError: {} {}'.format(r.status_code, r.text))
##
# Posts an organization based on the input data.
##
@staticmethod
def post(data):
"""Posts an org"""
opts = {
'method': 'POST',
'params': {},
'body': data
}
r = TriviumApi().make_request('/orgs', **opts)
if r.status_code == 200:
return r.json()
raise Exception('TriviumApiError: {} {}'.format(r.status_code, r.text))
##
# Deletes an organization.
##
@staticmethod
def delete(identifier):
"""Deletes orgs"""
opts = {
'method': 'DELETE',
'params': {}
}
url = '/orgs/{0}'.format(identifier)
r = TriviumApi().make_request(url, **opts)
if r.status_code == 200:
return r.json()
# if not 200
raise Exception('TriviumApiError: {} {}'.format(r.status_code, r.text))
##
# Patches organization(s)
##
@staticmethod
def patch(data):
"""Patches orgs"""
opts = {
'method': 'PATCH',
'params': {},
'body': data
}
r = TriviumApi().make_request('/orgs', **opts)
if r.status_code == 200:
return r.json()
# if not 200
raise Exception('TriviumApiError: {} {}'.format(r.status_code, r.text))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
37811,
198,
15042,
14,
2118,
62,
9971,
1634,
13,
9078,
198,
198,
15269,
33448,
19817,
22875,
14044,
11419,
198,
198,
7469,
1127,
262,
8324,
46808,
1398,
973,
284,
9427,
351,
5745,
2884,
... | 2.183969 | 1,310 |
from pytest_mock import MockerFixture
from gopredict.__main__ import main_loop
| [
6738,
12972,
9288,
62,
76,
735,
1330,
337,
12721,
37,
9602,
198,
6738,
308,
404,
17407,
13,
834,
12417,
834,
1330,
1388,
62,
26268,
628,
220,
220,
220,
220,
628,
628,
220,
220,
220,
220,
198,
220,
220,
220
] | 2.461538 | 39 |
# coding=utf-8
from everglade.tokens import TokenType, Token
ALLOWED_CHARS = "abcdefghijklmnopqrstuvwxyz_"
| [
2,
19617,
28,
40477,
12,
23,
198,
6738,
1683,
4743,
671,
13,
83,
482,
641,
1330,
29130,
6030,
11,
29130,
198,
198,
7036,
3913,
1961,
62,
3398,
27415,
796,
366,
39305,
4299,
456,
2926,
41582,
10295,
404,
80,
81,
301,
14795,
86,
5431,... | 2.291667 | 48 |
import collections
import csv
import dataclasses
import datetime
import io
import itertools
import json
import logging
import os
import random
import typing
import uuid
import faker
import numpy as np
from google.cloud import storage
fake = faker.Faker()
# final datasets
orders = list()
users = list()
order_items = list()
events = list()
inventory_items = list()
# read from local csv and return products
# read from local csv and return locations
SECONDS_IN_MINUTE = 60
MINUTES_IN_HOUR = 60
MINUTES_IN_DAY = 1440
MIN_AGE = 12
MAX_AGE = 71
products = generate_products()
logging.info("generating products helper dict")
logging.info("generating locations data")
LOCATION_DATA = generate_locations()
PRODUCT_GENDER_DICT = products[0]
PRODUCT_BY_ID_DICT = products[1]
# returns random address based off specified distribution
# generates random date between now and specified date
# generate URI for events table
# converts list of dicts into csv format
# upload into GCS Bucket
# utility class
@dataclasses.dataclass
@dataclasses.dataclass
@dataclasses.dataclass
@dataclasses.dataclass
@dataclasses.dataclass
inv_item_id = 0
@dataclasses.dataclass
@dataclasses.dataclass
@dataclasses.dataclass
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
main(
num_of_users=int(os.environ["NUM_OF_USERS"]),
target_gcs_prefix=os.environ["TARGET_GCS_PREFIX"],
target_gcs_bucket=os.environ["TARGET_GCS_BUCKET"],
source_dir=os.environ["SOURCE_DIR"],
extraneous_headers=json.loads(os.environ["EXTRANEOUS_HEADERS"]),
)
| [
11748,
17268,
198,
11748,
269,
21370,
198,
11748,
4818,
330,
28958,
198,
11748,
4818,
8079,
198,
11748,
33245,
198,
11748,
340,
861,
10141,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
28686,
198,
11748,
4738,
198,
11748,
19720,
198,
... | 2.835088 | 570 |
# coding: utf-8
import io
from . import bl
from . import exporter
from pymeshio import pmx
from pymeshio import common
from pymeshio.pmx import writer
import bpy
import bpy_extras.io_utils # pylint: disable=E0401
def create_pmx(ex, enable_bdef4=True):
"""
PMX 出力
"""
model=pmx.Model()
o=ex.root.o
model.name=o.get(bl.MMD_MB_NAME, 'Blenderエクスポート')
model.english_name=o.get(bl.MMD_ENGLISH_NAME, 'blender export model')
model.comment=o.get(bl.MMD_MB_COMMENT, 'Blnderエクスポート\n')
model.english_comment=o.get(bl.MMD_ENGLISH_COMMENT, 'blender export commen\n')
deform_builder=DeformBuilder(ex.skeleton)
model.vertices=[pmx.Vertex(
# convert right-handed z-up to left-handed y-up
common.Vector3(pos[0], pos[2], pos[1]),
# convert right-handed z-up to left-handed y-up
common.Vector3(attribute.nx, attribute.nz, attribute.ny),
# reverse vertical
common.Vector2(attribute.u, 1.0-attribute.v),
deform_builder(ext_weight) if enable_bdef4 else \
get_deform(ex.skeleton.indexByName(b0), ex.skeleton.indexByName(b1), weight),
# edge flag, 0: enable edge, 1: not edge
1.0
)
for pos, attribute, b0, b1, weight, ext_weight in ex.oneSkinMesh.vertexArray.zip2()]
if enable_bdef4:
deform_builder.show()
boneMap=dict([(b.name, i) for i, b in enumerate(ex.skeleton.bones)])
model.bones=[create_bone(b) for b in ex.skeleton.bones]
# textures
textures=set()
try:
for m in ex.oneSkinMesh.vertexArray.indexArrays.keys():
for path in bl.material.eachEnalbeTexturePath(bl.material.get(m)):
textures.add(get_texture_name(path))
except KeyError as e:
# no material
pass
model.textures=list(textures)
# texture pathからtexture indexを逆引き
texturePathMap={}
for i, texture_path in enumerate(model.textures):
texturePathMap[texture_path]=i
def get_flag(m):
"""
return material flag
"""
return (
m.get(bl.MATERIALFLAG_BOTHFACE, 0)
+(m.get(bl.MATERIALFLAG_GROUNDSHADOW, 0) << 1)
+(m.get(bl.MATERIALFLAG_SELFSHADOWMAP, 0) << 2)
+(m.get(bl.MATERIALFLAG_SELFSHADOW, 0) << 3)
+(m.get(bl.MATERIALFLAG_EDGE, 0) << 4)
)
def get_toon_shareing_flag(m):
"""
return
shared: 1
not shared: 0
"""
for t in bl.material.eachEnalbeTexturePath(m):
if re.match("""toon\d\d.bmp"""):
return 1
return 0
# 面とマテリアル
vertexCount=ex.oneSkinMesh.getVertexCount()
model.materials=[]
for material_name, indices in ex.oneSkinMesh.vertexArray.each():
#print('material:', material_name)
try:
m=bl.material.get(material_name)
except KeyError as e:
m=exporter.oneskinmesh.DefaultMaterial()
(
texture_index,
toon_texture_index, toon_sharing_flag,
sphere_texture_index, sphere_mode,
)=get_texture_params(m, texturePathMap)
# マテリアル
model.materials.append(pmx.Material(
name=m.name,
english_name='',
diffuse_color=common.RGB(
m.diffuse_color[0],
m.diffuse_color[1],
m.diffuse_color[2]),
alpha=m.use_transparency and m.alpha or 1.0,
specular_factor=(0
if m.specular_toon_size<1e-5
else m.specular_toon_size * 10),
specular_color=common.RGB(
m.specular_color[0],
m.specular_color[1],
m.specular_color[2]),
ambient_color=common.RGB(
m.mirror_color[0],
m.mirror_color[1],
m.mirror_color[2]),
flag=get_flag(m),
edge_color=common.RGBA(0, 0, 0, 1),
edge_size=1.0,
texture_index=texture_index,
sphere_texture_index=sphere_texture_index,
sphere_mode=sphere_mode,
toon_sharing_flag=toon_sharing_flag,
toon_texture_index=toon_texture_index,
comment='',
vertex_count=len(indices)
))
# 面
for i in indices:
assert(i<vertexCount)
for i in range(0, len(indices), 3):
# reverse triangle
model.indices.append(indices[i+2])
model.indices.append(indices[i+1])
model.indices.append(indices[i])
# 表情
from pymeshio import englishmap
for i, m in enumerate(ex.oneSkinMesh.morphList[1:]):
# name
english_name="morph: %d" % i
panel=4
for en, n, p in englishmap.skinMap:
if n==m.name:
english_name=en
panel=p
break
morph=pmx.Morph(
name=m.name,
english_name=english_name,
panel=panel,
morph_type=1,
)
morph.offsets=[pmx.VertexMorphOffset(
get_vertex_index(index),
common.Vector3(offset[0], offset[2], offset[1])
)
for index, offset in m.offsets]
model.morphs.append(morph)
# ボーングループ
model.display_slots=[]
for name, members in ex.skeleton.bone_groups:
if name=="表情":
slot=pmx.DisplaySlot(
name=name,
english_name=englishmap.getEnglishBoneGroupName(name),
special_flag=1
)
slot.references=[(1, i) for i in range(len(model.morphs))]
model.display_slots.append(slot)
else:
slot=pmx.DisplaySlot(
name=name,
english_name=englishmap.getEnglishBoneGroupName(name),
special_flag=1 if name=="Root" else 0
)
slot.references=[(0, ex.skeleton.boneByName(m).index) for m in members]
model.display_slots.append(slot)
# rigid body
boneNameMap={}
for i, b in enumerate(ex.skeleton.bones):
boneNameMap[b.name]=i
rigidNameMap={}
for i, obj in enumerate(ex.oneSkinMesh.rigidbodies):
name=obj[bl.RIGID_NAME] if bl.RIGID_NAME in obj else obj.name
#print(name)
rigidNameMap[name]=i
boneIndex=boneNameMap[obj[bl.RIGID_BONE_NAME]]
if boneIndex==0:
boneIndex=-1
if obj[bl.RIGID_SHAPE_TYPE]==0:
shape_type=0
shape_size=common.Vector3(obj.scale[0], 0, 0)
elif obj[bl.RIGID_SHAPE_TYPE]==1:
shape_type=1
shape_size=common.Vector3(obj.scale[0], obj.scale[2], obj.scale[1])
elif obj[bl.RIGID_SHAPE_TYPE]==2:
shape_type=2
shape_size=common.Vector3(obj.scale[0], obj.scale[2], 0)
rigidBody=pmx.RigidBody(
name=name,
english_name='',
collision_group=obj[bl.RIGID_GROUP],
no_collision_group=obj[bl.RIGID_INTERSECTION_GROUP],
bone_index=boneIndex,
shape_position=common.Vector3(
obj.location.x,
obj.location.z,
obj.location.y),
shape_rotation=common.Vector3(
-obj.rotation_euler[0],
-obj.rotation_euler[2],
-obj.rotation_euler[1]),
shape_type=shape_type,
shape_size=shape_size,
mass=obj[bl.RIGID_WEIGHT],
linear_damping=obj[bl.RIGID_LINEAR_DAMPING],
angular_damping=obj[bl.RIGID_ANGULAR_DAMPING],
restitution=obj[bl.RIGID_RESTITUTION],
friction=obj[bl.RIGID_FRICTION],
mode=obj[bl.RIGID_PROCESS_TYPE]
)
model.rigidbodies.append(rigidBody)
# joint
model.joints=[pmx.Joint(
name=obj[bl.CONSTRAINT_NAME],
english_name='',
joint_type=0,
rigidbody_index_a=rigidNameMap[obj[bl.CONSTRAINT_A]],
rigidbody_index_b=rigidNameMap[obj[bl.CONSTRAINT_B]],
position=common.Vector3(
obj.location[0],
obj.location[2],
obj.location[1]),
rotation=common.Vector3(
-obj.rotation_euler[0],
-obj.rotation_euler[2],
-obj.rotation_euler[1]),
translation_limit_min=common.Vector3(
obj[bl.CONSTRAINT_POS_MIN][0],
obj[bl.CONSTRAINT_POS_MIN][1],
obj[bl.CONSTRAINT_POS_MIN][2]
),
translation_limit_max=common.Vector3(
obj[bl.CONSTRAINT_POS_MAX][0],
obj[bl.CONSTRAINT_POS_MAX][1],
obj[bl.CONSTRAINT_POS_MAX][2]
),
rotation_limit_min=common.Vector3(
obj[bl.CONSTRAINT_ROT_MIN][0],
obj[bl.CONSTRAINT_ROT_MIN][1],
obj[bl.CONSTRAINT_ROT_MIN][2]),
rotation_limit_max=common.Vector3(
obj[bl.CONSTRAINT_ROT_MAX][0],
obj[bl.CONSTRAINT_ROT_MAX][1],
obj[bl.CONSTRAINT_ROT_MAX][2]),
spring_constant_translation=common.Vector3(
obj[bl.CONSTRAINT_SPRING_POS][0],
obj[bl.CONSTRAINT_SPRING_POS][1],
obj[bl.CONSTRAINT_SPRING_POS][2]),
spring_constant_rotation=common.Vector3(
obj[bl.CONSTRAINT_SPRING_ROT][0],
obj[bl.CONSTRAINT_SPRING_ROT][1],
obj[bl.CONSTRAINT_SPRING_ROT][2])
)
for obj in ex.oneSkinMesh.constraints]
return model
class ExportPmx(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
'''Export to PMX file format (.pmx)'''
bl_idname = 'export_scene.mmd_pmx'
bl_label = 'Export PMX'
filename_ext = '.pmx'
filter_glob = bpy.props.StringProperty(
default='*.pmx', options={'HIDDEN'})
use_selection = bpy.props.BoolProperty(
name='Selection Only',
description='Export selected objects only',
default=False)
@classmethod
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
33245,
198,
6738,
764,
1330,
698,
198,
6738,
764,
1330,
1033,
4337,
198,
6738,
279,
4948,
5069,
952,
1330,
9114,
87,
198,
6738,
279,
4948,
5069,
952,
1330,
2219,
198,
6738,
279,
4948,... | 1.810502 | 5,694 |
# MIT License
#
# Copyright (c) 2020-2021 Parakoopa and the SkyTemple Contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
from typing import Optional
from explorerscript.antlr.ExplorerScriptParser import ExplorerScriptParser
from explorerscript.error import SsbCompilerError
from explorerscript.ssb_converting.compiler.compile_handlers.abstract import AbstractCompileHandler
from explorerscript.ssb_converting.compiler.compile_handlers.blocks.ifs.header.bit import IfHeaderBitCompileHandler
from explorerscript.ssb_converting.compiler.compile_handlers.blocks.ifs.header.negatable import \
IfHeaderNegatableCompileHandler
from explorerscript.ssb_converting.compiler.compile_handlers.blocks.ifs.header.operator import \
IfHeaderOperatorCompileHandler
from explorerscript.ssb_converting.compiler.compile_handlers.blocks.ifs.header.scn import IfHeaderScnCompileHandler
from explorerscript.ssb_converting.compiler.compile_handlers.operations.operation import OperationCompileHandler
from explorerscript.ssb_converting.compiler.utils import CompilerCtx, SsbLabelJumpBlueprint
from explorerscript.ssb_converting.ssb_special_ops import OP_BRANCH_PERFORMANCE, OPS_BRANCH
from explorerscript.util import _, f
| [
2,
220,
17168,
13789,
198,
2,
198,
2,
220,
15069,
357,
66,
8,
12131,
12,
1238,
2481,
2547,
461,
11224,
64,
290,
262,
5274,
12966,
1154,
25767,
669,
198,
2,
198,
2,
220,
2448,
3411,
318,
29376,
7520,
11,
1479,
286,
3877,
11,
284,
... | 3.542587 | 634 |
#!/usr/bin/python
import socket
import time
import sys
import urllib2
import os
import base64
auth = "jude:sniff"
hostname = sys.argv[1]
port = int(sys.argv[2] )
filename = sys.argv[3]
data_fd = None
data_path = None
if len(sys.argv) > 4:
data_path = sys.argv[4]
data_fd = open( data_path, "r" )
mode = '0644'
if filename[-1] == '/':
mode = '0755'
size = 0
if data_fd != None:
size = os.stat(data_path).st_size
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM )
s.connect( (hostname, port) )
boundary = "AaBbCcDdEe"
http_header = ""
http_header += "PUT %s HTTP/1.0\r\n" % filename
http_header += "Host: t510\r\n"
http_header += "Content-Type: application/octet-stream\r\n"
http_header += "Content-Length: %s\r\n" % size
http_header += "Authorization: Basic %s\r\n" % base64.b64encode(auth)
http_header += "X-POSIX-mode: %s\r\n" % mode
http_header += "\r\n"
print "<<<<<<<<<<<<<<<<<<<<<<<<<"
print http_header
print "<<<<<<<<<<<<<<<<<<<<<<<<<\n"
s.send( http_header )
while data_fd != None:
buf = data_fd.read(32768)
if len(buf) == 0:
break
s.send( buf )
ret = s.recv(16384)
print ">>>>>>>>>>>>>>>>>>>>>>>>>"
print ret
print ">>>>>>>>>>>>>>>>>>>>>>>>>\n"
s.close()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
11748,
17802,
198,
11748,
640,
198,
11748,
25064,
198,
11748,
2956,
297,
571,
17,
198,
11748,
28686,
198,
11748,
2779,
2414,
198,
198,
18439,
796,
366,
73,
2507,
25,
16184,
733,
1,
198,
... | 2.29434 | 530 |
#!/usr/bin/python
import sys
import argparse
from pathlib import Path
import urllib.request
import git
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
6738,
3108,
8019,
1330,
10644,
198,
11748,
2956,
297,
571,
13,
25927,
198,
11748,
17606,
628,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,... | 2.88 | 50 |
import numpy as np
import predict
import os
import pandas as pd
directory = os.path.join(os.path.dirname(__file__), '..', 'chainsaw')
predictions = []
for file_name in os.listdir(directory):
(predicted_class, predicted_proba, le) = predict.predict(os.path.join(directory, file_name))
predictions.append(predicted_class)
df = pd.DataFrame(predictions, columns=['class_name'])
print(df.class_name.value_counts(normalize=True)) | [
11748,
299,
32152,
355,
45941,
198,
11748,
4331,
198,
11748,
28686,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
34945,
796,
28686,
13,
6978,
13,
22179,
7,
418,
13,
6978,
13,
15908,
3672,
7,
834,
7753,
834,
828,
705,
492,
3256,
7... | 2.86755 | 151 |
import json, datetime
from confluent_kafka import Consumer, TopicPartition
from confluent_kafka.cimpl import KafkaError, KafkaException
import frappe
from frappe.utils.logger import get_logger
from spine.spine_adapter.scheduler.message_processor import process_message, skip_message
logger = None
| [
11748,
33918,
11,
4818,
8079,
198,
198,
6738,
1013,
28216,
62,
74,
1878,
4914,
1330,
18110,
11,
47373,
7841,
653,
198,
6738,
1013,
28216,
62,
74,
1878,
4914,
13,
66,
23928,
1330,
46906,
12331,
11,
46906,
16922,
198,
198,
11748,
5306,
... | 3.420455 | 88 |
from __future__ import print_function
from unittest import TestCase
from indexdigest.linters.linter_0094_generic_primary_key import check_generic_primary_key
from indexdigest.test import DatabaseTestMixin
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
198,
6738,
555,
715,
395,
1330,
6208,
20448,
198,
198,
6738,
6376,
12894,
395,
13,
2815,
1010,
13,
2815,
353,
62,
405,
5824,
62,
41357,
62,
39754,
62,
2539,
1330,
2198,
62,
41357,
... | 3.525424 | 59 |
import logging
from typing import Union
from xml.dom.minidom import Element
import requests
from huaweisms.api.config import MODEM_HOST
from huaweisms.xml.util import get_child_text, parse_xml_string, get_dictionary_from_children
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
19720,
1330,
4479,
198,
6738,
35555,
13,
3438,
13,
1084,
312,
296,
1330,
11703,
198,
198,
11748,
7007,
198,
6738,
289,
84,
707,
68,
6583,
13,
15042,
13,
11250,
1330,
19164,
3620,
62,
39,
10892,
198,
198,
6738,... | 3.032609 | 92 |
# 대신증권 API
# 종목정보 구하는 예제
import win32com.client
# 연결 여부 체크
objCpCybos = win32com.client.Dispatch("CpUtil.CpCybos")
bConnect = objCpCybos.IsConnect
if (bConnect == 0):
print("PLUS가 정상적으로 연결되지 않음. ")
exit()
# 종목코드 리스트 구하기
objCpCodeMgr = win32com.client.Dispatch("CpUtil.CpCodeMgr")
codeList = objCpCodeMgr.GetStockListByMarket(1) # 거래소
codeList2 = objCpCodeMgr.GetStockListByMarket(2) # 코스닥
print("거래소 종목코드", len(codeList))
for i, code in enumerate(codeList):
secondCode = objCpCodeMgr.GetStockSectionKind(code)
name = objCpCodeMgr.CodeToName(code)
stdPrice = objCpCodeMgr.GetStockStdPrice(code)
print(i, code, secondCode, stdPrice, name)
print("코스닥 종목코드", len(codeList2))
for i, code in enumerate(codeList2):
secondCode = objCpCodeMgr.GetStockSectionKind(code)
name = objCpCodeMgr.CodeToName(code)
stdPrice = objCpCodeMgr.GetStockStdPrice(code)
print(i, code, secondCode, stdPrice, name)
print("거래소 + 코스닥 종목코드 ", len(codeList) + len(codeList2)) | [
2,
31619,
234,
222,
168,
233,
254,
168,
99,
251,
166,
114,
234,
7824,
201,
198,
2,
23821,
95,
227,
167,
103,
102,
168,
254,
243,
167,
111,
112,
220,
166,
113,
105,
47991,
246,
167,
232,
242,
23821,
246,
230,
168,
254,
250,
201,
... | 1.661789 | 615 |
from django.contrib import admin
from .models import Announcement
admin.site.register(Announcement, AnnouncementAdmin)
| [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
198,
6738,
764,
27530,
1330,
43470,
434,
628,
198,
198,
28482,
13,
15654,
13,
30238,
7,
18858,
8652,
434,
11,
43470,
434,
46787,
8,
198
] | 3.617647 | 34 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
@ Author : pengj
@ date : 2019/12/8 16:30
@ IDE : PyCharm
@ GitHub : https://github.com/JackyPJB
@ Contact : pengjianbiao@hotmail.com
-------------------------------------------------
Description : https://sm.ms/doc/v2#277af2f2a6a9c6679bfc62a51b714c0d
Python requests库处理 multipart/form-data 请求以及 boundary值问题,看下面
https://blog.csdn.net/Enderman_xiaohei/article/details/89421773
-------------------------------------------------
"""
import json
__author__ = 'Max_Pengjb'
import requests
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
47232,
12,
628,
220,
220,
220,
2488,
220,
220,
6434,
220,
1058,
220,
220,
220,
220,
220,
220,
279,
1516,
... | 2.283784 | 296 |
from ._hsvideo import * | [
6738,
47540,
11994,
15588,
1330,
1635
] | 3.833333 | 6 |
# Copyright (c) 2016-2018 Koninklijke Philips N.V. All rights reserved. A
# copyright license for redistribution and use in source and binary forms,
# with or without modification, is hereby granted for non-commercial,
# experimental and research purposes, provided that the following conditions
# are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the
# documentation and/or other materials provided with the distribution. If
# you wish to use this software commercially, kindly contact
# info.licensing@philips.com to obtain a commercial license.
#
# This license extends only to copyright and does not include or grant any
# patent license or other license whatsoever.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os.path
import subprocess
import sys
from pysnark.runtime import Var, subqap, enterfn
from pysnark.lib.array import Array
from pysnark.lib.ggh import ggh_hash, ggh_hash_packed
from pysnark.lib.ggh_plain import packin, packout, unpackin, unpackout
import pysnark.prove
if __name__ == '__main__':
if len(sys.argv)<2:
print "*** Usage: ", sys.argv[0], "file [pos]"
sys.exit(2)
# lib
# convert file into hash tree
fname = sys.argv[1]
if not os.path.isfile(fname+".l0"):
print >>sys.stderr, "*** Writing level 0 hash", fname+".l0"
l0out = open(fname+".l0", "w")
fin = open(fname, "rb")
while True:
ch = fin.read(1)
if ch=='': break
l0out.write(chr(ord(ch)/16-10+ord('A') if ord(ch)/16 >= 10 else ord(ch)/16+ord('0')))
l0out.write(chr(ord(ch)%16-10+ord('A') if ord(ch)%16 >= 10 else ord(ch)%16+ord('0')))
l0out.close()
fin.close()
fsz = os.path.getsize(fname)
fbits = fsz*8
hashexe = os.path.join(os.path.dirname(os.path.abspath(__file__)), "hash.exe" if os.name=="nt" else "hash")
level = 0
while fbits>1216:
fin = fname+".l"+str(level)
fout = fname+".l"+str(level+1)
if not os.path.exists(fout):
print >>sys.stderr, "*** Hashing", fin, "to", fout
inf = open(fin)
outf = open(fout, "w")
if not os.path.isfile(hashexe):
print >>sys.stderr, "*** Hashing executable", hashexe, "not found; compile it first"
sys.exit(2)
subprocess.call(hashexe, stdin=inf, stdout=outf)
fbits = fbits/6
level = level+1
tophash = list(readhexbits(open(fname+".l"+str(level))))
print "Level", level-1, "hash:", packout(tophash)
if len(sys.argv)==2: sys.exit(0)
# hash tree authentication
if len(sys.argv)==4 and sys.argv[3]=="--single":
enterfn("hashtree_" + str(level) + "_single")
elif len(sys.argv)==4:
print "*** Usage:", sys.argv[2], "<file> <pos> [--single]"
sys.exit(2)
else:
ggh_hash_packed = subqap("ggh")(ggh_hash_packed)
enterfn("hashtree_" + str(level))
posi = int(sys.argv[2])
pos = Var(posi, "pos")
pos.assert_smaller(fsz)
# read level 0
quo,rem= pos.divmod(912, posi.bit_length()-9)
l0file = open(fname+".l0")
l0file.seek(2*(quo.value*912))
bits = map(lambda x: Var(x, True), list(hextobits(l0file.read(2*912))))
bits = bits+[Var(0,True) for _ in xrange(7296-len(bits))]
for bit in bits: bit.assert_bit() # TODO: ggh_hash_packed also assert bits, but this does not ensure that packin's input are bits!
l0file.close()
tobytes = [bits[i]*128+bits[i+1]*64+bits[i+2]*32+bits[i+3]*16+
bits[i+4]*8+bits[i+5]*4+bits[i+6]*2+bits[i+7] for i in xrange(0,len(bits),8)]
res = (Array(tobytes)[rem]).val("char")
print "Character at location", posi, ":", res, chr(res)
hin = packin(bits)
#printpackedin(hin)
hout = ggh_hash_packed(hin)
#printpackedout(hout)
for i in xrange(1,level):
print >>sys.stderr, "At level", i, "of", level-1
quo,rem = quo.divmod(6, posi.bit_length()-7-2*i) # could be slightly tighter
hashfl = open(fname+".l"+str(i))
hashfl.seek(1824*quo.value)
hin = Array([Array([Var(val, True) for val in packout(list(hextobits(hashfl.read(304))))]) for _ in xrange(6)])
hashfl.close()
hin[rem].assert_equals(Array(hout))
hout = ggh_hash_packed(hin.joined())
#printpackedout(hout)
print "Level", level-1, "hash (from VC): ", Var.vals(hout, "tophash") | [
2,
15069,
357,
66,
8,
1584,
12,
7908,
17431,
676,
75,
2926,
365,
46905,
399,
13,
53,
13,
1439,
2489,
10395,
13,
317,
198,
2,
6634,
5964,
329,
41425,
290,
779,
287,
2723,
290,
13934,
5107,
11,
198,
2,
351,
393,
1231,
17613,
11,
3... | 2.361282 | 2,278 |
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import errno
from .criteria import criteria
from bes.fs import file_type
class file_type_criteria(criteria):
'match the file type.'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
12,
9,
12,
19617,
25,
40477,
12,
23,
26,
4235,
25,
29412,
26,
33793,
12,
8658,
82,
12,
14171,
25,
18038,
26,
269,
12,
35487,
12,
28968,
25,
362,
26,
7400,
12,
10394,
25,
362,
... | 2.736264 | 91 |
import pytest
import random
from asyncio_queue_rotation import RotationQueue
@pytest.mark.asyncio
@pytest.mark.asyncio
@pytest.mark.asyncio
| [
11748,
12972,
9288,
198,
11748,
4738,
198,
6738,
30351,
952,
62,
36560,
62,
10599,
341,
1330,
371,
14221,
34991,
628,
198,
31,
9078,
9288,
13,
4102,
13,
292,
13361,
952,
628,
198,
31,
9078,
9288,
13,
4102,
13,
292,
13361,
952,
628,
... | 2.754717 | 53 |
# -*- coding: utf-8 -*-
"""Helper functions and classes for the LTA CLI and parser."""
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
47429,
5499,
290,
6097,
329,
262,
406,
5603,
43749,
290,
30751,
526,
15931,
198
] | 3 | 29 |
from django import forms
from cal.models import CFPBCalendar
from sheerlike.templates import get_date_string
from v1.util.util import ERROR_MESSAGES
| [
6738,
42625,
14208,
1330,
5107,
198,
198,
6738,
2386,
13,
27530,
1330,
327,
5837,
2749,
282,
9239,
198,
6738,
15163,
2339,
13,
11498,
17041,
1330,
651,
62,
4475,
62,
8841,
198,
6738,
410,
16,
13,
22602,
13,
22602,
1330,
33854,
62,
44,... | 3.229167 | 48 |
# coding: utf-8
# # Filter By Sequence Regex Demo
#
# This example shows how to filter poteins by their sequence regualar expression.
#
# [More about regular expression](https://www.regular-expressions.info)
#
# ## Imports
# In[1]:
from pyspark import SparkConf, SparkContext
from mmtfPyspark.io import mmtfReader
from mmtfPyspark.filters import ContainsSequenceRegex
from mmtfPyspark.structureViewer import view_group_interaction
# ## Configure Spark
# In[2]:
conf = SparkConf().setMaster("local[*]") .setAppName("FilterBySequenceRegex")
sc = SparkContext(conf = conf)
# ## Read in MMTF Files
# In[3]:
path = "../../resources/mmtf_reduced_sample/"
pdb = mmtfReader.read_sequence_file(path, sc)
# ## Filter by sequence regular expression
#
# #### Zinc Finger Motif regular expression: C.{2,4}C.{12}H.{3,5}H
#
# <img src="./figures/ZincFingerMotif.png" style="width: 300px;"/>
# In[8]:
structures = pdb.filter(ContainsSequenceRegex("C.{2,4}C.{12}H.{3,5}H"))
# ## Count number of entires
# In[9]:
count = structures.count()
print(f"Number of entries containing Zinc figure motif is : {count}")
# ## Visualize Structure Zinc interactions
# In[12]:
structure_names = structures.keys().collect()
view_group_interaction(structure_names, 'ZN', style='line')
# ## Terminate Spark
# In[13]:
sc.stop()
| [
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
1303,
25853,
2750,
45835,
797,
25636,
34588,
198,
2,
220,
198,
2,
770,
1672,
2523,
703,
284,
8106,
279,
1258,
1040,
416,
511,
8379,
842,
723,
283,
5408,
13,
220,
198,
2,
220,
198... | 2.704365 | 504 |
# Leetcode 57. Insert Interval
#
# Link: https://leetcode.com/problems/insert-interval/
# Difficulty: Medium
# Solution using DP.
# Complexity:
# O(N) time | where N represent the number of intervals
# O(1) space
| [
2,
1004,
316,
8189,
7632,
13,
35835,
4225,
2100,
198,
2,
198,
2,
7502,
25,
3740,
1378,
293,
316,
8189,
13,
785,
14,
1676,
22143,
14,
28463,
12,
3849,
2100,
14,
198,
2,
27419,
25,
13398,
198,
198,
2,
28186,
1262,
27704,
13,
198,
... | 2.906667 | 75 |
# This Python file uses the following encoding: utf-8
# ##############################################################################
# KOLIBRI CLOUD API DAEMON EXAMPLE WITH ACCESS TOKEN #
# #
# Needs: #
# - Python 2.7 (Untested with Python 3.x) #
# - urllib2 #
# #
# It is necessary to have a valid ACCESS_TOKEN. #
# Please ask KELLER to provide a valid ACCESS_TOKEN! #
# #
# DateTime format from the API are always in UTC #
# Pressure values from the API are always in bar #
# Temperature values from the API are always in °C #
##############################################################################
import json
import logging
import os
import sys
import urllib2
from datetime import timedelta
import datetime
# client specific access token
ACCESS_TOKEN = "___modified___sVKKoibnUftMMkZlB9dFHFfWDoCDgu4wYSDvX3jXs16n+LJkpHcjDbdnObLVByQxn67yG/dczWMYrIjNd/s3qHyAAAAAMNB4dy+qNxTrW6TUVa/qk6/5esIKLuZbKG5D5eM34kpANDLOJzhcpBaOnZoNSvQgA==" # represents the user
def turn_on_logging_of_imported_libraries():
""" Use this to get more log information from the ADAL library and the urllib2
For debug purpose.
"""
logging.basicConfig(level=logging.DEBUG)
def get_data(_endpoint, _access_token):
"""
With the correct Access Token one can get data from the API.
The API specification can be seen here:
https://api.kolibricloud.ch/swagger/index.html?url=/swagger/v1/swagger.json
"""
base_url = "https://api.kolibricloud.ch"
url = base_url + _endpoint
req = urllib2.Request(url)
req.add_header("userOid",_access_token)
try:
response = urllib2.urlopen(req)
except urllib2.HTTPError, e: print e.headers
try:
html = response.read()
json_obj = json.loads(html)
except UnboundLocalError:
print("Could not find data in "+_endpoint+"\n")
json_obj = ""
return json_obj
#uncomment for verbose log
#turn_on_logging_of_imported_libraries()
# with the access token you can access the data from the API without a bearer token
# here are some examples
endpoint = "/v1/Devices"
print("Here are the list of all devices:")
data1 = get_data(endpoint, ACCESS_TOKEN)
print(json.dumps(data1, indent=2))
print("----------------------------------------------------"+"\n")
my_device = data1["devices"][0]["id"] # or better use the specific device id my_device = 1234 # must be a device id that is in the set of accessible devices
endpoint = "/v1/Devices/"+str(my_device)
data2 = get_data(endpoint, ACCESS_TOKEN)
print("Here are information of device "+str(my_device)+":")
print("(If you do not own #"+str(my_device)+" this will break here.)")
print(json.dumps(data2, indent=2))
print("----------------------------------------------------"+"\n")
print('Last measurement time was: '+data2['lastMeasurementTransmissionDateTime'])
# data3 is the JSON data from device "my_device" of the channel with the id 8 which is TBaro in [° C] of the last 12 hours
measurementDefinitionId = 8 # is TBaro (Air Temperature) / See get_measurementDefinitionId_LookUpTable()
data3 = get_data_measurements_from_timespan(12, measurementDefinitionId, my_device, ACCESS_TOKEN)
measurementDefinitionIds = get_measurementDefinitionId_LookUpTable()
print("Measurements of " + measurementDefinitionIds[measurementDefinitionId]+" : ")
print(json.dumps(data3, indent=2))
print("----------------------------------------------------"+"\n")
print("The list of the device ids and their channels: ")
for each_device in data1['devices']:
endpoint = "/v1/Devices/"+str(each_device['id'])
data = get_data(endpoint, ACCESS_TOKEN)
all_channels_of_this_device = []
for each_channel in data['measurementDefinitions']:
all_channels_of_this_device.append(str(each_channel['id'])+":"+each_channel['name'])
all_channels_of_this_device = [str(all_channels_of_this_device[x]) for x in range(len(all_channels_of_this_device))] #just prettify the texts
print("#"+str(each_device['id'])+" has measurement channels: "+str(all_channels_of_this_device))
| [
2,
770,
11361,
2393,
3544,
262,
1708,
21004,
25,
3384,
69,
12,
23,
198,
2,
1303,
29113,
29113,
7804,
4242,
2,
198,
2,
509,
3535,
9865,
7112,
7852,
2606,
35,
7824,
17051,
3620,
1340,
7788,
2390,
16437,
13315,
15859,
7597,
5390,
43959,
... | 2.339113 | 1,961 |
#!/usr/bin/env python3
'''
File: template_filters.py
Author: Zachary King
Implements custom Django template filters to be used
in Django templates.
'''
from django.template.defaultfilters import register
from datetime import date, datetime
from decimal import Decimal
@register.simple_tag
@register.simple_tag
@register.filter
def get_item(dictionary, key):
'''Custom filter for getting a value from a dictionary
inside a Django template. To use in a template:
`{{ my_dict|get_item:item.NAME }}'''
return dictionary.get(key)
@register.simple_tag
@register.simple_tag
@register.simple_tag
@register.simple_tag
@register.simple_tag
@register.simple_tag
def transaction_class(transaction):
'''Returns a Bootstrap class string for a Transaction'''
if transaction.amount == 0:
return 'default'
is_income = transaction.category.is_income
if is_income:
if transaction.amount >= 0:
return 'success'
return 'danger'
else:
if transaction.amount >= 0:
return 'danger'
return 'success'
@register.simple_tag
def shorten_string(string, limit):
'''Returns the string, shortened to limit chars and with '...' appended'''
if len(string) <= limit:
return string
return string[:limit-3] + '...'
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
7061,
6,
198,
8979,
25,
11055,
62,
10379,
1010,
13,
9078,
198,
13838,
25,
18825,
560,
2677,
198,
198,
3546,
1154,
902,
2183,
37770,
11055,
16628,
284,
307,
973,
198,
259,
37770,
... | 2.838013 | 463 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Visualize validation curve."""
import pandas as pd
import matplotlib.pyplot as plt
Y_SCALE_FACTOR = 100  # plot accuracies as percentages rather than fractions

# Load the training log and keep only the accuracy columns.
history = pd.read_csv('log.csv', sep=';')
history = history[['epoch', 'acc', 'val_acc']]
history[['acc', 'val_acc']] = history[['acc', 'val_acc']] * Y_SCALE_FACTOR
history = (history.set_index('epoch')
                  .rename(columns={'acc': 'Training Accuracy',
                                   'val_acc': 'Validation Accuracy'}))
print(history)

# Plot both curves with a fixed y-range and a fine minor grid.
fig, ax = plt.subplots()
history.plot.line(ylim=(0.65 * Y_SCALE_FACTOR, 0.85 * Y_SCALE_FACTOR),
                  title='Validation Curve',
                  ax=ax)
ax.minorticks_on()  # required for minor grid
ax.grid()
ax.grid(which='minor', linestyle=':', linewidth='0.5', color='black')
plt.savefig('validation-curve.png', dpi=300)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
36259,
1096,
21201,
12133,
526,
15931,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
2603,
29487,
... | 2.132626 | 377 |
# Copyright 2014, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
# Configuration options for the AMQP 1.0 messaging driver. All options were
# previously registered under the deprecated [amqp1] group.
amqp1_opts = [
    # Address prefixes: prepended to the target address to select the
    # delivery semantics (one specific server / all servers / any server).
    cfg.StrOpt('server_request_prefix',
               default='exclusive',
               deprecated_group='amqp1',
               help="address prefix used when sending to a specific server"),
    cfg.StrOpt('broadcast_prefix',
               default='broadcast',
               deprecated_group='amqp1',
               help="address prefix used when broadcasting to all servers"),
    cfg.StrOpt('group_request_prefix',
               default='unicast',
               deprecated_group='amqp1',
               help="address prefix when sending to any server in group"),
    # Identity of this endpoint's AMQP container.
    cfg.StrOpt('container_name',
               default=None,
               deprecated_group='amqp1',
               help='Name for the AMQP container'),
    # Connection behaviour.
    cfg.IntOpt('idle_timeout',
               default=0,  # disabled
               deprecated_group='amqp1',
               help='Timeout for inactive connections (in seconds)'),
    cfg.BoolOpt('trace',
                default=False,
                deprecated_group='amqp1',
                help='Debug: dump AMQP frames to stdout'),
    # SSL/TLS configuration for the connection.
    cfg.StrOpt('ssl_ca_file',
               default='',
               deprecated_group='amqp1',
               help="CA certificate PEM file for verifing server certificate"),
    cfg.StrOpt('ssl_cert_file',
               default='',
               deprecated_group='amqp1',
               help='Identifying certificate PEM file to present to clients'),
    cfg.StrOpt('ssl_key_file',
               default='',
               deprecated_group='amqp1',
               help='Private key PEM file used to sign cert_file certificate'),
    cfg.StrOpt('ssl_key_password',
               default=None,
               deprecated_group='amqp1',
               help='Password for decrypting ssl_key_file (if encrypted)'),
    cfg.BoolOpt('allow_insecure_clients',
                default=False,
                deprecated_group='amqp1',
                help='Accept clients using either SSL or plain TCP')
]
| [
2,
220,
220,
220,
15069,
1946,
11,
2297,
10983,
11,
3457,
13,
198,
2,
198,
2,
220,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
220,
220,
220,
407,
779,
428,
23... | 2.263972 | 1,163 |
"""Datasafe module for the labinform package.
The datasafe is a key feature of labinform which serves to safely store data.
Functionality includes directory generation and checksum creation.
"""
import os
import hashlib
import shutil
class Error(Exception):
    """Base class for all exceptions raised by this module."""
class NoSuchDirectoryError(Error):
    """Raised when a path does not point to an existing directory."""
class IncorrectLoiError(Error):
    """Raised when a malformed loi (lab object identifier) is supplied."""
class DirNamesAreNotIntsError(Error):
    """Raised when incrementing directory names that are not numeric."""
class Datasafe:
    """Data handler for moving data in the context of a datasafe.

    The operations performed include generation of a directory structure,
    storing data in and retrieving data from these directories as well as
    verifying the integrity of and providing general information about the
    data stored.
    """

    def __init__(self):
        # Root directory of the datasafe; empty until set_path() is called.
        # Initialising it here prevents an AttributeError in methods such as
        # verify_own_path() when set_path() has not been called yet.
        self.path = ""

    def set_path(self, path=""):
        """Set the path of the datasafe's top level directory.

        The directory is set as path if it exists.

        Parameters
        ----------
        path: :class:`str`
            The path that should be set as the instance attribute.

        Raises
        ------
        NoSuchDirectoryError
            Raised if path does not point to an existing directory.
        """
        if not self.verify_path(path):
            raise NoSuchDirectoryError
        self.path = path

    @staticmethod
    def verify_path(path=""):
        """Verify if a path points to an existing directory.

        Static method which works for any path, not just the datasafe root
        path.

        Parameters
        ----------
        path: :class:`str`
            path that should be checked

        Returns
        -------
        path_okay: :class:`bool`
            result of the path check
        """
        return os.path.isdir(path)

    def verify_own_path(self):
        """Verify that the path set as instance attribute is a correct path.

        Wrapper around :meth:`verify_path` specifically for checking the
        root path of the datasafe.

        Returns
        -------
        path_okay: :class:`bool`
            result of the path check
        """
        return self.verify_path(self.path)

    def loi_to_path(self, loi=""):
        """Retrieve a file's datasafe directory path from the data's loi.

        The loi has seven '/'-separated components; the first two
        ("42.1001" and "ds") address the datasafe itself, the remaining
        five form the path relative to the datasafe root.

        Parameters
        ----------
        loi: :class:`str`
            loi from which the path should be retrieved

        Returns
        -------
        path: :class:`str`
            path retrieved from the loi

        Raises
        ------
        IncorrectLoiError
            Raised if the loi does not have exactly seven components.
        """
        loi_parts = loi.split("/")
        if len(loi_parts) != 7:
            raise IncorrectLoiError
        path = self.path
        for part in loi_parts[2:]:
            path += "/"
            path += part
        return path

    def add_directory(self, path):
        """Create a directory (including parents) if it does not exist.

        Parameters
        ----------
        path: :class:`str`
            path of the directory that should be created
        """
        if not os.path.exists(path):
            os.makedirs(path)

    @staticmethod
    def dir_empty(path=""):
        """Check whether a directory is empty.

        Parameters
        ----------
        path: :class:`str`
            path of the directory which should be checked

        Returns
        -------
        :class:`bool`
            True if the directory contains no entries.

        Raises
        ------
        NoSuchDirectoryError
            Raised if the directory does not exist.
        """
        try:
            dir_content = os.listdir(path)
        except FileNotFoundError:
            raise NoSuchDirectoryError
        return dir_content == list()

    @staticmethod
    def increment(number=0):
        """Increment an integer by one.

        Note: the original code stacked two @staticmethod decorators here;
        only one is correct.

        Parameters
        ----------
        number: :class:`int`
            integer that should be incremented
        """
        return number + 1

    @staticmethod
    def find_highest(path=""):
        """Find the highest-numbered directory in a given directory.

        For a given path, find the numbered directory (i.e. directory with
        an integer as name) with the highest number. Non-numeric entries
        are ignored; if none of the entries is numeric, 0 is returned.

        Parameters
        ----------
        path: :class:`str`
            path of the directory that should be searched

        Returns
        -------
        :class:`int`
            highest directory number found, 0 if none

        Raises
        ------
        NoSuchDirectoryError
            Raised if the directory does not exist.
        """
        try:
            dir_content = os.listdir(path)
        except FileNotFoundError:
            raise NoSuchDirectoryError
        dir_names = list()
        for entry in dir_content:
            try:
                dir_names.append(int(entry))
            except ValueError:
                pass
        if not dir_names:
            return 0
        return max(dir_names)

    def _ensure_subdir(self, relative_path, component):
        """Extend a root-relative path by one component, creating the dir.

        Helper for :meth:`generate`: joins component onto relative_path and
        makes sure the corresponding directory below the datasafe root
        exists.
        """
        relative_path = os.path.join(relative_path, component)
        absolute_path = os.path.join(self.path, relative_path)
        # Bugfix: the original called self.has_dir(), which is defined
        # nowhere in this class; verify_path() provides that check.
        if not self.verify_path(absolute_path):
            self.add_directory(absolute_path)
        return relative_path

    def generate(self, experiment="", sample_id=""):
        """Generate directory structure and return identifier.

        This method will verify to what extent the relevant directory
        structure is present and create directories as required. In this
        context the measurement number for a given sample is - in case of
        consecutive measurements - automatically increased.

        Finally the method will return a unique identifier for the
        respective measurement and sample, including the directory path.

        Parameters
        ----------
        experiment: :class:`str`
            type of experiment performed, e.g. 'cwepr'
        sample_id: :class:`str`
            unique identifier for the sample measured

        Returns
        -------
        loi: :class:`str`
            unique loi including the information provided
        """
        loi_basic = "42.1001/ds/"
        relative_path = self._ensure_subdir("", experiment)
        relative_path = self._ensure_subdir(relative_path, sample_id)
        sample_dir = os.path.join(self.path, relative_path)
        # Consecutive measurements of the same sample get increasing numbers.
        if self.dir_empty(sample_dir):
            measurement = "1"
        else:
            measurement = str(self.increment(self.find_highest(sample_dir)))
        relative_path = self._ensure_subdir(relative_path, measurement)
        relative_path = self._ensure_subdir(relative_path, "data")
        relative_path = self._ensure_subdir(relative_path, "raw")
        return loi_basic + relative_path

    def push(self, data="", loi=""):
        """Copy data into the datasafe.

        The target directory is derived from the loi via
        :meth:`loi_to_path`. (A leftover debug print of the target path
        was removed.)

        Parameters
        ----------
        data: :class:`str`
            data (file) to be moved
        loi: :class:`str`
            unique identifier providing a directory path
        """
        target_path = self.loi_to_path(loi)
        shutil.copy(data, target_path)

    def pull(self, loi=""):
        """Retrieve data from the datasafe.

        Not yet implemented: currently always returns an empty string.

        Parameters
        ----------
        loi: :class:`str`
            unique identifier for the data to be retrieved

        Returns
        -------
        :class:`str`
            retrieved data
        """
        return str()

    def index(self, loi=""):
        """Retrieve background information from the datasafe.

        Not yet implemented: currently always returns an empty dict.

        Parameters
        ----------
        loi: :class:`str`
            unique identifier for the data for which the background
            information (manifest.yaml) should be retrieved.

        Returns
        -------
        :class:`dict`
            retrieved background information (manifest.yaml) as dict
        """
        return dict()

    def checksum(self, loi=""):
        """Create a cryptographic hash (MD5) for a file in the datasafe.

        Not yet implemented: currently always returns an empty string.

        Parameters
        ----------
        loi: :class:`str`
            unique identifier for the data (file) for which a checksum
            should be created

        Returns
        -------
        :class:`str`
            checksum (MD5)
        """
        return str()

    def moveto(self, data="", experiment="", sample_id=""):
        """Prepare directory and move data there.

        This is a wrapper function which calls :meth:`generate` to generate
        a directory structure if necessary and creates a local checksum of
        the file to be moved. Then moves the file to the datasafe, creates
        another checksum. The two checksums are compared and the result of
        the comparison is returned.

        Not yet implemented: currently always returns True.

        Parameters
        ----------
        data: :class:`str`
            data (file) that should be moved inside the datasafe.
        experiment: :class:`str`
            type of experiment performed, e.g. 'cwepr'
        sample_id: :class:`str`
            unique identifier for the sample measured

        Returns
        -------
        :class:`bool`
            result of the checksum comparison
        """
        return True
| [
37811,
27354,
292,
8635,
8265,
329,
262,
2248,
259,
687,
5301,
13,
198,
198,
464,
19395,
8635,
318,
257,
1994,
3895,
286,
2248,
259,
687,
543,
9179,
284,
11512,
3650,
1366,
13,
198,
22203,
1483,
3407,
8619,
5270,
290,
8794,
388,
6282,... | 2.364252 | 4,431 |
from flask import Flask, render_template, url_for, request
import numpy as np
from sklearn.externals import joblib
# Flask application object; routes are registered on it below.
app = Flask(__name__)
# Pre-trained model, deserialised once at startup so requests can reuse it.
# NOTE(review): sklearn.externals.joblib is deprecated (removed in
# scikit-learn 0.23) — presumably this targets an older scikit-learn;
# confirm the pinned version before upgrading.
filename = './data/model.sav'
model = joblib.load(filename)
@app.route("/")
@app.route("/index.html")
@app.route("/", methods=['POST'])
| [
6738,
42903,
1330,
46947,
11,
8543,
62,
28243,
11,
19016,
62,
1640,
11,
2581,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
1341,
35720,
13,
1069,
759,
874,
1330,
1693,
8019,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
198,
198,
3... | 2.816327 | 98 |
###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2018, John McNamara, jmcnamara@cpan.org
#
import unittest
from ...compatibility import StringIO
from ..helperfunctions import _xml_to_list
from ...worksheet import Worksheet
from ...sharedstrings import SharedStringTable
class TestAssembleWorksheet(unittest.TestCase):
    """
    Test assembling a complete Worksheet file.
    """
    def test_assemble_xml_file(self):
        """Test writing a worksheet with data out of bounds."""
        self.maxDiff = None
        fh = StringIO()
        worksheet = Worksheet()
        worksheet._set_filehandle(fh)
        worksheet.str_table = SharedStringTable()
        worksheet.select()
        # Excel worksheet limits: valid rows are 0..1048575 and valid
        # columns 0..16383, so these values are one past the end.
        max_row = 1048576
        max_col = 16384
        # The write_*() methods signal an out-of-range cell by returning -1.
        bound_error = -1
        # Test some out of bound values.
        got = worksheet.write_string(max_row, 0, 'Foo')
        self.assertEqual(got, bound_error)
        got = worksheet.write_string(0, max_col, 'Foo')
        self.assertEqual(got, bound_error)
        got = worksheet.write_string(max_row, max_col, 'Foo')
        self.assertEqual(got, bound_error)
        got = worksheet.write_number(max_row, 0, 123)
        self.assertEqual(got, bound_error)
        got = worksheet.write_number(0, max_col, 123)
        self.assertEqual(got, bound_error)
        got = worksheet.write_number(max_row, max_col, 123)
        self.assertEqual(got, bound_error)
        got = worksheet.write_blank(max_row, 0, None, 'format')
        self.assertEqual(got, bound_error)
        got = worksheet.write_blank(0, max_col, None, 'format')
        self.assertEqual(got, bound_error)
        got = worksheet.write_blank(max_row, max_col, None, 'format')
        self.assertEqual(got, bound_error)
        got = worksheet.write_formula(max_row, 0, '=A1')
        self.assertEqual(got, bound_error)
        got = worksheet.write_formula(0, max_col, '=A1')
        self.assertEqual(got, bound_error)
        got = worksheet.write_formula(max_row, max_col, '=A1')
        self.assertEqual(got, bound_error)
        got = worksheet.write_array_formula(0, 0, 0, max_col, '=A1')
        self.assertEqual(got, bound_error)
        got = worksheet.write_array_formula(0, 0, max_row, 0, '=A1')
        self.assertEqual(got, bound_error)
        got = worksheet.write_array_formula(0, max_col, 0, 0, '=A1')
        self.assertEqual(got, bound_error)
        got = worksheet.write_array_formula(max_row, 0, 0, 0, '=A1')
        self.assertEqual(got, bound_error)
        got = worksheet.write_array_formula(max_row, max_col, max_row, max_col, '=A1')
        self.assertEqual(got, bound_error)
        # Column out of bounds.
        got = worksheet.set_column(6, max_col, 17)
        self.assertEqual(got, bound_error)
        got = worksheet.set_column(max_col, 6, 17)
        self.assertEqual(got, bound_error)
        # Row out of bounds.
        worksheet.set_row(max_row, 30)
        # Reverse max and min column numbers
        worksheet.set_column(0, 3, 17)
        # Write some valid strings.
        worksheet.write_string(0, 0, 'Foo')
        worksheet.write_string(2, 0, 'Bar')
        worksheet.write_string(2, 3, 'Baz')
        worksheet._assemble_xml_file()
        # Expected XML: only the valid writes appear; every out-of-bounds
        # call above must have been silently rejected.
        exp = _xml_to_list("""
                <?xml version="1.0" encoding="UTF-8" standalone="yes"?>
                <worksheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main" xmlns:r="http://schemas.openxmlformats.org/officeDocument/2006/relationships">
                  <dimension ref="A1:D3"/>
                  <sheetViews>
                    <sheetView tabSelected="1" workbookViewId="0"/>
                  </sheetViews>
                  <sheetFormatPr defaultRowHeight="15"/>
                  <cols>
                    <col min="1" max="4" width="17.7109375" customWidth="1"/>
                  </cols>
                  <sheetData>
                    <row r="1" spans="1:4">
                      <c r="A1" t="s">
                        <v>0</v>
                      </c>
                    </row>
                    <row r="3" spans="1:4">
                      <c r="A3" t="s">
                        <v>1</v>
                      </c>
                      <c r="D3" t="s">
                        <v>2</v>
                      </c>
                    </row>
                  </sheetData>
                  <pageMargins left="0.7" right="0.7" top="0.75" bottom="0.75" header="0.3" footer="0.3"/>
                </worksheet>
                """)
        got = _xml_to_list(fh.getvalue())
        self.assertEqual(got, exp)
| [
29113,
29113,
7804,
4242,
21017,
198,
2,
198,
2,
30307,
329,
1395,
7278,
87,
34379,
13,
198,
2,
198,
2,
15069,
357,
66,
828,
2211,
12,
7908,
11,
1757,
22586,
47848,
11,
474,
23209,
7402,
3301,
31,
66,
6839,
13,
2398,
198,
2,
198,
... | 1.983857 | 2,354 |
import pytest
# Bugfix: the @pytest.fixture decorator was applied directly to the test
# function, which turns the test into a fixture (it is never collected as a
# test) and leaves its `module_scope_simple_fixture` parameter undefined.
# The fixture is restored as its own definition here.
@pytest.fixture(scope='module')
def module_scope_simple_fixture():
    """Module-scoped fixture exercised by the tests below."""
    pass


def test_module_scope_simple_fixture(module_scope_simple_fixture):
    """
    >>> allure_report = getfixture('allure_report')
    >>> assert_that(allure_report,
    ...             has_test_case('test_module_scope_simple_fixture',
    ...                           has_container(allure_report,
    ...                                         has_before('module_scope_simple_fixture')
    ...                           )
    ...             )
    ... )
    """
    pass
# NOTE: the docstring below is an executable doctest run by the allure
# integration test harness; it asserts that both tests of this module share
# one module-scoped fixture container in the generated report. Do not
# rephrase it casually.
def test_reuse_module_scope_simple_fixture(module_scope_simple_fixture):
    """
    >>> allure_report = getfixture('allure_report')
    >>> assert_that(allure_report,
    ...             has_test_case('test_reuse_module_scope_simple_fixture',
    ...                           has_container(allure_report,
    ...                                         has_before('module_scope_simple_fixture')
    ...                           )
    ...             )
    ... )
    >>> assert_that(allure_report,
    ...             has_same_container('test_module_scope_simple_fixture',
    ...                                'test_reuse_module_scope_simple_fixture',
    ...                                has_before('module_scope_simple_fixture')
    ...             )
    ... )
    """
    pass
| [
11748,
12972,
9288,
628,
198,
31,
9078,
9288,
13,
69,
9602,
7,
29982,
11639,
21412,
11537,
628,
198,
4299,
1332,
62,
21412,
62,
29982,
62,
36439,
62,
69,
9602,
7,
21412,
62,
29982,
62,
36439,
62,
69,
9602,
2599,
198,
220,
220,
220,
... | 1.816689 | 731 |