| content | origin | type |
|---|---|---|
| stringlengths (0 – 1.05M) | stringclasses (2 values) | stringclasses (2 values) |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 19 18:35:42 2018
@author: Chat
"""
import subprocess
import sys

def install():  # Run this to install the matplotlib dependency.
    # pip.main() was removed in pip 10+; invoke pip through a subprocess instead.
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'matplotlib'])

import matplotlib.pyplot as plt
import praw
import datetime
def hour_to_count(y, hours_and_count):
    # Tally how many posts fall in each hour. Iterating over the unique hours
    # avoids the skipped-element bug of mutating a list while looping over it.
    for x in set(y):
        hours_and_count[x] = y.count(x)
reddit = praw.Reddit(client_id='ID',
client_secret='SECRET',
password='REDDIT_PASSWORD',
user_agent='USER_AGENT',
username='USERNAME')
submissions = []
keys = []
values = []
y = []
hours_and_count = {hour: 0 for hour in range(24)}
SUBREDDIT = 'Python'
LIMIT = 1000
subreddit = reddit.subreddit(SUBREDDIT)
for submission in subreddit.new(limit=LIMIT):
    submissions.append(vars(submission))  # Convert Reddit post objects to dicts for easier analysis.
for post in submissions:
    y.append(int(datetime.datetime.fromtimestamp(int(post['created_utc'])).strftime('%H')))
hour_to_count(y, hours_and_count)
s = 100
for key, value in hours_and_count.items():
    keys.append(key)
    values.append(value)
plt.scatter(keys, values, s, c="b", alpha=0.5, label=SUBREDDIT)
plt.xlabel("Time")
plt.ylabel("Number of Posts")
plt.legend(loc=2)
plt.show()
if sum(values) == len(submissions):
print("Data is valid")
input("Press any key to exit")
else:
print("Data does not add up to the limit. Check limit and subreddit")
input("Press any key to exit")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: movie_catalogue.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='movie_catalogue.proto',
package='grpc_demo',
syntax='proto3',
serialized_options=b'Z\006.;main',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x15movie_catalogue.proto\x12\tgrpc_demo\"D\n\nCastMember\x12\x11\n\tcharacter\x18\x01 \x01(\t\x12\x11\n\tfirstName\x18\x02 \x01(\t\x12\x10\n\x08lastName\x18\x03 \x01(\t\"\x90\x01\n\x05Movie\x12\r\n\x05title\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t\x12\x16\n\x0eproductionYear\x18\x03 \x01(\x05\x12\r\n\x05genre\x18\x04 \x01(\t\x12\x10\n\x08\x64uration\x18\x05 \x01(\x05\x12*\n\x0b\x63\x61stMembers\x18\x06 \x03(\x0b\x32\x15.grpc_demo.CastMember\"2\n\x0eMovieCatalogue\x12 \n\x06movies\x18\x01 \x03(\x0b\x32\x10.grpc_demo.MovieB\x08Z\x06.;mainb\x06proto3'
)
_CASTMEMBER = _descriptor.Descriptor(
name='CastMember',
full_name='grpc_demo.CastMember',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='character', full_name='grpc_demo.CastMember.character', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='firstName', full_name='grpc_demo.CastMember.firstName', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='lastName', full_name='grpc_demo.CastMember.lastName', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=36,
serialized_end=104,
)
_MOVIE = _descriptor.Descriptor(
name='Movie',
full_name='grpc_demo.Movie',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='title', full_name='grpc_demo.Movie.title', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='description', full_name='grpc_demo.Movie.description', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='productionYear', full_name='grpc_demo.Movie.productionYear', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='genre', full_name='grpc_demo.Movie.genre', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='duration', full_name='grpc_demo.Movie.duration', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='castMembers', full_name='grpc_demo.Movie.castMembers', index=5,
number=6, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=107,
serialized_end=251,
)
_MOVIECATALOGUE = _descriptor.Descriptor(
name='MovieCatalogue',
full_name='grpc_demo.MovieCatalogue',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='movies', full_name='grpc_demo.MovieCatalogue.movies', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=253,
serialized_end=303,
)
_MOVIE.fields_by_name['castMembers'].message_type = _CASTMEMBER
_MOVIECATALOGUE.fields_by_name['movies'].message_type = _MOVIE
DESCRIPTOR.message_types_by_name['CastMember'] = _CASTMEMBER
DESCRIPTOR.message_types_by_name['Movie'] = _MOVIE
DESCRIPTOR.message_types_by_name['MovieCatalogue'] = _MOVIECATALOGUE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CastMember = _reflection.GeneratedProtocolMessageType('CastMember', (_message.Message,), {
'DESCRIPTOR' : _CASTMEMBER,
'__module__' : 'movie_catalogue_pb2'
# @@protoc_insertion_point(class_scope:grpc_demo.CastMember)
})
_sym_db.RegisterMessage(CastMember)
Movie = _reflection.GeneratedProtocolMessageType('Movie', (_message.Message,), {
'DESCRIPTOR' : _MOVIE,
'__module__' : 'movie_catalogue_pb2'
# @@protoc_insertion_point(class_scope:grpc_demo.Movie)
})
_sym_db.RegisterMessage(Movie)
MovieCatalogue = _reflection.GeneratedProtocolMessageType('MovieCatalogue', (_message.Message,), {
'DESCRIPTOR' : _MOVIECATALOGUE,
'__module__' : 'movie_catalogue_pb2'
# @@protoc_insertion_point(class_scope:grpc_demo.MovieCatalogue)
})
_sym_db.RegisterMessage(MovieCatalogue)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
nilq/baby-python
|
python
|
__all__ = ['atmospheric']
from . import atmospheric
|
nilq/baby-python
|
python
|
from spaceone.core.service import *
__all__ = ['HelloWorldService']
@authentication_handler
@authorization_handler
@event_handler
class HelloWorldService(BaseService):
@transaction
@check_required(['name'])
def say_hello(self, params):
helloworld_mgr = self.locator.get_manager('HelloWorldManager')
return helloworld_mgr.say_hello(params['name'])
|
nilq/baby-python
|
python
|
schema = """
CREATE TABLE IF NOT EXISTS ratings (
rating_id INTEGER PRIMARY KEY, name TEXT UNIQUE, league TEXT, year TEXT, home_advantage REAL, r_squared REAL, consistency REAL, games_played INTEGER, games_scheduled INTEGER, description TEXT, finished INTEGER );
CREATE TABLE IF NOT EXISTS teams (
rating_id INTEGER, team_id INTEGER, name TEXT, wins INTEGER, losses INTEGER, rating REAL, rank INTEGER, strength_of_schedule_past REAL, strength_of_schedule_future REAL, strength_of_schedule_all REAL, expected_wins INTEGER, expected_losses INTEGER, offense REAL, defense REAL, offense_rank INTEGER, defense_rank INTEGER );
CREATE TABLE IF NOT EXISTS games (
rating_id INTEGER, team_id INTEGER, opponent_id INTEGER, points_for INTEGER, points_against INTEGER, result TEXT, date TEXT, location TEXT, normalized_score REAL, weight REAL, win_probability REAL );
"""
|
nilq/baby-python
|
python
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.58
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class ConfigurationApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_configuration(self, **kwargs):
"""
Get configuration by name. Available : - PLATFORM_NAME - ACTIVE - LANG_DEFAULT - CURRENCY_DEFAULT - COUNTRY_DEFAULT - TIMEZONE - COPYRIGHT - COOKIE_WARNING - RECAPTCHA_KEY - CUSTOMER_REGISTRATION - CATALOG_RESTRICTED - CATALOG_SUBSCRIPTION - PRODUCTS_ORDER_BY - PRODUCTS_ORDER_WAY - PRODUCTS_RAIL_NB - PRODUCTS_NEW_DAYS - FORCE_TAX_ID - CMS_CONDITIONS_ID - GEOLOCATION_WHITELIST - PASSWORD_MIN_LENGTH - PASSWORD_MIN_CAPITALIZE - PASSWORD_MIN_NUMERIC - PASSWORD_MIN_SPECIAL
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:return: ConfigurationList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_configuration_with_http_info(**kwargs)
else:
(data) = self.get_configuration_with_http_info(**kwargs)
return data
def get_configuration_with_http_info(self, **kwargs):
"""
Get configuration by name. Available : - PLATFORM_NAME - ACTIVE - LANG_DEFAULT - CURRENCY_DEFAULT - COUNTRY_DEFAULT - TIMEZONE - COPYRIGHT - COOKIE_WARNING - RECAPTCHA_KEY - CUSTOMER_REGISTRATION - CATALOG_RESTRICTED - CATALOG_SUBSCRIPTION - PRODUCTS_ORDER_BY - PRODUCTS_ORDER_WAY - PRODUCTS_RAIL_NB - PRODUCTS_NEW_DAYS - FORCE_TAX_ID - CMS_CONDITIONS_ID - GEOLOCATION_WHITELIST - PASSWORD_MIN_LENGTH - PASSWORD_MIN_CAPITALIZE - PASSWORD_MIN_NUMERIC - PASSWORD_MIN_SPECIAL
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:return: ConfigurationList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_configuration" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/configuration'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ConfigurationList',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_configuration_analytics(self, **kwargs):
"""
Get analytics configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration_analytics(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:return: Analytics
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_configuration_analytics_with_http_info(**kwargs)
else:
(data) = self.get_configuration_analytics_with_http_info(**kwargs)
return data
def get_configuration_analytics_with_http_info(self, **kwargs):
"""
Get analytics configuration
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration_analytics_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:return: Analytics
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_configuration_analytics" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/configuration/analytics'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Analytics',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_configuration_by_name(self, configuration_name, **kwargs):
"""
Get configuration by name. Available : - LANG_DEFAULT - CURRENCY_DEFAULT - COUNTRY_DEFAULT - TIMEZONE
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration_by_name(configuration_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str configuration_name: (required)
:return: Configuration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_configuration_by_name_with_http_info(configuration_name, **kwargs)
else:
(data) = self.get_configuration_by_name_with_http_info(configuration_name, **kwargs)
return data
def get_configuration_by_name_with_http_info(self, configuration_name, **kwargs):
"""
Get configuration by name. Available : - LANG_DEFAULT - CURRENCY_DEFAULT - COUNTRY_DEFAULT - TIMEZONE
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration_by_name_with_http_info(configuration_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str configuration_name: (required)
:return: Configuration
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['configuration_name']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_configuration_by_name" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'configuration_name' is set
if ('configuration_name' not in params) or (params['configuration_name'] is None):
raise ValueError("Missing the required parameter `configuration_name` when calling `get_configuration_by_name`")
collection_formats = {}
resource_path = '/configuration/{configuration_name}'.replace('{format}', 'json')
path_params = {}
if 'configuration_name' in params:
path_params['configuration_name'] = params['configuration_name']
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Configuration',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_configuration_logo(self, **kwargs):
"""
Get logo settings
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration_logo(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: LogoSettings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_configuration_logo_with_http_info(**kwargs)
else:
(data) = self.get_configuration_logo_with_http_info(**kwargs)
return data
def get_configuration_logo_with_http_info(self, **kwargs):
"""
Get logo settings
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration_logo_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: LogoSettings
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_configuration_logo" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/configuration/logo'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LogoSettings',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_configuration_social(self, **kwargs):
"""
Get social networks settings
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration_social(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: SocialSettings
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_configuration_social_with_http_info(**kwargs)
else:
(data) = self.get_configuration_social_with_http_info(**kwargs)
return data
def get_configuration_social_with_http_info(self, **kwargs):
"""
Get social networks settings
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_configuration_social_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: SocialSettings
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_configuration_social" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/configuration/social'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='SocialSettings',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.2 on 2018-04-11 18:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("jbank", "0021_auto_20180209_0935"),
]
operations = [
migrations.AlterField(
model_name="referencepaymentbatch",
name="service_identifier",
field=models.CharField(blank=True, max_length=9, verbose_name="service identifier"),
),
]
|
nilq/baby-python
|
python
|
import os
import cv2
import time
def convertImg(Path):
# Read in the image
img = cv2.imread(Path)
# Convert the image to grayscale
gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Invert the grayscale image
inverted_gray_image = cv2.bitwise_not(gray_image)
# blur the image by gaussian function
blurred_image = cv2.GaussianBlur(inverted_gray_image, (21, 21), 0)
# Invert the image
inverted_blurred_image = cv2.bitwise_not(blurred_image)
# create the pencil sketch
pencil_sketch_img = cv2.divide(gray_image, inverted_blurred_image, scale=256.0)
    # Cache the pencil sketch in a local History folder.
    if not os.path.exists("History"):
        os.mkdir("History")
    # time.time() returns the current Unix epoch timestamp, which makes the
    # output file name unique.
    getPath = "History/pencil_" + str(int(time.time())) + ".jpg"
cv2.imwrite(getPath, pencil_sketch_img)
return getPath
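# Minimal usage sketch (the input file name "photo.jpg" is hypothetical and
# must exist in the working directory):
if __name__ == "__main__":
    print("Sketch written to", convertImg("photo.jpg"))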
|
nilq/baby-python
|
python
|
import torch
import typing
import numpy as np
from pathlib import Path
from torchvision import datasets
from sklearn import model_selection
from quince.library.datasets import utils
class HCMNIST(datasets.MNIST):
def __init__(
self,
root: str,
gamma_star: float,
split: str = "train",
mode: str = "mu",
p_u: str = "bernoulli",
theta: float = 4.0,
beta: float = 0.75,
sigma_y: float = 1.0,
domain: float = 2.0,
seed: int = 1331,
transform: typing.Optional[typing.Callable] = None,
target_transform: typing.Optional[typing.Callable] = None,
download: bool = True,
) -> None:
train = split == "train" or split == "valid"
root = Path.home() / "quince_datasets" if root is None else Path(root)
self.__class__.__name__ = "MNIST"
super(HCMNIST, self).__init__(
root,
train=train,
transform=transform,
target_transform=target_transform,
download=download,
)
self.data = self.data.view(len(self.targets), -1).numpy()
self.targets = self.targets.numpy()
if train:
(
data_train,
data_valid,
targets_train,
targets_valid,
) = model_selection.train_test_split(
self.data, self.targets, test_size=0.3, random_state=seed
)
self.data = data_train if split == "train" else data_valid
self.targets = targets_train if split == "train" else targets_valid
self.mode = mode
self.dim_input = [1, 28, 28]
self.dim_treatment = 1
self.dim_output = 1
self.phi_model = fit_phi_model(
root=root, edges=torch.arange(-domain, domain + 0.1, (2 * domain) / 10),
)
size = (self.__len__(), 1)
rng = np.random.RandomState(seed=seed)
if p_u == "bernoulli":
self.u = rng.binomial(1, 0.5, size=size).astype("float32")
elif p_u == "uniform":
self.u = rng.uniform(size=size).astype("float32")
elif p_u == "beta_bi":
self.u = rng.beta(0.5, 0.5, size=size).astype("float32")
elif p_u == "beta_uni":
self.u = rng.beta(2, 5, size=size).astype("float32")
else:
raise NotImplementedError(f"{p_u} is not a supported distribution")
phi = self.phi
self.pi = (
utils.complete_propensity(x=phi, u=self.u, gamma=gamma_star, beta=beta)
.astype("float32")
.ravel()
)
self.t = rng.binomial(1, self.pi).astype("float32")
eps = (sigma_y * rng.normal(size=self.t.shape)).astype("float32")
self.mu0 = (
utils.f_mu(x=phi, t=0.0, u=self.u, theta=theta).astype("float32").ravel()
)
self.mu1 = (
utils.f_mu(x=phi, t=1.0, u=self.u, theta=theta).astype("float32").ravel()
)
self.y0 = self.mu0 + eps
self.y1 = self.mu1 + eps
self.y = self.t * self.y1 + (1 - self.t) * self.y0
self.tau = self.mu1 - self.mu0
self.y_mean = np.array([0.0], dtype="float32")
self.y_std = np.array([1.0], dtype="float32")
def __getitem__(self, index):
x = ((self.data[index].astype("float32") / 255.0) - 0.1307) / 0.3081
t = self.t[index : index + 1]
if self.mode == "pi":
return x, t
elif self.mode == "mu":
return np.hstack([x, t]), self.y[index : index + 1]
else:
raise NotImplementedError(
f"{self.mode} not supported. Choose from 'pi' for propensity models or 'mu' for expected outcome models"
)
@property
def phi(self):
x = ((self.data.astype("float32") / 255.0) - 0.1307) / 0.3081
z = np.zeros_like(self.targets.astype("float32"))
for k, v in self.phi_model.items():
ind = self.targets == k
x_ind = x[ind].reshape(ind.sum(), -1)
means = x_ind.mean(axis=-1)
z[ind] = utils.linear_normalization(
np.clip((means - v["mu"]) / v["sigma"], -1.4, 1.4), v["lo"], v["hi"]
)
return np.expand_dims(z, -1)
@property
def x(self):
return ((self.data.astype("float32") / 255.0) - 0.1307) / 0.3081
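# Fit, for each MNIST digit class, the mean/std of the per-image average pixel
# intensity, plus the [lo, hi] interval that class occupies on the latent axis;
# the phi property above uses this model to project images onto that axis.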
def fit_phi_model(root, edges):
ds = datasets.MNIST(root=root)
data = (ds.data.float().div(255) - 0.1307).div(0.3081).view(len(ds), -1)
model = {}
digits = torch.unique(ds.targets)
for i, digit in enumerate(digits):
lo, hi = edges[i : i + 2]
ind = ds.targets == digit
data_ind = data[ind].view(ind.sum(), -1)
means = data_ind.mean(dim=-1)
mu = means.mean()
sigma = means.std()
model.update(
{
digit.item(): {
"mu": mu.item(),
"sigma": sigma.item(),
"lo": lo.item(),
"hi": hi.item(),
}
}
)
return model
|
nilq/baby-python
|
python
|
import curses
import curses.ascii
from sciibo.graphics import colors
from .field import Field
class Selection(Field):
def __init__(self, y, x, items, selected=0, on_select=None):
super(Selection, self).__init__(y, x, 1, self.item_width(items))
self.items = items
self.selected = selected
self.on_select = on_select
self.value = items[selected]
self.update()
def item_width(self, items):
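        # Total rendered width: each item is drawn as ' item ' with two
        # padding spaces, so add 2 per item.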
return sum(map(len, list(map(str, items)))) + len(items) * 2
def update(self):
if self.active:
self.set_color(colors.FORM_SELECTION_ACTIVE)
else:
self.set_color(colors.FORM_SELECTION)
self.erase()
self.draw_str(0, 1, " ".join(map(str, self.items)))
x = self.item_width(self.items[:self.selected])
color = colors.FORM_SELECTION_ACTIVE_SELECTED if self.active else colors.FORM_SELECTION_SELECTED
self.draw_str(0, x, ' %s ' % self.items[self.selected], color)
def on_key(self, ch):
if ch == curses.KEY_LEFT:
if self.selected > 0:
self.selected -= 1
elif ch == curses.KEY_RIGHT:
if self.selected + 1 < len(self.items):
self.selected += 1
else:
return False
self.value = self.items[self.selected]
self.update()
return True
def on_mouse(self, chain, y, x):
if self not in chain:
return
width = 0
for index, item in enumerate(map(str, self.items)):
width += len(item) + 2
if x < width:
self.selected = index
self.value = self.items[self.selected]
self.update()
return
|
nilq/baby-python
|
python
|
# Docs: https://docs.google.com/document/d/1AVC-4QqkpMBKVUo306-ojkOKmmcJvRSu1AAQjlbxZ7I/edit
def find_max_consecutive_ones(nums) -> int:
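    # Single pass: grow the current run of ones, and fold it into max_count
    # whenever the run ends (and once more after the loop, in case the array
    # ends with ones).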
counter = 0
max_count = 0
for num in nums: # [1, 1, 0, 1, 1, 1]
if num == 1:
counter += 1 # 3
else:
max_count = counter if counter > max_count else max_count # 2
counter = 0
max_count = counter if counter > max_count else max_count
return max_count
assert find_max_consecutive_ones([1,1,0,1,1,1]) == 3
assert find_max_consecutive_ones([1]) == 1
|
nilq/baby-python
|
python
|
from lightning import Lightning
from sklearn import datasets
lgn = Lightning()
imgs = datasets.load_sample_images()['images']
lgn.imagepoly(imgs[0])
|
nilq/baby-python
|
python
|
from collections import deque
def solution():
data = open(r'inputs\day10.in').readlines()
print('Part 1 result: ' + str(part1(data)))
print('Part 2 result: ' + str(part2(data)))
# number of points for each character for part 1
error_points = {
')': 3,
']': 57,
'}': 1197,
'>': 25137
}
# number of points for each character for part 2
autocomplete_points = {
')': 1,
']': 2,
'}': 3,
'>': 4
}
# closing bracket for each type
closers = {
'{': '}',
'[': ']',
'(': ')',
'<': '>'
}
def part1(data):
# total syntax score
syntax_score = 0
# loop through all the lines
for line in data:
line = line.strip()
# queue (that we use as a stack) to keep track of the brackets
Q = deque()
for c in line:
            # if it's an opener, we just push it onto the stack
            if c in '{<([':
Q.append(c)
else:
                # otherwise, pop the last opener off the stack and look up the
                # closing bracket we expect it to pair with
expected_closing = closers[Q.pop()]
# if we didn't find the correct closing bracket, we know this line is corrupted
if c != expected_closing:
# so we can simply add the score to the syntax score
syntax_score += error_points[c]
# and break out of the loop to go to the next line
break
return syntax_score
def part2(data):
# track the scores for each line
line_scores = []
for line in data:
line = line.strip()
Q = deque()
# variable to determine if a line is corrupt or not
corrupt = False
for c in line:
            # just like before, push openers onto the stack
            if c in '{<([':
Q.append(c)
else:
# and pop off the item to check the expected closing bracket
expected_closing = closers[Q.pop()]
                if c != expected_closing:
                    # this time, we just flag the line as corrupted and stop
                    # scanning it (also avoids popping from an empty stack)
                    corrupt = True
                    break
        # if it's corrupt, we can just continue onto the next line
if corrupt:
continue
        # now we've made it to the end of the line and we know it's not corrupt
# track the line score for this line
line_score = 0
# while we still have stuff on the stack (all openers)
while Q:
# pop off the last item
c = Q.pop()
# multiply the score by 5
line_score *= 5
# get the closer for the last item
closer = closers[c]
# and add to the line score the amount of points for the closer
line_score += autocomplete_points[closer]
# then add the line score to the list
line_scores.append(line_score)
# sort the scores
line_scores.sort()
# and return the middle score as the solution
return line_scores[len(line_scores) // 2]
solution()
|
nilq/baby-python
|
python
|
import numpy as np
from scipy.constants import c,h,eV
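# Derivation sketch (our reading, not stated in the source): Bragg's law gives
# sin(theta) = Q*lambda/(4*pi) with lambda = h*c/(E*eV), so
# theta = arcsin(c*h*Q/(4*pi*E*eV)); differentiating with respect to E yields
# the expression returned below.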
def dThetadE(E, Q):
    """Calculate the derivative of the Bragg angle with respect to photon
    energy E (in eV) at a given momentum transfer Q."""
    return (-c*h*Q/eV)/(4*np.pi*E**2*np.sqrt(1-(c**2*h**2*Q**2)/(16*np.pi**2*E**2*eV**2)))
|
nilq/baby-python
|
python
|
from flask import Flask, g, request, session, redirect, url_for ,current_app
from flask_simpleldap import LDAP
from ldap import filter as pyldap_filter
from ldap import LDAPError as pyldap_LDAPError
from ldap import SCOPE_SUBTREE as pyldap_SCOPE_SUBTREE
import re
import sys

# Override get_user_groups() (and add get_user_gid()) to match our OpenLDAP
# group settings.
class zdnst_ldap(LDAP):
def get_user_gid(self,user):
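        # Look up the user's group id attribute (LDAP_GROUP_ID_FIELD) so group
        # membership can be resolved by gid in get_user_groups() below.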
query = None
fields = None
conn = self.bind
if user is not None:
            fields = ['*']
query = pyldap_filter.filter_format(current_app.config['LDAP_USER_OBJECT_FILTER'], (user,))
try:
records = conn.search_s(current_app.config['LDAP_BASE_DN'],pyldap_SCOPE_SUBTREE, query, fields)
conn.unbind_s()
if records:
if current_app.config['LDAP_GROUP_ID_FIELD'] in records[0][1]:
gid = records[0][1][current_app.config['LDAP_GROUP_ID_FIELD']]
return ''.join(gid)
except pyldap_LDAPError as e:
raise pyldap_LDAPError(self.error(e.args))
def get_user_groups(self, user):
conn = self.bind
try:
if current_app.config['LDAP_OPENLDAP']:
fields = \
[str(current_app.config['LDAP_GROUP_MEMBER_FILTER_FIELD'])]
records = conn.search_s(
current_app.config['LDAP_GROUP_BASE_DN'], pyldap_SCOPE_SUBTREE,
pyldap_filter.filter_format(
current_app.config['LDAP_GROUP_MEMBER_FILTER'],
(self.get_user_gid(user),)),
fields)
conn.unbind_s()
else:
records = []
if records:
if current_app.config['LDAP_OPENLDAP']:
group_member_filter = \
current_app.config['LDAP_GROUP_MEMBER_FILTER_FIELD']
if sys.version_info[0] > 2:
groups = [record[1][group_member_filter][0].decode(
'utf-8') for record in records]
else:
groups = [record[1][group_member_filter][0] for
record in records]
return groups
else:
if current_app.config['LDAP_USER_GROUPS_FIELD'] in \
records[0][1]:
groups = records[0][1][
current_app.config['LDAP_USER_GROUPS_FIELD']]
result = [re.findall(b'(?:cn=|CN=)(.*?),', group)[0]
for group in groups]
if sys.version_info[0] > 2:
result = [r.decode('utf-8') for r in result]
return result
except pyldap_LDAPError as e:
            raise pyldap_LDAPError(self.error(e.args))
app = Flask(__name__)
app.secret_key = 'this is a secret key'
app.debug = True
app.config.from_pyfile('../conf/ldap.conf')
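# The assignments below are effectively no-ops: they re-set keys that were
# already loaded from ../conf/ldap.conf, and are kept as documentation of the
# settings this app relies on.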
app.config['LDAP_OPENLDAP'] = app.config.get('LDAP_OPENLDAP')
app.config['LDAP_REALM_NAME'] = app.config.get('LDAP_REALM_NAME')
app.config['LDAP_HOST'] = app.config.get('LDAP_HOST')
app.config['LDAP_BASE_DN'] = app.config.get('LDAP_BASE_DN')
app.config['LDAP_USER_BASE_DN'] = app.config.get('LDAP_USER_BASE_DN')
app.config['LDAP_GROUP_BASE_DN'] = app.config.get('LDAP_GROUP_BASE_DN')
app.config['LDAP_USER_OBJECT_FILTER'] = app.config.get('LDAP_USER_OBJECT_FILTER')
# Admin configuration (not allow anonymous)
app.config['LDAP_USERNAME'] = app.config.get('LDAP_USERNAME')
app.config['LDAP_PASSWORD'] = app.config.get('LDAP_PASSWORD')
# Group configuration
app.config['LDAP_GROUP_OBJECT_FILTER'] = app.config.get('LDAP_GROUP_OBJECT_FILTER')
app.config['LDAP_GROUP_MEMBERS_FIELD'] = app.config.get('LDAP_GROUP_MEMBERS_FIELD')
app.config['LDAP_GROUP_ID_FIELD'] = app.config.get('LDAP_GROUP_ID_FIELD')
app.config['LDAP_GROUP_MEMBER_FILTER'] = app.config.get('LDAP_GROUP_MEMBER_FILTER')
app.config['LDAP_GROUP_MEMBER_FILTER_FIELD'] = app.config.get('LDAP_GROUP_MEMBER_FILTER_FIELD')
ldap = zdnst_ldap(app)
@app.before_request
def before_request():
g.user = None
if 'user_id' in session:
# This is where you'd query your database to get the user info.
g.user = {}
# Create a global with the LDAP groups the user is a member of.
g.ldap_groups = ldap.get_user_groups(user=session['user_id'])
@app.route('/')
@ldap.login_required
def index():
return 'Successfully logged in!'
@app.route('/login', methods=['GET', 'POST'])
def login():
if g.user:
return redirect(url_for('index'))
if request.method == 'POST':
user = request.form['user']
passwd = request.form['passwd']
test = ldap.bind_user(user, passwd)
if test is None or passwd == '':
return 'Invalid credentials'
else:
session['user_id'] = request.form['user']
return redirect('/')
return """<form action="" method="post">
user: <input name="user"><br>
password:<input type="password" name="passwd"><br>
<input type="submit" value="Submit"></form>"""
@app.route('/group')
@ldap.group_required(groups=['test','operation'])
def group():
return 'Group restricted page'
@app.route('/g')
def show_group():
if 'user_id' in session:
group = ''.join(ldap.get_user_groups(user=session['user_id']))
return group
else:
return 'login need'
@app.route('/logout')
def logout():
session.pop('user_id', None)
return redirect(url_for('index'))
if __name__ == '__main__':
app.run()
|
nilq/baby-python
|
python
|
import requests
from app import Server
from automl import openml_utils
import pandas as pd
import ray
@ray.remote
def send_example(model_id, features, label):
# Make a prediction.
request = {"model_id": model_id, "features": features}
response = requests.post("http://localhost:8000/models/predict", json=request)
response.raise_for_status()
response = response.json()
prediction_id = response["prediction_id"]
probs = response["probs"]
# Observe the label if provided.
if label:
request = {"prediction_id": prediction_id, "label": label}
response = requests.post("http://localhost:8000/models/train", json=request)
response.raise_for_status()
print(f"prediction_id :: {prediction_id} probs :: {probs} label :: {label}")
return prediction_id, probs, label["class"]
def send_examples(model_id, df, label_col):
results = []
for record in df.to_dict(orient="records"):
record = {k: str(v) for k, v in record.items()}
label = {label_col: record.pop(label_col)}
result = send_example.remote(model_id, record, label)
results.append(result)
return ray.get(results)
def run_demo():
dataset = openml_utils.dataset_from_task(31, 9)
# Create model.
request = {
"feature_schema": {
**{key: "float" for key in dataset.numerical_cols},
**{key: "str" for key in dataset.categorical_cols},
},
"label_column": dataset.label_col,
}
response = requests.post("http://localhost:8000/models/create", json=request)
response.raise_for_status()
model_id = response.json()["model_id"]
print(f"Model id :: {model_id}")
# Train the model.
send_examples(model_id, dataset.train, dataset.label_col)
response = requests.get(f"http://localhost:8000/models/fit?model_id={model_id}")
response.raise_for_status()
# Make predictions.
correct, total = 0, 0
results = send_examples(model_id, dataset.test, dataset.label_col)
for _, probs, label in results:
best_pred, best_prob = None, 0.0
for pred, prob in probs.items():
if prob > best_prob:
best_prob = prob
best_pred = pred
if best_pred == label:
correct += 1
total += 1
print(f"Accuracy :: {correct / total:.2f}")
if __name__ == "__main__":
ray.init(address="auto", namespace="serve")
Server.deploy()
run_demo()
|
nilq/baby-python
|
python
|
# name=Arturia Keylab mkII DAW (MIDIIN2/MIDIOUT2)
# url=https://github.com/rjuang/flstudio-arturia-keylab-mk2
# receiveFrom=Arturia Keylab mkII (MIDI)
import version
from arturia import ArturiaController
from arturia_processor import ArturiaMidiProcessor
import arturia_midi
import config
import ui
WELCOME_DISPLAY_INTERVAL_MS = 1500
# --------------------[ Global state for MIDI Script ] ------------------------------------------
_controller = ArturiaController()
_processor = ArturiaMidiProcessor(_controller)
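# Accumulates two data bytes per PAYLOAD message and is flushed to the device
# when an END_PAYLOAD command arrives (see OnMidiMsg below).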
_payload_buffer = []
# --------------------[ MIDI Script Integration Events for FL Studio ]---------------------------
def OnInit():
global _controller
print('Loaded MIDI script for Arturia Keylab mkII (ver %d)' % version.CHANGE_DATE)
_controller.Sync(0xFFFF)
_controller.paged_display().SetPageLines('welcome', line1='Connected to ', line2=' FL Studio')
_controller.paged_display().SetActivePage('main')
_controller.paged_display().SetActivePage('welcome', expires=WELCOME_DISPLAY_INTERVAL_MS)
ui.setHintMsg('Script version: %d' % version.CHANGE_DATE)
def OnDeInit():
    print('Unloading plugin...')
def OnIdle():
_controller.Idle()
def OnMidiMsg(event):
global _payload_buffer, _processor
if event.status == arturia_midi.INTER_SCRIPT_STATUS_BYTE:
if event.data1 == arturia_midi.INTER_SCRIPT_DATA1_BEGIN_PAYLOAD_CMD:
_payload_buffer = []
elif event.data1 == arturia_midi.INTER_SCRIPT_DATA1_END_PAYLOAD_CMD:
arturia_midi.send_to_device(_payload_buffer)
_payload_buffer = []
elif event.data1 == arturia_midi.INTER_SCRIPT_DATA1_UPDATE_STATE:
if event.data2 == arturia_midi.INTER_SCRIPT_DATA2_STATE_PAD_RECORD_START:
_processor.NotifyPadRecordingState(True)
elif event.data2 == arturia_midi.INTER_SCRIPT_DATA2_STATE_PAD_RECORD_STOP:
_processor.NotifyPadRecordingState(False)
event.handled = True
elif event.status == arturia_midi.PAYLOAD_STATUS_BYTE:
_payload_buffer.append(event.data1)
_payload_buffer.append(event.data2)
event.handled = True
else:
if _processor.ProcessEvent(event):
event.handled = True
_controller.RefreshDisplay()
def OnRefresh(flags):
_controller.Sync(flags)
def OnUpdateBeatIndicator(value):
_controller.metronome().ProcessBeat(value)
|
nilq/baby-python
|
python
|
class Rectangle():
l = 0
b = 0
    def __init__(self, *s):
if not len(s):
self.l = 0
self.b = 0
elif len(s) == 1:
self.l = self.b = s[0]
else:
self.l = s[0]
self.b = s[1]
def area(self):
return self.l * self.b
obj1 = Rectangle(5)
print(obj1.area())
obj2 = Rectangle(2)
print(obj2.area())
obj3 = Rectangle(2, 4)
print(obj3.area())
|
nilq/baby-python
|
python
|
# Copyright 2019, The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for QuantileAdaptiveClipSumQuery."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow_privacy.privacy.analysis import privacy_ledger
from tensorflow_privacy.privacy.dp_query import quantile_adaptive_clip_sum_query
from tensorflow_privacy.privacy.dp_query import test_utils
tf.compat.v1.enable_eager_execution()
class QuantileAdaptiveClipSumQueryTest(tf.test.TestCase):
def test_sum_no_clip_no_noise(self):
record1 = tf.constant([2.0, 0.0])
record2 = tf.constant([-1.0, 1.0])
query = quantile_adaptive_clip_sum_query.QuantileAdaptiveClipSumQuery(
initial_l2_norm_clip=10.0,
noise_multiplier=0.0,
target_unclipped_quantile=1.0,
learning_rate=0.0,
clipped_count_stddev=0.0,
expected_num_records=2.0)
query_result, _ = test_utils.run_query(query, [record1, record2])
result = query_result.numpy()
expected = [1.0, 1.0]
self.assertAllClose(result, expected)
def test_sum_with_clip_no_noise(self):
record1 = tf.constant([-6.0, 8.0]) # Clipped to [-3.0, 4.0].
record2 = tf.constant([4.0, -3.0]) # Not clipped.
query = quantile_adaptive_clip_sum_query.QuantileAdaptiveClipSumQuery(
initial_l2_norm_clip=5.0,
noise_multiplier=0.0,
target_unclipped_quantile=1.0,
learning_rate=0.0,
clipped_count_stddev=0.0,
expected_num_records=2.0)
query_result, _ = test_utils.run_query(query, [record1, record2])
result = query_result.numpy()
expected = [1.0, 1.0]
self.assertAllClose(result, expected)
def test_sum_with_noise(self):
record1, record2 = 2.71828, 3.14159
stddev = 1.0
clip = 5.0
query = quantile_adaptive_clip_sum_query.QuantileAdaptiveClipSumQuery(
initial_l2_norm_clip=clip,
noise_multiplier=stddev / clip,
target_unclipped_quantile=1.0,
learning_rate=0.0,
clipped_count_stddev=0.0,
expected_num_records=2.0)
noised_sums = []
    for _ in range(1000):
query_result, _ = test_utils.run_query(query, [record1, record2])
noised_sums.append(query_result.numpy())
result_stddev = np.std(noised_sums)
self.assertNear(result_stddev, stddev, 0.1)
def test_average_no_noise(self):
record1 = tf.constant([5.0, 0.0]) # Clipped to [3.0, 0.0].
record2 = tf.constant([-1.0, 2.0]) # Not clipped.
query = quantile_adaptive_clip_sum_query.QuantileAdaptiveClipAverageQuery(
initial_l2_norm_clip=3.0,
noise_multiplier=0.0,
denominator=2.0,
target_unclipped_quantile=1.0,
learning_rate=0.0,
clipped_count_stddev=0.0,
expected_num_records=2.0)
query_result, _ = test_utils.run_query(query, [record1, record2])
result = query_result.numpy()
expected_average = [1.0, 1.0]
self.assertAllClose(result, expected_average)
def test_average_with_noise(self):
record1, record2 = 2.71828, 3.14159
sum_stddev = 1.0
denominator = 2.0
clip = 3.0
query = quantile_adaptive_clip_sum_query.QuantileAdaptiveClipAverageQuery(
initial_l2_norm_clip=clip,
noise_multiplier=sum_stddev / clip,
denominator=denominator,
target_unclipped_quantile=1.0,
learning_rate=0.0,
clipped_count_stddev=0.0,
expected_num_records=2.0)
noised_averages = []
for _ in range(1000):
query_result, _ = test_utils.run_query(query, [record1, record2])
noised_averages.append(query_result.numpy())
result_stddev = np.std(noised_averages)
avg_stddev = sum_stddev / denominator
self.assertNear(result_stddev, avg_stddev, 0.1)
def test_adaptation_target_zero(self):
record1 = tf.constant([8.5])
record2 = tf.constant([-7.25])
query = quantile_adaptive_clip_sum_query.QuantileAdaptiveClipSumQuery(
initial_l2_norm_clip=10.0,
noise_multiplier=0.0,
target_unclipped_quantile=0.0,
learning_rate=1.0,
clipped_count_stddev=0.0,
expected_num_records=2.0)
global_state = query.initial_global_state()
initial_clip = global_state.l2_norm_clip
self.assertAllClose(initial_clip, 10.0)
# On the first two iterations, nothing is clipped, so the clip goes down
# by 1.0 (the learning rate). When the clip reaches 8.0, one record is
# clipped, so the clip goes down by only 0.5. After two more iterations,
# both records are clipped, and the clip norm stays there (at 7.0).
expected_sums = [1.25, 1.25, 0.75, 0.25, 0.0]
expected_clips = [9.0, 8.0, 7.5, 7.0, 7.0]
for expected_sum, expected_clip in zip(expected_sums, expected_clips):
actual_sum, global_state = test_utils.run_query(
query, [record1, record2], global_state)
actual_clip = global_state.l2_norm_clip
self.assertAllClose(actual_clip.numpy(), expected_clip)
self.assertAllClose(actual_sum.numpy(), (expected_sum,))
def test_adaptation_target_one(self):
record1 = tf.constant([-1.5])
record2 = tf.constant([2.75])
query = quantile_adaptive_clip_sum_query.QuantileAdaptiveClipSumQuery(
initial_l2_norm_clip=0.0,
noise_multiplier=0.0,
target_unclipped_quantile=1.0,
learning_rate=1.0,
clipped_count_stddev=0.0,
expected_num_records=2.0)
global_state = query.initial_global_state()
initial_clip = global_state.l2_norm_clip
self.assertAllClose(initial_clip, 0.0)
    # On the first two iterations, both records are clipped, so the clip goes
    # up by 1.0 (the learning rate). When the clip reaches 2.0, only one record
    # is clipped, so the clip goes up by only 0.5. After two more iterations,
    # neither record is clipped, and the clip norm stays there (at 3.0).
expected_sums = [0.0, 0.0, 0.5, 1.0, 1.25]
expected_clips = [1.0, 2.0, 2.5, 3.0, 3.0]
for expected_sum, expected_clip in zip(expected_sums, expected_clips):
actual_sum, global_state = test_utils.run_query(
query, [record1, record2], global_state)
actual_clip = global_state.l2_norm_clip
self.assertAllClose(actual_clip.numpy(), expected_clip)
self.assertAllClose(actual_sum.numpy(), (expected_sum,))
def test_adaptation_linspace(self):
    # 21 records equally spaced from 0 to 10 in 0.5 increments.
    # Test that with a decaying learning rate we converge to the correct
    # median with error at most 0.25.
records = [tf.constant(x) for x in np.linspace(
0.0, 10.0, num=21, dtype=np.float32)]
learning_rate = tf.Variable(1.0)
query = quantile_adaptive_clip_sum_query.QuantileAdaptiveClipSumQuery(
initial_l2_norm_clip=0.0,
noise_multiplier=0.0,
target_unclipped_quantile=0.5,
learning_rate=learning_rate,
clipped_count_stddev=0.0,
expected_num_records=2.0)
global_state = query.initial_global_state()
for t in range(50):
tf.compat.v1.assign(learning_rate, 1.0 / np.sqrt(t + 1))
_, global_state = test_utils.run_query(query, records, global_state)
actual_clip = global_state.l2_norm_clip
if t > 40:
self.assertNear(actual_clip, 5.0, 0.25)
def test_adaptation_all_equal(self):
# 100 equal records. Test that with a decaying learning rate we converge to
# that record and bounce around it.
records = [tf.constant(5.0)] * 20
learning_rate = tf.Variable(1.0)
query = quantile_adaptive_clip_sum_query.QuantileAdaptiveClipSumQuery(
initial_l2_norm_clip=0.0,
noise_multiplier=0.0,
target_unclipped_quantile=0.5,
learning_rate=learning_rate,
clipped_count_stddev=0.0,
expected_num_records=2.0)
global_state = query.initial_global_state()
for t in range(50):
tf.compat.v1.assign(learning_rate, 1.0 / np.sqrt(t + 1))
_, global_state = test_utils.run_query(query, records, global_state)
actual_clip = global_state.l2_norm_clip
if t > 40:
self.assertNear(actual_clip, 5.0, 0.25)
def test_ledger(self):
record1 = tf.constant([8.5])
record2 = tf.constant([-7.25])
population_size = tf.Variable(0)
selection_probability = tf.Variable(1.0)
query = quantile_adaptive_clip_sum_query.QuantileAdaptiveClipSumQuery(
initial_l2_norm_clip=10.0,
noise_multiplier=1.0,
target_unclipped_quantile=0.0,
learning_rate=1.0,
clipped_count_stddev=0.0,
expected_num_records=2.0)
query = privacy_ledger.QueryWithLedger(
query, population_size, selection_probability)
# First sample.
tf.compat.v1.assign(population_size, 10)
tf.compat.v1.assign(selection_probability, 0.1)
_, global_state = test_utils.run_query(query, [record1, record2])
expected_queries = [[10.0, 10.0], [0.5, 0.0]]
formatted = query.ledger.get_formatted_ledger_eager()
sample_1 = formatted[0]
self.assertAllClose(sample_1.population_size, 10.0)
self.assertAllClose(sample_1.selection_probability, 0.1)
self.assertAllClose(sample_1.queries, expected_queries)
# Second sample.
tf.compat.v1.assign(population_size, 20)
tf.compat.v1.assign(selection_probability, 0.2)
test_utils.run_query(query, [record1, record2], global_state)
formatted = query.ledger.get_formatted_ledger_eager()
sample_1, sample_2 = formatted
self.assertAllClose(sample_1.population_size, 10.0)
self.assertAllClose(sample_1.selection_probability, 0.1)
self.assertAllClose(sample_1.queries, expected_queries)
expected_queries_2 = [[9.0, 9.0], [0.5, 0.0]]
self.assertAllClose(sample_2.population_size, 20.0)
self.assertAllClose(sample_2.selection_probability, 0.2)
self.assertAllClose(sample_2.queries, expected_queries_2)
if __name__ == '__main__':
tf.test.main()
|
nilq/baby-python
|
python
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2018, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh.models import Circle, MultiLine, ColumnDataSource
# Module under test
from bokeh.models.renderers import GlyphRenderer, GraphRenderer
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
def test_graphrenderer_init_props():
renderer = GraphRenderer()
assert renderer.x_range_name == "default"
assert renderer.y_range_name == "default"
assert renderer.node_renderer.data_source.data == dict(index=[])
assert renderer.edge_renderer.data_source.data == dict(start=[], end=[])
assert renderer.layout_provider is None
def test_graphrenderer_check_malformed_graph_source_no_errors():
renderer = GraphRenderer()
check = renderer._check_malformed_graph_source()
assert check == []
def test_graphrenderer_check_malformed_graph_source_no_node_index():
node_source = ColumnDataSource()
node_renderer = GlyphRenderer(data_source=node_source, glyph=Circle())
renderer = GraphRenderer(node_renderer=node_renderer)
check = renderer._check_malformed_graph_source()
assert check != []
def test_graphrenderer_check_malformed_graph_source_no_edge_start_or_end():
edge_source = ColumnDataSource()
edge_renderer = GlyphRenderer(data_source=edge_source, glyph=MultiLine())
renderer = GraphRenderer(edge_renderer=edge_renderer)
check = renderer._check_malformed_graph_source()
assert check != []
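# For contrast, a minimal well-formed pair of sources (a sketch, not part of
# the original suite) would carry the expected columns:
#   node_source = ColumnDataSource(data=dict(index=[0, 1]))
#   edge_source = ColumnDataSource(data=dict(start=[0], end=[1]))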
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
nilq/baby-python
|
python
|
from typing import Any, Dict
from trench.exceptions import MFAMethodDoesNotExistError
from trench.settings import TrenchAPISettings, trench_settings
class GetMFAConfigByNameQuery:
def __init__(self, settings: TrenchAPISettings) -> None:
self._settings = settings
def execute(self, name: str) -> Dict[str, Any]:
try:
return self._settings.MFA_METHODS[name]
except KeyError as cause:
raise MFAMethodDoesNotExistError from cause
get_mfa_config_by_name_query = GetMFAConfigByNameQuery(settings=trench_settings).execute
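# Minimal usage sketch (assumes a method named "email" is configured in the
# trench settings; the name is illustrative):
#   email_config = get_mfa_config_by_name_query(name="email")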
|
nilq/baby-python
|
python
|
# Generated by Django 3.1.7 on 2021-07-03 12:56
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("record_requests", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="recordrequest",
name="estimated_response_date",
field=models.DateField(null=True),
),
migrations.AddField(
model_name="recordrequest",
name="filed_at",
field=models.DateField(null=True),
),
migrations.AddField(
model_name="recordrequest",
name="last_communication_date",
field=models.DateField(null=True),
),
migrations.AddField(
model_name="recordrequest",
name="tracking_number",
field=models.CharField(db_index=True, max_length=256, null=True),
),
migrations.AlterField(
model_name="agency",
name="name",
field=models.CharField(db_index=True, max_length=256, unique=True),
),
migrations.AlterField(
model_name="recordrequest",
name="status",
field=models.CharField(
choices=[
("submitted", "Processing"),
("ack", "Awaiting Acknowledgement"),
("processed", "Awaiting Response"),
("appealing", "Awaiting Apeal"),
("fix", "Fix Required"),
("payment", "Payment Required"),
("lawsuit", "In Litigation"),
("rejected", "Rejected"),
("no_docs", "No Responsive Documents"),
("done", "Completed"),
("partial", "Partially Completed"),
("abandoned", "Withdrawn"),
("review", "In Review"),
("install", "Installments"),
],
db_index=True,
max_length=10,
),
),
]
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""CSES Problem Set Coin Piles.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/10smn6uwgTZ4dTjcUl24YcliEyzzcZ5fw
a,b
x = 2 from a and 1 from b
y = 2 from b and 1 from a
a = 2x + 1y ------------ (i)
b = 2y + 1x ------------ (ii)
(i) *2 =>
2a = 4x + 2y ---------------(iii)
(ii)*2 =>
2b = 4y + 2x --------------(iv)
(iii)-(ii) =>
2a - b = 3x Ans.
(iv)-(i) =>
2b - a = 3y Ans.
"""
for i in range(int(input())):
a,b = map(int, input().split())
    if 2*a - b >= 0 and (2*a - b) % 3 == 0 and 2*b - a >= 0 and (2*b - a) % 3 == 0:
print("YES")
else:
print("NO")
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.5 on 2021-07-14 03:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import participant_profile.models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('participant_profile', '0002_auto_20210709_0716'),
]
operations = [
migrations.CreateModel(
name='PaymentUpload',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('verified', models.BooleanField(db_index=True, default=False, verbose_name='Verifikasi')),
('payment', models.FileField(upload_to=participant_profile.models.user_directory_path, verbose_name='Upload Bukti Pembayaran')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(default=django.utils.timezone.now)),
('participant', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
nilq/baby-python
|
python
|
import os
c.NotebookApp.ip='0.0.0.0'
c.NotebookApp.port = int(os.getenv('PORT', 8888))
c.NotebookApp.open_browser = False
c.MultiKernelManager.default_kernel_name = 'python3'
c.NotebookApp.notebook_dir = './'
c.Application.log_level = 0
c.NotebookApp.allow_root = True
c.NotebookApp.terminado_settings = { 'shell_command': ['/bin/bash', '-i'] }
# Authentication TOKEN
# WARNING: leaving the token empty is insecure. Only use it on a private network or a local workstation.
c.NotebookApp.token = ''
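# To require a token instead (a safer default; the environment variable name
# is illustrative), one could set:
#   c.NotebookApp.token = os.getenv('JUPYTER_TOKEN', '')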
|
nilq/baby-python
|
python
|
import logging
from typing import List, Optional, Dict
from uuid import UUID
from sqlalchemy.ext.asyncio.session import AsyncSession
from app.domain.models.Arkivuttrekk import ArkivuttrekkStatus
from app.domain.models.BevaringOverforing import BevaringOverforingStatus
from app.domain.models.Arkivuttrekk import Arkivuttrekk
from app.domain.models.Workflow import WorkflowType
from app.connectors.argo_workflow.queues.ArgoWorkflowRequestSender import ArgoWorkflowRequestSender
from app.connectors.arkiv_downloader.queues.ArchiveDownloadRequestSender import ArchiveDownloadRequestSender
from app.connectors.azure_storage.azure_storage_client import AzureStorageClient
from app.connectors.sas_generator.sas_generator_client import SASGeneratorClient
from app.database.dbo.mottak import (
Arkivkopi as Arkivkopi_DBO,
Overforingspakke as Overforingspakke_DBO,
WorkflowMetadata as WorkflowMetadata_DBO,
Invitasjon as Invitasjon_DBO,
)
from app.database.repositories import (
arkivkopi_repository,
arkivuttrekk_lokasjon_repository,
arkivuttrekk_repository,
invitasjon_repository,
overforingspakke_repository,
workflow_metadata_repository,
)
from app.domain import overforingspakke as overforingspakke_domain, arkivuttrekk_service
from app.domain.models.Arkivkopi import Arkivkopi, ArkivkopiRequestParameters
from app.domain.models.Invitasjon import Invitasjon
from app.domain.models.WorkflowMetadata import WorkflowMetadataTypes
from app.exceptions import (
ArkivkopiNotFoundByOverforingspakke,
ArkivkopiRequestFailed,
ArkivuttrekkLokasjonNotFound,
ArkivuttrekkNotFound,
InvitasjonNotFound,
OverforingspakkeNotFound,
ReportNotFound,
SASTokenPreconditionFailed,
InvitasjonEksternIdNotFound,
OverforingspakkeAlreadyExists,
OverforingspakkeNotFoundByTusdId,
SendArgoWorkflowRequestFailed,
)
from app.routers.dto.BevaringOverforing import BevaringOverforing
from app.routers.dto.Overforingspakke import OverforingspakkeCreate, OverforingspakkeUpdate
from app.settings import get_settings
TAR_SUFFIX = ".tar"
FOLDER_SUFFIX = "/"
logger = logging.getLogger(__name__)
settings = get_settings()
async def create_overforingspakke(overforingspakke: OverforingspakkeCreate, db: AsyncSession) -> Overforingspakke_DBO:
invitasjon_id = await _get_invitasjon_id_by_ekstern_id(overforingspakke.ekstern_id, db)
if await _overforingspakke_exist(db, invitasjon_id):
raise OverforingspakkeAlreadyExists(overforingspakke.ekstern_id)
return await overforingspakke_repository.create(db, overforingspakke.to_domain(invitasjon_id))
async def update_overforingspakke_by_tusd_id(overforingspakke_update: OverforingspakkeUpdate,
db: AsyncSession) -> Overforingspakke_DBO:
result = await overforingspakke_repository.update_by_tusd_id(db, overforingspakke_update)
if not result:
raise OverforingspakkeNotFoundByTusdId(overforingspakke_update.tusd_id)
return result
async def _overforingspakke_exist(db: AsyncSession, invitasjon_id: int) -> bool:
    # The repository returns None when no row exists; returning the comparison
    # explicitly lets the OverforingspakkeAlreadyExists guard above actually fire.
    return await overforingspakke_repository.get_by_invitasjon_id(db, invitasjon_id) is not None
async def get_workflows_for_overforingspakke(
overforingspakke_id: int,
db: AsyncSession,
workflow_type: Optional[WorkflowMetadataTypes],
skip: int,
limit: int,
) -> List[WorkflowMetadata_DBO]:
"""
Method that retrieves all workflows related to an overforingspakke_id.
If the workflow_type is not specified, all workflows are returned.
"""
return await workflow_metadata_repository.get_all_with_overforingspakke_id(db, overforingspakke_id, workflow_type, skip, limit)
async def _get_invitasjon(overforingspakke_id: int, db: AsyncSession) -> Invitasjon:
overforingspakke_dbo = await overforingspakke_repository.get_by_id(db, overforingspakke_id)
if not overforingspakke_dbo:
raise OverforingspakkeNotFound(overforingspakke_id)
invitasjon_dbo = await invitasjon_repository.get_by_id(db, overforingspakke_dbo.invitasjon_id)
if not invitasjon_dbo:
raise InvitasjonNotFound(id_=overforingspakke_id, by_arkivuttrekk_id=False)
return Invitasjon(
id_=invitasjon_dbo.id,
ekstern_id=invitasjon_dbo.ekstern_id,
arkivuttrekk_id=invitasjon_dbo.arkivuttrekk_id,
avgiver_epost=invitasjon_dbo.avgiver_epost,
status=invitasjon_dbo.status,
opprettet=invitasjon_dbo.opprettet,
)
async def _get_invitasjon_id_by_ekstern_id(ekstern_id: str, db: AsyncSession) -> int:
invitasjon = await invitasjon_repository.get_by_ekstern_id(db, ekstern_id)
if not invitasjon:
raise InvitasjonEksternIdNotFound(ekstern_id)
return invitasjon.id
async def get_arkivkopi_status(overforingspakke_id: int, db: AsyncSession, is_object: bool) -> Arkivkopi_DBO:
invitasjon = await _get_invitasjon(overforingspakke_id, db)
arkivkopi = await arkivkopi_repository.get_by_invitasjon_id_and_is_object_newest(db, invitasjon.id, is_object)
if not arkivkopi:
raise ArkivkopiNotFoundByOverforingspakke(overforingspakke_id, is_object)
return arkivkopi
def _get_source_container_id(is_object: bool, ekstern_id: Optional[UUID] = None) -> str:
"""
Return the name of the azure container where the overforingspakke is stored.
The source container is either the name of an Azure container containing the unpacked archive or
the container where Tusd is storing uploaded tarfiles.
- If downloading a bucket, the source container is an Azure container.
The name of the Azure container is a string representation of the unique "invitasjon.ekstern_id".
The "ekstern_id" is used as "target_container_name" when unpacking the tarfile to an azure container
during the argo workflow verify-overforingspakke.
- If downloading a tarfile, the source container is the Tusd storage container on Azure.
The name of this container is configured in the .env file.
"""
if is_object:
return settings.tusd_download_location_container
else:
return str(ekstern_id)
async def _generate_sas_token(container_id: str, sas_generator_client: SASGeneratorClient):
sas_token = await sas_generator_client.request_sas(container_id)
if not sas_token:
raise SASTokenPreconditionFailed(container_id)
return sas_token
def _generate_target_name(ekstern_id: UUID, is_object: bool) -> str:
"""
Generates a target_name for the copied archive.
I.e. the name of the folder or the tarfile when it is stored on-prem.
We have chosen to use the unique ekstern_id as the identifier of arkivkopies
on the on-prem server.
"""
target_name = str(ekstern_id)
if is_object:
target_name = target_name + TAR_SUFFIX
else:
target_name = target_name + FOLDER_SUFFIX
return target_name
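# Example (UUID value illustrative):
#   _generate_target_name(UUID("00000000-0000-0000-0000-000000000000"), is_object=True)
#   -> "00000000-0000-0000-0000-000000000000.tar"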
async def request_download_of_bucket(overforingspakke_id: int, db: AsyncSession,
archive_download_request_sender: ArchiveDownloadRequestSender,
sas_generator_client: SASGeneratorClient) -> Optional[Arkivkopi_DBO]:
invitasjon = await _get_invitasjon(overforingspakke_id, db)
source_container_id = _get_source_container_id(is_object=False, ekstern_id=invitasjon.ekstern_id)
sas_token = await _generate_sas_token(source_container_id, sas_generator_client)
target_name = _generate_target_name(ekstern_id=invitasjon.ekstern_id, is_object=False)
arkivkopi = await arkivkopi_repository.create(
db,
Arkivkopi.create_from(
invitasjon_id=invitasjon.id,
sas_token=sas_token,
target_name=target_name,
is_object=False,
),
)
parameters = ArkivkopiRequestParameters(arkivkopi_id=arkivkopi.id, sas_token=sas_token)
request_sent = await archive_download_request_sender.send_download_request(parameters)
if not request_sent:
# Because we don't commit the arkivkopi to the database, we can just issue a rollback command.
await db.rollback()
raise ArkivkopiRequestFailed(overforingspakke_id, is_object=False)
return arkivkopi
async def get_arkade_report(overforingspakke_id: int, db: AsyncSession,
sas_generator_client: SASGeneratorClient) -> dict:
bucket_name = await _get_bucket_name(overforingspakke_id, db)
sas_token = await _generate_sas_token(bucket_name, sas_generator_client)
storage_client = AzureStorageClient(sas_token=sas_token, container_name=bucket_name)
available_reports = await overforingspakke_domain.get_reports_in_container(storage_client)
report_dict = overforingspakke_domain.select_most_relevant_report(available_reports)
if not report_dict:
logger.error(f"Found no reports for overforingspakke_id={overforingspakke_id} in Azure container={bucket_name}")
raise ReportNotFound(overforingspakke_id=overforingspakke_id, azure_container=bucket_name)
report_dict["report"] = await storage_client.download_blob(report_dict["path"])
await storage_client.close_connections()
return report_dict
async def _get_source_name(overforingspakke_id: int, db: AsyncSession) -> str:
"""
The source_name is the name given to an uploaded archive (tarfile) stored in the tusd container on Azure Storage.
"""
overforingspakke = await overforingspakke_repository.get_by_id(db, overforingspakke_id)
if not overforingspakke:
raise OverforingspakkeNotFound(overforingspakke_id)
return overforingspakke.tusd_objekt_navn
async def _get_bucket_name(overforingspakke_id: int, db: AsyncSession) -> str:
"""
The arkivuttrekk_lokasjon is the name of the Azure storage container containing an unpacked archive or
an uploaded archive (tarfile) stored in the tusd container on Azure Storage.
"""
arkivuttrekk_lokasjon = await arkivuttrekk_lokasjon_repository.get_by_id(db, overforingspakke_id)
if not arkivuttrekk_lokasjon:
raise ArkivuttrekkLokasjonNotFound(overforingspakke_id)
return arkivuttrekk_lokasjon.bucket
async def request_download_of_tarfile(overforingspakke_id: int, db: AsyncSession,
archive_download_request_sender: ArchiveDownloadRequestSender,
sas_generator_client: SASGeneratorClient) -> Optional[Arkivkopi_DBO]:
invitasjon = await _get_invitasjon(overforingspakke_id, db)
source_container_id = _get_source_container_id(is_object=True)
source_name = await _get_source_name(overforingspakke_id, db)
sas_token = await _generate_sas_token(source_container_id, sas_generator_client)
target_name = _generate_target_name(ekstern_id=invitasjon.ekstern_id, is_object=True)
arkivkopi = await arkivkopi_repository.create(
db,
Arkivkopi.create_from(
invitasjon_id=invitasjon.id,
sas_token=sas_token,
target_name=target_name,
is_object=True,
),
)
parameters = ArkivkopiRequestParameters(
arkivkopi_id=arkivkopi.id,
sas_token=sas_token,
target_name=target_name,
source_name=source_name,
)
request_sent = await archive_download_request_sender.send_download_request(parameters)
if not request_sent:
# Because we don't commit the arkivkopi to the database, we can just issue a rollback command.
await db.rollback()
raise ArkivkopiRequestFailed(overforingspakke_id, is_object=True)
return arkivkopi
async def get_invitasjon_by_ekstern_id(ekstern_id: str, db: AsyncSession) -> Optional[Invitasjon_DBO]:
return await invitasjon_repository.get_by_ekstern_id(db, ekstern_id)
async def get_arkivuttrekk_id_by_overforingspakke_id(overforingspakke_id: int, db: AsyncSession) -> int:
invitasjon = await _get_invitasjon(overforingspakke_id, db)
return invitasjon.arkivuttrekk_id
async def _get_arkivuttrekk_by_overforingspakke_id(overforingspakke_id: int, db: AsyncSession) -> Arkivuttrekk:
arkivuttrekk_id = await get_arkivuttrekk_id_by_overforingspakke_id(overforingspakke_id, db)
arkivuttrekk = await arkivuttrekk_service.get_arkivuttrekk_domain_object_by_id(arkivuttrekk_id, db)
return arkivuttrekk
async def _get_transfer_to_bevaring_params(overforingspakke_id: int, db: AsyncSession) -> Dict[str, str]:
arkivuttrekk = await _get_arkivuttrekk_by_overforingspakke_id(overforingspakke_id, db)
bucket_name = await _get_bucket_name(overforingspakke_id, db)
# TODO: Implement arkivskaper_organisasjon and arkiveier_organisasjon throughout the mottak application
# TODO: Rename params to new names DEPOT_INSTITUSJON -> DATABEHANDLER etc
# TODO: Add new fields
params = {
"OVERFORINGSPAKKE_ID": overforingspakke_id,
"DEPOT_INSTITUSJON": arkivuttrekk.databehandler.name, # DATABEHANDLER
"AVGIVER": arkivuttrekk.arkivskaper_kontaktperson, # ARKIVSKAPER_KONTAKTPERSON
"ARKIVSKAPER_ORGANISASJON": "IKKE IMPLEMENTERT", # Ikke implementert
"ARKIVEIER_ORGANISASJON": "IKKE IMPLEMENTERT", # Ikke implementert
"TITTEL": arkivuttrekk.merkelapp, # MERKELAPP
"START_DATO": arkivuttrekk.arkiv_startdato.isoformat(),
"SLUTT_DATO": arkivuttrekk.arkiv_sluttdato.isoformat(),
"AVTALENUMMER": arkivuttrekk.avtalenummer,
"AZURE_CONTAINER": bucket_name,
}
return params
async def transfer_to_bevaring(
overforingspakke_id: int,
db: AsyncSession,
argo_workflow_request_sender: ArgoWorkflowRequestSender,
) -> BevaringOverforing:
params = await _get_transfer_to_bevaring_params(overforingspakke_id, db)
arkivuttrekk = await arkivuttrekk_repository.get_by_overforingspakke_id(db, overforingspakke_id)
if not arkivuttrekk:
raise ArkivuttrekkNotFound(overforingspakke_id, "Fant ikke arkivuttrekk som tilhører overføringspakke med id=")
request_sent_successfully = await argo_workflow_request_sender.send_argo_workflow_request(
workflow_type=WorkflowType.TRANSFER_TO_BEVARING,
params=params
)
await arkivuttrekk_repository.update_status(db, arkivuttrekk.id, ArkivuttrekkStatus.OVERFORES_TIL_BEVARING)
if not request_sent_successfully:
await db.rollback()
raise SendArgoWorkflowRequestFailed(overforingspakke_id)
    # We need to commit the arkivuttrekk to the database in order to update the status in the web app.
    # The web app issues a follow-up request straight after this one, which might arrive before an autocommit completes.
await db.commit()
return BevaringOverforing(status=BevaringOverforingStatus.IN_PROGRESS)
async def get_bevaring_transfer_status(
overforingspakke_id: int,
db: AsyncSession,
) -> BevaringOverforing:
arkivuttrekk = await arkivuttrekk_repository.get_by_overforingspakke_id(db, overforingspakke_id)
if not arkivuttrekk:
raise ArkivuttrekkNotFound(overforingspakke_id, "Fant ikke arkivuttrekk som tilhører overføringspakke med id=")
    status = BevaringOverforingStatus.NOT_STARTED
    if arkivuttrekk.status == ArkivuttrekkStatus.OVERFORES_TIL_BEVARING:
        status = BevaringOverforingStatus.IN_PROGRESS
    elif arkivuttrekk.status == ArkivuttrekkStatus.OVERFORING_TIL_BEVARING_FEILET:
        status = BevaringOverforingStatus.FAILED
    elif arkivuttrekk.status == ArkivuttrekkStatus.SENDT_TIL_BEVARING:
        status = BevaringOverforingStatus.COMPLETED
return BevaringOverforing(status=status)
|
nilq/baby-python
|
python
|
import math
def binary_search(arr, target):
"""
Performs a binary search
- Time complexity: O(log(n))
- Space complexity: O(1)
Args:
arr (list): List of sorted numbers
target (float): Target to find
Returns:
mid (int): Index of the target. Return -1 if not found
"""
left = 0
right = len(arr) - 1
while left <= right:
mid = math.floor((left + right) / 2)
if arr[mid] < target:
left = mid + 1
elif arr[mid] > target:
right = mid - 1
else:
return mid
return -1
if __name__ == '__main__':
arr = [-2,3,4,7,8,9]
index_target = binary_search(arr, 9)
if index_target != -1:
print("Target found at index: {0} with value: {1}".format(index_target, arr[index_target]))
else:
print("Target not found")
|
nilq/baby-python
|
python
|
from IPython.display import Image
from IPython.core.display import HTML
import numpy as np
import sympy as sp
import random as r
import time
import matplotlib.pyplot as plt
import ipyturtle as turtle
from scipy.ndimage.filters import gaussian_filter1d
from scipy.signal import savgol_filter
|
nilq/baby-python
|
python
|
"""
Put your ad videos here
"""
|
nilq/baby-python
|
python
|
"""Defines current AXT versions and dependencies.
Ensure UsageTrackerRegistry is updated accordingly when incrementing version numbers.
"""
# AXT versions
RUNNER_VERSION = "1.3.1-alpha03"
ESPRESSO_VERSION = "3.4.0-alpha03"
CORE_VERSION = "1.3.1-alpha03"
ANDROIDX_JUNIT_VERSION = "1.1.3-alpha03"
ANDROIDX_TRUTH_VERSION = "1.3.1-alpha03"
UIAUTOMATOR_VERSION = "2.2.0"
JANK_VERSION = "1.0.1"
SERVICES_VERSION = RUNNER_VERSION
# Maven dependency versions
ANDROIDX_VERSION = "1.0.0"
ANDROIDX_VERSION_PATH = "1.0.0"
GOOGLE_MATERIAL_VERSION = "1.0.0"
ANDROIDX_LIFECYCLE_VERSION = "2.0.0"
ANDROIDX_MULTIDEX_VERSION = "2.0.0"
JUNIT_VERSION = "4.12"
HAMCREST_VERSION = "1.3"
TRUTH_VERSION = "1.0"
GUAVA_VERSION = "27.0.1-android"
|
nilq/baby-python
|
python
|
import copy
import numpy as np
import logging
import random
from pprint import pformat
from sklearn.metrics import roc_auc_score
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble._gb import GradientBoostingClassifier, GradientBoostingRegressor
import xgboost as xgb
from settree.set_data import set_object_to_matrix
from settree.set_tree import SetTree, SetSplitNode
from settree.gbest import GradientBoostedSetTreeClassifier, GradientBoostedSetTreeRegressor
from exps.eval_utils.metrics import acc, mse
from exps.eval_utils.general import load_pickle, Timer
def random_params_search_set_tree(ds_train, train_y, ds_test, test_y,
n_experiments,
params_dict, search_params_dict,
mode='bin_cls'):
    # Classification metrics (acc) are maximized while regression (mse) is
    # minimized, so the initial "best" must be the worst value in each case.
    best_test_acc = 0.0 if 'cls' in mode else np.inf
    best_config = None
    best_model_copy = None
    logging.info('Starting random params search for SetTree for {} rounds'.format(n_experiments))
    # np.greater: Return the truth value of (x1 > x2) element-wise.
    condition = np.greater if 'cls' in mode else np.less
for counter in range(n_experiments):
params_dict_copy = copy.deepcopy(params_dict)
for k, v in search_params_dict.items():
params_dict_copy[k] = random.choice(v)
logging.info('\nExp[{}]'.format(counter))
model, train_met, test_met = train_and_predict_set_gbdt(params_dict_copy,
ds_train, train_y,
ds_test, test_y,
mode, out_metrics=True)
if condition(test_met, best_test_acc):
best_config = copy.deepcopy(params_dict_copy)
best_test_acc = copy.deepcopy(test_met)
best_model_copy = copy.deepcopy(model)
logging.info('##################################################')
logging.info('Best experiment test metric: {}'.format(best_test_acc))
logging.info(pformat(best_config))
return best_model_copy, best_config, best_test_acc
def random_params_search(classifier,
train_x, train_y, test_x, test_y,
n_experiments,
params_dict, search_params_dict,
val_x=None, val_y=None,
early_stopping_rounds=5,
mode='bin_cls'):
    # As in the set-tree search above: maximize for classification, minimize for regression.
    best_test_acc = 0.0 if 'cls' in mode else np.inf
    condition = np.greater if 'cls' in mode else np.less
best_config = None
logging.info('Starting random params search for {} for {} rounds'.format(classifier,
n_experiments))
# to use early stopping in sklearn framework
    if classifier == 'sklearn' and early_stopping_rounds is not None:
n_iter_no_change = early_stopping_rounds
params_dict['n_iter_no_change'] = n_iter_no_change
for counter in range(n_experiments):
params_dict_copy = copy.deepcopy(params_dict)
for k, v in search_params_dict.items():
params_dict_copy[k] = random.choice(v)
logging.info('\nExp[{}]'.format(counter))
if classifier == 'xgboost':
_, train_acc, test_acc = train_and_predict_xgboost(params_dict_copy,
train_x, train_y,
test_x, test_y,
val_x, val_y,
early_stopping_rounds,
mode,
out_metrics=True)
elif classifier == 'sklearn':
_, train_acc, test_acc = train_and_predict_sklearn_gbtd(params_dict_copy,
train_x, train_y,
test_x, test_y,
mode)
else:
raise ValueError('Invalid classifier {}'.format(classifier))
        if condition(test_acc, best_test_acc):
best_config = params_dict_copy
best_test_acc = test_acc
logging.info('##################################################')
logging.info('Best experiment test metric: {}'.format(best_test_acc))
logging.info(pformat(best_config))
return best_config, best_test_acc
def split_to_random_sets(x, min_size=2, max_size=20):
'''
Parameters
----------
x : <numpy.ndarray> input data shape (N, d)
min_size : int
max_size : int
Returns
-------
list of <numpy.ndarray>
'''
    if not isinstance(x, np.ndarray):
        x = np.array(x)
    n_items = len(x)
    sizes = []
    while True:
sizes.append(random.choice(range(min_size, max_size)))
if sum(sizes) > n_items:
break
sizes = np.cumsum(np.array(sizes))
if sizes[-1] >= n_items:
sizes = sizes[:-1]
return np.split(x, indices_or_sections=sizes, axis=0)
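# Usage sketch: split 100 rows of 3-dim data into random sets of 2..19 items.
#   sets = split_to_random_sets(np.random.randn(100, 3), min_size=2, max_size=20)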
def eval_sklearn_dt(eval_dt, set_dataset_train, set_dataset_test, verbose=True):
sklearn_dt = DecisionTreeClassifier(criterion="entropy")
sk_train_x, sk_train_y = set_object_to_matrix(set_dataset_train, eval_dt.splitter.operations)
sk_test_x, sk_test_y = set_object_to_matrix(set_dataset_test, eval_dt.splitter.operations)
sklearn_dt = sklearn_dt.fit(sk_train_x, sk_train_y)
if verbose:
sklearn_train_acc = (sklearn_dt.predict(sk_train_x) == sk_train_y).mean()
sklearn_test_acc = (sklearn_dt.predict(sk_test_x) == sk_test_y).mean()
train_acc = (eval_dt.predict(set_dataset_train) == set_dataset_train.y).mean()
test_acc = (eval_dt.predict(set_dataset_test) == set_dataset_test.y).mean()
print('SklearnTree: train acc {:.4f} | test acc : {:.4f}'.format(sklearn_train_acc, sklearn_test_acc))
print('SetTree: train acc {:.4f} | test acc : {:.4f}'.format(train_acc, test_acc))
return sklearn_dt
def train_decision_tree(ds_train, y_train, ds_test, y_test,
splitter, use_attention_set, use_attention_set_comp, attention_set_limit, tree_args):
''' Train a single DT and compare to Sklearn'''
dt = SetTree(attention_set_limit=attention_set_limit,
use_attention_set=use_attention_set,
use_attention_set_comp=use_attention_set_comp,
splitter=splitter,
**tree_args)
logging.info('############ Set tree ############ ')
timer = Timer()
dt.fit(ds_train, y_train)
logging.info('Train took: {}'.format(timer.end()))
timer = Timer()
train_preds = dt.predict(ds_train)
logging.info('Eval train took: {}'.format(timer.end()))
test_preds = dt.predict(ds_test)
train_acc = (train_preds == y_train).mean()
test_acc = (test_preds == y_test).mean()
logging.info('Results : train acc {:.4f} | test acc : {:.4f}'.format(train_acc, test_acc))
logging.info('Tree depth: {} n_leafs: {}'.format(dt.depth, dt.n_leafs))
    operations = getattr(dt, 'operations', None) or dt.splitter.operations
sk_train_x = set_object_to_matrix(ds_train, operations)
sk_test_x = set_object_to_matrix(ds_test, operations)
sklearn_dt = DecisionTreeClassifier(criterion="entropy")
logging.info('############ Sklearn ############ ')
timer = Timer()
sklearn_dt = sklearn_dt.fit(sk_train_x, y_train)
logging.info('Train took: {}'.format(timer.end()))
timer = Timer()
sklearn_train_preds = sklearn_dt.predict(sk_train_x)
logging.info('Eval train took: {}'.format(timer.end()))
sklearn_train_acc = (sklearn_train_preds == y_train).mean()
sklearn_test_acc = (sklearn_dt.predict(sk_test_x) == y_test).mean()
logging.info('Results : train acc {:.4f} | test acc : {:.4f}'.format(sklearn_train_acc, sklearn_test_acc))
logging.info('Tree depth: {} n_leafs: {}'.format(sklearn_dt.tree_.max_depth, sklearn_dt.tree_.node_count))
return dt, sklearn_dt
def count_parametres(gb):
N_PARAMS_NODE = 5
N_PARAMS_LEAF = 1
def count_nodes(node, count=0):
if isinstance(node, SetSplitNode):
return 1 + count_nodes(node.right, count) + count_nodes(node.left, count)
else:
return 0
count = 0
for tree in gb.estimators_.flatten():
count += count_nodes(tree.tree_, count=0) * N_PARAMS_NODE
count += tree.n_leafs * N_PARAMS_LEAF
return count
def load_checkpoint_gbdt(checkpoint):
gbdt = load_pickle(checkpoint)
none_estimators_inds = np.where(gbdt.estimators_[:, 0] == None)[0]
if hasattr(gbdt, 'n_estimators_'):
n_stages = gbdt.n_estimators_
elif len(none_estimators_inds):
n_stages = min(none_estimators_inds)
else:
n_stages = gbdt.n_estimators
if n_stages < gbdt.n_estimators:
gbdt.estimators_ = gbdt.estimators_[:n_stages]
gbdt.train_score_ = gbdt.train_score_[:n_stages]
if hasattr(gbdt, 'oob_improvement_'):
gbdt.oob_improvement_ = gbdt.oob_improvement_[:n_stages]
return gbdt
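# Usage sketch (the pickle path is illustrative):
#   gbdt = load_checkpoint_gbdt('checkpoints/set_gbdt.pkl')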
def train_and_predict_set_gbdt(params, ds_train, train_y, ds_test, test_y,
mode='bin_cls', out_metrics=False, resume=None, eval_train=True, verbose=True):
# mode : bin_cls, multi_cls, reg
if verbose:
logging.info('############ Set GBDT ############ ')
logging.info('Params:\n' + pformat(params))
if mode == 'bin_cls':
gbdt = GradientBoostedSetTreeClassifier(**params)
eval_met = acc
eval_met_name = 'acc'
elif mode == 'multi_cls':
gbdt = GradientBoostedSetTreeClassifier(**params)
eval_met = acc
eval_met_name = 'acc'
else:
gbdt = GradientBoostedSetTreeRegressor(**params)
eval_met = mse
eval_met_name = 'mse'
timer = Timer()
    if resume is not None:
gbdt = load_pickle(resume)
# if it is a checkpoint - saved before completed the train - resize the estimators_ array
none_estimators_inds = np.where(gbdt.estimators_[:, 0] == None)[0]
if hasattr(gbdt, 'n_estimators_'):
n_stages = gbdt.n_estimators_
elif len(none_estimators_inds):
n_stages = min(none_estimators_inds)
else:
n_stages = gbdt.n_estimators
if n_stages < gbdt.n_estimators:
gbdt.estimators_ = gbdt.estimators_[:n_stages]
gbdt.train_score_ = gbdt.train_score_[:n_stages]
if hasattr(gbdt, 'oob_improvement_'):
gbdt.oob_improvement_ = gbdt.oob_improvement_[:n_stages]
logging.info('Loaded model from {}, with {} trees, resume training'.format(resume, n_stages))
gbdt.set_params(**{'n_estimators': n_stages + params['n_estimators']})
logging.info('Continue training for {} estimators'.format(params['n_estimators']))
logging.info('Warning: continue training with the previous parameters')
logging.info('Original model parameters:')
logging.info(pformat(params))
gbdt.fit(ds_train, train_y)
if verbose:
logging.info('Train took: {}'.format(timer.end()))
if mode == 'bin_cls':
timer = Timer()
if eval_train:
train_raw_predictions = gbdt.decision_function(ds_train)
if verbose:
logging.info('Eval train took: {}'.format(timer.end()))
else:
logging.info('Skipped train evaluation - train metrics are irrelevant')
train_raw_predictions = np.zeros((len(ds_train),)) # tmp solution
test_raw_predictions = gbdt.decision_function(ds_test)
train_encoded_labels = gbdt.loss_._raw_prediction_to_decision(train_raw_predictions)
train_preds = gbdt.classes_.take(train_encoded_labels, axis=0)
test_encoded_labels = gbdt.loss_._raw_prediction_to_decision(test_raw_predictions)
test_preds = gbdt.classes_.take(test_encoded_labels, axis=0)
train_met = eval_met(train_y, train_preds)
test_met = eval_met(test_y, test_preds)
train_probs = gbdt.loss_._raw_prediction_to_proba(train_raw_predictions)
test_probs = gbdt.loss_._raw_prediction_to_proba(test_raw_predictions)
train_auc = roc_auc_score(train_y, train_probs[:, 1])
test_auc = roc_auc_score(test_y, test_probs[:, 1])
if verbose:
logging.info('Results : train {} {:.6f} auc: {:.6f} | test {} : {:.4f} auc: {:.4f}'.format(eval_met_name, train_met,
train_auc, eval_met_name,
test_met, test_auc))
else:
timer = Timer()
if eval_train:
train_preds = gbdt.predict(ds_train)
if verbose:
logging.info('Eval train took: {}'.format(timer.end()))
else:
logging.info('Skipped train evaluation - train metrics are irrelevant')
train_preds = np.zeros((len(ds_train),)) # tmp solution
test_preds = gbdt.predict(ds_test)
train_met = eval_met(train_y, train_preds)
test_met = eval_met(test_y, test_preds)
if verbose:
logging.info('Results : train {} {:.6f} | test {} : {:.6f}'.format(eval_met_name, train_met,
eval_met_name, test_met))
depths = []
n_leafs = []
n_stages, K = gbdt.estimators_.shape
for i in range(n_stages):
for k in range(K):
depths.append(gbdt.estimators_[i, k].depth)
n_leafs.append(gbdt.estimators_[i, k].n_leafs)
depths = np.array(depths)
n_leafs = np.array(n_leafs)
if verbose:
logging.info('Trees sizes stats: depth: {:.1f}+-{:.3f} | n_leafs: {:.1f}+-{:.3f}'.format(depths.mean(), depths.std(),
n_leafs.mean(), n_leafs.std()))
if out_metrics:
return gbdt, train_met, test_met
else:
return gbdt
def train_and_predict_set_tree(params, ds_train, train_y, ds_test, test_y,
mode='bin_cls', out_metrics=False, verbose=True):
# mode : bin_cls, multi_cls, reg
if verbose:
logging.info('############ Set Tree ############ ')
logging.info('Params:\n' + pformat(params))
tree = SetTree(**params)
if mode == 'bin_cls':
eval_met = acc
eval_met_name = 'acc'
elif mode == 'multi_cls':
eval_met = acc
eval_met_name = 'acc'
else:
eval_met = mse
eval_met_name = 'mse'
timer = Timer()
tree.fit(ds_train, train_y)
if verbose:
logging.info('Train took: {}'.format(timer.end()))
if mode == 'bin_cls':
timer = Timer()
train_probs = tree.predict_proba(ds_train)
train_preds = tree.predict(ds_train)
if verbose:
logging.info('Eval train took: {}'.format(timer.end()))
test_probs = tree.predict_proba(ds_test)
test_preds = tree.predict(ds_test)
train_met = eval_met(train_y, train_preds)
test_met = eval_met(test_y, test_preds)
train_auc = roc_auc_score(train_y, train_probs[:, 1])
test_auc = roc_auc_score(test_y, test_probs[:, 1])
if verbose:
logging.info('Results : train {} {:.6f} auc: {:.6f} | test {} : {:.4f} auc: {:.4f}'.format(eval_met_name, train_met,
train_auc, eval_met_name,
test_met, test_auc))
else:
timer = Timer()
train_preds = tree.predict(ds_train)
if verbose:
logging.info('Eval train took: {}'.format(timer.end()))
test_preds = tree.predict(ds_test)
train_met = eval_met(train_y, train_preds)
test_met = eval_met(test_y, test_preds)
if verbose:
logging.info('Results : train {} {:.6f} | test {} : {:.6f}'.format(eval_met_name, train_met,
eval_met_name, test_met))
if out_metrics:
return tree, train_met, test_met
else:
return tree
def train_and_predict_xgboost(params,
train_x, train_y, test_x, test_y, val_x=None, val_y=None,
early_stopping_rounds=None, mode='bin_cls', out_metrics=False, verbose=True):
if verbose:
logging.info('############ XGBoost ############ ')
logging.info('Params:\n' + pformat(params))
if mode == 'bin_cls':
gbdt = xgb.XGBClassifier(**params)
eval_met = acc
eval_met_name = 'acc'
elif mode == 'multi_cls':
gbdt = xgb.XGBClassifier(**params)
eval_met = acc
eval_met_name = 'acc'
else:
gbdt = xgb.XGBRegressor(**params)
eval_met = mse
eval_met_name = 'mse'
if verbose:
logging.info('Params: {}'.format(params))
timer = Timer()
if np.any(val_x):
gbdt.fit(X=train_x,
y=train_y,
eval_set=[(val_x, val_y)],
early_stopping_rounds=early_stopping_rounds)
else:
gbdt.fit(train_x, train_y)
if verbose:
logging.info('Train took: {}'.format(timer.end()))
timer = Timer()
train_preds = gbdt.predict(train_x)
if verbose:
logging.info('Eval train took: {}'.format(timer.end()))
test_preds = gbdt.predict(test_x)
train_met = eval_met(train_y, train_preds)
test_met = eval_met(test_y, test_preds)
if mode == 'bin_cls':
train_proba = gbdt.predict_proba(train_x)[:, 1]
test_proba = gbdt.predict_proba(test_x)[:, 1]
train_auc = roc_auc_score(train_y, train_proba)
test_auc = roc_auc_score(test_y, test_proba)
if verbose:
logging.info('Results : train {} {:.6f} auc: {:.4f} | test {} : {:.6f} auc: {:.4f}'.format(eval_met_name, train_met,
train_auc, eval_met_name,
test_met, test_auc))
else:
if verbose:
logging.info('Results : train {} {:.6f} | test {} : {:.6f}'.format(eval_met_name, train_met,
eval_met_name, test_met))
if out_metrics:
return gbdt, train_met, test_met
else:
return gbdt
def train_and_predict_sklearn_gbtd(params,
train_x, train_y, test_x, test_y,
mode='bin_cls', out_metrics=False, verbose=True):
if verbose:
logging.info('############ Sklearn GBDT ############ ')
logging.info('Params:\n' + pformat(params))
if mode == 'bin_cls':
gbdt = GradientBoostingClassifier(**params)
eval_met = acc
eval_met_name = 'acc'
elif mode == 'multi_cls':
gbdt = GradientBoostingClassifier(**params)
eval_met = acc
eval_met_name = 'acc'
else:
gbdt = GradientBoostingRegressor(**params)
eval_met = mse
eval_met_name = 'mse'
if verbose:
logging.info('Params: {}'.format(params))
timer = Timer()
gbdt.fit(train_x, train_y)
if verbose:
logging.info('Train took: {}'.format(timer.end()))
if mode == 'bin_cls':
timer = Timer()
train_raw_predictions = gbdt.decision_function(train_x)
if verbose:
logging.info('Eval train took: {}'.format(timer.end()))
test_raw_predictions = gbdt.decision_function(test_x)
train_encoded_labels = gbdt.loss_._raw_prediction_to_decision(train_raw_predictions)
train_preds = gbdt.classes_.take(train_encoded_labels, axis=0)
test_encoded_labels = gbdt.loss_._raw_prediction_to_decision(test_raw_predictions)
test_preds = gbdt.classes_.take(test_encoded_labels, axis=0)
train_met = eval_met(train_y, train_preds)
test_met = eval_met(test_y, test_preds)
train_probs = gbdt.loss_._raw_prediction_to_proba(train_raw_predictions)
test_probs = gbdt.loss_._raw_prediction_to_proba(test_raw_predictions)
train_auc = roc_auc_score(train_y, train_probs[:, 1])
test_auc = roc_auc_score(test_y, test_probs[:, 1])
if verbose:
logging.info('Results : train {} {:.6f} auc: {:.6f} | test {} : {:.4f} auc: {:.4f}'.format(eval_met_name, train_met,
train_auc, eval_met_name,
test_met, test_auc))
else:
timer = Timer()
train_preds = gbdt.predict(train_x)
if verbose:
logging.info('Eval train took: {}'.format(timer.end()))
test_preds = gbdt.predict(test_x)
train_met = eval_met(train_y, train_preds)
test_met = eval_met(test_y, test_preds)
if verbose:
logging.info('Results : train {} {:.6f} | test {} : {:.6f}'.format(eval_met_name, train_met,
eval_met_name, test_met))
if out_metrics:
return gbdt, train_met, test_met
else:
return gbdt
def train_and_predict_sklearn_dt(params,
train_x, train_y, test_x, test_y,
mode='bin_cls', out_metrics=False, verbose=True):
if verbose:
logging.info('############ Sklearn DT ############ ')
logging.info('Params:\n' + pformat(params))
if mode == 'bin_cls':
dt = DecisionTreeClassifier(**params)
eval_met = acc
eval_met_name = 'acc'
elif mode == 'multi_cls':
dt = DecisionTreeClassifier(**params)
eval_met = acc
eval_met_name = 'acc'
else:
dt = DecisionTreeRegressor(**params)
eval_met = mse
eval_met_name = 'mse'
if verbose:
logging.info('Params: {}'.format(params))
timer = Timer()
dt.fit(train_x, train_y)
if verbose:
logging.info('Train took: {}'.format(timer.end()))
if mode == 'bin_cls':
timer = Timer()
train_preds = dt.predict(train_x)
if verbose:
logging.info('Eval train took: {}'.format(timer.end()))
test_preds = dt.predict(test_x)
train_met = eval_met(train_y, train_preds)
test_met = eval_met(test_y, test_preds)
train_probs = dt.predict_proba(train_x)
test_probs = dt.predict_proba(test_x)
train_auc = roc_auc_score(train_y, train_probs[:, 1])
test_auc = roc_auc_score(test_y, test_probs[:, 1])
if verbose:
logging.info('Results : train {} {:.6f} auc: {:.6f} | test {} : {:.4f} auc: {:.4f}'.format(eval_met_name, train_met,
train_auc, eval_met_name,
test_met, test_auc))
else:
timer = Timer()
train_preds = dt.predict(train_x)
if verbose:
logging.info('Eval train took: {}'.format(timer.end()))
test_preds = dt.predict(test_x)
train_met = eval_met(train_y, train_preds)
test_met = eval_met(test_y, test_preds)
if verbose:
logging.info('Results : train {} {:.6f} | test {} : {:.6f}'.format(eval_met_name, train_met,
eval_met_name, test_met))
if out_metrics:
return dt, train_met, test_met
else:
return dt
|
nilq/baby-python
|
python
|
from postDB import Column, Model, types
class UserRole(Model):
"""
User Role Class
Database Attributes:
Attributes stored in the `userroles` table.
:param int user_id: The users Discord ID
:param int role_id: The role ID (Snowflake)
"""
user_id = Column(
types.ForeignKey("users", "id", sql_type=types.Integer(big=True)),
primary_key=True,
)
role_id = Column(
types.ForeignKey("roles", "id", sql_type=types.Integer(big=True)),
primary_key=True,
)
@classmethod
async def create(cls, member_id: int, role_id: int):
query = """
INSERT INTO userroles (user_id, role_id) VALUES ($1, $2) RETURNING *;
"""
record = await cls.pool.fetchrow(query, member_id, role_id)
return cls(**record)
@classmethod
async def delete(cls, member_id: int, role_id: int):
query = """
DELETE FROM userroles WHERE user_id = $1 AND role_id = $2;
"""
await cls.pool.execute(query, member_id, role_id)
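# Minimal usage sketch (requires an initialized cls.pool connection; the IDs
# are illustrative Discord snowflakes):
#   role = await UserRole.create(member_id=80351110224678912, role_id=175643578071121920)
#   await UserRole.delete(member_id=80351110224678912, role_id=175643578071121920)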
|
nilq/baby-python
|
python
|
class Charge:
def __init__(self, vehicle):
self.vehicle = vehicle
def start_charging(self):
return self.vehicle.send_command(
'charge_start'
)
def open_charge_port(self):
return self.vehicle.send_command(
'charge_port_door_open'
)
def stop_charging(self):
return self.vehicle.send_command(
'charge_stop'
)
def set_charge_standard(self):
return self.vehicle.send_command(
'charge_standard'
)
def set_charge_max_range(self):
return self.vehicle.send_command(
'charge_max_range'
)
def set_charge_limit(self, percentage):
percentage = round(percentage)
if percentage < 50 or percentage > 100:
raise ValueError('Percentage should be between 50 and 100')
return self.vehicle.send_command(
'set_charge_limit',
{'limit_value': percentage}
)
@property
def attributes(self):
return self.vehicle._charge_data
@property
def is_charging(self):
return self.charging_state == 'Charging'
@property
def charging_state(self):
return self.vehicle._charge_data.get('charging_state')
@property
def charge_limit_soc(self):
return self.vehicle._charge_data.get('charge_limit_soc')
@property
def charge_limit_soc_std(self):
return self.vehicle._charge_data.get('charge_limit_soc_std')
@property
def charge_limit_soc_min(self):
return self.vehicle._charge_data.get('charge_limit_soc_min')
@property
def charge_limit_soc_max(self):
return self.vehicle._charge_data.get('charge_limit_soc_max')
@property
def charge_to_max_range(self):
return self.vehicle._charge_data.get('charge_to_max_range')
@property
def battery_heater_on(self):
return self.vehicle._charge_data.get('battery_heater_on')
@property
def not_enough_power_to_heat(self):
return self.vehicle._charge_data.get('not_enough_power_to_heat')
@property
def max_range_charge_counter(self):
return self.vehicle._charge_data.get('max_range_charge_counter')
@property
def fast_charger_present(self):
return self.vehicle._charge_data.get('fast_charger_present')
@property
def fast_charger_type(self):
return self.vehicle._charge_data.get('fast_charger_type')
@property
def battery_range(self):
return self.vehicle._charge_data.get('battery_range')
@property
def est_battery_range(self):
return self.vehicle._charge_data.get('est_battery_range')
@property
def ideal_battery_range(self):
return self.vehicle._charge_data.get('ideal_battery_range')
@property
def battery_level(self):
return self.vehicle._charge_data.get('battery_level')
@property
def usable_battery_level(self):
return self.vehicle._charge_data.get('usable_battery_level')
@property
def battery_current(self):
return self.vehicle._charge_data.get('battery_current')
@property
def charge_energy_added(self):
return self.vehicle._charge_data.get('charge_energy_added')
@property
def charge_miles_added_rated(self):
return self.vehicle._charge_data.get('charge_miles_added_rated')
@property
def charge_miles_added_ideal(self):
return self.vehicle._charge_data.get('charge_miles_added_ideal')
@property
def charger_voltage(self):
return self.vehicle._charge_data.get('charger_voltage')
@property
def charger_pilot_current(self):
return self.vehicle._charge_data.get('charger_pilot_current')
@property
def charger_actual_current(self):
return self.vehicle._charge_data.get('charger_actual_current')
@property
def charger_power(self):
return self.vehicle._charge_data.get('charger_power')
@property
def time_to_full_charge(self):
return self.vehicle._charge_data.get('time_to_full_charge')
@property
def trip_charging(self):
return self.vehicle._charge_data.get('trip_charging')
@property
def charge_rate(self):
return self.vehicle._charge_data.get('charge_rate')
@property
def charge_port_door_open(self):
return self.vehicle._charge_data.get('charge_port_door_open')
@property
def motorized_charge_port(self):
return self.vehicle._charge_data.get('motorized_charge_port')
@property
def scheduled_charging_start_time(self):
return self.vehicle._charge_data.get('scheduled_charging_start_time')
@property
def scheduled_charging_pending(self):
return self.vehicle._charge_data.get('scheduled_charging_pending')
@property
def user_charge_enable_request(self):
return self.vehicle._charge_data.get('user_charge_enable_request')
@property
def eu_vehicle(self):
return self.vehicle._charge_data.get('eu_vehicle')
@property
def charger_phases(self):
return self.vehicle._charge_data.get('charger_phases')
@property
def charge_port_latch(self):
return self.vehicle._charge_data.get('charge_port_latch')
@property
def charge_current_request(self):
return self.vehicle._charge_data.get('charge_current_request')
@property
def charge_current_request_max(self):
return self.vehicle._charge_data.get('charge_current_request_max')
@property
def managed_charging_active(self):
return self.vehicle._charge_data.get('managed_charging_active')
@property
def managed_charging_user_canceled(self):
return self.vehicle._charge_data.get('managed_charging_user_canceled')
@property
def managed_charging_start_time(self):
return self.vehicle._charge_data.get('managed_charging_start_time')
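# Minimal usage sketch (assumes a vehicle object exposing send_command() and a
# cached _charge_data dict, as the class expects):
#   charge = Charge(vehicle)
#   charge.set_charge_limit(80)
#   print(charge.battery_level, charge.time_to_full_charge)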
|
nilq/baby-python
|
python
|
#
# PySNMP MIB module GDCUAS7626-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/GDCUAS7626-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:19:14 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueSizeConstraint, ValueRangeConstraint, ConstraintsIntersection, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ValueRangeConstraint", "ConstraintsIntersection", "ConstraintsUnion")
SCinstance, = mibBuilder.importSymbols("GDCMACRO-MIB", "SCinstance")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
enterprises, Unsigned32, MibIdentifier, ObjectIdentity, IpAddress, ModuleIdentity, TimeTicks, Bits, NotificationType, Counter32, Counter64, iso, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Integer32 = mibBuilder.importSymbols("SNMPv2-SMI", "enterprises", "Unsigned32", "MibIdentifier", "ObjectIdentity", "IpAddress", "ModuleIdentity", "TimeTicks", "Bits", "NotificationType", "Counter32", "Counter64", "iso", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Integer32")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
gdc = MibIdentifier((1, 3, 6, 1, 4, 1, 498))
bql2 = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12))
uas7626 = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12))
uas7626Version = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 1))
uas7626Maintenance = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 2))
uas7626Configuration = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 3))
uas7626Diagnostics = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 4))
uas7626Performance = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 5))
uas7626AlarmConfig = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 6))
uas7626Alarms = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7))
uas7626MIBversion = MibScalar((1, 3, 6, 1, 4, 1, 498, 12, 12, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(5, 5)).setFixedLength(5)).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626MIBversion.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626MIBversion.setDescription("Identifies the version of the MIB. The format of the version is x.yzT, where 'x' identifies the major revision number, 'y' identifies the minor revision number, 'z' identifies the typographical revision, and T identifies the test revision. Acceptable values for the individual revision components are as follows: x: 1 - 9 y: 0 - 9 z: 0 - 9 T: A - Z Upon formal release, no designation for the test revision will be present.")
uas7626VersionTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 1, 2), )
if mibBuilder.loadTexts: uas7626VersionTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626VersionTable.setDescription('The 7626 version table ')
uas7626VersionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 1, 2, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626VersionIndex"))
if mibBuilder.loadTexts: uas7626VersionEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626VersionEntry.setDescription('An entry in the GDC uas7626System Version Table.')
uas7626VersionIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 1, 2, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626VersionIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626VersionIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626ActiveFirmwareRev = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 1, 2, 1, 2), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626ActiveFirmwareRev.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626ActiveFirmwareRev.setDescription('The version number of the firmware currently executing. The format is MM.NN.BB where: MM: Major Revision (0-99) NN: Minor Revision (0-99) BB: Bug Fix Revision (0-99) Valid [i]nterface = 0')
uas7626StoredFirmwareRev = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 1, 2, 1, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(8, 8)).setFixedLength(8)).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626StoredFirmwareRev.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626StoredFirmwareRev.setDescription('The version number of the firmware stored (in a compressed format) but not currently executing. MM: Major Revision (0-99) NN: Minor Revision (0-99) BB: Bug Fix Revision (0-99) Valid [i]nterface = 0')
uas7626StoredFirmwareStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("statBlank", 1), ("statDownLoading", 2), ("statOK", 3), ("statCheckSumBad", 4), ("statUnZipping", 5), ("statBadUnZip", 6), ("statDownloadAborted", 7)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626StoredFirmwareStatus.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626StoredFirmwareStatus.setDescription('This object represents the state of the Non-Active or Stored firmware: statBlank(1) Factory Default statDownLoading(2) In process of downloading firmware statOK(3) Zipped version checksum succesful (OK to switch) (can set uas7626SwitchActiveFirmware to switchActive(2)) statCheckSumBad(4) Failed checksum after download statUnZipping(5) In process of uncompressing into active area statBadUnZip(6) Failed last uncompress statDownloadAborted(7) Download aborted by user Valid [i]nterface = 0')
uas7626SwitchActiveFirmware = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("switchNorm", 1), ("switchActive", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626SwitchActiveFirmware.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626SwitchActiveFirmware.setDescription('This object is used to switch the active executing firmware from the version in uas7626ActiveFirmwareRev to the version in uas7626StoredFirmwareRev. When a switchActive(2) is set (write only) the element will: 1) reboot 2) uncompress stored code into active area 3) perform checksum on active area 4) Set uas7626StoredStatus object to indicate results 5) If succesfull: update uas7626ActiveFirmwareRev and uas7626StoredFirmwareRev and begin executing If failed: replace active area with previous revision The switchNorm(1) enumeration will always be read. Setting switchActive(2) when the uas7626StoredFirmwareStatus is not statOK(3) will result in an error. Valid [i]nterface = 0')
uas7626DownloadingMode = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 1, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disableAll", 1), ("enableAndWait", 2), ("enableAndSwitch", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626DownloadingMode.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626DownloadingMode.setDescription('This object is used to configure the download mode: disableAll(1) Prevents any firmware downloading to UAS7626 enableAndWait(2) Allows downloading zipped code to be stored only! enableAndSwitch(3) Allows downloading and then unzips and begins executing the new code Valid [i]nterface = 0')
uas7626MaintenanceTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1), )
if mibBuilder.loadTexts: uas7626MaintenanceTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626MaintenanceTable.setDescription('The table describes the maintenance objects for uas7626.')
uas7626MaintenanceEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626MaintenanceLineIndex"))
if mibBuilder.loadTexts: uas7626MaintenanceEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626MaintenanceEntry.setDescription('An entry in the GDC 7626 Maintenance Table.')
uas7626MaintenanceLineIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626MaintenanceLineIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626MaintenanceLineIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626SoftReset = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("norm", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626SoftReset.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626SoftReset.setDescription('Supports the action of soft resetting the unit. When this object is set to reset(2), then the unit performs a soft reset, whose meaning is specific to the type of unit being managed. The value of norm(1) will be returned when the reset is complete. The value of norm(1) can not be set by management. Valid [i]nterface = 0')
uas7626DefaultInit = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("normal", 1), ("factoryDefault", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626DefaultInit.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626DefaultInit.setDescription('This is used to allow the NonVolatile Configuration to be set to a factory default state. When this value is set to factoryDefault(2) the unit will perform a reset to make the default configuration take affect. The value of normal(1) will be returned when the initialization is complete. The value of normal(1) can not be set by management. Valid [i]nterface = 0')
uas7626ResetMajorAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("norm", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626ResetMajorAlarm.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626ResetMajorAlarm.setDescription('This variable is used to reset the Major BER alarm. A value of norm(1) cannot be set by management and will always be returned on a read. Valid [i]nterface = 01-06')
uas7626ResetMinorAlarm = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("norm", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626ResetMinorAlarm.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626ResetMinorAlarm.setDescription('This variable is used to reset the Minor BER alarm. A value of norm(1) cannot be set by management and will always be returned on a read. Valid [i]nterface = 01-06')
uas7626ResetStatistics = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("norm", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626ResetStatistics.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626ResetStatistics.setDescription('This variable is used to reset the performance intervals. When it is set to reset(2), the performance tables and uas7626ValidIntervals are reset to zero. The value of norm(1) cannot be set by management. Valid [i]nterface = 01-06')
uas7626ValidIntervals = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 7), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626ValidIntervals.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626ValidIntervals.setDescription('The number of previous intervals for which valid data was collected. The value will be 16 unless the interface was brought on-line within the last 4 hours, in which case the value will be the number of complete 15 minute intervals since the interface has been online. Valid [i]nterface = 01-06')
uas7626SysUpTime = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 8), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626SysUpTime.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626SysUpTime.setDescription('This variable is used to report the elapsed system tick time for conversion to real time at the controller and is not related to the sysUpTime referenced in MIB-II. Upon power-up of the unit, the elapsed time is cleared. The elapsed time counter rolls over upon reaching the maximum count. Valid [i]nterface = 0')
uas7626LedStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 9), OctetString().subtype(subtypeSpec=ValueSizeConstraint(4, 4)).setFixedLength(4)).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626LedStatus.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626LedStatus.setDescription('Returns a bitwise snapshot of the front panel LED state: key --- 001 = SOLID GREEN 010 = SOLID RED 011 = FLASHING RED 100 = FLASHING GREEN 1.7 - n/a 1.6 - \\ 1.5 - -- Loop 1 (001, 010, 011, 100) 1.4 - / 1.3 - \\ 1.2 - -- Loop 2 (001, 010, 011, 100) 1.1 - / 1.0 - f/u 2.7 - f/u 2.6 - \\ 2.5 - -- Loop 3 (001, 010, 011, 100) 2.4 - / 2.3 - \\ 2.2 - -- Loop 4 (001, 010, 011, 100) 2.1 - / 2.0 - f/u 3.7 - f/u 3.6 - \\ 3.5 - -- Loop 5 (001, 010, 011, 100) 3.4 - / 3.3 - \\ 3.2 - -- Loop 6 (001, 010, 011, 100) 3.1 - / 3.0 - f/u 4.7 - f/u 4.6 - f/u 4.5 - f/u 4.4 - Alarm (0 = Off; 1 = Alarm Active ) 4.3 - Test Mode (0 = Off; 1 = Alarm Active ) 4.2 - In Service (0 = Off; 1 = Alarm Active ) 4.1 - Transmitter Timing Failure (0 = Off; 1 = Alarm Active ) 4.0 - Management Response (0 = Off; 1 = Alarm Active ) Valid [i]nterface = 0')
uas7626AlarmStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 10), OctetString().subtype(subtypeSpec=ValueSizeConstraint(7, 7)).setFixedLength(7)).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626AlarmStatus.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626AlarmStatus.setDescription('Returns actual bit-wise Status of alarms regardless of whether the alarm is masked or not. key --- 0 = Inactive 1 = Active 1.7 - f/u 1.6 - Power-up 1.5 - f/u 1.4 - f/u 1.3 - f/u 1.2 - f/u 1.1 - f/u 1.0 - f/u Note: n=2-6 for octets 2-7 n.7 - f/u n.6 - Loop n Major n.5 - Loop n Minor n.4 - Loop n Loss of Transmit Clock n.3 - Loop n 2B1Q Out of Sync n.2 - Loop n Sealing Current Non-Continuity Alarm n.1 - Loop n Errored Seconds n.0 - Loop n Unavailable Seconds Valid [i]nterface = 0')
uas7626StatLastInitialized = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 11), TimeTicks()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626StatLastInitialized.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626StatLastInitialized.setDescription("The elapsed time since the statistics were last initialized. This will be zero'd when uas7626ResetStatistics is reset(2). Valid [i]nterface = 01-06")
uas7626CircuitID = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 2, 1, 1, 12), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(16, 16)).setFixedLength(16)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626CircuitID.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626CircuitID.setDescription('Name to identify the circuit. Valid [i]nterface = 01-06')
uas7626ConfigTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 3, 1), )
if mibBuilder.loadTexts: uas7626ConfigTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626ConfigTable.setDescription('The GDC Configuration table.')
uas7626ConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 3, 1, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626ConfigIndex"))
if mibBuilder.loadTexts: uas7626ConfigEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626ConfigEntry.setDescription('The GDC Configuration table entry.')
uas7626ConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 3, 1, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626ConfigIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626ConfigIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626DataRate = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("kbps64", 1), ("kbps128", 2), ("inhibit", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626DataRate.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626DataRate.setDescription('Object to select the Data Rate on Loop n. Valid [i]nterface = 01-06')
uas7626Highway = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7, 8, 9))).clone(namedValues=NamedValues(("notAssigned", 1), ("highway1", 2), ("highway2", 3), ("highway3", 4), ("highway4", 5), ("highway5", 6), ("highway6", 7), ("highway7", 8), ("highway8", 9)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626Highway.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626Highway.setDescription('Object to select the data highway for Loop n. Valid [i]nterface = 01-06')
uas7626TimeSlot = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 3, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 64))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626TimeSlot.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626TimeSlot.setDescription('Object to select the timeslots for the data highway given by uas7626Highway for loop n. Valid selections are 1-31 and 64. A value of 64 indicates the loop is not assigned to a highway. Valid [i]nterface = 01-06')
uas7626DiagTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 4, 1), )
if mibBuilder.loadTexts: uas7626DiagTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626DiagTable.setDescription('The GDC 7626 Diagnostics table. Information in the entries of this table support diagnostics testing, both active testing via patterns, and passive testing via loopbacks.')
uas7626DiagEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 4, 1, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626DiagIndex"))
if mibBuilder.loadTexts: uas7626DiagEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626DiagEntry.setDescription('The GDC 7626 Diagnostics table entry.')
uas7626DiagIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 4, 1, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626DiagIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626DiagIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626TestSelection = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 4, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))).clone(namedValues=NamedValues(("stopTest", 1), ("digitalLoopback", 2), ("selfTest", 3), ("remoteDigitalLoopback", 4), ("rdlSelfTest", 5)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626TestSelection.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626TestSelection.setDescription('Selects the test to run. A selection of stopTest(1) stops the current test. Valid [i]nterface = 01-06')
uas7626TestResults = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 4, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1048576))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626TestResults.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626TestResults.setDescription('Returns the self test bit error count. Valid [i]nterface = 01-06')
uas7626ResetTestResults = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 4, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("norm", 1), ("reset", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626ResetTestResults.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626ResetTestResults.setDescription('Resets the self test bit error count. A value of norm(1) cannot be set by management and is always returned on a read. Valid [i]nterface = 01-06')
uas7626NoResponseAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7, 1))
uas7626DiagRxErrAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7, 2))
uas7626PowerUpAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7, 3))
uas7626LossofTransmitClockAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7, 4))
uas7626OutofSyncAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7, 5))
uas7626SealingCurrentNoContAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7, 6))
uas7626UASAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7, 7))
uas7626ESAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7, 8))
uas7626MajorBERAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7, 9))
uas7626MinorBERAlm = MibIdentifier((1, 3, 6, 1, 4, 1, 498, 12, 12, 7, 10))
uas7626AlarmConfigTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 1), )
if mibBuilder.loadTexts: uas7626AlarmConfigTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626AlarmConfigTable.setDescription('This table contains entries that configure Alarm reporting.')
uas7626AlarmConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 1, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626AlarmConfigIndex"), (0, "GDCUAS7626-MIB", "uas7626AlarmConfigIdentifier"))
if mibBuilder.loadTexts: uas7626AlarmConfigEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626AlarmConfigEntry.setDescription('An entry in the uas7626 Alarm Configuration table.')
uas7626AlarmConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 1, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626AlarmConfigIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626AlarmConfigIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626AlarmConfigIdentifier = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 1, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626AlarmConfigIdentifier.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626AlarmConfigIdentifier.setDescription('format: iso.org.dod.internet.private.enterprises.gdc.bql2.uas7626.uas7626Alarms.ALM example: 1.3.6.1.4.1.498.12.12.7.ALM where ALM = 1 for uas7626NoResponse alarm, 2 for uas7626DiagRxErr alarm, etc., as specified in the Alarm Definitions above.')
uas7626AlarmCountThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("thres10E03", 1), ("thres10E04", 2), ("thres10E05", 3), ("thres10E06", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626AlarmCountThreshold.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626AlarmCountThreshold.setDescription('This function sets/reads the alarm threshold criteria. This threshold is used along with the alarm window to determine the number of instances in a given time frame for an alarm to occur before the alarm is considered active. Valid [i]nterface = 01-06')
uas7626LocalAlarmConfigTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 2), )
if mibBuilder.loadTexts: uas7626LocalAlarmConfigTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626LocalAlarmConfigTable.setDescription('The uas7626LocalAlarmConfigTable contains entries that configure alarm reporting to the alarm card.')
uas7626LocalAlarmConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 2, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626LocalAlarmConfigIndex"))
if mibBuilder.loadTexts: uas7626LocalAlarmConfigEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626LocalAlarmConfigEntry.setDescription('An entry in the GDC Local Alarm Configuration table.')
uas7626LocalAlarmConfigIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 2, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626LocalAlarmConfigIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626LocalAlarmConfigIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626LossOfClockLocal = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("enabledMinor", 2), ("enabledMajor", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626LossOfClockLocal.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626LossOfClockLocal.setDescription('Disables or enables the alarm on the Major bus or Minor bus. Valid [i]nterface = 01-06')
uas7626ESLocal = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 2, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("enabledMinor", 2), ("enabledMajor", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626ESLocal.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626ESLocal.setDescription('Disables or enables the alarm on the Major bus or Minor bus. Valid [i]nterface = 01-06')
uas7626UASLocal = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("enabledMinor", 2), ("enabledMajor", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626UASLocal.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626UASLocal.setDescription('Disables or enables the alarm on the Major bus or Minor bus. Valid [i]nterface = 01-06')
uas7626OutofSyncLocal = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("enabledMinor", 2), ("enabledMajor", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626OutofSyncLocal.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626OutofSyncLocal.setDescription('Disables or enables the alarm on the Major bus or Minor bus. Valid [i]nterface = 01-06')
uas7626NoSealingCurrentLocal = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 6, 2, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("disabled", 1), ("enabledMinor", 2), ("enabledMajor", 3)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: uas7626NoSealingCurrentLocal.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626NoSealingCurrentLocal.setDescription('Disables or enables the alarm on the Major bus or Minor bus. Valid [i]nterface = 01-06')
uas7626CurrentTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 3), )
if mibBuilder.loadTexts: uas7626CurrentTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626CurrentTable.setDescription('The uas7626 Current table contains various statistics being collected for the current 15 minute interval.')
uas7626CurrentEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 3, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626CurrentIndex"))
if mibBuilder.loadTexts: uas7626CurrentEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626CurrentEntry.setDescription('An entry in the uas7626 Current table.')
uas7626CurrentIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 3, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626CurrentIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626CurrentIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626CurrentStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 3, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(10, 10)).setFixedLength(10)).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626CurrentStat.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626CurrentStat.setDescription('Returns a bitwise snapshot of the current 15 minute statistics: Octet 1 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Loop ID Octet 2 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 3 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Errored Seconds Octet 4 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 5 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Severely Errored Seconds Octet 6 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 7 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Unavailable Seconds Octet 8 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - 2^16 bit 1 - 2^15 bit 0 - 2^14 Octet 9 bit 7 - not used bit 6 - 2^13 bit 5 - 2^12 bit 4 - 2^11 bit 3 - 2^10 bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 10 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 FEBE (Far End Block Error Count) Valid [i]nterface = 01-06')
uas7626IntervalTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 4), )
if mibBuilder.loadTexts: uas7626IntervalTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626IntervalTable.setDescription('The uas7626 Interval table contains various statistics collected by each Interface over the last 16 15-minute intervals.')
uas7626IntervalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 4, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626IntervalIndex"), (0, "GDCUAS7626-MIB", "uas7626IntervalNumber"))
if mibBuilder.loadTexts: uas7626IntervalEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626IntervalEntry.setDescription('An entry in the Interval table.')
uas7626IntervalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 4, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626IntervalIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626IntervalIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626IntervalNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 4, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 16))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626IntervalNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626IntervalNumber.setDescription('The interval number (1-16). Valid [i]nterface = 01-06')
uas7626IntervalStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 4, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(11, 11)).setFixedLength(11)).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626IntervalStat.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626IntervalStat.setDescription('Returns a bitwise snapshot of the interval statistics. Octet 1 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Loop ID Octet 2 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Interval Octet 3 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 4 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Errored Seconds Octet 5 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 6 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Severely Errored Seconds Octet 7 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 8 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Unavailable Seconds Octet 9 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - 2^16 bit 1 - 2^15 bit 0 - 2^14 Octet 10 bit 7 - not used bit 6 - 2^13 bit 5 - 2^12 bit 4 - 2^11 bit 3 - 2^10 bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 11 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 FEBE (Far End Block Error Count) Valid [i]nterface = 01-06')
uas7626TotalTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 5), )
if mibBuilder.loadTexts: uas7626TotalTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626TotalTable.setDescription('The uas7626 Total Table contains the totals of the various statistics collected for the current 24 hour period for each interface.')
uas7626TotalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 5, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626TotalIndex"))
if mibBuilder.loadTexts: uas7626TotalEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626TotalEntry.setDescription('An entry in the uas7626 Total table.')
uas7626TotalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 5, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626TotalIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626TotalIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626TotalStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 5, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(14, 14)).setFixedLength(14)).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626TotalStat.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626TotalStat.setDescription('Returns a bitwise snapshot of the current 24 hour statistics. Octet 1 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Loop ID Octet 2 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - >65535 bit 1 - 2^15 bit 0 - 2^14 Octet 3 bit 7 - not used bit 6 - 2^13 bit 5 - 2^12 bit 4 - 2^11 bit 3 - 2^10 bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 4 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Errored Seconds Octet 5 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - >65535 bit 1 - 2^15 bit 0 - 2^14 Octet 6 bit 7 - not used bit 6 - 2^13 bit 5 - 2^12 bit 4 - 2^11 bit 3 - 2^10 bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 7 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Severely Errored Seconds Octet 8 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - >65535 bit 1 - 2^15 bit 0 - 2^14 Octet 9 bit 7 - not used bit 6 - 2^13 bit 5 - 2^12 bit 4 - 2^11 bit 3 - 2^10 bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 10 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Unavailable Seconds Octet 11 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - 2^23 bit 1 - 2^22 bit 0 - 2^21 Octet 12 bit 7 - not used bit 6 - 2^20 bit 5 - 2^19 bit 4 - 2^18 bit 3 - 2^17 bit 2 - 2^16 bit 1 - 2^15 bit 0 - 2^14 Octet 13 bit 7 - not used bit 6 - 2^13 bit 5 - 2^12 bit 4 - 2^11 bit 3 - 2^10 bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 14 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 FEBE (Far End Block Error Count) Valid [i]nterface = 01-06')
uas7626Recent24HrTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 6), )
if mibBuilder.loadTexts: uas7626Recent24HrTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626Recent24HrTable.setDescription('The Recent 24 Hour table contains the totals of the various statistics collected for the previous 24 hour period for each interface.')
uas7626Recent24HrEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 6, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626Recent24HrIndex"))
if mibBuilder.loadTexts: uas7626Recent24HrEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626Recent24HrEntry.setDescription('An entry in the Recent24Hr table.')
uas7626Recent24HrIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 6, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626Recent24HrIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626Recent24HrIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626Recent24HrStat = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 6, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(14, 14)).setFixedLength(14)).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626Recent24HrStat.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626Recent24HrStat.setDescription('Returns a bitwise snapshot of the recent 24 hour statistics. Octet 1 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Loop ID Octet 2 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - >65535 bit 1 - 2^15 bit 0 - 2^14 Octet 3 bit 7 - not used bit 6 - 2^13 bit 5 - 2^12 bit 4 - 2^11 bit 3 - 2^10 bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 4 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Errored Seconds Octet 5 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - >65535 bit 1 - 2^15 bit 0 - 2^14 Octet 6 bit 7 - not used bit 6 - 2^13 bit 5 - 2^12 bit 4 - 2^11 bit 3 - 2^10 bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 7 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Severely Errored Seconds Octet 8 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - >65535 bit 1 - 2^15 bit 0 - 2^14 Octet 9 bit 7 - not used bit 6 - 2^13 bit 5 - 2^12 bit 4 - 2^11 bit 3 - 2^10 bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 10 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 Unavailable Seconds Octet 11 bit 7 - not used bit 6 - future use bit 5 - future use bit 4 - future use bit 3 - future use bit 2 - 2^23 bit 1 - 2^22 bit 0 - 2^21 Octet 12 bit 7 - not used bit 6 - 2^20 bit 5 - 2^19 bit 4 - 2^18 bit 3 - 2^17 bit 2 - 2^16 bit 1 - 2^15 bit 0 - 2^14 Octet 13 bit 7 - not used bit 6 - 2^13 bit 5 - 2^12 bit 4 - 2^11 bit 3 - 2^10 bit 2 - 2^9 bit 1 - 2^8 bit 0 - 2^7 Octet 14 bit 7 - not used bit 6 - 2^6 bit 5 - 2^5 bit 4 - 2^4 bit 3 - 2^3 bit 2 - 2^2 bit 1 - 2^1 bit 0 - 2^0 FEBE (Far End Block Error Count) Valid [i]nterface = 01-06')
uas7626UnavailableTimeRegTable = MibTable((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 7), )
if mibBuilder.loadTexts: uas7626UnavailableTimeRegTable.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626UnavailableTimeRegTable.setDescription('The 7626 Unavailable Time Register table.')
uas7626UnavailableTimeRegEntry = MibTableRow((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 7, 1), ).setIndexNames((0, "GDCUAS7626-MIB", "uas7626UnavailableTimeRegIndex"), (0, "GDCUAS7626-MIB", "uas7626UnavailableTimeRegNumber"))
if mibBuilder.loadTexts: uas7626UnavailableTimeRegEntry.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626UnavailableTimeRegEntry.setDescription('An entry in the Unavailable Time Register table.')
uas7626UnavailableTimeRegIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 7, 1, 1), SCinstance()).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626UnavailableTimeRegIndex.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626UnavailableTimeRegIndex.setDescription('Integer value which uniquely identifies the UAS7626 to which this entry is applicable. SCinstance is defined to be SLDi where: S (byte value) - physical shelf slot location (01-32) L (byte value) - line number (01) D (byte value) - drop number (00) i (byte value) - loop or interface number (00-06)')
uas7626UnavailableTimeRegNumber = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 7, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 6))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626UnavailableTimeRegNumber.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626UnavailableTimeRegNumber.setDescription('A number between 1 and 6 identifying the Unavailable Time Register.')
uas7626UnavailableTimeRegStart = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 7, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626UnavailableTimeRegStart.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626UnavailableTimeRegStart.setDescription('Start time of one of the Unavailable Time Registers.')
uas7626UnavailableTimeRegStop = MibTableColumn((1, 3, 6, 1, 4, 1, 498, 12, 12, 5, 7, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 2147483647))).setMaxAccess("readonly")
if mibBuilder.loadTexts: uas7626UnavailableTimeRegStop.setStatus('mandatory')
if mibBuilder.loadTexts: uas7626UnavailableTimeRegStop.setDescription('Stop time of one of the Unavailable Time Registers.')
mibBuilder.exportSymbols("GDCUAS7626-MIB", uas7626SoftReset=uas7626SoftReset, uas7626MaintenanceTable=uas7626MaintenanceTable, uas7626ResetMajorAlarm=uas7626ResetMajorAlarm, uas7626IntervalNumber=uas7626IntervalNumber, uas7626UnavailableTimeRegStart=uas7626UnavailableTimeRegStart, uas7626UnavailableTimeRegEntry=uas7626UnavailableTimeRegEntry, uas7626OutofSyncAlm=uas7626OutofSyncAlm, uas7626MaintenanceEntry=uas7626MaintenanceEntry, uas7626VersionTable=uas7626VersionTable, uas7626Alarms=uas7626Alarms, uas7626ConfigEntry=uas7626ConfigEntry, uas7626DiagIndex=uas7626DiagIndex, uas7626TestResults=uas7626TestResults, uas7626IntervalStat=uas7626IntervalStat, uas7626Highway=uas7626Highway, uas7626TotalStat=uas7626TotalStat, gdc=gdc, uas7626DefaultInit=uas7626DefaultInit, uas7626IntervalEntry=uas7626IntervalEntry, uas7626SealingCurrentNoContAlm=uas7626SealingCurrentNoContAlm, uas7626OutofSyncLocal=uas7626OutofSyncLocal, uas7626CurrentIndex=uas7626CurrentIndex, uas7626DiagTable=uas7626DiagTable, uas7626SwitchActiveFirmware=uas7626SwitchActiveFirmware, uas7626ESLocal=uas7626ESLocal, uas7626Recent24HrIndex=uas7626Recent24HrIndex, uas7626UnavailableTimeRegTable=uas7626UnavailableTimeRegTable, uas7626DiagRxErrAlm=uas7626DiagRxErrAlm, uas7626LossofTransmitClockAlm=uas7626LossofTransmitClockAlm, bql2=bql2, uas7626MinorBERAlm=uas7626MinorBERAlm, uas7626PowerUpAlm=uas7626PowerUpAlm, uas7626MIBversion=uas7626MIBversion, uas7626AlarmConfigEntry=uas7626AlarmConfigEntry, uas7626DataRate=uas7626DataRate, uas7626TotalTable=uas7626TotalTable, uas7626ESAlm=uas7626ESAlm, uas7626LocalAlarmConfigEntry=uas7626LocalAlarmConfigEntry, uas7626UnavailableTimeRegIndex=uas7626UnavailableTimeRegIndex, uas7626=uas7626, uas7626AlarmConfigIndex=uas7626AlarmConfigIndex, uas7626ConfigTable=uas7626ConfigTable, uas7626CurrentEntry=uas7626CurrentEntry, uas7626VersionIndex=uas7626VersionIndex, uas7626NoResponseAlm=uas7626NoResponseAlm, uas7626StatLastInitialized=uas7626StatLastInitialized, uas7626Version=uas7626Version, uas7626IntervalIndex=uas7626IntervalIndex, uas7626Configuration=uas7626Configuration, uas7626LocalAlarmConfigTable=uas7626LocalAlarmConfigTable, uas7626ValidIntervals=uas7626ValidIntervals, uas7626ResetStatistics=uas7626ResetStatistics, uas7626TestSelection=uas7626TestSelection, uas7626CircuitID=uas7626CircuitID, uas7626TotalIndex=uas7626TotalIndex, uas7626IntervalTable=uas7626IntervalTable, uas7626AlarmStatus=uas7626AlarmStatus, uas7626LedStatus=uas7626LedStatus, uas7626Maintenance=uas7626Maintenance, uas7626UnavailableTimeRegNumber=uas7626UnavailableTimeRegNumber, uas7626DownloadingMode=uas7626DownloadingMode, uas7626StoredFirmwareStatus=uas7626StoredFirmwareStatus, uas7626AlarmConfig=uas7626AlarmConfig, uas7626ResetTestResults=uas7626ResetTestResults, uas7626AlarmConfigIdentifier=uas7626AlarmConfigIdentifier, uas7626LossOfClockLocal=uas7626LossOfClockLocal, uas7626AlarmConfigTable=uas7626AlarmConfigTable, uas7626StoredFirmwareRev=uas7626StoredFirmwareRev, uas7626MajorBERAlm=uas7626MajorBERAlm, uas7626UASLocal=uas7626UASLocal, uas7626SysUpTime=uas7626SysUpTime, uas7626ConfigIndex=uas7626ConfigIndex, uas7626Recent24HrStat=uas7626Recent24HrStat, uas7626LocalAlarmConfigIndex=uas7626LocalAlarmConfigIndex, uas7626ResetMinorAlarm=uas7626ResetMinorAlarm, uas7626AlarmCountThreshold=uas7626AlarmCountThreshold, uas7626Recent24HrTable=uas7626Recent24HrTable, uas7626Performance=uas7626Performance, uas7626Recent24HrEntry=uas7626Recent24HrEntry, uas7626UnavailableTimeRegStop=uas7626UnavailableTimeRegStop, 
uas7626NoSealingCurrentLocal=uas7626NoSealingCurrentLocal, uas7626VersionEntry=uas7626VersionEntry, uas7626DiagEntry=uas7626DiagEntry, uas7626UASAlm=uas7626UASAlm, uas7626Diagnostics=uas7626Diagnostics, uas7626TotalEntry=uas7626TotalEntry, uas7626CurrentStat=uas7626CurrentStat, uas7626MaintenanceLineIndex=uas7626MaintenanceLineIndex, uas7626ActiveFirmwareRev=uas7626ActiveFirmwareRev, uas7626TimeSlot=uas7626TimeSlot, uas7626CurrentTable=uas7626CurrentTable)
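# Illustrative usage (not part of the generated module): a minimal, hedged
# sketch of how a pysnmp application might load this compiled MIB. The
# search path below is an assumption and would need to point at the
# directory holding this file.
#
#   from pysnmp.smi import builder
#
#   mib_builder = builder.MibBuilder()
#   mib_builder.addMibSources(builder.DirMibSource('/path/to/compiled/mibs'))
#   mib_builder.loadModules('GDCUAS7626-MIB')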
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import glob
import os
import plistlib
import re
import shlex
import subprocess
import tempfile
import yaml
# Colours
BOLD = '\033[1m'
RED = '\033[91m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
BLUE = '\033[94m'
ENDC = '\033[0m'
# Null device for silencing subprocess output (avoids leaking an open file handle)
DEVNULL = subprocess.DEVNULL
def run(command, **kwargs):
if not kwargs.get('shell', False):
command = shlex.split(command)
return subprocess.run(command, encoding='utf-8', **kwargs)
def sudo(command, **kwargs):
return run(f'sudo {command}', **kwargs)
def logic_pro_x_content(sample_libraries_source, destination_basedir):
print()
print(f'{BOLD}Logic Pro X Content{ENDC}')
source = f'{sample_libraries_source}/Apple/Apple Logic Pro X Sound Library'
destination = f'{destination_basedir}/Apple/Logic Pro X Sound Library'
print()
print(f'{BLUE}Cleaning up any content on operating system drive{ENDC}')
for dir in [
'/Library/Application Support/GarageBand',
'/Library/Application Support/Logic',
'/Library/Audio/Apple Loops',
'/Library/Audio/Impulse Responses'
]:
print(f'- {dir}')
sudo(f'rm -rf "{dir}"')
print()
print(f'{BLUE}Creating directory structure on sample drive{ENDC}')
for dir in [
f'{destination}/GarageBand',
f'{destination}/Logic',
f'{destination}/Apple Loops',
f'{destination}/Impulse Responses'
]:
print(f'- {dir}')
run(f'mkdir -p "{dir}"')
print()
print(f'{BLUE}Building symbolic links to new directories{ENDC}')
for src, dest in [
(f'{destination}/GarageBand', '/Library/Application Support/GarageBand'),
(f'{destination}/Logic', '/Library/Application Support/Logic'),
(f'{destination}/Apple Loops', '/Library/Audio/Apple Loops'),
(f'{destination}/Impulse Responses', '/Library/Audio/Impulse Responses')
]:
print(f'- {src} -> {dest}')
sudo(f'ln -s "{src}" "{dest}"')
packages_proc = run(f'find "{source}" -type f -name "*.pkg"', stdout=subprocess.PIPE)
for package in packages_proc.stdout.strip().split('\n'):
print()
print(f'{BLUE}Running installer {os.path.basename(package)}{ENDC}')
sudo(f'installer -package "{package}" -target /')
print()
print(f'{GREEN}Installation of the Logic Pro X content complete{ENDC}')
def komplete_libraries(sample_libraries_source, destination_basedir):
print()
print(f'{BOLD}Komplete Libraries{ENDC}')
# Create a temporary plist for use in determining the installer option for library location
empty_plist_fd, empty_plist_name = tempfile.mkstemp()
with open(empty_plist_name, 'wb') as f:
plistlib.dump([], f)
# Allocate another temp file for the plist that will specify our install location
location_plist_fd, location_plist_name = tempfile.mkstemp()
source = f'{sample_libraries_source}/Native Instruments'
destination = f'{destination_basedir}/Native Instruments'
run(f'mkdir -p "{destination}"')
isos_proc = run(f'find "{source}" -type f -name "*.iso"', stdout=subprocess.PIPE)
for iso in isos_proc.stdout.strip().split('\n'):
print()
print(f'{BLUE}Mounting ISO image {os.path.basename(iso)}{ENDC}')
mount_proc = run(f'hdiutil mount "{iso}"', stdout=subprocess.PIPE)
mountpoint = mount_proc.stdout.strip().split('\t')[-1]
print(f'{BLUE}ISO mounted under {mountpoint}{ENDC}')
try:
packages = glob.glob(f'{mountpoint}/* Installer Mac.pkg')
if len(packages) != 1:
print(
f'{RED}Unable to determine the installer package for this library, '
f'skipping{ENDC}'
)
continue
package = packages[0]
print(f'{GREEN}Found installer package {package}{ENDC}')
# Obtain all installer choices as a plist
choices_proc = run(
f'sudo installer -showChoicesAfterApplyingChangesXML "{empty_plist_name}" '
f'-package "{package}" -target /', stdout=subprocess.PIPE
)
# Split the lines and crop output to only include the plist
# (sometimes the installer command includes extra lines before the plist)
choices_stdout_lines = choices_proc.stdout.strip().split('\n')
choices_plist_start_index = choices_stdout_lines.index(
'<?xml version="1.0" encoding="UTF-8"?>'
)
choices_plist_end_index = choices_stdout_lines.index('</plist>') + 1
choices_plist = '\n'.join(
choices_stdout_lines[choices_plist_start_index:choices_plist_end_index]
)
# Determine the installer option that we can override to set a custom install location
choice_library_identifier = None
for choice in plistlib.loads(choices_plist.encode('utf-8')):
if (
choice['choiceAttribute'] == 'customLocation' and
choice['attributeSetting'] == '/Users/Shared'
):
choice_library_identifier = choice['choiceIdentifier']
if not choice_library_identifier:
print(
f'{RED}Unable to identify install location choice identifier '
f'for this library, skipping{ENDC}'
)
continue
print(
f'{GREEN}Found install location choice identifier '
f'{choice_library_identifier}{ENDC}'
)
# Build the plist file containing our custom install location
with open(location_plist_name, 'wb') as f:
plistlib.dump([
{
'choiceIdentifier': choice_library_identifier,
'choiceAttribute': 'customLocation',
'attributeSetting': destination
}
], f)
print()
print(f'{BLUE}Running installer {os.path.basename(package)}{ENDC}')
sudo(
f'installer -applyChoiceChangesXML "{location_plist_name}" '
f'-package "{package}" -target /'
)
finally:
print()
print(f'{BLUE}Unmounting ISO image under {mountpoint}{ENDC}')
            run(f'hdiutil unmount "{mountpoint}"')
print(f'{BLUE}Hiding the Library directory on the sample drive{ENDC}')
run(f'chflags hidden "{destination_basedir}/Library"')
print()
print(f'{GREEN}Installation of the Komplete libraries complete{ENDC}')
def omnisphere_steam_library(music_software_source, destination_basedir):
print()
print(f'{BOLD}Spectrasonics STEAM Library{ENDC}')
home = os.path.expanduser('~')
source = f'{music_software_source}/Spectrasonics/Spectrasonics Omnisphere v2/STEAM/'
destination = f'{destination_basedir}/Spectrasonics'
steam_symlink = f'{home}/Library/Application Support/Spectrasonics/STEAM'
print()
print(f'{BLUE}Installing STEAM library into {destination}{ENDC}')
print()
run(f'mkdir -p "{destination}"')
run(
'rsync --archive --info=progress2 --human-readable --exclude=.DS_Store '
f'"{source}" "{destination}"'
)
print()
print(f'{BLUE}Correcting permissions for files and folders in {destination}{ENDC}')
run(f'find "{destination}" -type d -exec chmod 755 "{{}}" ;')
run(f'find "{destination}" -type f -exec chmod 644 "{{}}" ;')
print()
print(f'{BLUE}Cleaning up any existing STEAM symbolic link{ENDC}')
print(f'- {steam_symlink}')
run(f'mkdir -p "{os.path.dirname(steam_symlink)}"')
run(f'rm -f "{steam_symlink}"')
print()
print(f'{BLUE}Creating a STEAM symbolic link to the library path{ENDC}')
print(f'- {destination} -> {steam_symlink}')
run(f'ln -s "{destination}" "{steam_symlink}"')
print()
print(f'{GREEN}Installation of the Omnisphere STEAM library complete{ENDC}')
def kontakt_libraries_and_drum_samples(sample_libraries_source, destination_basedir):
print()
print(f'{BOLD}Kontakt Libraries & Drum Samples{ENDC}')
library_paths_proc = run(
f'find "{sample_libraries_source}" -maxdepth 2 -mindepth 2 -type d',
stdout=subprocess.PIPE
)
for library_path in library_paths_proc.stdout.strip().split('\n'):
# Find all ZIP and RAR files present in the downloaded library
archives_proc = run(
f'find "{library_path}" -type f ( -name "*.zip" -o -name "*.rar" )',
stdout=subprocess.PIPE
)
if not archives_proc.stdout:
continue
# Determine the vendor of the library
vendor = os.path.basename(os.path.dirname(library_path))
# Determine the library name and remove the vendor name to remove redundancy
library = os.path.basename(library_path)
if library.startswith(f'{vendor} '):
library = library[len(f'{vendor} '):]
# Build the destination base directory
destination = f'{destination_basedir}/{vendor}/{library}'
print()
print(f'{BLUE}Processing {vendor} {library}{ENDC}')
# If present, read the library config to override library variables
library_config_path = f'{library_path}/.library.yaml'
library_config = {}
if os.path.isfile(library_config_path):
print(f'{BLUE}Loading the library YAML config file{ENDC}')
with open(library_config_path) as f:
try:
                library_config = yaml.safe_load(f)  # safe_load avoids constructing arbitrary Python objects
except yaml.scanner.ScannerError:
print(
f'{RED}Unable to load the library config file due to a syntax error{ENDC}'
)
base_dir = library_config.get('base_dir', '')
installer = library_config.get('installer', None)
        # extract_subdirs maps archive paths to target sub-directories, so it must default to a dict
        extract_subdirs = library_config.get('extract_subdirs', {})
if base_dir and os.path.isdir(destination) and os.listdir(destination):
print(f'Moving contents from base directory of {base_dir}')
tempdir = tempfile.mkdtemp(prefix='samplelibs.', dir=destination_basedir)
run(f'mv "{destination}/"* "{tempdir}"', shell=True)
run(f'mkdir -p "{destination}/{base_dir}/"')
run(f'mv "{tempdir}/"* "{destination}/{base_dir}/"', shell=True)
run(f'rmdir "{tempdir}"')
# Track whether anything was needed to be done
performed_action = False
print(f'{BLUE}Extracting library archives{ENDC}')
for archive in archives_proc.stdout.strip().split('\n'):
# Check for multipart archives and only extract part 1
if (
                re.search(r'\.part[0-9]+\.rar$', archive) and
                not re.search(r'\.part0*1\.rar$', archive)
):
continue
performed_action = True
# Determine the destination (also taking into account sub-directories)
archive_relative = archive.replace(f'{library_path}/', '')
subdir = os.path.dirname(archive_relative)
if subdir == '.':
subdir = ''
if archive_relative in extract_subdirs:
subdir = os.path.join(subdir, base_dir, extract_subdirs[archive_relative])
if subdir:
destination_subdir = os.path.join(destination, subdir)
else:
destination_subdir = destination
run(f'mkdir -p "{destination_subdir}"')
# Extract the archive
if subdir:
print(f'{YELLOW}- {archive_relative} -> {subdir}{ENDC}')
else:
print(f'{YELLOW}- {archive_relative}{ENDC}')
if os.path.splitext(archive)[1] == '.rar':
run(
f'unrar x -o+ -x"__MACOSX" -x"*.DS_Store" "{archive}" "{destination_subdir}"',
stdout=DEVNULL
)
else:
run(f'unzip -q -o "{archive}" -x "__MACOSX/*" "*.DS_Store" -d "{destination_subdir}"')
if base_dir:
if os.path.isdir(f'{destination}/{base_dir}'):
print(f'{BLUE}Stripping base directory of {base_dir}{ENDC}')
run(f'mv "{destination}/{base_dir}/"* "{destination}/"', shell=True)
run(f'rmdir "{destination}/{base_dir}/"')
else:
print(f'{RED}The base directory {base_dir} does not exist{ENDC}')
if installer:
if os.path.isfile(f'{destination}/{installer}'):
performed_action = True
print(f'{BLUE}Running installer {installer}{ENDC}')
sudo(f'installer -package "{destination}/{installer}" -target /')
else:
print(f'{RED}The installer {installer} does not exist{ENDC}')
if performed_action:
print(f'{GREEN}Installation of {vendor} {library} complete{ENDC}')
else:
print(f'{RED}No action required for {vendor} {library}{ENDC}')
print()
print(f'{GREEN}Installation of Kontakt libraries and drum samples complete{ENDC}')
def main():
# Check if both the sample libraries source and destination have been defined
try:
from samples_config import (
SAMPLE_LIBRARIES_SOURCE, MUSIC_SOFTWARE_SOURCE, DESTINATION_BASEDIR
)
except ImportError:
print(
f'{RED}The SAMPLE_LIBRARIES_SOURCE, MUSIC_SOFTWARE_SOURCE or DESTINATION_BASEDIR '
f'variable was not defined{ENDC}'
)
exit(1)
print()
print(f'{BOLD}Sample Library Installer{ENDC}')
print()
print(f'{GREEN}Sample Library Source: {SAMPLE_LIBRARIES_SOURCE}{ENDC}')
print(f'{GREEN}Destination Base Path: {DESTINATION_BASEDIR}{ENDC}')
sudo_enabled = False
return_code = 0
try:
# Prompt the user for their sudo password (if required)
sudo_check_proc = sudo('-vn', stderr=DEVNULL)
if sudo_check_proc.returncode != 0:
print()
sudo('-v')
# Enable passwordless sudo for the run
sudo('sed -i -e "s/^%admin.*/%admin ALL=(ALL) NOPASSWD: ALL/" /etc/sudoers')
sudo_enabled = True
# Install the various sample libraries
logic_pro_x_content(
sample_libraries_source=SAMPLE_LIBRARIES_SOURCE,
destination_basedir=DESTINATION_BASEDIR
)
komplete_libraries(
sample_libraries_source=SAMPLE_LIBRARIES_SOURCE,
destination_basedir=DESTINATION_BASEDIR
)
omnisphere_steam_library(
music_software_source=MUSIC_SOFTWARE_SOURCE,
destination_basedir=DESTINATION_BASEDIR
)
kontakt_libraries_and_drum_samples(
sample_libraries_source=SAMPLE_LIBRARIES_SOURCE,
destination_basedir=DESTINATION_BASEDIR
)
except KeyboardInterrupt:
print(
f'{RED}Aborting sample library installation, this could leave a '
f'library incomplete{ENDC}'
)
return_code = 1
finally:
# Disable passwordless sudo after the installation has completed or has been cancelled
if sudo_enabled:
sudo('sed -i -e "s/^%admin.*/%admin ALL=(ALL) ALL/" /etc/sudoers')
print()
exit(return_code)
if __name__ == '__main__':
main()
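
# Illustrative samples_config.py consumed by main() above (a hypothetical
# sketch: only the three variable names are required, the paths below are
# assumptions):
#
#   SAMPLE_LIBRARIES_SOURCE = '/Volumes/Backup/Sample Libraries'
#   MUSIC_SOFTWARE_SOURCE = '/Volumes/Backup/Music Software'
#   DESTINATION_BASEDIR = '/Volumes/Samples'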
|
nilq/baby-python
|
python
|
from django.utils.safestring import mark_safe
from iommi import Fragment
from iommi.asset import Asset
from iommi.style import (
Style,
)
from iommi.style_base import base
from iommi.style_font_awesome_4 import font_awesome_4
navbar_burger_click_js = Fragment(mark_safe("""\
<script>
$(document).ready(function() {
// Check for click events on the navbar burger icon
$(".navbar-burger").click(function() {
// Toggle the "is-active" class on both the "navbar-burger" and the "navbar-menu"
$(".navbar-burger").toggleClass("is-active");
$(".navbar-menu").toggleClass("is-active");
});
});
</script>
"""))
bulma_base = Style(
base,
assets=dict(
css=Asset.css(
attrs__href='https://cdn.jsdelivr.net/npm/bulma@0.9.1/css/bulma.min.css',
),
navbar_burger_click_js=navbar_burger_click_js,
),
Header__attrs__class={
'title': True,
'is-1': lambda fragment, **_: fragment.tag == 'h1',
'is-2': lambda fragment, **_: fragment.tag == 'h2',
'is-3': lambda fragment, **_: fragment.tag == 'h3',
'is-4': lambda fragment, **_: fragment.tag == 'h4',
'is-5': lambda fragment, **_: fragment.tag == 'h5',
'is-6': lambda fragment, **_: fragment.tag == 'h6',
},
Container=dict(
tag='div',
attrs__class={
'main': True,
'container': True,
},
),
Field=dict(
shortcuts=dict(
boolean=dict(
input__attrs__class__input=False,
label__attrs__class__checkbox=True,
label__attrs__class__label=False,
),
textarea=dict(
input__attrs__class__input=False,
input__attrs__class__textarea=True,
),
radio=dict(
input__attrs__class__input=False,
),
),
attrs__class__field=True,
template='iommi/form/bulma/field.html',
label__attrs__class__label=True,
input__attrs__class__input=True,
input__attrs__class={
'is-danger': lambda field, **_: bool(field.errors),
},
errors__attrs__class={
'is-danger': True,
'help': True,
},
help__attrs__class=dict(
help=True,
)
),
Actions=dict(
tag="div",
attrs__class=dict(links=False, buttons=True),
),
Action=dict(
shortcuts=dict(
# In bulma the most neutral button styling is button, which
# gets you a button that's just an outline.
button__attrs__class={
'button': True,
},
delete__attrs__class={
'is-danger': True,
},
primary__attrs__class={
'is-primary': True,
},
),
),
Table={
'attrs__class__table': True,
'attrs__class__is-fullwidth': True,
'attrs__class__is-hoverable': True,
},
Column=dict(
shortcuts=dict(
select=dict(
header__attrs__title='Select all',
),
number=dict(
cell__attrs__class={
'has-text-right': True,
},
header__attrs__class={
'has-text-right': True,
},
),
),
),
Query__form=dict(
iommi_style='bulma_query_form',
),
Query__form_container=dict(
tag='span',
attrs__class={
'is-horizontal': True,
'field': True,
},
),
Menu=dict(
attrs__class__navbar=True,
tag='nav',
),
MenuItem__a__attrs__class={'navbar-item': True},
MenuItem__active_class='is-active',
DebugMenu=dict(
tag='aside',
attrs__class={
'navbar': False,
'menu': True,
},
),
Paginator=dict(
template='iommi/table/bulma/paginator.html',
),
Errors__attrs__class={
'help': True,
'is-danger': True,
},
)
bulma = Style(
bulma_base,
font_awesome_4,
)
bulma_query_form = Style(
bulma,
Field=dict(
attrs__class={
'mr-4': True,
},
label__attrs__class={
'mt-2': True,
'mr-1': True,
},
),
)
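# To make this the project-wide style in a Django project, iommi is
# typically pointed at it from settings (a hedged example; the exact
# wiring depends on the iommi version in use):
#
#   IOMMI_DEFAULT_STYLE = 'bulma'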
|
nilq/baby-python
|
python
|
from string import Template
from django.db import models
from mozdns.models import MozdnsRecord, LabelDomainMixin
from mozdns.validation import validate_txt_data
import reversion
class TXT(MozdnsRecord, LabelDomainMixin):
"""
>>> TXT(label=label, domain=domain, txt_data=txt_data)
"""
id = models.AutoField(primary_key=True)
txt_data = models.TextField(
help_text="The text data for this record.",
validators=[validate_txt_data]
)
search_fields = ("fqdn", "txt_data")
template = ("{bind_name:$lhs_just} {ttl_} {rdclass:$rdclass_just} "
"{rdtype:$rdtype_just} {txt_data:$rhs_just}")
@classmethod
def get_api_fields(cls):
data = super(TXT, cls).get_api_fields() + ['txt_data']
return data
@property
def rdtype(self):
return 'TXT'
    def bind_render_record(self, pk=False, show_ttl=False):
        template = Template(self.template).substitute(**self.justs)
        bind_name = self.fqdn + "."
        txt_lines = self.txt_data.split('\n')
        if len(txt_lines) > 1:
            # BIND represents multi-line TXT data as a parenthesized
            # list of quoted strings, one per line.
            txt_data = '('
            for line in txt_lines:
                txt_data += '"{0}"\n'.format(line)
            txt_data = txt_data.strip('\n') + ')'
        else:
            txt_data = '"{0}"'.format(self.txt_data)
if show_ttl:
ttl_ = self.ttl
else:
ttl_ = '' if self.ttl is None else self.ttl
return template.format(
bind_name=bind_name, ttl_=ttl_, rdtype=self.rdtype, rdclass='IN',
txt_data=txt_data
)
class Meta:
db_table = "txt"
# unique_together = ("domain", "label", "txt_data")
# TODO
# _mysql_exceptions.OperationalError: (1170, "BLOB/TEXT column
# "txt_data" used in key specification without a key length")
# Fix that ^
def details(self):
return (
("FQDN", self.fqdn),
("Record Type", "TXT"),
("Text", self.txt_data)
)
reversion.register(TXT)
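# Illustrative bind_render_record() output for a hypothetical record with
# fqdn 'foo.example.com' and two lines of txt_data (the name and TTL here
# are assumptions, not values from this module):
#
#   foo.example.com.  3600  IN  TXT  ("line one"
#   "line two")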
|
nilq/baby-python
|
python
|
from django.contrib import admin
from userCalendar.models import Locacao, Checkin, Checkout, Limpeza
# Register your models here.
# Register the models with the admin site so they can be managed there.
admin.site.register(Locacao)
admin.site.register(Checkin)
admin.site.register(Checkout)
admin.site.register(Limpeza)
|
nilq/baby-python
|
python
|
from typing import TypeVar, MutableMapping
import trio
KT = TypeVar('KT')
VT = TypeVar('VT')
class AsyncDictionary(MutableMapping[KT, VT]):
"""MutableMapping with waitable get and pop.
TODO: exception support using outcome package
"""
def __init__(self, *args, **kwargs):
self._store = dict(*args, **kwargs)
self._pending = {} # key: Event
def __getitem__(self, key):
return self._store[key]
async def get_wait(self, key: KT) -> VT:
"""Return value of given key, blocking until populated."""
if key in self._store:
return self._store[key]
if key not in self._pending:
self._pending[key] = trio.Event()
await self._pending[key].wait()
return self._store[key]
async def pop_wait(self, key: KT) -> VT:
"""Remove key and return its value, blocking until populated."""
value = await self.get_wait(key)
del self._store[key]
return value
def is_waiting(self, key: KT) -> bool:
"""Return True if there is a task waiting for key."""
return key in self._pending
def __setitem__(self, key, value):
self._store[key] = value
if key in self._pending:
self._pending.pop(key).set()
def __delitem__(self, key):
del self._store[key]
def __iter__(self):
return iter(self._store)
def __len__(self):
return len(self._store)
def __repr__(self):
return repr(self._store)
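

# Illustrative usage (a minimal sketch, not part of the original module):
# one task blocks on get_wait() while another populates the key.
if __name__ == '__main__':
    async def _demo():
        mapping = AsyncDictionary()

        async def producer():
            await trio.sleep(0.1)
            mapping['answer'] = 42  # wakes the pending get_wait() below

        async with trio.open_nursery() as nursery:
            nursery.start_soon(producer)
            print(await mapping.get_wait('answer'))  # prints 42

    trio.run(_demo)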
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import setuptools
# Import the package once so its metadata attributes can be reused below.
mp3sum = __import__('mp3sum')

setuptools.setup(
    name=mp3sum.__name__,
    description=mp3sum.__description__,
    url=mp3sum.__url__,
    version=mp3sum.__version__,
    author=mp3sum.__author__,
    author_email=mp3sum.__author_email__,
    license='MIT',
    keywords='audio mp3 crc checksum integrity musiccrc lame',
    packages=[mp3sum.__name__],
    include_package_data=True,
    entry_points={
        'console_scripts': [
            '{0} = {0}.__main__:main'.format(mp3sum.__name__),
        ],
    },
)
|
nilq/baby-python
|
python
|
class Student:
def __init__(self, name, school):
self.name = name
self.school = school
self.marks = []
def average(self):
return sum(self.marks) / len(self.marks)
@classmethod
def friend(cls, origin, friend_name, *args, **kwargs):
        # Unpack the extra positional/keyword arguments so subclasses such as
        # WorkingStudent receive them as real parameters (salary, job_title).
        return cls(friend_name, origin.school, *args, **kwargs)
class WorkingStudent(Student):
def __init__(self, name, school, salary, job_title):
super().__init__(name, school)
self.salary = salary
self.job_title = job_title
anna = WorkingStudent('Anna', 'Oxford', 3500.00, 'Software Developer')
friend = WorkingStudent.friend(anna, 'John', 17.5, 'Software Developer')
print(friend.name)
print(friend.salary)
print(anna.salary)
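# With friend() unpacking *args/**kwargs as above, this prints:
#   John
#   17.5
#   3500.0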
|
nilq/baby-python
|
python
|
age = 37
name = 'Bob'
gender = 'male'
hobby = 'cycling'
timeofday = 'at night'
typeofbike = 'giant'
country = 'ireland'
sizeofwheels = '700'
print('{} {} {} was {} when he was {}'.format(timeofday,gender,name,hobby,age))
print('the sun is shining in the sky during the day')
print('{} flew to {} then bought a {} bike. It had {} wheels'.format(name, country, typeofbike, sizeofwheels))
print("700 point what? exactly how wide were {}'s wheels".format(name))
print("Bob's wheels were {0:.3f}".format(700))
print('how wide were they?')
print('they were {0:.3f} CM'.format(25))
|
nilq/baby-python
|
python
|
# Copyright 2016 Huawei Technologies Co. Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import requests
import traceback
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from networking_huawei._i18n import _LE
LOG = logging.getLogger(__name__)
class RestClient(object):
    # Initializes and reads the base parameters from the configuration file
def __init__(self):
self.auth = (cfg.CONF.huawei_ac_config.username,
cfg.CONF.huawei_ac_config.password)
self.timeout = float(cfg.CONF.huawei_ac_config.request_timeout)
self.timeout_retry = int(cfg.CONF.huawei_ac_config.timeout_retry)
self.retry_count = int(cfg.CONF.huawei_ac_config.token_retry)
# Send the JSON message to the controller
def send(self, host, port, method, url,
resrc_id, body, callback=None):
result = {}
if method.upper() == 'GET' or method.upper() == 'DELETE' \
or method.upper() == 'PUT':
url = '%s%s%s' % (url, "/", resrc_id)
params = jsonutils.dumps(body)
headers = {"Content-type": "application/json",
"Accept": "application/json"}
LOG.debug('Send the request information, method: %s, url: %s, '
'headers: %s, data:%s', method, url, headers, params)
ret = self.process_request(method, self.auth, url, headers, params)
if ("Timeout Exceptions" == ret) or ("Exceptions" == ret):
LOG.error(_LE("Request to AC failed, error: %s"), ret)
result['response'] = None
result['status'] = -1
result['errorCode'] = None
result['reason'] = None
return result
LOG.debug("AC request response, status_code: %s, content: %s, "
"headers: %s", ret.status_code,
ret.content, ret.headers)
res_code = int(ret.status_code)
res_content = ret.content
try:
if requests.codes.ok <= res_code < requests.codes.multiple_choices:
LOG.debug('AC processed request successfully.')
res = self.fix_json(res_content)
LOG.debug("Send: response body is %s", res)
if not res_content.strip():
result['response'] = None
result['status'] = ret.status_code
result['errorCode'] = None
result['reason'] = None
else:
res1 = jsonutils.loads(res)
result['response'] = res1['result']
result['status'] = ret.status_code
result['errorCode'] = res1['errorCode']
result['reason'] = res1['errorMsg']
else:
LOG.error(_LE('AC process request failed.'))
if self.retry_count > 0 and \
requests.codes.unauthorized == res_code:
                    LOG.debug('Retrying the request to AC')
                    self.retry_count -= 1
                    result = self.send(host, port, method, url,
                                       resrc_id, body, callback)
                    return result
else:
LOG.error(_LE('Max retry of request to AC has reached.'))
result['response'] = None
result['status'] = ret.status_code
result['errorCode'] = None
result['reason'] = None
except Exception:
result['response'] = ''
result['status'] = ret.status_code
result['reason'] = -1
result['errorCode'] = -1
            raise
if callback is not None:
callback(result['errorCode'], result['reason'], result['status'])
else:
LOG.debug("Call back for the REST is not set.")
return result
def process_request(self, method, auth, url, headers, data):
timeout_retry = self.timeout_retry
ret = None
temp_ret = None
while True:
try:
if (method == 'get') or (method == 'GET'):
ret = requests.request(method, url=url, headers=headers,
auth=auth,
verify=False, timeout=self.timeout)
else:
ret = requests.request(method, url=url, headers=headers,
data=data, auth=auth, verify=False,
timeout=self.timeout)
break
except requests.exceptions.Timeout:
temp_ret = "Timeout Exceptions"
LOG.error(_LE("Exception: AC time out, "
"traceback: %s"), traceback.format_exc())
timeout_retry -= 1
if timeout_retry < 0:
ret = "Timeout Exceptions"
break
except Exception:
LOG.error(_LE("Exception: AC exception, traceback: %s"),
traceback.format_exc())
timeout_retry -= 1
if timeout_retry < 0:
if temp_ret == "Timeout Exceptions":
ret = "Timeout Exceptions"
else:
ret = "Exceptions"
break
if ("Timeout Exceptions" == ret) or ("Exceptions" == ret):
            LOG.error(_LE('Request to AC failed, error code: %s'), ret)
return ret
# Internal function to fix the JSON parameters
    def fix_json(self, json_str):
        return json_str.replace(r'"result":null', r'"result":"null"')
    # Check whether the http response is successful or not
def http_success(self, http):
LOG.debug(http)
status = int(http['status'])
if (status == requests.codes.ok or
status == requests.codes.not_modified) \
and http['response'] is not None:
return True
else:
return False
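# Hedged usage sketch (hypothetical endpoint and resource id; the real
# credentials and timeouts come from the huawei_ac_config options):
#   client = RestClient()
#   result = client.send('192.0.2.10', 8443, 'GET',
#                        'https://192.0.2.10:8443/controller/v2/networks',
#                        'net-1', body={})
#   if client.http_success(result):
#       process(result['response'])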
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import numpy as np
from block import Block
class Stage(object):
def __init__(self):
self.field = (10 + 2, 20 + 2)
self.board = np.zeros((self.field[1], self.field[0]))
self.generate_wall()
self.bl = Block()
self.moving_block = np.zeros((4, 4))
self.next_block = np.zeros((4, 4))
self.position = []
self.rotation = 0
self.init_posture()
self.fixed_board = []
self.save_board()
self.past_board = []
self.flag_move = False
self.over = False
def init_posture(self):
        '''
        A block is represented as a 4x4 list.
        The actual block cells start from the second row of the list,
        so the initial y position is set to -2.
        '''
self.position = np.array([4, -2])
self.rotation = 0
def generate_wall(self):
for col in range(self.field[0]):
self.board[self.field[1] - 1][col] = -1
if col <= 3 or col >= 8:
self.board[0][col] = -1
for row in range(self.field[1]):
self.board[row][self.field[0] - 1] = -1
self.board[row][0] = -1
def select_block(self, block_num):
return np.array(self.bl.list[block_num])
def save_board(self):
self.fixed_board = np.array(self.board)
def update_block(self):
self.past_board = np.array(self.board)
self.board = np.array(self.fixed_board)
x = self.position[0]
y = self.position[1]
for col in range(4):
for row in range(4):
if self.moving_block[row][col] != 0:
self.board[y + row][x + col] = self.moving_block[row][col]
def clear_block(self, x, y):
for col in range(4):
for row in range(4):
self.board[y + row][x + col] = 0
def rotate(self, block, rotation):
for count in range(rotation):
block = np.copy(self.bl.rotation(block))
return block
def move_block(self, x, y):
self.position = [x, y]
self.update_block()
def check_movable(self, block, position, next_x=0, next_y=0, next_rot=0):
x = position[0] + next_x
y = position[1] + next_y
tmp_block = self.rotate(block, next_rot)
for col in range(4):
for row in range(4):
if tmp_block[row][col] != 0 and \
self.fixed_board[y + row][x + col] != 0:
return False
return True
def check_rotatable(self):
block = np.array(self.bl.rotation(self.moving_block))
return self.check_movable(block, self.position, 0, 0)
def get_moving_block_num(self):
num = self.moving_block[self.moving_block != 0][0]
return int(num)
def remove_lined_blocks(self):
for row in range(1, self.field[1]-1):
line = self.fixed_board[row][:]
if np.count_nonzero(line) == self.field[0]:
self.fixed_board = np.delete(self.fixed_board, row, 0)
self.fixed_board = np.insert(self.fixed_board, 1, 0, 0)
self.fixed_board[1][0] = self.fixed_board[1][-1] = -1
def judge_gameover(self):
line = self.fixed_board[1][4:-4]
if np.count_nonzero(line) > 0 and self.position[1] == -2:
return True
return False
def main():
st = Stage()
st.moving_block = st.select_block(5)
st.move_block(4, 0)
print(st.board)
st.rotate(st.moving_block, 1)
print(st.moving_block)
st.move_block(4, 1)
print(st.board)
if st.check_movable(st.moving_block, st.position, -4, 0) is True:
print('can move the block')
else:
print('cannot move the block')
if st.check_rotatable() is True:
print('can rotate the block')
else:
print('cannot rotate the block')
st.remove_lined_blocks()
print(st.board)
st.judge_gameover()
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
"""
Problem repository management for the shell manager.
"""
import gzip
import logging
from os import makedirs
from os.path import exists, isdir, join
from shutil import copy2
import spur
from shell_manager.util import FatalException
logger = logging.getLogger(__name__)
def update_repo(args, config):
"""
Main entrypoint for repo update operations.
"""
if args.repo_type == "local":
local_update(args.repository, args.package_paths)
else:
remote_update(args.repository, args.package_paths)
def remote_update(repo_uri, deb_paths=None):
"""
Pushes packages to a remote deb repository.
Args:
repo_uri: location of the repository.
deb_paths: list of problem deb paths to copy.
"""
if deb_paths is None:
deb_paths = []
logger.error("Currently not implemented -- sorry!")
raise FatalException
def local_update(repo_path, deb_paths=None):
"""
Updates a local deb repository by copying debs and running scanpackages.
Args:
repo_path: the path to the local repository.
        deb_paths: list of problem deb paths to copy.
"""
if deb_paths is None:
deb_paths = []
if not exists(repo_path):
logger.info("Creating repository at '%s'.", repo_path)
makedirs(repo_path)
elif not isdir(repo_path):
logger.error("Repository '%s' is not a directory!", repo_path)
raise FatalException
    for deb_path in deb_paths:
        copy2(deb_path, repo_path)
shell = spur.LocalShell()
result = shell.run(["dpkg-scanpackages", ".", "/dev/null"], cwd=repo_path)
packages_path = join(repo_path, "Packages.gz")
with gzip.open(packages_path, "wb") as packages:
packages.write(result.output)
logger.info("Repository '%s' updated successfully. Copied %d packages.",
repo_path, len(deb_paths))
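# Hedged usage sketch (hypothetical argparse namespace): update_repo expects
# args.repo_type in {"local", "remote"}, args.repository (a path or URI),
# and args.package_paths (a list of .deb files), e.g.:
#   local_update('/srv/repo', ['problems/foo_1.0_all.deb'])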
|
nilq/baby-python
|
python
|
"""
Combine results files generated by `attention_networks_testing.py` on separate
GPUs.
"""
import os
import pandas as pd
from ..utils.paths import path_results
type_category_set = input('Category-set type in {diff, sem, sim, size}: ')
version_weights = input('Version number (weights): ')
id_category_set = f'{type_category_set}_v{version_weights}'
filenames = sorted([f for f in os.listdir(path_results) if id_category_set in f])
df = pd.concat(
[pd.read_csv(path_results/f, index_col=0) for f in filenames],
ignore_index=True)
df.to_csv(path_results/f'{id_category_set}_results.csv')
|
nilq/baby-python
|
python
|
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: fmgr_secprof_voip
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: VOIP security profiles in FMG
description:
- Manage VOIP security profiles in FortiManager via API
options:
adom:
description:
- The ADOM the configuration should belong to.
required: false
default: root
mode:
description:
- Sets one of three modes for managing the object.
- Allows use of soft-adds instead of overwriting existing values
choices: ['add', 'set', 'delete', 'update']
required: false
default: add
name:
description:
- Profile name.
required: false
comment:
description:
- Comment.
required: false
sccp:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
sccp_block_mcast:
description:
- Enable/disable block multicast RTP connections.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sccp_log_call_summary:
description:
- Enable/disable log summary of SCCP calls.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sccp_log_violations:
description:
- Enable/disable logging of SCCP violations.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sccp_max_calls:
description:
- Maximum calls per minute per SCCP client (max 65535).
required: false
sccp_status:
description:
- Enable/disable SCCP.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sccp_verify_header:
description:
- Enable/disable verify SCCP header content.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
sip_ack_rate:
description:
- ACK request rate limit (per second, per policy).
required: false
sip_block_ack:
description:
- Enable/disable block ACK requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_bye:
description:
- Enable/disable block BYE requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_cancel:
description:
- Enable/disable block CANCEL requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_geo_red_options:
description:
- Enable/disable block OPTIONS requests, but OPTIONS requests still notify for redundancy.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_info:
description:
- Enable/disable block INFO requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_invite:
description:
- Enable/disable block INVITE requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_long_lines:
description:
- Enable/disable block requests with headers exceeding max-line-length.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_message:
description:
- Enable/disable block MESSAGE requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_notify:
description:
- Enable/disable block NOTIFY requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_options:
description:
      - Enable/disable block OPTIONS requests, including OPTIONS requests used as redundancy notifications.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_prack:
description:
- Enable/disable block prack requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_publish:
description:
- Enable/disable block PUBLISH requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_refer:
description:
- Enable/disable block REFER requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_register:
description:
- Enable/disable block REGISTER requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_subscribe:
description:
- Enable/disable block SUBSCRIBE requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_unknown:
description:
- Block unrecognized SIP requests (enabled by default).
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_block_update:
description:
- Enable/disable block UPDATE requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_bye_rate:
description:
- BYE request rate limit (per second, per policy).
required: false
sip_call_keepalive:
description:
- Continue tracking calls with no RTP for this many minutes.
required: false
sip_cancel_rate:
description:
- CANCEL request rate limit (per second, per policy).
required: false
sip_contact_fixup:
description:
      - Fix up the Contact header even if the contact's IP/port doesn't match the session's IP/port.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_hnt_restrict_source_ip:
description:
- Enable/disable restrict RTP source IP to be the same as SIP source IP when HNT is enabled.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_hosted_nat_traversal:
description:
- Hosted NAT Traversal (HNT).
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_info_rate:
description:
- INFO request rate limit (per second, per policy).
required: false
sip_invite_rate:
description:
- INVITE request rate limit (per second, per policy).
required: false
sip_ips_rtp:
description:
- Enable/disable allow IPS on RTP.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_log_call_summary:
description:
- Enable/disable logging of SIP call summary.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_log_violations:
description:
- Enable/disable logging of SIP violations.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_malformed_header_allow:
description:
- Action for malformed Allow header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_call_id:
description:
- Action for malformed Call-ID header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_contact:
description:
- Action for malformed Contact header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_content_length:
description:
- Action for malformed Content-Length header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_content_type:
description:
- Action for malformed Content-Type header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_cseq:
description:
- Action for malformed CSeq header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_expires:
description:
- Action for malformed Expires header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_from:
description:
- Action for malformed From header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_max_forwards:
description:
- Action for malformed Max-Forwards header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_p_asserted_identity:
description:
- Action for malformed P-Asserted-Identity header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_rack:
description:
- Action for malformed RAck header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_record_route:
description:
- Action for malformed Record-Route header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_route:
description:
- Action for malformed Route header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_rseq:
description:
- Action for malformed RSeq header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_a:
description:
- Action for malformed SDP a line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_b:
description:
- Action for malformed SDP b line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_c:
description:
- Action for malformed SDP c line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_i:
description:
- Action for malformed SDP i line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_k:
description:
- Action for malformed SDP k line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_m:
description:
- Action for malformed SDP m line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_o:
description:
- Action for malformed SDP o line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_r:
description:
- Action for malformed SDP r line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_s:
description:
- Action for malformed SDP s line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_t:
description:
- Action for malformed SDP t line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_v:
description:
- Action for malformed SDP v line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_sdp_z:
description:
- Action for malformed SDP z line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_to:
description:
- Action for malformed To header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_header_via:
description:
- Action for malformed VIA header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_malformed_request_line:
description:
- Action for malformed request line.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_max_body_length:
description:
- Maximum SIP message body length (0 meaning no limit).
required: false
sip_max_dialogs:
description:
- Maximum number of concurrent calls/dialogs (per policy).
required: false
sip_max_idle_dialogs:
description:
      - Maximum number of established but idle dialogs to retain (per policy).
required: false
sip_max_line_length:
description:
- Maximum SIP header line length (78-4096).
required: false
sip_message_rate:
description:
- MESSAGE request rate limit (per second, per policy).
required: false
sip_nat_trace:
description:
- Enable/disable preservation of original IP in SDP i line.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_no_sdp_fixup:
description:
- Enable/disable no SDP fix-up.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_notify_rate:
description:
- NOTIFY request rate limit (per second, per policy).
required: false
sip_open_contact_pinhole:
description:
- Enable/disable open pinhole for non-REGISTER Contact port.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_open_record_route_pinhole:
description:
- Enable/disable open pinhole for Record-Route port.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_open_register_pinhole:
description:
- Enable/disable open pinhole for REGISTER Contact port.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_open_via_pinhole:
description:
- Enable/disable open pinhole for Via port.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_options_rate:
description:
- OPTIONS request rate limit (per second, per policy).
required: false
sip_prack_rate:
description:
- PRACK request rate limit (per second, per policy).
required: false
sip_preserve_override:
description:
      - Override the i line to preserve original IPs (default: append).
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_provisional_invite_expiry_time:
description:
- Expiry time for provisional INVITE (10 - 3600 sec).
required: false
sip_publish_rate:
description:
- PUBLISH request rate limit (per second, per policy).
required: false
sip_refer_rate:
description:
- REFER request rate limit (per second, per policy).
required: false
sip_register_contact_trace:
description:
- Enable/disable trace original IP/port within the contact header of REGISTER requests.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_register_rate:
description:
- REGISTER request rate limit (per second, per policy).
required: false
sip_rfc2543_branch:
description:
      - Enable/disable support for a Via branch compliant with RFC 2543.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_rtp:
description:
      - Enable/disable creating pinholes for RTP traffic to traverse the firewall.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_ssl_algorithm:
description:
- Relative strength of encryption algorithms accepted in negotiation.
- choice | high | High encryption. Allow only AES and ChaCha.
- choice | medium | Medium encryption. Allow AES, ChaCha, 3DES, and RC4.
- choice | low | Low encryption. Allow AES, ChaCha, 3DES, RC4, and DES.
required: false
choices: ["high", "medium", "low"]
sip_ssl_auth_client:
description:
- Require a client certificate and authenticate it with the peer/peergrp.
required: false
sip_ssl_auth_server:
description:
- Authenticate the server's certificate with the peer/peergrp.
required: false
sip_ssl_client_certificate:
description:
      - Name of the certificate to offer to the server if requested.
required: false
sip_ssl_client_renegotiation:
description:
- Allow/block client renegotiation by server.
- choice | allow | Allow a SSL client to renegotiate.
- choice | deny | Abort any SSL connection that attempts to renegotiate.
- choice | secure | Reject any SSL connection that does not offer a RFC 5746 Secure Renegotiation Indication.
required: false
choices: ["allow", "deny", "secure"]
sip_ssl_max_version:
description:
- Highest SSL/TLS version to negotiate.
- choice | ssl-3.0 | SSL 3.0.
- choice | tls-1.0 | TLS 1.0.
- choice | tls-1.1 | TLS 1.1.
- choice | tls-1.2 | TLS 1.2.
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
sip_ssl_min_version:
description:
- Lowest SSL/TLS version to negotiate.
- choice | ssl-3.0 | SSL 3.0.
- choice | tls-1.0 | TLS 1.0.
- choice | tls-1.1 | TLS 1.1.
- choice | tls-1.2 | TLS 1.2.
required: false
choices: ["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]
sip_ssl_mode:
description:
- SSL/TLS mode for encryption & decryption of traffic.
- choice | off | No SSL.
- choice | full | Client to FortiGate and FortiGate to Server SSL.
required: false
choices: ["off", "full"]
sip_ssl_pfs:
description:
- SSL Perfect Forward Secrecy.
- choice | require | PFS mandatory.
- choice | deny | PFS rejected.
- choice | allow | PFS allowed.
required: false
choices: ["require", "deny", "allow"]
sip_ssl_send_empty_frags:
description:
- Send empty fragments to avoid attack on CBC IV (SSL 3.0 & TLS 1.0 only).
- choice | disable | Do not send empty fragments.
- choice | enable | Send empty fragments.
required: false
choices: ["disable", "enable"]
sip_ssl_server_certificate:
description:
      - Name of the certificate returned to the client in every SSL connection.
required: false
sip_status:
description:
- Enable/disable SIP.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_strict_register:
description:
      - Enable/disable allowing only the registrar to connect.
- choice | disable | Disable status.
- choice | enable | Enable status.
required: false
choices: ["disable", "enable"]
sip_subscribe_rate:
description:
- SUBSCRIBE request rate limit (per second, per policy).
required: false
sip_unknown_header:
description:
- Action for unknown SIP header.
- choice | pass | Bypass malformed messages.
- choice | discard | Discard malformed messages.
- choice | respond | Respond with error code.
required: false
choices: ["pass", "discard", "respond"]
sip_update_rate:
description:
- UPDATE request rate limit (per second, per policy).
required: false
'''
EXAMPLES = '''
- name: DELETE Profile
community.fortios.fmgr_secprof_voip:
name: "Ansible_VOIP_Profile"
mode: "delete"
- name: Create FMGR_VOIP_PROFILE
community.fortios.fmgr_secprof_voip:
mode: "set"
adom: "root"
name: "Ansible_VOIP_Profile"
comment: "Created by Ansible"
sccp: {block-mcast: "enable", log-call-summary: "enable", log-violations: "enable", status: "enable"}
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.fortimanager import FortiManagerHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FMGBaseException
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FMGRCommon
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import prepare_dict
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import scrub_dict
###############
# START METHODS
###############
def fmgr_voip_profile_modify(fmgr, paramgram):
"""
:param fmgr: The fmgr object instance from fortimanager.py
:type fmgr: class object
:param paramgram: The formatted dictionary of options to process
:type paramgram: dict
:return: The response from the FortiManager
:rtype: dict
"""
mode = paramgram["mode"]
adom = paramgram["adom"]
response = DEFAULT_RESULT_OBJ
url = ""
datagram = {}
# EVAL THE MODE PARAMETER FOR SET OR ADD
if mode in ['set', 'add', 'update']:
url = '/pm/config/adom/{adom}/obj/voip/profile'.format(adom=adom)
datagram = scrub_dict(prepare_dict(paramgram))
# EVAL THE MODE PARAMETER FOR DELETE
elif mode == "delete":
# SET THE CORRECT URL FOR DELETE
url = '/pm/config/adom/{adom}/obj/voip/profile/{name}'.format(adom=adom, name=paramgram["name"])
datagram = {}
response = fmgr.process_request(url, datagram, paramgram["mode"])
return response
#############
# END METHODS
#############
def main():
argument_spec = dict(
adom=dict(type="str", default="root"),
mode=dict(choices=["add", "set", "delete", "update"], type="str", default="add"),
name=dict(required=False, type="str"),
comment=dict(required=False, type="str"),
sccp=dict(required=False, type="dict"),
sccp_block_mcast=dict(required=False, type="str", choices=["disable", "enable"]),
sccp_log_call_summary=dict(required=False, type="str", choices=["disable", "enable"]),
sccp_log_violations=dict(required=False, type="str", choices=["disable", "enable"]),
sccp_max_calls=dict(required=False, type="int"),
sccp_status=dict(required=False, type="str", choices=["disable", "enable"]),
sccp_verify_header=dict(required=False, type="str", choices=["disable", "enable"]),
sip=dict(required=False, type="dict"),
sip_ack_rate=dict(required=False, type="int"),
sip_block_ack=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_bye=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_cancel=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_geo_red_options=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_info=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_invite=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_long_lines=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_message=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_notify=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_options=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_prack=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_publish=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_refer=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_register=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_subscribe=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_unknown=dict(required=False, type="str", choices=["disable", "enable"]),
sip_block_update=dict(required=False, type="str", choices=["disable", "enable"]),
sip_bye_rate=dict(required=False, type="int"),
sip_call_keepalive=dict(required=False, type="int"),
sip_cancel_rate=dict(required=False, type="int"),
sip_contact_fixup=dict(required=False, type="str", choices=["disable", "enable"]),
sip_hnt_restrict_source_ip=dict(required=False, type="str", choices=["disable", "enable"]),
sip_hosted_nat_traversal=dict(required=False, type="str", choices=["disable", "enable"]),
sip_info_rate=dict(required=False, type="int"),
sip_invite_rate=dict(required=False, type="int"),
sip_ips_rtp=dict(required=False, type="str", choices=["disable", "enable"]),
sip_log_call_summary=dict(required=False, type="str", choices=["disable", "enable"]),
sip_log_violations=dict(required=False, type="str", choices=["disable", "enable"]),
sip_malformed_header_allow=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_call_id=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_contact=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_content_length=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_content_type=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_cseq=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_expires=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_from=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_max_forwards=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_p_asserted_identity=dict(required=False, type="str", choices=["pass",
"discard",
"respond"]),
sip_malformed_header_rack=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_record_route=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_route=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_rseq=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_a=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_b=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_c=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_i=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_k=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_m=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_o=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_r=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_s=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_t=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_v=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_sdp_z=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_to=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_header_via=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_malformed_request_line=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_max_body_length=dict(required=False, type="int"),
sip_max_dialogs=dict(required=False, type="int"),
sip_max_idle_dialogs=dict(required=False, type="int"),
sip_max_line_length=dict(required=False, type="int"),
sip_message_rate=dict(required=False, type="int"),
sip_nat_trace=dict(required=False, type="str", choices=["disable", "enable"]),
sip_no_sdp_fixup=dict(required=False, type="str", choices=["disable", "enable"]),
sip_notify_rate=dict(required=False, type="int"),
sip_open_contact_pinhole=dict(required=False, type="str", choices=["disable", "enable"]),
sip_open_record_route_pinhole=dict(required=False, type="str", choices=["disable", "enable"]),
sip_open_register_pinhole=dict(required=False, type="str", choices=["disable", "enable"]),
sip_open_via_pinhole=dict(required=False, type="str", choices=["disable", "enable"]),
sip_options_rate=dict(required=False, type="int"),
sip_prack_rate=dict(required=False, type="int"),
sip_preserve_override=dict(required=False, type="str", choices=["disable", "enable"]),
sip_provisional_invite_expiry_time=dict(required=False, type="int"),
sip_publish_rate=dict(required=False, type="int"),
sip_refer_rate=dict(required=False, type="int"),
sip_register_contact_trace=dict(required=False, type="str", choices=["disable", "enable"]),
sip_register_rate=dict(required=False, type="int"),
sip_rfc2543_branch=dict(required=False, type="str", choices=["disable", "enable"]),
sip_rtp=dict(required=False, type="str", choices=["disable", "enable"]),
sip_ssl_algorithm=dict(required=False, type="str", choices=["high", "medium", "low"]),
sip_ssl_auth_client=dict(required=False, type="str"),
sip_ssl_auth_server=dict(required=False, type="str"),
sip_ssl_client_certificate=dict(required=False, type="str"),
sip_ssl_client_renegotiation=dict(required=False, type="str", choices=["allow", "deny", "secure"]),
sip_ssl_max_version=dict(required=False, type="str", choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]),
sip_ssl_min_version=dict(required=False, type="str", choices=["ssl-3.0", "tls-1.0", "tls-1.1", "tls-1.2"]),
sip_ssl_mode=dict(required=False, type="str", choices=["off", "full"]),
sip_ssl_pfs=dict(required=False, type="str", choices=["require", "deny", "allow"]),
sip_ssl_send_empty_frags=dict(required=False, type="str", choices=["disable", "enable"]),
sip_ssl_server_certificate=dict(required=False, type="str"),
sip_status=dict(required=False, type="str", choices=["disable", "enable"]),
sip_strict_register=dict(required=False, type="str", choices=["disable", "enable"]),
sip_subscribe_rate=dict(required=False, type="int"),
sip_unknown_header=dict(required=False, type="str", choices=["pass", "discard", "respond"]),
sip_update_rate=dict(required=False, type="int"),
)
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
# MODULE PARAMGRAM
paramgram = {
"mode": module.params["mode"],
"adom": module.params["adom"],
"name": module.params["name"],
"comment": module.params["comment"],
"sccp": {
"block-mcast": module.params["sccp_block_mcast"],
"log-call-summary": module.params["sccp_log_call_summary"],
"log-violations": module.params["sccp_log_violations"],
"max-calls": module.params["sccp_max_calls"],
"status": module.params["sccp_status"],
"verify-header": module.params["sccp_verify_header"],
},
"sip": {
"ack-rate": module.params["sip_ack_rate"],
"block-ack": module.params["sip_block_ack"],
"block-bye": module.params["sip_block_bye"],
"block-cancel": module.params["sip_block_cancel"],
"block-geo-red-options": module.params["sip_block_geo_red_options"],
"block-info": module.params["sip_block_info"],
"block-invite": module.params["sip_block_invite"],
"block-long-lines": module.params["sip_block_long_lines"],
"block-message": module.params["sip_block_message"],
"block-notify": module.params["sip_block_notify"],
"block-options": module.params["sip_block_options"],
"block-prack": module.params["sip_block_prack"],
"block-publish": module.params["sip_block_publish"],
"block-refer": module.params["sip_block_refer"],
"block-register": module.params["sip_block_register"],
"block-subscribe": module.params["sip_block_subscribe"],
"block-unknown": module.params["sip_block_unknown"],
"block-update": module.params["sip_block_update"],
"bye-rate": module.params["sip_bye_rate"],
"call-keepalive": module.params["sip_call_keepalive"],
"cancel-rate": module.params["sip_cancel_rate"],
"contact-fixup": module.params["sip_contact_fixup"],
"hnt-restrict-source-ip": module.params["sip_hnt_restrict_source_ip"],
"hosted-nat-traversal": module.params["sip_hosted_nat_traversal"],
"info-rate": module.params["sip_info_rate"],
"invite-rate": module.params["sip_invite_rate"],
"ips-rtp": module.params["sip_ips_rtp"],
"log-call-summary": module.params["sip_log_call_summary"],
"log-violations": module.params["sip_log_violations"],
"malformed-header-allow": module.params["sip_malformed_header_allow"],
"malformed-header-call-id": module.params["sip_malformed_header_call_id"],
"malformed-header-contact": module.params["sip_malformed_header_contact"],
"malformed-header-content-length": module.params["sip_malformed_header_content_length"],
"malformed-header-content-type": module.params["sip_malformed_header_content_type"],
"malformed-header-cseq": module.params["sip_malformed_header_cseq"],
"malformed-header-expires": module.params["sip_malformed_header_expires"],
"malformed-header-from": module.params["sip_malformed_header_from"],
"malformed-header-max-forwards": module.params["sip_malformed_header_max_forwards"],
"malformed-header-p-asserted-identity": module.params["sip_malformed_header_p_asserted_identity"],
"malformed-header-rack": module.params["sip_malformed_header_rack"],
"malformed-header-record-route": module.params["sip_malformed_header_record_route"],
"malformed-header-route": module.params["sip_malformed_header_route"],
"malformed-header-rseq": module.params["sip_malformed_header_rseq"],
"malformed-header-sdp-a": module.params["sip_malformed_header_sdp_a"],
"malformed-header-sdp-b": module.params["sip_malformed_header_sdp_b"],
"malformed-header-sdp-c": module.params["sip_malformed_header_sdp_c"],
"malformed-header-sdp-i": module.params["sip_malformed_header_sdp_i"],
"malformed-header-sdp-k": module.params["sip_malformed_header_sdp_k"],
"malformed-header-sdp-m": module.params["sip_malformed_header_sdp_m"],
"malformed-header-sdp-o": module.params["sip_malformed_header_sdp_o"],
"malformed-header-sdp-r": module.params["sip_malformed_header_sdp_r"],
"malformed-header-sdp-s": module.params["sip_malformed_header_sdp_s"],
"malformed-header-sdp-t": module.params["sip_malformed_header_sdp_t"],
"malformed-header-sdp-v": module.params["sip_malformed_header_sdp_v"],
"malformed-header-sdp-z": module.params["sip_malformed_header_sdp_z"],
"malformed-header-to": module.params["sip_malformed_header_to"],
"malformed-header-via": module.params["sip_malformed_header_via"],
"malformed-request-line": module.params["sip_malformed_request_line"],
"max-body-length": module.params["sip_max_body_length"],
"max-dialogs": module.params["sip_max_dialogs"],
"max-idle-dialogs": module.params["sip_max_idle_dialogs"],
"max-line-length": module.params["sip_max_line_length"],
"message-rate": module.params["sip_message_rate"],
"nat-trace": module.params["sip_nat_trace"],
"no-sdp-fixup": module.params["sip_no_sdp_fixup"],
"notify-rate": module.params["sip_notify_rate"],
"open-contact-pinhole": module.params["sip_open_contact_pinhole"],
"open-record-route-pinhole": module.params["sip_open_record_route_pinhole"],
"open-register-pinhole": module.params["sip_open_register_pinhole"],
"open-via-pinhole": module.params["sip_open_via_pinhole"],
"options-rate": module.params["sip_options_rate"],
"prack-rate": module.params["sip_prack_rate"],
"preserve-override": module.params["sip_preserve_override"],
"provisional-invite-expiry-time": module.params["sip_provisional_invite_expiry_time"],
"publish-rate": module.params["sip_publish_rate"],
"refer-rate": module.params["sip_refer_rate"],
"register-contact-trace": module.params["sip_register_contact_trace"],
"register-rate": module.params["sip_register_rate"],
"rfc2543-branch": module.params["sip_rfc2543_branch"],
"rtp": module.params["sip_rtp"],
"ssl-algorithm": module.params["sip_ssl_algorithm"],
"ssl-auth-client": module.params["sip_ssl_auth_client"],
"ssl-auth-server": module.params["sip_ssl_auth_server"],
"ssl-client-certificate": module.params["sip_ssl_client_certificate"],
"ssl-client-renegotiation": module.params["sip_ssl_client_renegotiation"],
"ssl-max-version": module.params["sip_ssl_max_version"],
"ssl-min-version": module.params["sip_ssl_min_version"],
"ssl-mode": module.params["sip_ssl_mode"],
"ssl-pfs": module.params["sip_ssl_pfs"],
"ssl-send-empty-frags": module.params["sip_ssl_send_empty_frags"],
"ssl-server-certificate": module.params["sip_ssl_server_certificate"],
"status": module.params["sip_status"],
"strict-register": module.params["sip_strict_register"],
"subscribe-rate": module.params["sip_subscribe_rate"],
"unknown-header": module.params["sip_unknown_header"],
"update-rate": module.params["sip_update_rate"],
}
}
module.paramgram = paramgram
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
fmgr = FortiManagerHandler(connection, module)
fmgr.tools = FMGRCommon()
else:
module.fail_json(**FAIL_SOCKET_MSG)
list_overrides = ['sccp', 'sip']
paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
paramgram=paramgram, module=module)
module.paramgram = paramgram
results = DEFAULT_RESULT_OBJ
try:
results = fmgr_voip_profile_modify(fmgr, paramgram)
fmgr.govern_response(module=module, results=results,
ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
except Exception as err:
raise FMGBaseException(err)
return module.exit_json(**results[1])
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from dataclasses import dataclass
from typing import Iterator
from models.displayable_pull import DisplayablePull
@dataclass
class DisplayablePulls:
pulls: Iterator[DisplayablePull]
limit: int
    def for_output(self) -> str:
        ready_pulls = list(filter(lambda p: p.ready, self.pulls))
        shown = ready_pulls[:self.limit]
        omitted_count = len(ready_pulls) - len(shown)
        footer = ''
        if omitted_count > 0:
            footer = 'And there are ' + str(omitted_count) + ' more pull requests...'
        return '\n'.join(map(lambda p: p.for_output(), shown)) + '\n' + footer
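# Hedged usage sketch (assumes DisplayablePull exposes a `ready` flag and a
# `for_output() -> str` method):
#   pulls = DisplayablePulls(pulls=iter(fetched_pulls), limit=5)
#   print(pulls.for_output())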
|
nilq/baby-python
|
python
|
import os
import sys
import urllib.request
import re
import shutil
LATEST_URL = 'https://bitcoin.jonasschnelli.ch/build/nightly/latest'
BUILD_URL = 'https://bitcointools.jonasschnelli.ch/data/builds/{}/{}'
if os.getenv('TRAVIS_OS_NAME') == 'osx':
    ARCHIVE_SNIP = '-osx64.tar.gz'
    ARCHIVE_RE = r'bitcoin-0\.[0-9]+\.99-osx64\.tar\.gz'
    ARCHIVE_EXT = 'tar.gz'
    EXEEXT = ''
if os.getenv('TRAVIS_OS_NAME') == 'linux':
    ARCHIVE_SNIP = '-x86_64-linux-gnu.tar.gz'
    ARCHIVE_RE = r'bitcoin-0\.[0-9]+\.99-x86_64-linux-gnu(-debug)?\.tar\.gz'
    ARCHIVE_EXT = 'tar.gz'
    EXEEXT = ''
if os.getenv('TRAVIS_OS_NAME') == 'windows':
    ARCHIVE_SNIP = '-win64.zip'
    ARCHIVE_RE = r'bitcoin-0\.[0-9]+\.99-win64\.zip'
    ARCHIVE_EXT = 'zip'
    EXEEXT = '.exe'
def get_lines(url):
return urllib.request.urlopen(url).read().decode('utf-8').splitlines()
def main():
root_folder = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.join(root_folder, 'bitcoin', '')
print(os.getenv('PYTHONIOENCODING'))
print(sys.stdin.encoding)
print(sys.stdout.encoding)
assert 'UTF-8' == sys.stdin.encoding == sys.stdout.encoding
assert os.path.isdir(src_dir) # Make sure to git clone bitcoin
import zmq #noqa
for line in get_lines(LATEST_URL):
if 'embed-responsive-item' in line:
            build_id = int(
                re.sub(r'^.*builds/([0-9]+)/.*$', r'\g<1>', line.strip()))
break
print('build id: {}'.format(build_id))
for line in get_lines(BUILD_URL.format(build_id, '')):
if ARCHIVE_SNIP in line:
            archive_gitian_name = re.sub(r'^.*({}).*$'.format(ARCHIVE_RE), r'\g<1>', line.strip())
print('filename: {}'.format(archive_gitian_name))
            version = int(re.sub(r'bitcoin-0\.(\d+)\.99-.*', r'\g<1>', archive_gitian_name))
print('version: {}'.format(version))
archive_name = 'bitcoin-core-nightly.{}'.format(ARCHIVE_EXT)
with open(archive_name, 'wb') as archive:
archive.write(urllib.request.urlopen(BUILD_URL.format(build_id, archive_gitian_name)).read())
build_dir = os.path.join(root_folder, 'build_dir')
shutil.unpack_archive(archive_name, build_dir)
build_dir = os.path.join(build_dir, 'bitcoin-0.{}.99'.format(version), '')
build_dir_src = os.path.join(build_dir, 'src')
shutil.rmtree(build_dir_src, ignore_errors=True)
os.rename(src=os.path.join(build_dir, 'bin'), dst=build_dir_src)
config_file = os.path.join(src_dir, 'test', 'config.ini')
shutil.copyfile(os.path.join(root_folder, 'config.ini'), config_file)
with open(config_file) as f:
c = f.read() \
.replace('__BUILDDIR__', build_dir) \
.replace('__SRCDIR__', src_dir) \
.replace('__EXEEXT__', EXEEXT)
with open(config_file, 'w') as f:
f.write(c)
with open('src_dir.txt', 'w') as f:
f.write(src_dir)
with open('build_dir.txt', 'w') as f:
f.write(build_dir)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from abc import abstractmethod, ABC
class Transformer(ABC):
"""
Abstract class for transformer over data.
"""
def __init__(self):
self.__name__ = self.__class__.__name__
@abstractmethod
def transform(self, x):
"""
Method to transform a text data.
:param x: (Union[str, List]) The data to be transform.
:return: (Union[str, List]) The transformed data.
"""
pass
def fit(self, x):
"""
        No-op method kept for compatibility with the scikit-learn interface.
"""
return self
def __repr__(self):
return self.__name__
class NewLineStrip(Transformer):
"""
    A filter to strip newline characters from the ends of strings.
"""
def transform(self, x):
return [i.strip('\n') for i in x]
class EmptyLineRemoval(Transformer):
"""
A filter to remove empty lines in a list.
"""
def transform(self, x):
return list(filter(None, x))
class WhiteSpaceStrip(Transformer):
"""
    A filter to strip whitespace characters from the ends of strings.
"""
def transform(self, x):
return [i.strip(' ') for i in x]
class PunctuationStrip(Transformer):
"""
    A filter to strip punctuation characters from the ends of strings.
"""
def transform(self, x):
return [i.strip("""."',!?-""") for i in x]
class StringRemove(Transformer):
"""
    A filter to remove a given substring from strings.
"""
def __init__(self, characters):
super().__init__()
self.characters = characters
def transform(self, x):
return [i.replace(self.characters, "") for i in x]
class PunctuationRemoval(StringRemove):
"""
A filter to remove punctuation characters in strings.
"""
def __init__(self):
super().__init__("!")
class ThinSpaceRemoval(StringRemove):
"""
A filter to remove punctuation characters in strings.
"""
def __init__(self):
super().__init__("\u2009")
class LowerCaser(Transformer):
"""
A simple wrapper for lower case strings.
"""
def transform(self, x):
return [i.lower() for i in x]
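
# Minimal usage sketch (added for illustration; the pipeline below is
# hypothetical, but it only uses the classes defined above). Every transformer
# exposes fit/transform, so steps chain like Scikit-Learn components.
if __name__ == '__main__':
    lines = ["Hello, World!\n", "", "  some\u2009text  \n"]
    for step in (NewLineStrip(), WhiteSpaceStrip(), EmptyLineRemoval(),
                 PunctuationRemoval(), ThinSpaceRemoval(), LowerCaser()):
        lines = step.transform(lines)
    print(lines)  # ['hello, world', 'sometext']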
|
nilq/baby-python
|
python
|
import os
from statistics import mean
import numpy as np
import matplotlib.pyplot as pyplot
from simtk import unit
from simtk.openmm.app.pdbfile import PDBFile
from foldamers.cg_model.cgmodel import CGModel
from foldamers.parameters.reweight import *
from foldamers.ensembles.ens_build import *
from cg_openmm.simulation.rep_exch import *
from cg_openmm.simulation.tools import *
grid_size = 1
# Job settings
output_directory = "output"
if not os.path.exists(output_directory):
os.mkdir(output_directory)
# Configure Yank (replica exchange) simulation settings
print_frequency = 5 # Number of steps to skip when printing output
total_simulation_time = 500.0 * unit.picosecond
simulation_time_step = 5.0 * unit.femtosecond
output_data = str(str(output_directory) + "/output.nc")
number_replicas = 30
min_temp = 1.0 * unit.kelvin
max_temp = 400.0 * unit.kelvin
temperature_list = get_temperature_list(min_temp, max_temp, number_replicas)
# Model settings
polymer_length = 12
backbone_lengths = [1]
sidechain_lengths = [1]
sidechain_positions = [0]
include_bond_forces = False
include_bond_angle_forces = True
include_nonbonded_forces = True
include_torsion_forces = True
constrain_bonds = True
# Bond definitions
bond_length = 7.5 * unit.angstrom
bond_lengths = {
"bb_bb_bond_length": bond_length,
"bb_sc_bond_length": bond_length,
"sc_sc_bond_length": bond_length,
}
bond_force_constant = 0 * unit.kilocalorie_per_mole / unit.nanometer / unit.nanometer
bond_force_constants = {
"bb_bb_bond_k": bond_force_constant,
"bb_sc_bond_k": bond_force_constant,
"sc_sc_bond_k": bond_force_constant,
}
# Particle definitions
mass = 100.0 * unit.amu
masses = {"backbone_bead_masses": mass, "sidechain_bead_masses": mass}
r_min = 3.0 * bond_length # Lennard-Jones potential r_min
sigma = r_min / (2.0 ** (1 / 6)) # Factor of /(2.0**(1/6)) is applied to convert r_min to sigma
sigmas = {"bb_sigma": sigma, "sc_sigma": sigma}
epsilon = 0.05 * unit.kilocalorie_per_mole
epsilons = {"bb_eps": epsilon, "sc_eps": epsilon}
# Bond angle definitions
bond_angle_force_constant = 0.0001 * unit.kilocalorie_per_mole / unit.radian / unit.radian
bond_angle_force_constants = {
"bb_bb_bb_angle_k": bond_angle_force_constant,
"bb_bb_sc_angle_k": bond_angle_force_constant,
}
bb_bb_bb_equil_bond_angle = 120.0 * (
    np.pi / 180.0
)  # OpenMM expects angle definitions in units of radians
bb_bb_sc_equil_bond_angle = 120.0 * (np.pi / 180.0)
equil_bond_angles = {
"bb_bb_bb_angle_0": bb_bb_bb_equil_bond_angle,
"bb_bb_sc_angle_0": bb_bb_sc_equil_bond_angle,
}
# Torsion angle definitions (Used to establish a scanning range below)
torsion_force_constant = 0.01 * unit.kilocalorie_per_mole / unit.radian / unit.radian
torsion_force_constants = {
"bb_bb_bb_bb_torsion_k": torsion_force_constant,
"sc_bb_bb_sc_torsion_k": torsion_force_constant,
}
bb_bb_bb_bb_equil_torsion_angle = 78.0 * (
    np.pi / 180.0
)  # OpenMM defaults to units of radians for angle definitions
sc_bb_bb_sc_equil_torsion_angle = 120.0 * (np.pi / 180.0)
equil_torsion_angles = {
"bb_bb_bb_bb_torsion_0": bb_bb_bb_bb_equil_torsion_angle,
"sc_bb_bb_sc_torsion_0": sc_bb_bb_sc_equil_torsion_angle,
}
torsion_periodicities = {"bb_bb_bb_bb_period": 1, "sc_bb_bb_sc_period": 2}
# Get initial positions from local file
positions = PDBFile("helix.pdb").getPositions()
# Build a coarse grained model
cgmodel = CGModel(
polymer_length=polymer_length,
backbone_lengths=backbone_lengths,
sidechain_lengths=sidechain_lengths,
sidechain_positions=sidechain_positions,
masses=masses,
sigmas=sigmas,
epsilons=epsilons,
bond_lengths=bond_lengths,
bond_force_constants=bond_force_constants,
bond_angle_force_constants=bond_angle_force_constants,
torsion_force_constants=torsion_force_constants,
equil_bond_angles=equil_bond_angles,
equil_torsion_angles=equil_torsion_angles,
torsion_periodicities=torsion_periodicities,
include_nonbonded_forces=include_nonbonded_forces,
include_bond_forces=include_bond_forces,
include_bond_angle_forces=include_bond_angle_forces,
include_torsion_forces=include_torsion_forces,
constrain_bonds=constrain_bonds,
positions=positions,
)
if os.path.exists(output_data):
replica_energies, replica_positions, replica_states = read_replica_exchange_data(
system=cgmodel.system,
topology=cgmodel.topology,
temperature_list=temperature_list,
output_data=output_data,
print_frequency=print_frequency,
)
else:
replica_energies, replica_positions, replica_states = run_replica_exchange(
cgmodel.topology,
cgmodel.system,
cgmodel.positions,
temperature_list=temperature_list,
simulation_time_step=simulation_time_step,
total_simulation_time=total_simulation_time,
print_frequency=print_frequency,
output_data=output_data,
)
native_structure = get_native_structure(replica_positions, replica_energies, temperature_list)
native_structure_contact_distance_cutoff = 1.15 * cgmodel.get_sigma(
0
) # This distance cutoff determines which nonbonded interactions are considered 'native' contacts
native_fraction_cutoff = (
0.95 # The threshold fraction of native contacts above which a pose is considered 'native'
)
nonnative_fraction_cutoff = (
0.95 # The threshold fraction of native contacts below which a pose is considered 'nonnative'
)
native_ensemble_size = 10
nonnative_ensemble_size = 100
decorrelate = True
(
native_ensemble,
native_ensemble_energies,
nonnative_ensemble,
nonnative_ensemble_energies,
) = get_ensembles_from_replica_positions(
cgmodel,
replica_positions,
replica_energies,
temperature_list,
decorrelate=decorrelate,
native_fraction_cutoff=native_fraction_cutoff,
nonnative_fraction_cutoff=nonnative_fraction_cutoff,
native_structure_contact_distance_cutoff=native_structure_contact_distance_cutoff,
native_ensemble_size=native_ensemble_size,
nonnative_ensemble_size=nonnative_ensemble_size,
)
nonnative_ensemble_directory = "nonnative_ensemble"
native_ensemble_directory = "native_ensemble"
if os.path.exists(nonnative_ensemble_directory):
nonnative_ensemble, nonnative_ensemble_energies = get_ensemble_data(
cgmodel, nonnative_ensemble_directory
)
    if len(nonnative_ensemble_energies) != nonnative_ensemble_size:
print(
"ERROR: "
+ str(len(nonnative_ensemble_energies))
+ " nonnative poses were found in existing output folders, but "
+ str(nonnative_ensemble_size)
+ " poses were requested."
)
print(
"This probably means that the requested ensemble size changed since the script was last run."
)
exit()
else:
os.mkdir(nonnative_ensemble_directory)
for pose in nonnative_ensemble:
cgmodel.positions = pose
write_ensemble_pdb(cgmodel, ensemble_directory=nonnative_ensemble_directory)
if os.path.exists(native_ensemble_directory):
native_ensemble, native_ensemble_energies = get_ensemble_data(
cgmodel, native_ensemble_directory
)
if len(native_ensemble_energies) != native_ensemble_size:
print(
"ERROR: "
+ str(len(native_ensemble_energies))
+ " native poses were found in existing output folders, but "
+ str(native_ensemble_size)
+ " poses were requested."
)
print(
"This probably means that the requested ensemble size changed since the script was last run."
)
exit()
else:
os.mkdir(native_ensemble_directory)
for pose in native_ensemble:
cgmodel.positions = pose
write_ensemble_pdb(cgmodel, ensemble_directory=native_ensemble_directory)
exit()
|
nilq/baby-python
|
python
|
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for operating the geo classes.
We store the prefix tree using pygtrie objects. Initially we consider user's
coordinate as an (x,y) tuple. We then compute a binary version of this tuple,
e.g. (x=12, y=5) => (1100, 0101) creates a prefix: ‘10/11/00/01’. We keep the
counts using vectors with positions corresponding to the ids of the leafs in the
tree. For each leaf we implement a conversion process into either the coordinate
on some level or a region on the lowest level.
"""
import dataclasses
import random
from typing import List, Any
from tqdm import tqdm
import numpy as np
import pygtrie
from sketches import CountMinSketch, hash_function
depth = 20
width = 2000
hash_functions = [hash_function(i) for i in range(depth)]
sum_sketch = CountMinSketch(depth, width, hash_functions)
# count_min = False
DEFAULT_CHILDREN = ['00', '01', '10', '11']
def get_default_children(positivity, split=None):
if positivity:
if split == 'pos':
return ['001', '011', '101', '111']
elif split == 'neg':
return ['000', '010', '100', '110']
else:
return ['000', '001', '010', '011',
'100', '101', '110', '111']
else:
return ['00', '01', '10', '11']
@dataclasses.dataclass
class AlgResult:
"""Main result object.
Attributes:
image: resulting reassembled image
sum_vector: a vector of reports on the tree leaves.
tree: a prefix trie used to convert the sum_vector into image.
tree_prefix_list: a reverse prefix matching vector coordinates to the trie.
threshold: threshold parameter used to obtain the current tree.
grid_contour: image showing the tree leafs locations on the map.
eps: current value of the epsilon in SecAgg round.
"""
image: np.ndarray
sum_vector: np.ndarray
tree: pygtrie.StringTrie
tree_prefix_list: List[str]
threshold: float
grid_contour: np.ndarray
eps: float
pos_image: np.ndarray = None
neg_image: np.ndarray = None
metric: Any = None
sampled_metric: Any = None
def coordinates_to_binary_path(xy_tuple, depth=10):
"""Transform a coordinate tuple into a binary vector.
We compute a binary version of the provided coordinate tuple,
e.g. (x=12, y=5) => (1100, 0101) creates a prefix: ‘10/11/00/01’.
Args:
xy_tuple: a tuple of (x,y) coordinates of the user location.
depth: desired length of the binary vector, e.g. max depth of the tree.
Returns:
binary version of the coordinate.
"""
if len(xy_tuple) == 2:
x_coord, y_coord = xy_tuple
positivity = False
pos = ''
else:
x_coord, y_coord, pos = xy_tuple
path = ''
for j in reversed(range(depth)):
path += f'{(x_coord >> j) & 1}{(y_coord >> j) & 1}{pos}/'
path = path[:-1]
return path
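# Illustrative check (added, not part of the original module): with depth=4,
# (x=12, y=5) -> bits (1100, 0101), which interleave into the prefix from the
# docstring example:
#   coordinates_to_binary_path((12, 5), depth=4) == '10/11/00/01'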
def binary_path_to_coordinates(path):
"""Using tree path to the leaf node retrieve (x, y) coordinates.
Reassembles the path into coordinates. Note that if the path is shorter,
e.g. for leafs closer to the tree root, the (x, y) coordinates would be
w.r.t. to the image of the size 2^b x 2^b, where b = `path coordinate bits`.
Args:
path: binary path of the location ('00/01')
Returns:
x coordinate, y coordinate, total bit level, pos
"""
x = 0
y = 0
pos = None
splitted_path = path.split('/')
for xy in splitted_path:
x = x << 1
y = y << 1
x += int(xy[0])
y += int(xy[1])
if len(xy) > 2:
pos = int(xy[2])
return x, y, len(splitted_path), pos
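# Illustrative roundtrip (added): the example path above decodes back to the
# original coordinates at bit level 4; with no positivity bit, pos is None:
#   binary_path_to_coordinates('10/11/00/01') == (12, 5, 4, None)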
def report_coordinate_to_vector(xy, tree, tree_prefix_list, count_min):
"""Converts a coordinate tuple into a one-hot vector using tree."""
path = coordinates_to_binary_path(xy)
(sub_path, value) = tree.longest_prefix(path)
if count_min:
sketch = CountMinSketch(depth, width, hash_functions)
sketch.add(sub_path)
# print(sub_path, sketch.query(sub_path))
vector = sketch.get_matrix()
else:
vector = np.zeros([len(tree_prefix_list)])
vector[value] += 1
return vector
def init_tree(positivity=False):
"""Initializes tree to have four leaf nodes.
Creates pgtrie with leafs from `DEFAULT_CHILDREN` and assigns each node
a positional identifier using positions from the `DEFAULT_CHILDREN`.
Args:
positivity: Whether to account for pos and neg users.
Returns:
constructed pygtrie, reverse prefix of the trie.
"""
new_tree = pygtrie.StringTrie()
for i, z in enumerate(get_default_children(positivity)):
new_tree[z] = i
return new_tree, list(get_default_children(positivity))
def transform_region_to_coordinates(x_coord,
y_coord,
prefix_len,
image_bit_level=10):
"""Transforms (x,y)-bit region into a square for a final level.
This method converts a leaf on some level `prefix_len` to a square region at
the final level `2^image_bit_level`. For example, a first leaf on the
smallest prefix 2x2 will occupy (0:512, 0:512) region of the 10-bit image.
Args:
x_coord:
y_coord:
prefix_len:
image_bit_level:
Returns:
A square region coordinates.
"""
shift = image_bit_level - prefix_len
x_bot = x_coord << shift
x_top = ((x_coord + 1) << shift) - 1
y_bot = y_coord << shift
y_top = ((y_coord + 1) << shift) - 1
return (x_bot, x_top, y_bot, y_top)
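# Illustrative check (added): the first leaf of a 2x2 (prefix_len=1) tree maps
# onto the top-left quadrant of a 10-bit (1024x1024) image:
#   transform_region_to_coordinates(0, 0, 1, image_bit_level=10) == (0, 511, 0, 511)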
def rebuild_from_vector(vector, tree, image_size, contour=False, threshold=0,
positivity=False, count_min=False):
"""Using coordinate vector and the tree produce a resulting image.
For each value in the vector it finds the corresponding prefix and plots the
value of the vector on a square region of the final image.
Args:
vector: data vector from the accumulated responses.
tree: current tree object
image_size: desired final resolution of the image.
contour: release only the contours of the grid (for debugging)
threshold: reduces noise by setting values below threshold to 0.
positivity: produce two images with positive and negative cases.
count_min: use count min sketch.
Returns:
image of the size `image_size x image_size`
"""
image_bit_level = int(np.log2(image_size))
current_image = np.zeros([image_size, image_size])
pos_image, neg_image = None, None
if positivity:
pos_image = np.zeros([image_size, image_size])
neg_image = np.zeros([image_size, image_size])
for path in sorted(tree):
if count_min:
value = sum_sketch.query(path)
else:
value = vector[tree[path]]
(x, y, prefix_len, pos) = binary_path_to_coordinates(path)
(x_bot, x_top, y_bot,
y_top) = transform_region_to_coordinates(x, y, prefix_len,
image_bit_level)
if value < threshold:
value = 0
count = value / 2 ** (1 * (image_bit_level - prefix_len))
# Build a grid image without filling the regions.
if contour:
current_image[x_bot:x_top + 1,
y_bot - max(1, 5 // prefix_len):y_bot + max(1, 5 // prefix_len)] = 1
current_image[x_bot:x_top + 1,
y_top - max(1, 5 // prefix_len):y_top + 10 // prefix_len] = 1
current_image[
x_bot - max(1, 5 // prefix_len):x_bot + 10 // prefix_len,
y_bot:y_top + 1] = 1
current_image[
x_top - max(1, 5 // prefix_len):x_top + 10 // prefix_len,
y_bot:y_top + 1] = 1
else:
current_image[x_bot:x_top + 1, y_bot:y_top + 1] += count
if positivity:
if pos == 1:
pos_image[x_bot:x_top + 1, y_bot:y_top + 1] = count
elif pos == 0:
neg_image[x_bot:x_top + 1, y_bot:y_top + 1] = count
else:
raise ValueError(f'value: {pos}')
return current_image, pos_image, neg_image
def split_regions(tree_prefix_list,
vector_counts,
threshold,
image_bit_level,
collapse_threshold=None,
positivity=False,
expand_all=False,
last_result: AlgResult=None,
count_min=False):
"""Modify the tree by splitting and collapsing the nodes.
This implementation collapses and splits nodes of the tree according to
  the received responses of the users. If no new nodes are discovered,
  the finished flag is returned as True.
Args:
tree_prefix_list: matches vector id to the tree prefix.
vector_counts: vector values aggregated from the users.
threshold: threshold value used to split the nodes.
image_bit_level: stopping criteria once the final resolution is reached.
    collapse_threshold: threshold value used to collapse the nodes.
    positivity: whether the tree tracks positive and negative cases separately.
    expand_all: if True, expand every node regardless of its count.
    last_result: the previous round's AlgResult, used to build confidence
      intervals for the split decision.
    count_min: whether counts are queried from a count-min sketch.
  Returns:
new_tree, new_tree_prefix_list, finished
"""
collapsed = 0
created = 0
fresh_expand = 0
unchanged = 0
intervals = list()
new_tree_prefix_list = list()
new_tree = pygtrie.StringTrie()
if positivity:
for i in range(0, len(tree_prefix_list), 2):
if expand_all:
neg_count = threshold + 1
pos_count = threshold + 1
else:
neg_count = vector_counts[i]
pos_count = vector_counts[i+1]
neg_prefix = tree_prefix_list[i]
pos_prefix = tree_prefix_list[i+1]
# check whether the tree has reached the bottom
if len(pos_prefix.split('/')) >= image_bit_level:
continue
# total = pos_count + neg_count
# p = pos_count / total
# confidence = np.sqrt((1-p)*p/total)
# error bound propagation.
# confidence +/- noise
# pos_count/total +/- (confidence+conf_noise) => 95% interval for 95% noise interval.
if pos_count > threshold and neg_count > threshold:
neg_child = get_default_children(positivity, split='neg')
pos_child = get_default_children(positivity, split='pos')
for j in range(len(pos_child)):
new_prefix = f'{neg_prefix}/{neg_child[j]}'
if not new_tree.has_key(new_prefix):
fresh_expand += 1
new_tree[new_prefix] = len(new_tree_prefix_list)
new_tree_prefix_list.append(new_prefix)
new_prefix = f'{pos_prefix}/{pos_child[j]}'
new_tree[new_prefix] = len(new_tree_prefix_list)
new_tree_prefix_list.append(new_prefix)
else:
if collapse_threshold is not None and \
(pos_count < collapse_threshold or neg_count < collapse_threshold) and \
len(pos_prefix) > 3 and len(neg_prefix) > 3:
old_prefix = neg_prefix[:-4]
collapsed += 1
if not new_tree.has_key(old_prefix):
created += 1
new_tree[old_prefix] = len(new_tree_prefix_list)
new_tree_prefix_list.append(old_prefix)
old_prefix = pos_prefix[:-4]
new_tree[old_prefix] = len(new_tree_prefix_list)
new_tree_prefix_list.append(old_prefix)
else:
unchanged += 1
new_tree[f'{neg_prefix}'] = len(new_tree_prefix_list)
new_tree_prefix_list.append(f'{neg_prefix}')
new_tree[f'{pos_prefix}'] = len(new_tree_prefix_list)
new_tree_prefix_list.append(f'{pos_prefix}')
else:
for i in range(len(tree_prefix_list)):
if expand_all:
count = threshold + 1
else:
if count_min:
count = sum_sketch.query(tree_prefix_list[i])
else:
count = vector_counts[i]
prefix = tree_prefix_list[i]
# check whether the tree has reached the bottom
if len(prefix.split('/')) >= image_bit_level:
continue
if last_result is not None:
(last_prefix, last_prefix_pos) = last_result.tree.longest_prefix(prefix)
if last_prefix is None:
cond = False
else:
last_count = last_result.sum_vector[last_prefix_pos]
p = (last_count - count)/last_count
if p<=0 or count<5 or last_count<5:
cond = False
# print(last_prefix, prefix, last_prefix_pos, last_count,
# count)
else:
conf_int = 1.96 * np.sqrt((p*(1-p)/last_count)) * last_count
cond = conf_int < threshold
intervals.append(conf_int)
# print(last_prefix, prefix, last_prefix_pos, last_count, count, conf_int, cond)
else:
cond = count > threshold
# print(cond, threshold, count)
if cond:
for child in DEFAULT_CHILDREN:
new_prefix = f'{prefix}/{child}'
if not new_tree.has_key(new_prefix):
fresh_expand += 1
new_tree[new_prefix] = len(new_tree_prefix_list)
new_tree_prefix_list.append(new_prefix)
else:
if collapse_threshold is not None and \
count <= collapse_threshold and \
len(prefix) > 2:
old_prefix = prefix[:-3]
collapsed += 1
if not new_tree.has_key(old_prefix):
created += 1
new_tree[old_prefix] = len(new_tree_prefix_list)
new_tree_prefix_list.append(old_prefix)
else:
unchanged += 1
new_tree[f'{prefix}'] = len(new_tree_prefix_list)
new_tree_prefix_list.append(f'{prefix}')
finished = False
# print(f'Conf int {np.mean(intervals) if len(intervals) else 0}.')
# if collapse_threshold:
# print(f'Collapsed: {collapsed}, created when collapsing: {created},' + \
# f'new expanded: {fresh_expand},' + \
# f'unchanged: {unchanged}, total: {len(new_tree_prefix_list)}')
if fresh_expand == 0: # len(new_tree_prefix_list) <= len(tree_prefix_list):
print('Finished expanding, no new results.')
finished = True
return new_tree, new_tree_prefix_list, finished
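# Illustrative sketch (added; the counts are hypothetical): starting from the
# initial 4-leaf tree, counts above the threshold expand each leaf into its
# four children, so one round grows the tree to 16 prefixes:
#   tree, prefixes = init_tree()
#   new_tree, new_prefixes, finished = split_regions(
#       prefixes, [10, 10, 10, 10], threshold=5, image_bit_level=10)
#   # len(new_prefixes) == 16, finished == False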
def build_from_sample(samples, total_size):
"""Restores the image from the list of coordinate tuples."""
image = np.zeros([total_size, total_size])
for sample in samples:
x = sample[0]
y = sample[1]
image[x, y] += 1
return image
def quantize_vector(vector, left_bound, right_bound):
"""Modulo clipping of the provided vector."""
if left_bound > right_bound:
raise ValueError('Left bound is higher than the right bound.')
distance = (right_bound - left_bound)
scale = (vector - left_bound) // distance
vector -= distance * scale
return vector
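# Illustrative check (added): modulo clipping wraps out-of-range values back
# into the interval instead of saturating at its bounds:
#   quantize_vector(np.array([5.0]), -4, 4) -> array([-3.])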
def makeGaussian(image, total_size, fwhm=3, center=None,
convert=False, save=False, load=False):
""" Make a square gaussian kernel.
size is the length of a side of the square
fwhm is full-width-half-maximum, which
can be thought of as an effective radius.
"""
import torch
if load:
return torch.load(f'split_dataset_{fwhm}_{center[0]}_{center[1]}.pt')
size = image.shape[0]
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
if center is None:
x0 = y0 = size // 2
else:
x0 = center[0]
y0 = center[1]
hotspot = np.exp(-4 * np.log(2) * ((x - x0) ** 2 + (y - y0) ** 2) / fwhm ** 2)
pos_image = np.floor(hotspot * image)
pos_image = pos_image.astype(int)
neg_image = image - pos_image
if convert:
pos_dataset = convert_to_dataset(pos_image, total_size, value=1)
neg_dataset = convert_to_dataset(neg_image, total_size, value=0)
total_dataset = np.concatenate([pos_dataset, neg_dataset])
res = dict(mask=hotspot, pos_image=pos_image, neg_image=neg_image,
pos_dataset=pos_dataset, neg_dataset=neg_dataset,
total_dataset=total_dataset)
if save:
torch.save(res, f'split_dataset_{fwhm}_{center[0]}_{center[1]}.pt')
print(f'Saved to split_dataset_{fwhm}_{center[0]}_{center[1]}.pt')
return res
else:
return dict(mask=hotspot, pos_image=pos_image, neg_image=neg_image)
def convert_to_dataset(image, total_size, value=None):
if value is not None:
dataset = np.zeros(image.sum(),
dtype=[('x', np.int16), ('y', np.int16),
('pos', np.int8)])
else:
dataset = np.zeros(image.sum(),
dtype=[('x', np.int16), ('y', np.int16)])
z = 0
for i in tqdm(range(total_size), total=total_size):
for j in range(total_size):
for _ in range(int(image[i, j])):
if value is not None:
dataset[z] = (i, j, value)
else:
dataset[z] = (i, j)
z += 1
return dataset
def compute_conf_intervals(sum_vector: np.ndarray, level=95):
conf_intervals = dict()
conf_interval_weighted = dict()
  if level == 95:
    z = 1.96
  elif level == 99:
    z = 2.576
  elif level == 90:
    z = 1.645
  elif level == 98:
    z = 2.326
else:
raise ValueError(f'Incorrect confidence level {level}.')
for i in range(0, sum_vector.shape[0], 2):
neg_count = sum_vector[i]
pos_count = sum_vector[i+1]
total_clients_on_map = sum_vector.sum()
total_region = neg_count + pos_count
if pos_count > 5 and neg_count > 5:
p = pos_count / total_region
conf_interval = z * np.sqrt( (1-p) * p / total_region)
conf_intervals[i] = conf_interval
conf_interval_weighted[i] = conf_interval * total_region/total_clients_on_map
return conf_intervals, conf_interval_weighted
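# Illustrative check (added; the counts are hypothetical): a single region with
# 50 negative and 50 positive reports has p = 0.5, so the 95% interval is
# 1.96 * sqrt(0.5 * 0.5 / 100) ~= 0.098, and the weighted value is the same
# because that region holds all clients on the map:
#   compute_conf_intervals(np.array([50, 50])) -> ({0: ~0.098}, {0: ~0.098})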
def make_step(samples, eps, threshold, partial,
prefix_len, dropout_rate, tree, tree_prefix_list,
noiser, quantize, total_size, positivity, count_min):
samples_len = len(samples)
if count_min:
round_vector = np.zeros([partial, depth, width])
sum_sketch.M = np.zeros([depth, width], dtype=np.float64)
sum_vector = sum_sketch.get_matrix()
else:
round_vector = np.zeros([partial, prefix_len])
sum_vector = np.zeros(prefix_len)
for j, sample in enumerate(tqdm(samples, leave=False)):
if dropout_rate and random.random() <= dropout_rate:
continue
round_vector[j % partial] = report_coordinate_to_vector(
sample, tree, tree_prefix_list, count_min)
if j % partial == 0 or j == samples_len - 1:
round_vector = noiser.apply_noise(round_vector)
if quantize is not None:
round_vector = quantize_vector(round_vector,
-2 ** (
quantize - 1),
2 ** (
quantize - 1))
sum_vector += quantize_vector(
round_vector.sum(axis=0), -2 ** (quantize - 1),
2 ** (quantize - 1))
else:
sum_vector += round_vector.sum(axis=0)
if count_min:
round_vector = np.zeros([partial, depth, width])
else:
round_vector = np.zeros([partial, prefix_len])
del round_vector
rebuilder = np.copy(sum_vector)
if eps:
threshold_rebuild = threshold
else:
threshold_rebuild = 0.0
test_image, pos_image, neg_image = rebuild_from_vector(
rebuilder, tree, image_size=total_size, threshold=threshold_rebuild,
positivity=positivity, count_min=count_min)
grid_contour, _, _ = rebuild_from_vector(
sum_vector,
tree,
image_size=total_size,
contour=True,
threshold=threshold_rebuild, count_min=count_min)
result = AlgResult(
image=test_image,
sum_vector=sum_vector,
tree=tree,
tree_prefix_list=tree_prefix_list,
threshold=threshold,
grid_contour=grid_contour,
pos_image=pos_image,
neg_image=neg_image,
eps=eps)
return result, grid_contour
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
""" ``itur.utils`` is a utilities library for ITU-Rpy.
This utility library for ITU-Rpy contains methods to:
* Load data and build an interpolator object.
* Prepare the input and output arrays, and handle unit transformations.
* Compute distances and elevation angles between two points on Earth, or
  between a point on Earth and a point in space.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numbers
import numpy as np
from pyproj import Geod
from astropy import units as u
# Set the basepath for the module and the basepath for the data
dir_path = os.path.dirname(os.path.realpath(__file__))
dataset_dir = os.path.join(dir_path, 'data/')
# Define numeric types including numpy types
__NUMERIC_TYPES__ = [numbers.Number, int, float, complex,
np.float16, np.float32, np.float64,
np.int8, np.int16, np.int32, np.int64]
# Define the geodetic system using the WGS-84 ellipsoid
__wgs84_geod__ = Geod(ellps='WGS84')
def load_data_interpolator(path_lat, path_lon, path_data, interp_fcn,
flip_ud=True):
"""Load a lat-lon tabulated dataset and build an interpolator.
Parameters
----------
path_lat : string
Path for the file containing the latitude values
path_lon : string
Path for the file containing the longitude values
path_data : string
Path for the file containing the data values
interp_fcn : string
The interpolation function to be used
flip_ud : boolean
Whether to flip the latitude and data arrays along the first axis. This
is an artifact of the format that the ITU uses to encode its data,
which is inconsistent across recommendations (in some recommendations,
        latitudes are sorted in ascending order, in others they are sorted in
descending order).
Returns
-------
interp: interp_fcn
An interpolator that given a latitude-longitude pair, returns the
data value
"""
vals = load_data(os.path.join(dataset_dir, path_data))
lats = load_data(os.path.join(dataset_dir, path_lat))
lons = load_data(os.path.join(dataset_dir, path_lon))
if flip_ud:
return interp_fcn(np.flipud(lats), lons, np.flipud(vals))
else:
return interp_fcn(lats, lons, vals)
def load_data(path, is_text=False, **kwargs):
"""Load data files from `./itur/data/`.
Loads data from a comma-separated values file. The contents of the file
can be numeric or text-based.
Parameters
----------
path : string
Path of the data to load
is_text : bool
Indicates whether the data is text (`True`) or numerical (`False`).
Default value is `False`.
Returns
-------
data: numpy.ndarray
Numpy-array with the data. Numerical data is returned as a float
"""
# TODO: Change method to allow for h5df data too
if not os.path.isfile(path):
raise RuntimeError('The path provided is not a file - {0}'
.format(path))
_, file_extension = os.path.splitext(path)
if file_extension == '.npz':
data = np.load(path)['arr_0']
elif file_extension == '.npy':
data = np.load(path)
    elif file_extension == '.txt':
        if is_text:
            data = np.loadtxt(path, dtype=np.string_, delimiter=',', **kwargs)
        else:
            data = np.genfromtxt(path, dtype=float, delimiter=',', **kwargs)
    else:
        raise ValueError('Unsupported file extension - {0}'
                         .format(file_extension))
    return data
def get_input_type(inpt):
"""Return the type of the input.
If the input is an object of type Quantity, it returns the type of the
associated value
Parameters
----------
inpt : object
The input object.
Returns
-------
type: type
The type of the input.
"""
if isinstance(inpt, u.Quantity):
return type(inpt.value)
else:
return type(inpt)
def prepare_input_array(input_array):
"""Format an array to be a 2-D numpy-array.
    If the contents of `input_array` are 0-D or 1-D, it converts it to an
array with at least two dimensions.
Parameters
----------
input_array : numpy.ndarray, sequence, or number
The input value. It can be a scalar, 1-D array, or 2-D array.
Returns
-------
output_array : numpy.ndarray
        A 2-D numpy array with the input values
"""
if input_array is None:
return None
return np.atleast_2d(input_array)
def prepare_output_array(output_array, type_input=None):
"""Format the output to have the same shape and type as the input.
This function is a generic wrapper to format the output of a function
to have the same type as the input. ITU-Rpy makes extensive use of numpy
arrays, but uses this function to return outputs having the same type
that was provided in the input of the function.
"""
# First, differentiate between the units and the value of the output_array
    # since the rest of the function is mainly focused on casting the value
# of the output_array to the type in type_input
if isinstance(output_array, u.Quantity):
value = output_array.value
unit = output_array.unit
else:
value = output_array
unit = None
# Squeeze output array to remove singleton dimensions
if isinstance(value, np.ndarray) or isinstance(value, list):
value = np.array(value).squeeze()
type_output = get_input_type(output_array)
# First, cast the output_array to the same type of the input
# Check if the output array is a 0-D number and cast it to a float
    if (type_input in __NUMERIC_TYPES__ and
            ((type_output in __NUMERIC_TYPES__) or
             (isinstance(output_array, np.ndarray) and output_array.size == 1) or
             (type_output not in __NUMERIC_TYPES__ and
              len(output_array) == 1))):
        value = float(value)
    # Check if the input array was a list and convert appropriately
elif type_input is list:
if isinstance(value, np.ndarray):
value = value.tolist()
else:
value = list(value)
# Otherwise, we assume that the value already has the required type
else:
value = value
    # Add the units back, if the output_array had any
if unit is not None:
return value * unit
else:
return value
def prepare_quantity(value, units=None, name_val=None):
"""Convert the input to the required units.
The function verifies that the input has the right units and converts
it to the desired units. For example, if a value is introduced in km
but posterior frequencies require this value to be in meters, this
function would be called with `units=u.m`
Parameters
----------
value : astropy.units.Quantity, number, sequence, or np.ndarry
The input value
units : astropy.units
Desired units of the output
name_val : string
Name of the variable (for debugging purposes)
Returns
-------
q : numpy.ndarray
        A numpy array with the values converted to the desired units.
"""
if value is None:
return None
# If the units of the value are a temperature
if isinstance(value, u.Quantity):
if units in [u.K, u.deg_C, u.Kelvin, u.Celsius, u.imperial.deg_F]:
return value.to(units, equivalencies=u.temperature()).value
else:
return value.to(units).value
# Process numbers
elif isinstance(value, numbers.Number) and units is not None:
return value
# Process arrays and tuples
elif isinstance(value, np.ndarray) and units is not None:
return value
elif isinstance(value, list) and units is not None:
return np.array([prepare_quantity(v, units, name_val) for v in value])
elif isinstance(value, tuple) and units is not None:
return np.array([prepare_quantity(v, units, name_val) for v in value])
else:
raise ValueError('%s has not the correct format. It must be a value,'
'sequence, array, or a Quantity with %s units' %
(name_val, str(units)))
def compute_distance_earth_to_earth(lat_p, lon_p, lat_grid, lon_grid,
method=None):
"""
Compute the distance between a point and a matrix of (lat, lons).
If the number of elements in `lat_grid` is smaller than 100,000, uses the
WGS84 method, otherwise, uses the Haversine formula.
Parameters
----------
lat_p : number
Latitude projection of the point P (degrees)
lon_p : number
Longitude projection of the point P (degrees)
lat_grid : number, sequence of np.ndarray
Grid of latitude points to which compute the distance (degrees)
lon_grid : number, sequence of np.ndarray
Grid of longitude points to which compute the distance (degrees)
Returns
-------
d : numpy.ndarray
Distance between the point P and each point in (lat_grid, lon_grid)
(km)
"""
    if (method == 'WGS84' or
            (method is None and
             ((type(lat_p) in __NUMERIC_TYPES__) or
              (type(lat_grid) in __NUMERIC_TYPES__) or
              (isinstance(lat_grid, np.ndarray) and lat_grid.size < 1e5) or
              (not isinstance(lat_grid, np.ndarray) and len(lat_grid) < 1e5)))):
return compute_distance_earth_to_earth_wgs84(
lat_p, lon_p, lat_grid, lon_grid)
else:
return compute_distance_earth_to_earth_haversine(
lat_p, lon_p, lat_grid, lon_grid)
def compute_distance_earth_to_earth_wgs84(lat_p, lon_p, lat_grid, lon_grid):
"""Compute the distance between points using the WGS84 inverse method.
Compute the distance between a point (P) in (`lat_p`, `lon_p`) and a matrix
of latitude and longitudes (`lat_grid`, `lon_grid`) using the WGS84 inverse
method.
Parameters
----------
lat_p : number
Latitude projection of the point P (degrees)
lon_p : number
Longitude projection of the point P (degrees)
lat_grid : number, sequence of np.ndarray
Grid of latitude points to which compute the distance (degrees)
lon_grid : number, sequence of np.ndarray
Grid of longitude points to which compute the distance (degrees)
Returns
-------
d : numpy.ndarray
Distance between the point P and each point in (lat_grid, lon_grid)
(km)
"""
lat_p = lat_p * np.ones_like(lat_grid)
lon_p = lon_p * np.ones_like(lon_grid)
_a, _b, d = __wgs84_geod__.inv(lon_p, lat_p, lon_grid, lat_grid)
return d / 1e3
def compute_distance_earth_to_earth_haversine(lat_p, lon_p,
lat_grid, lon_grid):
"""Compute the distance between points using the Haversine formula.
    Compute the distance between a point (P) in (`lat_p`, `lon_p`) and a matrix
of latitude and longitudes (`lat_grid`, `lon_grid`) using the Haversine
formula.
Parameters
----------
lat_p : number
Latitude projection of the point P (degrees)
lon_p : number
Longitude projection of the point P (degrees)
lat_grid : number, sequence of np.ndarray
Grid of latitude points to which compute the distance (degrees)
lon_grid : number, sequence of np.ndarray
Grid of longitude points to which compute the distance (degrees)
Returns
-------
d : numpy.ndarray
Distance between the point P and each point in (lat_grid, lon_grid)
(km)
References
----------
This is based on the Haversine formula
"""
RE = 6371.0 # Radius of the Earth, km
lat1 = np.deg2rad(lat_grid)
lat2 = np.deg2rad(lat_p)
lon1 = np.deg2rad(lon_grid)
lon2 = np.deg2rad(lon_p)
dlat = lat2 - lat1
dlon = lon2 - lon1
# Compute the distance
a = np.clip((np.sin(dlat / 2.0))**2 + np.cos(lat1) * np.cos(lat2) *
(np.sin(dlon / 2))**2, -1, 1)
c = 2 * np.arcsin(np.sqrt(a))
d = RE * c
return d
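# Rough sanity check (added; the coordinates are illustrative): the
# great-circle distance between Madrid (40.42N, 3.70W) and Paris
# (48.86N, 2.35E) comes out near 1050 km:
#   compute_distance_earth_to_earth_haversine(40.42, -3.70, 48.86, 2.35)  # ~1053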
def regular_lat_lon_grid(resolution_lat=1, resolution_lon=1, lon_start_0=False,
lat_min=-90, lat_max=90, lon_min=-180, lon_max=180):
"""
Build regular latitude and longitude matrices.
Builds a latitude and longitude coordinate matrix with resolution
`resolution_lat`, `resolution_lon`.
Parameters
----------
resolution_lat: number
Resolution for the latitude axis (deg)
resolution_lon: number
Resolution for the longitude axis (deg)
lon_start_0: boolean
Indicates whether the longitude is indexed using a 0 - 360 scale (True)
or using -180 - 180 scale (False). Default value is False
Returns
-------
lat: numpy.ndarray
Grid of coordinates of the latitude point
lon: numpy.ndarray
Grid of coordinates of the longitude point
"""
if lon_start_0:
lon, lat = np.meshgrid(np.arange(lon_min + 180.0, lon_max + 180.0,
resolution_lon),
np.arange(lat_max, lat_min, - resolution_lat))
else:
lon, lat = np.meshgrid(np.arange(lon_min, lon_max, resolution_lon),
np.arange(lat_max, lat_min, - resolution_lat))
return lat, lon
def elevation_angle(h, lat_s, lon_s, lat_grid, lon_grid):
"""
Compute the elevation angle between a satellite and a point on Earth.
Compute the elevation angle between a satellite located in an orbit
at height h and located above coordinates (`lat_s`, `lon_s`) and a matrix
of latitude and longitudes (`lat_grid`, `lon_grid`).
Parameters
----------
h : float
Orbital altitude of the satellite (km)
lat_s : float
Latitude of the projection of the satellite (degrees)
lon_s : float
Longitude of the projection of the satellite (degrees)
lat_grid : number, sequence of np.ndarray
Grid of latitude points to which compute the elevation angle (degrees)
lon_grid : number, sequence of np.ndarray
Grid of longitude points to which compute the elevation angle (degrees)
Returns
-------
elevation : numpy.ndarray
Elevation angle between the satellite and each point in
(lat_grid, lon_grid) (degrees)
References
----------
[1] http://www.propagation.gatech.edu/ECE6390/notes/ASD5.pdf - Slides 3, 4
"""
h = prepare_quantity(h, u.km, name_val='Orbital altitude of the satellite')
RE = 6371.0 # Radius of the Earth (km)
rs = RE + h
# Transform latitude_longitude values to radians
lat1 = np.deg2rad(lat_grid)
lat2 = np.deg2rad(lat_s)
lon1 = np.deg2rad(lon_grid)
lon2 = np.deg2rad(lon_s)
    # Compute the elevation angle as described in [1]
gamma = np.arccos(
np.clip(np.sin(lat2) * np.sin(lat1) +
np.cos(lat1) * np.cos(lat2) * np.cos(lon2 - lon1), -1, 1))
elevation = np.arccos(np.sin(gamma) /
np.sqrt(1 + (RE / rs)**2 -
2 * (RE / rs) * np.cos(gamma))) # In radians
return np.rad2deg(elevation)
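
# Minimal sanity check (added sketch): a geostationary satellite
# (h ~= 35786 km) seen from the sub-satellite point is directly overhead,
# so the elevation angle is 90 degrees (gamma = 0 in the formula above).
if __name__ == '__main__':
    print(elevation_angle(35786.0, 0.0, 0.0, 0.0, 0.0))  # -> 90.0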
|
nilq/baby-python
|
python
|
import scipy
from numpy import *
from scipy.integrate import *
from consts import *
from numpy.random import randint,random,normal,shuffle
from scipy.stats import gaussian_kde
#from pickleutils import *
try:
from astropysics.coords import ICRSCoordinates,GalacticCoordinates,FK5Coordinates
except ImportError:
pass
import numpy as np
import pylab as p
import pickle
import numpy.random as rand
import matplotlib.pyplot as plt
from scipy.optimize import leastsq
from scipy.interpolate import UnivariateSpline as interpolate
def iclosest(arr,val):
    ind = abs(arr-val).argmin()
    if size(ind) > 1:
        ind = ind[0]
    return ind
def gr2B(g,r):
return gr2V(g,r) + 1.04*(g-r) + 0.19
def gr2V(g,r):
return r + 0.44*(g-r)-0.02
def keckSNR(vmag,t):
# mV=8, t=88s, SNR=188
return 188*sqrt(2.51**(8-vmag)*(t/88.))
def kecktexp(vmag,snr):
return 88*2.51**(vmag-8)*(snr/188.)**2
def deriv(f,c,dx=0.0001):
"""
deriv(f,c,dx) --> float
Returns f'(x), computed as a symmetric difference quotient.
"""
return (f(c+dx)-f(c-dx))/(2*dx)
def fuzzyequals(a,b,tol=0.0001):
return abs(a-b) < tol
def newton(f,c,tol=0.0001,restrict=None):
"""
newton(f,c) --> float
Returns the x closest to c such that f(x) = 0
"""
#print c
    if restrict:
        lo,hi = restrict
        if c < lo or c > hi:
            print(c)
            c = random()*(hi-lo)+lo
if fuzzyequals(f(c),0,tol):
return c
else:
try:
return newton(f,c-f(c)/deriv(f,c,tol),tol,restrict)
except:
return None
def trapznd(arr,*axes):
n = len(arr.shape)
if len(axes) != n:
raise ValueError('must provide same number of axes as number of dimensions!')
val = trapz(arr,axes[0],axis=0)
for i in arange(1,n):
val = trapz(val,axes[i],axis=0)
return val
def epkernel(u):
x = atleast_1d(u)
y = 3./4*(1-x*x)
y[where((x>1) | (x < -1))] = 0
return y
def gausskernel(u):
return 1/sqrt(2*pi)*exp(-0.5*u*u)
def tricubekernel(u):
x = atleast_1d(u)
y = 35./32*(1-x*x)**3
y[where((x > 1) | (x < -1))] = 0
return y
def kernelfn(kernel='tricube'):
if kernel=='ep':
#def fn(u):
# x = atleast_1d(u)
# y = 3./4*(1-x*x)
# y[where((x>1) | (x<-1))] = 0
# return y
#return fn
return epkernel
elif kernel=='gauss':
#return lambda x: 1/sqrt(2*pi)*exp(-0.5*x*x)
return gausskernel
elif kernel=='tricube':
#def fn(u):
# x = atleast_1d(u)
# y = 35/32.*(1-x*x)**3
# y[where((x>1) | (x<-1))] = 0
# return y
#return fn
return tricubekernel
def kerneldraw(size=1,kernel='tricube'):
if kernel=='tricube':
fn = lambda x: 1./2 + 35./32*x - 35./32*x**3 + 21./32*x**5 - 5./32*x**7
u = random(size=size)
rets = zeros(size)
for i in arange(size):
f = lambda x: u[i]-fn(x)
rets[i] = newton(f,0,restrict=(-1,1))
return rets
class composite_kde(object):
def __init__(self,kde1,kde2,operation='add'):
self.operation = operation
if self.operation == 'add':
self.comp1 = kde1
self.comp2 = kde2
self.norm = self.comp1.norm + self.comp2.norm
prop = self.comp1.properties.copy()
prop.update(self.comp2.properties)
self.properties = prop
def __call__(self,x):
if self.operation == 'add':
return (self.comp1(x) + self.comp2(x))/self.norm
def integrate_box(self,lo,hi,forcequad=False):
return self.comp1.integrate_box(lo,hi,forcequad=forcequad) + self.comp2.integrate_box(lo,hi,forcequad=forcequad)
def resample(self,size=1):
f1 = float(self.comp1.norm)/(self.comp1.norm+self.comp2.norm)
n1 = sum(random(size=size) < f1)
n2 = size-n1
samples = concatenate((self.comp1.resample(n1),self.comp2.resample(n2)))
shuffle(samples)
return samples
class kde(object):
def __init__(self,dataset,kernel='tricube',adaptive=True,k=None,lo=None,hi=None,\
fast=None,norm=None,bandwidth=None,weights=None):
self.dataset = atleast_1d(dataset)
self.weights = weights
self.n = size(dataset)
self.kernel = kernelfn(kernel)
self.kernelname = kernel
self.bandwidth = bandwidth
if k:
self.k = k
else:
self.k = self.n/4
if not norm:
self.norm=1.
else:
self.norm=norm
self.adaptive = adaptive
self.fast = fast
if adaptive:
            if fast is None:
fast = self.n < 5001
if fast:
d1,d2 = meshgrid(self.dataset,self.dataset)
diff = abs(d1-d2)
diffsort = sort(diff,axis=0)
self.h = diffsort[self.k,:]
##Attempt to handle larger datasets more easily:
else:
sortinds = argsort(self.dataset)
x = self.dataset[sortinds]
h = zeros(len(x))
for i in arange(len(x)):
lo = i - self.k
hi = i + self.k + 1
if lo < 0:
lo = 0
if hi > len(x):
hi = len(x)
diffs = abs(x[lo:hi]-x[i])
h[sortinds[i]] = sort(diffs)[self.k]
self.h = h
else:
self.gauss_kde = gaussian_kde(self.dataset)
self.properties=dict()
self.lo = lo
self.hi = hi
def shifted(self,x):
new = kde(self.dataset+x,self.kernel,self.adaptive,self.k,self.lo,self.hi,self.fast,self.norm)
return new
def renorm(self,norm):
self.norm = norm
def evaluate(self,points):
if not self.adaptive:
return self.gauss_kde(points)*self.norm
points = atleast_1d(points).astype(self.dataset.dtype)
k = self.k
npts = size(points)
h = self.h
X,Y = meshgrid(self.dataset,points)
H = resize(h,(npts,self.n))
U = (X-Y)/H.astype(float)
result = 1./self.n*1./H*self.kernel(U)
return sum(result,axis=1)*self.norm
__call__ = evaluate
def __imul__(self,factor):
self.renorm(factor)
return self
def __add__(self,other):
return composite_kde(self,other)
__radd__ = __add__
def integrate_box(self,low,high,npts=500,forcequad=False):
if not self.adaptive and not forcequad:
return self.gauss_kde.integrate_box_1d(low,high)*self.norm
return quad(self.evaluate,low,high)[0]
def draw(self,size=None):
return self.resample(size)
def resample(self,size=None):
size=int(size)
if not self.adaptive:
return squeeze(self.gauss_kde.resample(size=size))
if size is None:
size = self.n
indices = randint(0,self.n,size=size)
means = self.dataset[indices]
h = self.h[indices]
fuzz = kerneldraw(size,self.kernelname)*h
return squeeze(means + fuzz)
class generalpdf(object):
def __add__(self,other):
return compositepdf(self,other)
__radd__ = __add__
def __mul__(self,scale):
return scaledpdf(self,scale)
__rmul__ = __mul__
def renorm(self,factor=None):
self.norm *= factor
def __imul__(self,factor):
self.renorm(factor)
return self
class compositepdf(generalpdf):
def __init__(self,comp1,comp2):
self.comp1 = comp1
self.comp2 = comp2
self.norm = self.comp1.norm + self.comp2.norm
def __call__(self,x):
return self.comp1(x) + self.comp2(x)
def draw(self,size=1):
f1 = float(self.comp1.norm)/(self.comp1.norm+self.comp2.norm)
n1 = sum(random(size=size) < f1)
n2 = size-n1
samples = concatenate((self.comp1.draw(n1),self.comp2.draw(n2)))
shuffle(samples)
return samples
class scaledpdf(generalpdf):
def __init__(self,pdf,scale):
self.pdf = pdf
self.scale = scale
self.norm = scale * pdf.norm
def __call__(self,x):
return self.scale * self.pdf(x)
def draw(self,size=1):
return self.pdf.draw(size)
class powerlaw(generalpdf):
def __init__(self,alpha,xmin=0.5,xmax=10,norm=1.0):
self.alpha = alpha
self.xmin = xmin
self.xmax = xmax
self.norm = norm
self.plnorm = powerlawnorm(alpha,xmin,xmax)
def __call__(self,inpx):
x = atleast_1d(inpx)
y = self.norm*self.plnorm*x**self.alpha
y[where((x < self.xmin) | (x > self.xmax))] = 0
return y
def draw(self,size=1):
u = random(size=size)
a = self.alpha
if a==-1:
a = -1.00001 #hack to avoid -1...
C = self.plnorm
return ((u*(a+1))/C + self.xmin**(a+1))**(1./(a+1))
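# Illustrative note (added): draw() is inverse-transform sampling from the
# power-law CDF. For alpha=-2 on [0.5, 10] the median draw solves
# CDF(x) = 0.5, giving x = 1/1.05 ~= 0.95:
#   powerlaw(-2, 0.5, 10).draw(5)  # five samples, all within [0.5, 10]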
class polynomial(generalpdf):
def __init__(self,c,xmin=0.5,xmax=20,norm=1.0):
self.c = c
self.xmin = xmin
self.xmax = xmax
self.norm = norm
def __call__(self,x):
return np.polyval(self.c,x)
class triple_powerlaw(generalpdf):
def __init__(self,alpha1,alpha2,alpha3,xbreak1,xbreak2,xmin=0.5,xmax=20,norm=1.0):
self.alpha1 = alpha1
self.alpha2 = alpha2
self.alpha3 = alpha3
self.xbreak1 = xbreak1
self.xbreak2 = xbreak2
self.xmin = xmin
self.xmax = xmax
self.norm = norm
x1 = xbreak1; x2 = xbreak2
a1 = alpha1; a2 = alpha2; a3 = alpha3
if a1==-1:
a1 = -1.000001
if a2==-1:
a2 = -1.000001
if a3==-1:
a3 = -1.000001
self.A = (self.norm)/((x1**(a1 + 1) - xmin**(a1 + 1))/(a1 + 1) +
(x1**(a1 - a2)*(x2**(a2 +1) - x1**(a2+1)))/(a2 + 1) +
(x1**(a1 - a2)*(x2**(a2 - a3))*(xmax**(a3 + 1) -
x2**(a3 + 1)))/(a3 + 1))
self.B = self.A * x1**(a1 - a2)
self.C = self.B * x2**(a2 - a3)
self.f1 = quad(self,xmin,x1)[0]/self.norm
self.f2 = quad(self,x1,x2)[0]/self.norm
self.f3 = quad(self,x2,xmax)[0]/self.norm
self.plaw1 = powerlaw(alpha1,xmin,xbreak1)
self.plaw2 = powerlaw(alpha2,xbreak1,xbreak2)
self.plaw3 = powerlaw(alpha3,xbreak2,xmax)
def __call__(self,inpx):
x = atleast_1d(inpx)
lo = (x < self.xbreak1)
mid = (x >= self.xbreak1) & (x < self.xbreak2)
hi = (x >= self.xbreak2)
x1 = self.xbreak1; x2 = self.xbreak2
a1 = self.alpha1; a2 = self.alpha2; a3 = self.alpha3
return (lo * self.A * x**self.alpha1 +
mid * self.B * x**self.alpha2 +
hi * self.C * x**self.alpha3)
def draw(self,size=1):
u = random(size=size)
lo = (u < self.f1)
mid = (u >= self.f1) & (u < self.f2)
hi = (u >= self.f2)
return (self.plaw1.draw(size)*lo +
self.plaw2.draw(size)*mid +
self.plaw3.draw(size)*hi)
class broken_powerlaw(generalpdf):
def __init__(self,alpha1,alpha2,xbreak,xmin=0.5,xmax=20,norm=1.0):
self.alpha1 = alpha1
self.alpha2 = alpha2
self.xbreak = xbreak
self.xmin = xmin
self.xmax = xmax
self.norm = norm
def fn(x):
if x < xbreak:
return (x/xbreak)**alpha1
else:
return (x/xbreak)**alpha2
self.plawnorm = quad(fn,xmin,xmax)[0]/self.norm
self.f1 = quad(self,xmin,xbreak)[0]/self.norm
self.f2 = quad(self,xbreak,xmax)[0]/self.norm
self.plaw1 = powerlaw(alpha1,xmin,xbreak)
self.plaw2 = powerlaw(alpha2,xbreak,xmax)
def __call__(self,inpx):
x = atleast_1d(inpx)
lo = (x < self.xbreak)
hi = (x >= self.xbreak)
xb = self.xbreak
return 1./self.plawnorm * (lo*(x/xb)**self.alpha1 + hi*(x/xb)**self.alpha2)
def draw(self,size=1):
u = random(size=size)
lo = (u < self.f1)
hi = (u >= self.f1)
return self.plaw1.draw(size)*lo + self.plaw2.draw(size)*hi
class lognorm(generalpdf):
def __init__(self,mu,sig):
self.mu = mu*log(10)
self.sig = sig*log(10)
self.norm = 1.
def __call__(self,inpx):
mu,sig = (self.mu,self.sig)
x = atleast_1d(inpx)
return 1/(x*sig*sqrt(2*pi))*exp(-(log(x)-mu)**2/(2*sig*sig))
def draw(self,size=1):
rand = normal(size=size) * self.sig + self.mu
return exp(rand)
class uniform(generalpdf):
def __init__(self,xmin,xmax):
self.xmin=xmin
self.xmax=xmax
self.norm=1.0
    def __call__(self,inpx):
        x = atleast_1d(inpx)
        return x*0 + 1./(self.xmax-self.xmin)
    def draw(self,size=1):
        u = random(size)
        return u*(self.xmax-self.xmin)+self.xmin
class gaussian(generalpdf):
def __init__(self,mu,sig,norm=1):
self.mu = mu
self.sig = sig
self.norm = norm
def __call__(self,inpx):
x = atleast_1d(inpx)
return self.norm*1/sqrt(2*pi*self.sig**2)*exp(-(x-self.mu)**2/(2*self.sig**2))
#needs draw() written!
#class uniform_gausscutoffhi(generalpdf):
# def __init__(self,xmin,xmax,sig=0.1):
# self.xmin=xmin
# self.xmax=xmax
# self.sig=sig
# self.norm=1.0
# def __call__(self,inpx):
# x = atleast_1d(inpx)
def powerlawfn(alpha,xmin=.01,xmax=50,normed=True):
# if alpha == -1:
# C = 1/log(xmax/xmin)
# else:
# C = (1+alpha)/(xmax**(1+alpha)-xmin**(1+alpha))
# return C*x**(alpha)
if normed:
C = powerlawnorm(alpha,xmin,xmax)
else:
C=1
def fn(inpx):
x = atleast_1d(inpx)
y = C*x**(alpha)
y[where((x < xmin) | (x > xmax))] = 0
return y
return fn
def powerlawnorm(alpha,xmin,xmax):
if size(alpha)==1:
if alpha == -1:
C = 1/log(xmax/xmin)
else:
C = (1+alpha)/(xmax**(1+alpha)-xmin**(1+alpha))
else:
C = zeros(size(alpha))
w = where(alpha==-1)
        if len(w[0]) > 0:
C[w] = 1./log(xmax/xmin)*ones(len(w[0]))
nw = where(alpha != -1)
C[nw] = (1+alpha[nw])/(xmax**(1+alpha[nw])-xmin**(1+alpha[nw]))
else:
C = (1+alpha)/(xmax**(1+alpha)-xmin**(1+alpha))
return C
def eq2gal(r,d):
eq = FK5Coordinates(r,d)
gal = eq.convert(GalacticCoordinates)
return gal.l.degrees,gal.b.degrees
#A = cos(d*pi/180)*cos((r-282.25)*pi/180)
#B = sin(d*pi/180)*sin(62.6*pi/180) + cos(d*pi/180)*sin((r-282.25)*pi/180)*cos(62.6*pi/180)
#C = sin(d*pi/180)*cos(62.6*pi/180) - cos(d*pi/180)*sin((r-282.25)*pi/180)*sin(62.6*pi/180)
#b = arcsin(C)
#l = arccos(A/cos(b))*180/pi + 33
#b = b*180/pi
#return l,b
def append_field(rec,name,arr,dt=None):
arr = asarray(arr)
if dt is None:
dt = arr.dtype
newdtype = dtype(rec.dtype.descr + [(name,dt)])
newrec = empty(rec.shape,dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
newrec[name] = arr
return np.core.records.array(newrec)
def expfunc(p,x):
return p[2] + p[0]*exp(-x/p[1])
def fitexp(x,y,p0=[1,10,0.03]):
errfunc = lambda p,x,y: expfunc(p,x)-y
p1,success = leastsq(errfunc,p0[:],args=(x,y))
return p1
def save(obj,filename):
f = open(filename,'wb')
pickle.dump(obj,f)
f.close()
def load(filename):
f = open(filename,'rb')
obj = pickle.load(f)
f.close()
return obj
def dict2arr(indict):
keys = indict.keys()
keysarr = array(keys)
maxind = keysarr.max()
arr = zeros(maxind+1)
for key in keysarr:
arr[key] = indict[key]
return arr
def repeats(arr,return_index=False,return_counts=False):
#add "return_counts" something....i.e. saying how many there are of each
already = dict()
ininds=dict()
n=0
inds=[]
i=0
for el in arr:
if el in already:
already[el]+=1
if not el in ininds:
inds.append(i)
n+=1
ininds[el]=1
else:
ininds[el]+=1
else:
already[el] = 1
i+=1
if return_index:
return n,inds
if return_counts:
nreps = dict2arr(already)
return n,inds,nreps
else:
return n
def confreg(x,Lin,conf=0.68,tol=0.005):
L = Lin/trapz(Lin,x) #normalize likelihood
imax = argmax(L)
if imax==0:
imax=1
if imax==len(L)-1:
imax = len(L)-2
Lmax = L[imax]
xlo = x[0:imax]
xhi = x[imax:]
Llo = L[0:imax]
Lhi = L[imax:]
prob = 0
level=Lmax
dL = Lmax/1000.
while prob < conf:
level -= dL
i1 = argmin(abs(Llo-level))
i2 = argmin(abs(Lhi-level))+imax
prob = trapz(L[i1:i2],x[i1:i2])
if level < 0:
            print('error in calculating confidence interval: only reached %.2f%% of probability' % prob)
return nan,nan
return x[i1],x[i2]
def pctile(x,q):
    q /= 100.
    s = sort(x)
    n = size(x)
    return s[int(n*q)]
def qstd(x,quant=0.05,top=False,bottom=False):
"""returns std, ignoring outer 'quant' pctiles
"""
s = sort(x)
n = size(x)
lo = s[int(n*quant)]
hi = s[int(n*(1-quant))]
if top:
w = where(x>=lo)
elif bottom:
w = where(x<=hi)
else:
w = where((x>=lo)&(x<=hi))
return std(x[w])
def meshgrid3d(x,y,z):
gridx = x + 0*y[:,newaxis] + 0*z[:,newaxis,newaxis]
gridy = 0*x + y[:,newaxis] + 0*z[:,newaxis,newaxis]
gridz = 0*x + 0*y[:,newaxis] + z[:,newaxis,newaxis]
return gridx,gridy,gridz
### classes defining statistical distributions
class Distribution(object):
def __init__(self,pdf,cdf=None,name='',minval=-np.inf,maxval=np.inf,norm=None,
no_cdf=False,cdf_pts=100):
self.name = name
self.pdf = pdf
self.cdf = cdf
self.minval = minval
self.maxval = maxval
if not hasattr(self,'Ndists'):
self.Ndists = 1
if norm is None:
self.norm = quad(pdf,minval,maxval,full_output=1)[0]
else:
self.norm = norm
if cdf is None and not no_cdf and minval != -np.inf and maxval != np.inf:
pts = np.linspace(minval,maxval,cdf_pts)
pdfgrid = self(pts)
cdfgrid = pdfgrid.cumsum()/pdfgrid.cumsum().max()
cdf_fn = interpolate(pts,cdfgrid,s=0)
def cdf(x):
x = np.atleast_1d(x)
y = np.atleast_1d(cdf_fn(x))
y[np.where(x < self.minval)] = 0
y[np.where(x > self.maxval)] = 1
return y
self.cdf = cdf
def pctile(self,pct,res=1000):
grid = np.arange(self.minval,self.maxval,(self.maxval-self.minval)/float(res))
return grid[np.argmin(np.absolute(pct-self.cdf(grid)))]
def __add__(self,other):
return Combined_Distribution((self,other))
def __radd__(self,other):
return self.__add__(other)
def __call__(self,x):
y = self.pdf(x)
x = np.atleast_1d(x)
y = np.atleast_1d(y)
w = np.where((x < self.minval) | (x > self.maxval))
y[w] = 0
return y/self.norm
def plot(self,minval=None,maxval=None,fig=None,log=False,npts=500,**kwargs):
if minval is None:
minval = self.minval
if maxval is None:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to plot. (set minval, maxval kws)')
if log:
xs = np.logspace(np.log10(minval),np.log10(maxval),npts)
else:
xs = np.linspace(minval,maxval,npts)
plu.setfig(fig)
plt.plot(xs,self(xs),**kwargs)
plt.xlabel(self.name)
        plt.ylim(bottom=0)
def resample(self,N,minval=None,maxval=None,log=False,res=1e4):
if minval is None:
minval = self.minval
if maxval is None:
maxval = self.maxval
if maxval==np.inf or minval==-np.inf:
raise ValueError('must have finite upper and lower bounds to resample. (set minval, maxval kws)')
u = rand.random(size=N)
        if log:
            vals = np.logspace(log10(minval),log10(maxval),int(res))
        else:
            vals = np.linspace(minval,maxval,int(res))
ys = self.cdf(vals)
inds = np.digitize(u,ys)
return vals[inds]
class DoubleGauss_Distribution(Distribution):
def __init__(self,mu,siglo,sighi,**kwargs):
self.mu = mu
self.siglo = siglo
self.sighi = sighi
def pdf(x):
x = np.atleast_1d(x)
A = 1./(np.sqrt(2*np.pi)*(siglo+sighi)/2.)
ylo = A*np.exp(-(x-mu)**2/(2*siglo**2))
yhi = A*np.exp(-(x-mu)**2/(2*sighi**2))
y = x*0
wlo = np.where(x < mu)
whi = np.where(x >= mu)
y[wlo] = ylo[wlo]
y[whi] = yhi[whi]
return y
if 'minval' not in kwargs:
kwargs['minval'] = mu - 5*siglo
if 'maxval' not in kwargs:
kwargs['maxval'] = mu + 5*sighi
Distribution.__init__(self,pdf,**kwargs)
def __str__(self):
return '%s = %.1f +%.1f -%.1f' % (self.name,self.mu,self.sighi,self.siglo)
def resample(self,N,**kwargs):
lovals = self.mu - np.absolute(rand.normal(size=N)*self.siglo)
hivals = self.mu + np.absolute(rand.normal(size=N)*self.sighi)
u = rand.random(size=N)
whi = np.where(u < float(self.sighi)/(self.sighi + self.siglo))
wlo = np.where(u >= float(self.sighi)/(self.sighi + self.siglo))
vals = np.zeros(N)
vals[whi] = hivals[whi]
vals[wlo] = lovals[wlo]
return vals
class Gaussian_Distribution(Distribution):
def __init__(self,mu,sig,**kwargs):
self.mu = mu
self.sig = sig
def pdf(x):
return 1./np.sqrt(2*np.pi*sig**2)*np.exp(-(x-mu)**2/(2*sig**2))
if 'minval' not in kwargs:
kwargs['minval'] = mu - 5*sig
if 'maxval' not in kwargs:
kwargs['maxval'] = mu + 5*sig
Distribution.__init__(self,pdf,**kwargs)
def __str__(self):
return '%s = %.1f +/- %.1f' % (self.name,self.mu,self.sig)
def resample(self,N,**kwargs):
return rand.normal(size=N)*self.sig + self.mu
class KDE_Distribution(Distribution):
def __init__(self,samples,**kwargs):
self.samples = samples
self.kde = gaussian_kde(samples)
Distribution.__init__(self,self.kde,**kwargs)
def __str__(self):
return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
    def resample(self,N,**kwargs):
return self.kde.resample(N)
class Hist_Distribution(Distribution):
def __init__(self,samples,bins=10,smooth=0,**kwargs):
self.samples = samples
        hist,bins = np.histogram(samples,bins=bins,density=True)
self.bins = bins
self.hist = hist #debug
bins = (bins[1:] + bins[:-1])/2.
pdf = interpolate(bins,hist,s=smooth)
cdf = interpolate(bins,hist.cumsum()/hist.cumsum().max(),s=smooth)
if 'maxval' not in kwargs:
kwargs['maxval'] = samples.max()
if 'minval' not in kwargs:
kwargs['minval'] = samples.min()
Distribution.__init__(self,pdf,cdf,**kwargs)
def __str__(self):
return '%s = %.1f +/- %.1f' % (self.name,self.samples.mean(),self.samples.std())
def plothist(self,fig=None,**kwargs):
plu.setfig(fig)
plt.hist(self.samples,bins=self.bins,**kwargs)
def resample(self,N):
inds = rand.randint(len(self.samples),size=N)
return self.samples[inds]
class Box_Distribution(Distribution):
def __init__(self,lo,hi,**kwargs):
self.lo = lo
self.hi = hi
def pdf(x):
return 1./(hi-lo) + 0*x
def cdf(x):
x = np.atleast_1d(x)
y = (x - lo) / (hi - lo)
y[np.where(x < lo)] = 0
y[np.where(x > hi)] = 1
return y
Distribution.__init__(self,pdf,cdf,minval=lo,maxval=hi,**kwargs)
def __str__(self):
return '%.1f < %s < %.1f' % (self.lo,self.name,self.hi)
def resample(self,N):
return rand.random(size=N)*(self.maxval - self.minval) + self.minval
class Combined_Distribution(Distribution):
def __init__(self,dist_list,minval=-np.inf,maxval=np.inf,**kwargs):
self.dist_list = list(dist_list)
#self.Ndists = len(dist_list)
N = 0
for dist in dist_list:
N += dist.Ndists
self.Ndists = N
self.minval = minval
self.maxval = maxval
def pdf(x):
y = x*0
for dist in dist_list:
y += dist(x)
return y/N
Distribution.__init__(self,pdf,minval=minval,maxval=maxval,**kwargs)
def __getitem__(self,ind):
return self.dist_list[ind]
#def __add__(self,other):
# def pdf(x):
# return (self(x) + other(x))/(self.Ndists + other.Ndists)
# self.dist_list.append(other)
# maxval = max(self.maxval,other.maxval)
# minval = min(self.minval,other.minval)
# Distribution.__init__(self,pdf,maxval=maxval,minval=minval)
#def __radd__(self,other):
# return self.__add__(other)
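# Hedged usage sketch (an addition, not part of the original module). It
# assumes Distribution accepts a `name` keyword, as __str__ above suggests,
# and that `rand` is numpy.random as used throughout this module.
if __name__ == '__main__':
    g = Gaussian_Distribution(0., 1., name='x')
    draws = g.resample(10000)
    print(g)                          # e.g. "x = 0.0 +/- 1.0"
    print(draws.mean(), draws.std())  # should be close to 0 and 1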
|
nilq/baby-python
|
python
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from typing import List
from botbuilder.core import CardFactory, MessageFactory
from botbuilder.schema import ActionTypes, Activity, CardAction, HeroCard, InputHints
from . import Channel, Choice, ChoiceFactoryOptions
class ChoiceFactory:
@staticmethod
def for_channel(
channel_id: str,
choices: List[Choice],
text: str = None,
speak: str = None,
options: ChoiceFactoryOptions = None,
) -> Activity:
if channel_id is None:
channel_id = ""
if choices is None:
choices = []
# Find maximum title length
max_title_length = 0
for choice in choices:
if choice.action is not None and choice.action.title not in (None, ""):
l = len(choice.action.title)
else:
l = len(choice.value)
if l > max_title_length:
max_title_length = l
# Determine list style
supports_suggested_actions = Channel.supports_suggested_actions(
channel_id, len(choices)
)
supports_card_actions = Channel.supports_card_actions(channel_id, len(choices))
max_action_title_length = Channel.max_action_title_length(channel_id)
long_titles = max_title_length > max_action_title_length
if not long_titles and not supports_suggested_actions and supports_card_actions:
# SuggestedActions is the preferred approach, but for channels that don't
# support them (e.g. Teams, Cortana) we should use a HeroCard with CardActions
return ChoiceFactory.hero_card(choices, text, speak)
elif not long_titles and supports_suggested_actions:
# We always prefer showing choices using suggested actions. If the titles are too long, however,
# we'll have to show them as a text list.
return ChoiceFactory.suggested_action(choices, text, speak)
elif not long_titles and len(choices) <= 3:
# If the titles are short and there are 3 or less choices we'll use an inline list.
return ChoiceFactory.inline(choices, text, speak, options)
else:
# Show a numbered list.
            return ChoiceFactory.list_style(choices, text, speak, options)
@staticmethod
def inline(
choices: List[Choice],
text: str = None,
speak: str = None,
options: ChoiceFactoryOptions = None,
) -> Activity:
if choices is None:
choices = []
if options is None:
options = ChoiceFactoryOptions()
opt = ChoiceFactoryOptions(
inline_separator=options.inline_separator or ", ",
inline_or=options.inline_or or " or ",
inline_or_more=options.inline_or_more or ", or ",
            include_numbers=options.include_numbers if options.include_numbers is not None else True,
)
# Format list of choices
connector = ""
txt_builder: List[str] = [text]
txt_builder.append(" ")
for index, choice in enumerate(choices):
title = (
choice.action.title
if (choice.action is not None and choice.action.title is not None)
else choice.value
)
txt_builder.append(connector)
if opt.include_numbers is True:
txt_builder.append("(")
txt_builder.append(f"{index + 1}")
txt_builder.append(") ")
txt_builder.append(title)
if index == (len(choices) - 2):
connector = opt.inline_or if index == 0 else opt.inline_or_more
connector = connector or ""
else:
connector = opt.inline_separator or ""
# Return activity with choices as an inline list.
return MessageFactory.text(
"".join(txt_builder), speak, InputHints.expecting_input
)
@staticmethod
def list_style(
choices: List[Choice],
text: str = None,
speak: str = None,
options: ChoiceFactoryOptions = None,
):
if choices is None:
choices = []
if options is None:
options = ChoiceFactoryOptions()
if options.include_numbers is None:
include_numbers = True
else:
include_numbers = options.include_numbers
# Format list of choices
connector = ""
txt_builder = [text]
txt_builder.append("\n\n ")
for index, choice in enumerate(choices):
title = (
choice.action.title
if choice.action is not None and choice.action.title is not None
else choice.value
)
txt_builder.append(connector)
if include_numbers:
txt_builder.append(f"{index + 1}")
txt_builder.append(". ")
else:
txt_builder.append("- ")
txt_builder.append(title)
connector = "\n "
# Return activity with choices as a numbered list.
txt = "".join(txt_builder)
return MessageFactory.text(txt, speak, InputHints.expecting_input)
@staticmethod
def suggested_action(
choices: List[Choice], text: str = None, speak: str = None
) -> Activity:
# Return activity with choices as suggested actions
return MessageFactory.suggested_actions(
ChoiceFactory._extract_actions(choices),
text,
speak,
InputHints.expecting_input,
)
@staticmethod
def hero_card(
choices: List[Choice], text: str = None, speak: str = None
) -> Activity:
attachment = CardFactory.hero_card(
HeroCard(text=text, buttons=ChoiceFactory._extract_actions(choices))
)
# Return activity with choices as HeroCard with buttons
return MessageFactory.attachment(
attachment, None, speak, InputHints.expecting_input
)
@staticmethod
def _to_choices(choices: List[str]) -> List[Choice]:
if choices is None:
return []
else:
            return [Choice(value=choice) for choice in choices]
@staticmethod
def _extract_actions(choices: List[Choice]) -> List[CardAction]:
if choices is None:
choices = []
card_actions: List[CardAction] = []
for choice in choices:
if choice.action is not None:
card_action = choice.action
else:
card_action = CardAction(
type=ActionTypes.im_back, value=choice.value, title=choice.value
)
card_actions.append(card_action)
return card_actions
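# Hedged usage sketch (an addition, not part of the original file): builds an
# inline list of three choices. The Choice(value=...) constructor follows the
# botbuilder conventions assumed by the code above.
if __name__ == "__main__":
    _choices = [Choice(value="red"), Choice(value="green"), Choice(value="blue")]
    _activity = ChoiceFactory.inline(_choices, text="Pick a color")
    # With include_numbers defaulting to True this yields:
    # "Pick a color (1) red, (2) green, or (3) blue"
    print(_activity.text)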
|
nilq/baby-python
|
python
|
class DealResult(object):
'''
Details of a deal that has taken place.
'''
def __init__(self):
self.proposer = None
self.proposee = None
self.properties_transferred_to_proposer = []
self.properties_transferred_to_proposee = []
self.cash_transferred_from_proposer_to_proposee = 0
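# Minimal illustrative usage (an addition, not part of the original class;
# the property name below is a made-up example):
if __name__ == '__main__':
    deal = DealResult()
    deal.cash_transferred_from_proposer_to_proposee = 200
    deal.properties_transferred_to_proposer.append("Mayfair")
    print(vars(deal))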
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
#
# Copyright 2011-2015 Jeff Bush
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import re
import subprocess
import sys
sys.path.insert(0, '..')
import test_harness
class EmulatorProcess(object):
"""
This spawns the emulator process and LLDB in MI (machine interface) mode.
It allows communication with LLDB with it via stdin and stdout. It has the
__enter__ and __exit__ methods allowing it to be used in the 'with'
construct so it will automatically be torn down when the test is done.
"""
def __init__(self, hexfile):
self.hexfile = hexfile
self.elf_file = os.path.splitext(hexfile)[0] + '.elf'
self.output = None
self.emulator_proc = None
self.lldb_proc = None
self.outstr = None
self.instr = None
def __enter__(self):
emulator_args = [
test_harness.EMULATOR_PATH,
'-m',
'gdb',
'-v',
self.hexfile
]
if test_harness.DEBUG:
self.output = None
else:
self.output = open(os.devnull, 'w')
self.emulator_proc = subprocess.Popen(emulator_args, stdout=self.output,
stderr=subprocess.STDOUT)
lldb_args = [
test_harness.COMPILER_DIR + 'lldb-mi'
]
# XXX race condition: the emulator needs to be ready before
# lldb tries to connect to it.
try:
self.lldb_proc = subprocess.Popen(lldb_args, stdout=subprocess.PIPE,
stdin=subprocess.PIPE)
self.outstr = self.lldb_proc.stdin
self.instr = self.lldb_proc.stdout
except:
test_harness.kill_gently(self.emulator_proc)
raise
return self
def __exit__(self, *unused):
test_harness.kill_gently(self.emulator_proc)
test_harness.kill_gently(self.lldb_proc)
def send_command(self, cmd):
if test_harness.DEBUG:
print('LLDB send: ' + cmd)
self.outstr.write(str.encode(cmd + '\n'))
self.outstr.flush()
return self.wait_response()
def wait_response(self):
response = ''
while True:
response += self.instr.read(1).decode('utf-8')
if response.endswith('^done'):
break
if test_harness.DEBUG:
print('LLDB recv: ' + response)
return response
def wait_stop(self):
current_line = ''
while True:
inchar = self.instr.read(1).decode('utf-8')
current_line += inchar
if inchar == '\n':
if test_harness.DEBUG:
print('LLDB recv: ' + current_line[:-1])
if current_line.startswith('*stopped'):
break
current_line = ''
FRAME_RE = re.compile(
'frame #[0-9]+:( 0x[0-9a-f]+)? [a-zA-Z_\\.0-9]+`(?P<function>[a-zA-Z_0-9][a-zA-Z_0-9]+)')
AT_RE = re.compile(' at (?P<filename>[a-z_A-Z][a-z\\._A-Z]+):(?P<line>[0-9]+)')
def parse_stack_crawl(response):
"""
Given text response from the debugger containing a stack crawl, this will
return a list of tuples where each entry represents the function name,
filename, and line number of the call site.
"""
stack_info = []
for line in response.split('\\n'):
frame_match = FRAME_RE.search(line)
if frame_match:
func = frame_match.group('function')
at_match = AT_RE.search(line)
if at_match:
stack_info += [(func, at_match.group('filename'),
int(at_match.group('line')))]
else:
stack_info += [(func, '', 0)]
return stack_info
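# Illustrative example (an assumption about the LLDB-MI output format, based
# on the regexes above): a response line such as
#   frame #0: 0x00000100 program.elf`func2 at test_program.c:27
# parses to the tuple ('func2', 'test_program.c', 27).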
@test_harness.test(['emulator'])
def lldb(*unused):
"""This mainly validates that LLDB is reading symbols correctly."""
hexfile = test_harness.build_program(
['test_program.c'], opt_level='-O0', cflags=['-g'])
with EmulatorProcess(hexfile) as conn:
conn.send_command('file "' + test_harness.WORK_DIR + '/program.elf"')
conn.send_command('gdb-remote 8000\n')
response = conn.send_command(
'breakpoint set --file test_program.c --line 27')
if 'Breakpoint 1: where = program.elf`func2 + 100 at test_program.c:27' not in response:
raise test_harness.TestException(
'breakpoint: did not find expected value ' + response)
conn.send_command('c')
conn.wait_stop()
expected_stack = [
('func2', 'test_program.c', 27),
('func1', 'test_program.c', 35),
('main', 'test_program.c', 41),
('do_main', '', 0)
]
response = conn.send_command('bt')
crawl = parse_stack_crawl(response)
if crawl != expected_stack:
raise test_harness.TestException(
'stack crawl mismatch ' + str(crawl))
response = conn.send_command('print value')
if '= 67' not in response:
raise test_harness.TestException(
'print value: Did not find expected value ' + response)
response = conn.send_command('print result')
if '= 128' not in response:
raise test_harness.TestException(
'print result: Did not find expected value ' + response)
# Up to previous frame
conn.send_command('frame select --relative=1')
response = conn.send_command('print a')
if '= 12' not in response:
raise test_harness.TestException(
'print a: Did not find expected value ' + response)
response = conn.send_command('print b')
if '= 67' not in response:
raise test_harness.TestException(
'print b: Did not find expected value ' + response)
conn.send_command('step')
conn.wait_stop()
response = conn.send_command('print result')
if '= 64' not in response:
raise test_harness.TestException(
'print b: Did not find expected value ' + response)
test_harness.execute_tests()
|
nilq/baby-python
|
python
|
import time, pickle
from meta_mb.logger import logger
from meta_mb.workers.base import Worker
class WorkerData(Worker):
def __init__(self, simulation_sleep):
super().__init__()
self.simulation_sleep = simulation_sleep
self.env = None
self.env_sampler = None
self.dynamics_sample_processor = None
self.samples_data_arr = []
def construct_from_feed_dict(
self,
policy_pickle,
env_pickle,
baseline_pickle,
dynamics_model_pickle,
feed_dict
):
from meta_mb.samplers.sampler import Sampler
from meta_mb.samplers.mb_sample_processor import ModelSampleProcessor
env = pickle.loads(env_pickle)
policy = pickle.loads(policy_pickle)
baseline = pickle.loads(baseline_pickle)
self.env = env
self.env_sampler = Sampler(env=env, policy=policy, **feed_dict['env_sampler'])
self.dynamics_sample_processor = ModelSampleProcessor(
baseline=baseline,
**feed_dict['dynamics_sample_processor']
)
def prepare_start(self):
initial_random_samples = self.queue.get()
self.step(initial_random_samples)
self.push()
def step(self, random=False):
time_step = time.time()
'''------------- Obtaining samples from the environment -----------'''
if self.verbose:
logger.log("Data is obtaining samples...")
env_paths = self.env_sampler.obtain_samples(
log=True,
random=random,
log_prefix='Data-EnvSampler-',
)
'''-------------- Processing environment samples -------------------'''
if self.verbose:
logger.log("Data is processing environment samples...")
samples_data = self.dynamics_sample_processor.process_samples(
env_paths,
log=True,
log_prefix='Data-EnvTrajs-',
)
self.samples_data_arr.append(samples_data)
time_step = time.time() - time_step
time_sleep = max(self.simulation_sleep - time_step, 0)
time.sleep(time_sleep)
logger.logkv('Data-TimeStep', time_step)
logger.logkv('Data-TimeSleep', time_sleep)
def _synch(self, policy_state_pickle):
time_synch = time.time()
policy_state = pickle.loads(policy_state_pickle)
assert isinstance(policy_state, dict)
self.env_sampler.policy.set_shared_params(policy_state)
time_synch = time.time() - time_synch
logger.logkv('Data-TimeSynch', time_synch)
def push(self):
time_push = time.time()
self.queue_next.put(pickle.dumps(self.samples_data_arr))
self.samples_data_arr = []
time_push = time.time() - time_push
logger.logkv('Data-TimePush', time_push)
def set_stop_cond(self):
if self.itr_counter >= self.n_itr:
self.stop_cond.set()
|
nilq/baby-python
|
python
|
# encoding: utf-8
"""
Step implementations for paragraph format-related features.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from behave import given, then, when
from docx import Document
from docx.enum.text import WD_ALIGN_PARAGRAPH, WD_LINE_SPACING
from docx.shared import Pt
from docx.text.tabstops import TabStops
from helpers import test_docx
# given ===================================================
@given('a paragraph format')
def given_a_paragraph_format(context):
document = Document(test_docx('tab-stops'))
context.paragraph_format = document.paragraphs[0].paragraph_format
@given('a paragraph format having {prop_name} set {setting}')
def given_a_paragraph_format_having_prop_set(context, prop_name, setting):
style_name = {
'to inherit': 'Normal',
'On': 'Base',
'Off': 'Citation',
}[setting]
document = Document(test_docx('sty-known-styles'))
context.paragraph_format = document.styles[style_name].paragraph_format
@given('a paragraph format having {setting} line spacing')
def given_a_paragraph_format_having_setting_line_spacing(context, setting):
style_name = {
'inherited': 'Normal',
'14 pt': 'Base',
'double': 'Citation',
}[setting]
document = Document(test_docx('sty-known-styles'))
context.paragraph_format = document.styles[style_name].paragraph_format
@given('a paragraph format having {setting} space {side}')
def given_a_paragraph_format_having_setting_spacing(context, setting, side):
style_name = 'Normal' if setting == 'inherited' else 'Base'
document = Document(test_docx('sty-known-styles'))
context.paragraph_format = document.styles[style_name].paragraph_format
@given('a paragraph format having {type} alignment')
def given_a_paragraph_format_having_align_type_alignment(context, type):
style_name = {
'inherited': 'Normal',
'center': 'Base',
'right': 'Citation',
}[type]
document = Document(test_docx('sty-known-styles'))
context.paragraph_format = document.styles[style_name].paragraph_format
@given('a paragraph format having {type} indent of {value}')
def given_a_paragraph_format_having_type_indent_value(context, type, value):
style_name = {
'inherit': 'Normal',
'18 pt': 'Base',
'17.3 pt': 'Base',
'-17.3 pt': 'Citation',
'46.1 pt': 'Citation',
}[value]
document = Document(test_docx('sty-known-styles'))
context.paragraph_format = document.styles[style_name].paragraph_format
# when ====================================================
@when('I assign {value} to paragraph_format.line_spacing')
def when_I_assign_value_to_paragraph_format_line_spacing(context, value):
new_value = {
'Pt(14)': Pt(14),
'2': 2,
}.get(value)
new_value = float(value) if new_value is None else new_value
context.paragraph_format.line_spacing = new_value
@when('I assign {value} to paragraph_format.line_spacing_rule')
def when_I_assign_value_to_paragraph_format_line_rule(context, value):
new_value = {
'None': None,
'WD_LINE_SPACING.EXACTLY': WD_LINE_SPACING.EXACTLY,
'WD_LINE_SPACING.MULTIPLE': WD_LINE_SPACING.MULTIPLE,
'WD_LINE_SPACING.SINGLE': WD_LINE_SPACING.SINGLE,
'WD_LINE_SPACING.DOUBLE': WD_LINE_SPACING.DOUBLE,
'WD_LINE_SPACING.AT_LEAST': WD_LINE_SPACING.AT_LEAST,
'WD_LINE_SPACING.ONE_POINT_FIVE': WD_LINE_SPACING.ONE_POINT_FIVE,
}[value]
paragraph_format = context.paragraph_format
paragraph_format.line_spacing_rule = new_value
@when('I assign {value} to paragraph_format.alignment')
def when_I_assign_value_to_paragraph_format_alignment(context, value):
new_value = {
'None': None,
'WD_ALIGN_PARAGRAPH.CENTER': WD_ALIGN_PARAGRAPH.CENTER,
'WD_ALIGN_PARAGRAPH.RIGHT': WD_ALIGN_PARAGRAPH.RIGHT,
}[value]
paragraph_format = context.paragraph_format
paragraph_format.alignment = new_value
@when('I assign {value} to paragraph_format.space_{side}')
def when_I_assign_value_to_paragraph_format_space(context, value, side):
paragraph_format = context.paragraph_format
prop_name = 'space_%s' % side
new_value = {
'None': None,
'Pt(12)': Pt(12),
'Pt(18)': Pt(18),
}[value]
setattr(paragraph_format, prop_name, new_value)
@when('I assign {value} to paragraph_format.{type_}_indent')
def when_I_assign_value_to_paragraph_format_indent(context, value, type_):
paragraph_format = context.paragraph_format
prop_name = '%s_indent' % type_
value = None if value == 'None' else Pt(float(value.split()[0]))
setattr(paragraph_format, prop_name, value)
@when('I assign {value} to paragraph_format.{prop_name}')
def when_I_assign_value_to_paragraph_format_prop(context, value, prop_name):
paragraph_format = context.paragraph_format
value = {'None': None, 'True': True, 'False': False}[value]
setattr(paragraph_format, prop_name, value)
# then =====================================================
@then('paragraph_format.tab_stops is a TabStops object')
def then_paragraph_format_tab_stops_is_a_tabstops_object(context):
tab_stops = context.paragraph_format.tab_stops
assert isinstance(tab_stops, TabStops)
@then('paragraph_format.alignment is {value}')
def then_paragraph_format_alignment_is_value(context, value):
expected_value = {
'None': None,
'WD_ALIGN_PARAGRAPH.LEFT': WD_ALIGN_PARAGRAPH.LEFT,
'WD_ALIGN_PARAGRAPH.CENTER': WD_ALIGN_PARAGRAPH.CENTER,
'WD_ALIGN_PARAGRAPH.RIGHT': WD_ALIGN_PARAGRAPH.RIGHT,
}[value]
paragraph_format = context.paragraph_format
assert paragraph_format.alignment == expected_value
@then('paragraph_format.line_spacing is {value}')
def then_paragraph_format_line_spacing_is_value(context, value):
expected_value = (
None if value == 'None' else
float(value) if '.' in value else
int(value)
)
paragraph_format = context.paragraph_format
if expected_value is None or isinstance(expected_value, int):
assert paragraph_format.line_spacing == expected_value
else:
assert abs(paragraph_format.line_spacing - expected_value) < 0.001
@then('paragraph_format.line_spacing_rule is {value}')
def then_paragraph_format_line_spacing_rule_is_value(context, value):
expected_value = {
'None': None,
'WD_LINE_SPACING.EXACTLY': WD_LINE_SPACING.EXACTLY,
'WD_LINE_SPACING.MULTIPLE': WD_LINE_SPACING.MULTIPLE,
'WD_LINE_SPACING.SINGLE': WD_LINE_SPACING.SINGLE,
'WD_LINE_SPACING.DOUBLE': WD_LINE_SPACING.DOUBLE,
'WD_LINE_SPACING.AT_LEAST': WD_LINE_SPACING.AT_LEAST,
'WD_LINE_SPACING.ONE_POINT_FIVE': WD_LINE_SPACING.ONE_POINT_FIVE,
}[value]
paragraph_format = context.paragraph_format
assert paragraph_format.line_spacing_rule == expected_value
@then('paragraph_format.space_{side} is {value}')
def then_paragraph_format_space_side_is_value(context, side, value):
expected_value = None if value == 'None' else int(value)
prop_name = 'space_%s' % side
paragraph_format = context.paragraph_format
actual_value = getattr(paragraph_format, prop_name)
assert actual_value == expected_value
@then('paragraph_format.{type_}_indent is {value}')
def then_paragraph_format_type_indent_is_value(context, type_, value):
expected_value = None if value == 'None' else int(value)
prop_name = '%s_indent' % type_
paragraph_format = context.paragraph_format
actual_value = getattr(paragraph_format, prop_name)
assert actual_value == expected_value
@then('paragraph_format.{prop_name} is {value}')
def then_paragraph_format_prop_name_is_value(context, prop_name, value):
expected_value = {'None': None, 'True': True, 'False': False}[value]
paragraph_format = context.paragraph_format
actual_value = getattr(paragraph_format, prop_name)
assert actual_value == expected_value
|
nilq/baby-python
|
python
|
line_one = "The sky has given over"
line_one_words = line_one.split()
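# str.split() with no arguments splits on runs of whitespace, so here
# line_one_words == ['The', 'sky', 'has', 'given', 'over']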
|
nilq/baby-python
|
python
|
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .distribution import NoDistribution
from .tree import Tree, SplitNode, LeafNode
__all__ = ["BART"]
class BaseBART(NoDistribution):
def __init__(self, X, Y, m=200, alpha=0.25, *args, **kwargs):
self.X = X
self.Y = Y
super().__init__(shape=X.shape[0], dtype="float64", testval=0, *args, **kwargs)
if self.X.ndim != 2:
raise ValueError("The design matrix X must have two dimensions")
if self.Y.ndim != 1:
raise ValueError("The response matrix Y must have one dimension")
if self.X.shape[0] != self.Y.shape[0]:
raise ValueError(
"The design matrix X and the response matrix Y must have the same number of elements"
)
if not isinstance(m, int):
raise ValueError("The number of trees m type must be int")
if m < 1:
raise ValueError("The number of trees m must be greater than zero")
if alpha <= 0 or 1 <= alpha:
raise ValueError(
"The value for the alpha parameter for the tree structure "
"must be in the interval (0, 1)"
)
self.num_observations = X.shape[0]
self.num_variates = X.shape[1]
self.m = m
self.alpha = alpha
self.trees = self.init_list_of_trees()
self.mean = fast_mean()
self.prior_prob_leaf_node = compute_prior_probability(alpha)
def init_list_of_trees(self):
initial_value_leaf_nodes = self.Y.mean() / self.m
initial_idx_data_points_leaf_nodes = np.array(range(self.num_observations), dtype="int32")
list_of_trees = []
for i in range(self.m):
new_tree = Tree.init_tree(
tree_id=i,
leaf_node_value=initial_value_leaf_nodes,
idx_data_points=initial_idx_data_points_leaf_nodes,
)
list_of_trees.append(new_tree)
# Diff trick to speed computation of residuals. From Section 3.1 of Kapelner, A and Bleich, J.
# bartMachine: A Powerful Tool for Machine Learning in R. ArXiv e-prints, 2013
# The sum_trees_output will contain the sum of the predicted output for all trees.
# When R_j is needed we subtract the current predicted output for tree T_j.
self.sum_trees_output = np.full_like(self.Y, self.Y.mean())
return list_of_trees
def __iter__(self):
return iter(self.trees)
    def _repr_latex_(self):
raise NotImplementedError
def get_available_predictors(self, idx_data_points_split_node):
possible_splitting_variables = []
for j in range(self.num_variates):
x_j = self.X[idx_data_points_split_node, j]
x_j = x_j[~np.isnan(x_j)]
for i in range(1, len(x_j)):
if x_j[i - 1] != x_j[i]:
possible_splitting_variables.append(j)
break
return possible_splitting_variables
def get_available_splitting_rules(self, idx_data_points_split_node, idx_split_variable):
x_j = self.X[idx_data_points_split_node, idx_split_variable]
x_j = x_j[~np.isnan(x_j)]
values, indices = np.unique(x_j, return_index=True)
        # The last value is not considered, since choosing it as the split
        # value would leave the right subtree empty.
return values[:-1], indices[:-1]
def grow_tree(self, tree, index_leaf_node):
# This can be unsuccessful when there are not available predictors
current_node = tree.get_node(index_leaf_node)
available_predictors = self.get_available_predictors(current_node.idx_data_points)
if not available_predictors:
return False, None
index_selected_predictor = discrete_uniform_sampler(len(available_predictors))
selected_predictor = available_predictors[index_selected_predictor]
available_splitting_rules, _ = self.get_available_splitting_rules(
current_node.idx_data_points, selected_predictor
)
index_selected_splitting_rule = discrete_uniform_sampler(len(available_splitting_rules))
selected_splitting_rule = available_splitting_rules[index_selected_splitting_rule]
new_split_node = SplitNode(
index=index_leaf_node,
idx_split_variable=selected_predictor,
split_value=selected_splitting_rule,
)
left_node_idx_data_points, right_node_idx_data_points = self.get_new_idx_data_points(
new_split_node, current_node.idx_data_points
)
left_node_value = self.draw_leaf_value(left_node_idx_data_points)
right_node_value = self.draw_leaf_value(right_node_idx_data_points)
new_left_node = LeafNode(
index=current_node.get_idx_left_child(),
value=left_node_value,
idx_data_points=left_node_idx_data_points,
)
new_right_node = LeafNode(
index=current_node.get_idx_right_child(),
value=right_node_value,
idx_data_points=right_node_idx_data_points,
)
tree.grow_tree(index_leaf_node, new_split_node, new_left_node, new_right_node)
return True, index_selected_predictor
def get_new_idx_data_points(self, current_split_node, idx_data_points):
idx_split_variable = current_split_node.idx_split_variable
split_value = current_split_node.split_value
left_idx = self.X[idx_data_points, idx_split_variable] <= split_value
left_node_idx_data_points = idx_data_points[left_idx]
right_node_idx_data_points = idx_data_points[~left_idx]
return left_node_idx_data_points, right_node_idx_data_points
def get_residuals(self):
"""Compute the residuals."""
R_j = self.Y - self.sum_trees_output
return R_j
def get_residuals_loo(self, tree):
"""Compute the residuals without leaving the passed tree out."""
R_j = self.Y - (self.sum_trees_output - tree.predict_output(self.num_observations))
return R_j
def draw_leaf_value(self, idx_data_points):
""" Draw the residual mean."""
R_j = self.get_residuals()[idx_data_points]
draw = self.mean(R_j)
return draw
def compute_prior_probability(alpha):
"""
Calculate the probability of the node being a LeafNode (1 - p(being SplitNode)).
Taken from equation 19 in [Rockova2018].
Parameters
----------
alpha : float
Returns
-------
list with probabilities for leaf nodes
References
----------
.. [Rockova2018] Veronika Rockova, Enakshi Saha (2018). On the theory of BART.
arXiv, `link <https://arxiv.org/abs/1810.00787>`__
"""
prior_leaf_prob = [0]
depth = 1
while prior_leaf_prob[-1] < 1:
prior_leaf_prob.append(1 - alpha ** depth)
depth += 1
return prior_leaf_prob
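# Worked example (illustrative, not part of the original module): for
# alpha = 0.25 the entries are 1 - 0.25**depth for depth = 1, 2, 3, ...,
# i.e. compute_prior_probability(0.25)[:4] == [0, 0.75, 0.9375, 0.984375]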
def fast_mean():
"""If available use Numba to speed up the computation of the mean."""
try:
from numba import jit
except ImportError:
return np.mean
@jit
def mean(a):
count = a.shape[0]
suma = 0
for i in range(count):
suma += a[i]
return suma / count
return mean
def discrete_uniform_sampler(upper_value):
"""Draw from the uniform distribution with bounds [0, upper_value)."""
return int(np.random.random() * upper_value)
class BART(BaseBART):
"""
BART distribution.
Distribution representing a sum over trees
Parameters
----------
X :
The design matrix.
Y :
The response vector.
m : int
Number of trees
alpha : float
        Controls the prior probability over the depth of the trees. Must be in the interval (0, 1),
        although it is recommended to be in the interval (0, 0.5].
"""
def __init__(self, X, Y, m=200, alpha=0.25):
super().__init__(X, Y, m, alpha)
def _str_repr(self, name=None, dist=None, formatting="plain"):
if dist is None:
dist = self
X = (type(self.X),)
Y = (type(self.Y),)
alpha = self.alpha
m = self.m
if "latex" in formatting:
return f"$\\text{{{name}}} \\sim \\text{{BART}}(\\text{{alpha = }}\\text{{{alpha}}}, \\text{{m = }}\\text{{{m}}})$"
else:
return f"{name} ~ BART(alpha = {alpha}, m = {m})"
|
nilq/baby-python
|
python
|
PANDA_MODELS = dict(
gt_joints='dream-panda-gt_joints--495831',
predict_joints='dream-panda-predict_joints--173472',
)
KUKA_MODELS = dict(
gt_joints='dream-kuka-gt_joints--192228',
predict_joints='dream-kuka-predict_joints--990681',
)
BAXTER_MODELS = dict(
gt_joints='dream-baxter-gt_joints--510055',
predict_joints='dream-baxter-predict_joints--519984',
)
OWI_MODELS = dict(
predict_joints='craves-owi535-predict_joints--295440',
)
PANDA_ABLATION_REFERENCE_POINT_MODELS = dict(
link0='dream-panda-gt_joints-reference_point=link0--864695',
link1='dream-panda-gt_joints-reference_point=link1--499756',
link2='dream-panda-gt_joints-reference_point=link2--905185',
link4='dream-panda-gt_joints-reference_point=link4--913645',
link5='dream-panda-gt_joints-reference_point=link5--669469',
link9='dream-panda-gt_joints-reference_point=hand--588677',
)
PANDA_ABLATION_ANCHOR_MODELS = dict(
link0='dream-panda-predict_joints-anchor=link0--90648',
link1='dream-panda-predict_joints-anchor=link1--375503',
link2='dream-panda-predict_joints-anchor=link2--463951',
link4='dream-panda-predict_joints-anchor=link4--388856',
link5='dream-panda-predict_joints-anchor=link5--249745',
link9='dream-panda-predict_joints-anchor=link9--106543',
random_all='dream-panda-predict_joints-anchor=random_all--116995',
random_top3='dream-panda-predict_joints-anchor=random_top_3_largest--65378',
random_top5=PANDA_MODELS['predict_joints'],
)
PANDA_ABLATION_ITERATION_MODELS = {
'n_train_iter=1': 'dream-panda-predict_joints-n_train_iter=1--752826',
'n_train_iter=2': 'dream-panda-predict_joints-n_train_iter=2--949003',
'n_train_iter=5': 'dream-panda-predict_joints-n_train_iter=5--315150',
}
RESULT_ID = 1804
DREAM_PAPER_RESULT_IDS = [
f'dream-{robot}-dream-all-models--{RESULT_ID}' for robot in ('panda', 'kuka', 'baxter')
]
DREAM_KNOWN_ANGLES_RESULT_IDS = [
f'dream-{robot}-knownq--{RESULT_ID}' for robot in ('panda', 'kuka', 'baxter')
]
DREAM_UNKNOWN_ANGLES_RESULT_IDS = [
f'dream-{robot}-unknownq--{RESULT_ID}' for robot in ('panda', 'kuka', 'baxter')
]
PANDA_KNOWN_ANGLES_ITERATIVE_RESULT_IDS = [
f'dream-panda-orb-knownq--{RESULT_ID}',
f'dream-panda-orb-knownq-online--{RESULT_ID}'
]
CRAVES_LAB_RESULT_IDS = [
f'craves-lab-unknownq--{RESULT_ID}'
]
CRAVES_YOUTUBE_RESULT_IDS = [
f'craves-youtube-unknownq-focal={focal}--{RESULT_ID}' for focal in (500, 750, 1000, 1250, 1500, 1750, 2000)
]
PANDA_KNOWN_ANGLES_ABLATION_RESULT_IDS = [
f'dream-panda-orb-knownq-link{link_id}--{RESULT_ID}' for link_id in (0, 1, 2, 4, 5, 9)
]
PANDA_UNKNOWN_ANGLES_ABLATION_RESULT_IDS = [
f'dream-panda-orb-unknownq-{anchor}--{RESULT_ID}'
for anchor in ('link5', 'link2', 'link1', 'link0', 'link4', 'link9', 'random_all', 'random_top5', 'random_top3')
]
PANDA_ITERATIONS_ABLATION_RESULT_IDS = [
f'dream-panda-orb-train_K={train_K}--{RESULT_ID}'
for train_K in (1, 2, 3, 5)
]
|
nilq/baby-python
|
python
|
from datetime import datetime
import psycopg2
from app.database_config import init_db
from app.api.v2.models.user_models import UsersModel
from app.api.v2.views.authentication import SignIn
def get_timestamp():
    return datetime.now().strftime("%Y-%m-%d %H:%M:%S")
class IncidentsModel():
""" Docstring for my incidents model """
def __init__(self):
self.db = init_db()
self.status = "Draft"
        self.createdOn = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
""" save our data and appends to the database """
def save(self, comment, location, images, videos, createdBy, incidentType):
incident_data = {
"comment": comment,
"createdBy": createdBy,
"createdOn": self.createdOn,
"images": images,
"location": location,
"status": self.status,
"incidentType": incidentType,
"videos": videos
}
query = """INSERT INTO incidents (location, comment, createdBy,
status, createdOn, images, videos, incidentType) VALUES (
%(location)s, %(comment)s, %(createdBy)s, %(status)s,
%(createdOn)s, %(images)s, %(videos)s, %(incidentType)s)"""
currsor = self.db.cursor()
currsor.execute(query, incident_data)
self.db.commit()
return incident_data
"""get all the incidents """
def get_incidents(self):
db_connection = self.db
currsor = db_connection.cursor()
currsor.execute("""SELECT incidents_id, incidentType, status, comment,
createdBy, createdOn, location, images, videos
FROM incidents""")
data = currsor.fetchall()
response = []
for key, records in enumerate(data):
incidents_id, incidentType, status, comment, createdBy, createdOn, location, images, videos = records
datar = dict(
incidents_id=int(incidents_id),
incidentType=incidentType,
status=status,
comment=comment,
createdBy=createdBy,
createdOn=createdOn,
location=location,
images=images,
videos=videos
)
response.append(datar)
return response
def delete_redflag(self, id):
""" To delete redflag and incident details """
db_connection = self.db
currsor = db_connection.cursor()
currsor.execute(f"DELETE FROM incidents WHERE incidents_id = {id};")
db_connection.commit()
return "Incident record has been deleted"
def edit_redflags(self, incidents_id, createdBy):
""" Query to edit redflag details """
query = """ UPDATE incidents
SET createdBy = %s
WHERE incidents_id = %s"""
db_connection = self.db
currsor = db_connection.cursor()
        # Both branches previously ran the same statement, so the admin check
        # is not needed for the update itself.
        currsor.execute(query, (createdBy, incidents_id))
db_connection.commit()
def get_incident_by_id(self, id):
""" Get redflag or interevention details by id"""
db_connection = self.db
currsor = db_connection.cursor()
currsor.execute(f"SELECT * FROM incidents WHERE incidents_id = {id};")
incident = currsor.fetchall()
response = []
for key, records in enumerate(incident):
incidents_id, incidentType, status, comment, createdBy, createdOn, location, images, videos = records
datar = dict(
incidents_id=int(incidents_id),
incidentType=incidentType,
status=status,
comment=comment,
createdBy=createdBy,
createdOn=createdOn,
location=location,
images=images,
videos=videos
)
response.append(datar)
return response
def update_location(self, location, incidents_id):
""" Query to update user location details """
db_connection = self.db
currsor = db_connection.cursor()
currsor.execute(
""" UPDATE Incidents
SET location = %s
WHERE incidents_id=%s""", (location, incidents_id)
)
db_connection.commit()
def update_comment(self, comment, incidents_id):
""" Query to update user comment details """
db_connection = self.db
currsor = db_connection.cursor()
currsor.execute(
""" UPDATE Incidents
SET comment = %s
WHERE incidents_id=%s""", (comment, incidents_id)
)
db_connection.commit()
def check_existing_comment(self, comment):
""" To check comment isn't the same """
user_connection = self.db
currsor = user_connection.cursor()
currsor.execute("""SELECT * FROM users WHERE comment=%s""", (comment, ))
comment = currsor.fetchone()
user_connection.commit()
        if comment:
            return True
        return False
def update_status(self, status, incidents_id):
""" Query for admin to update status details """
db_connection = self.db
currsor = db_connection.cursor()
currsor.execute(
""" UPDATE Incidents
SET status = %s
WHERE incidents_id=%s""", (status, incidents_id)
)
db_connection.commit()
def get_user_role(self, current_user):
""" Check if admin or not """
db_connection = self.db
currsor = db_connection.cursor()
currsor.execute(
"""SELECT * FROM Users WHERE username = %s;""", (current_user,)
)
admin_status = currsor.fetchall()[0][8]
return admin_status
def get_created_by(self, current_user):
""" Get who created """
db_connection = self.db
currsor = db_connection.cursor()
currsor.execute(
"""SELECT * FROM Users WHERE username = %s;""", (current_user,)
)
created_by = currsor.fetchall()[0][4]
return created_by
def get_email_update(self, id):
""" Get email after a certain update """
db_connection = self.db
currsor = db_connection.cursor()
currsor.execute(
"""SELECT createdBy FROM Incidents WHERE incidents_id = %s;""", (id,)
)
        data = currsor.fetchone()
        return data
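# Hedged usage sketch (an addition, not in the original module). It assumes a
# reachable Postgres instance configured through app.database_config.init_db
# and an existing `incidents` table, so it is guarded behind __main__.
if __name__ == '__main__':
    model = IncidentsModel()
    model.save(comment="Pothole on 5th Ave", location="-1.28, 36.82",
               images="[]", videos="[]", createdBy="jdoe",
               incidentType="red-flag")
    print(model.get_incidents())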
|
nilq/baby-python
|
python
|
from pathlib import Path
from classy_config import ConfigValue, register_loader
from classy_config.config import register_config
def _stub_loader(filepath: Path) -> dict:
output = {}
with filepath.open() as f:
for line in f.readlines():
key, value = line.split(">")
output[key] = value.strip()
return output
class TestRegisterAndUseLoader:
def test_register_loader(self):
register_loader(".test", _stub_loader)
def test_use_loader(self):
register_config("tests/configs/test-config.test", prefix="test-loader")
def test_loaded_values(self):
assert ConfigValue("test-loader.value0", int) == 0
assert ConfigValue("test-loader.value1", int) == 1
assert ConfigValue("test-loader.value2", int) == 2
assert ConfigValue("test-loader.value3", int) == 3
|
nilq/baby-python
|
python
|
from veem.configuration import ConfigLoader
from veem.client.payment import PaymentClient
from veem.client.requests.payment import PaymentRequest
from veem.client.authentication import AuthenticationClient
if __name__ == '__main__':
# loading SDK configuration from your yaml file
config = ConfigLoader(yaml_file='/path/to/your/configuration.yaml')
# login to Veem server with client credentials
AuthenticationClient(config).getTokenFromClientCredentials()
    # define a PaymentRequest
request = PaymentRequest(payee=dict(type='Business',
email='username@yourbusiness.com',
firstName='Joe',
lastName='Doe',
businessName='Your Business Inc.',
countryCode='US',
phoneCountryCode='1',
phone='02222222222'),
amount=dict(number=50, currency='USD'))
# create a Draft payment
payment = PaymentClient(config).create(request)
# send the Drafted payment
payment = PaymentClient(config).send(payment.id)
|
nilq/baby-python
|
python
|
# pylint: disable=missing-module-docstring
from setuptools import setup
# The install configuration lies in setup.cfg
setup()
|
nilq/baby-python
|
python
|
"""
===========================================
Comparison of F-test and mutual information
===========================================
This example illustrates the differences between univariate F-test statistics
and mutual information.
We consider 3 features x_1, x_2, x_3 distributed uniformly over [0, 1]; the
target depends on them as follows:
y = x_1 + sin(6 * pi * x_2) + 0.1 * N(0, 1), that is, the third feature is completely irrelevant.
The code below plots the dependency of y against each individual x_i and the
normalized values of the univariate F-test statistics and mutual information.
As the F-test captures only linear dependency, it rates x_1 as the most
discriminative feature. On the other hand, mutual information can capture any
kind of dependency between variables, and it rates x_2 as the most
discriminative feature, which probably agrees better with our intuitive
perception for this example. Both methods correctly mark x_3 as irrelevant.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_selection import f_regression, mutual_info_regression
np.random.seed(0)
X = np.random.rand(1000, 3)
y = X[:, 0] + np.sin(6 * np.pi * X[:, 1]) + 0.1 * np.random.randn(1000)
f_test, _ = f_regression(X, y)
f_test /= np.max(f_test)
mi = mutual_info_regression(X, y)
mi /= np.max(mi)
plt.figure(figsize=(15, 5))
for i in range(3):
plt.subplot(1, 3, i + 1)
plt.scatter(X[:, i], y)
plt.xlabel("$x_{}$".format(i + 1), fontsize=14)
if i == 0:
plt.ylabel("$y$", fontsize=14)
plt.title("F-test={:.2f}, MI={:.2f}".format(f_test[i], mi[i]),
fontsize=16)
plt.show()
|
nilq/baby-python
|
python
|
from .projects_api import ProjectsApi
from .timer_api import TimerApi
from .workspaces_api import WorkspacesApi
from enum import Enum
class TopLevelApis(Enum):
""" Represents all the top level Apis that can be accessed """
projects = ProjectsApi
timer = TimerApi
workspaces = WorkspacesApi
|
nilq/baby-python
|
python
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ReferenceInputMssqlArgs', 'ReferenceInputMssql']
@pulumi.input_type
class ReferenceInputMssqlArgs:
def __init__(__self__, *,
database: pulumi.Input[str],
full_snapshot_query: pulumi.Input[str],
password: pulumi.Input[str],
refresh_type: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
server: pulumi.Input[str],
stream_analytics_job_name: pulumi.Input[str],
username: pulumi.Input[str],
delta_snapshot_query: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_interval_duration: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a ReferenceInputMssql resource.
:param pulumi.Input[str] database: The MS SQL database name where the reference data exists.
:param pulumi.Input[str] full_snapshot_query: The query used to retrieve the reference data from the MS SQL database.
        :param pulumi.Input[str] password: The password to connect to the MS SQL database.
:param pulumi.Input[str] refresh_type: Defines whether and how the reference data should be refreshed. Accepted values are `Static`, `RefreshPeriodicallyWithFull` and `RefreshPeriodicallyWithDelta`.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Stream Analytics Job should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] server: The fully qualified domain name of the MS SQL server.
:param pulumi.Input[str] stream_analytics_job_name: The name of the Stream Analytics Job. Changing this forces a new resource to be created.
:param pulumi.Input[str] username: The username to connect to the MS SQL database.
:param pulumi.Input[str] delta_snapshot_query: The query used to retrieve incremental changes in the reference data from the MS SQL database. Cannot be set when `refresh_type` is `Static`.
:param pulumi.Input[str] name: The name of the Reference Input MS SQL data. Changing this forces a new resource to be created.
:param pulumi.Input[str] refresh_interval_duration: The frequency in `hh:mm:ss` with which the reference data should be retrieved from the MS SQL database e.g. `00:20:00` for every 20 minutes. Must be set when `refresh_type` is `RefreshPeriodicallyWithFull` or `RefreshPeriodicallyWithDelta`.
"""
pulumi.set(__self__, "database", database)
pulumi.set(__self__, "full_snapshot_query", full_snapshot_query)
pulumi.set(__self__, "password", password)
pulumi.set(__self__, "refresh_type", refresh_type)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "server", server)
pulumi.set(__self__, "stream_analytics_job_name", stream_analytics_job_name)
pulumi.set(__self__, "username", username)
if delta_snapshot_query is not None:
pulumi.set(__self__, "delta_snapshot_query", delta_snapshot_query)
if name is not None:
pulumi.set(__self__, "name", name)
if refresh_interval_duration is not None:
pulumi.set(__self__, "refresh_interval_duration", refresh_interval_duration)
@property
@pulumi.getter
def database(self) -> pulumi.Input[str]:
"""
The MS SQL database name where the reference data exists.
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: pulumi.Input[str]):
pulumi.set(self, "database", value)
@property
@pulumi.getter(name="fullSnapshotQuery")
def full_snapshot_query(self) -> pulumi.Input[str]:
"""
The query used to retrieve the reference data from the MS SQL database.
"""
return pulumi.get(self, "full_snapshot_query")
@full_snapshot_query.setter
def full_snapshot_query(self, value: pulumi.Input[str]):
pulumi.set(self, "full_snapshot_query", value)
@property
@pulumi.getter
def password(self) -> pulumi.Input[str]:
"""
        The password to connect to the MS SQL database.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: pulumi.Input[str]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="refreshType")
def refresh_type(self) -> pulumi.Input[str]:
"""
Defines whether and how the reference data should be refreshed. Accepted values are `Static`, `RefreshPeriodicallyWithFull` and `RefreshPeriodicallyWithDelta`.
"""
return pulumi.get(self, "refresh_type")
@refresh_type.setter
def refresh_type(self, value: pulumi.Input[str]):
pulumi.set(self, "refresh_type", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the Stream Analytics Job should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def server(self) -> pulumi.Input[str]:
"""
The fully qualified domain name of the MS SQL server.
"""
return pulumi.get(self, "server")
@server.setter
def server(self, value: pulumi.Input[str]):
pulumi.set(self, "server", value)
@property
@pulumi.getter(name="streamAnalyticsJobName")
def stream_analytics_job_name(self) -> pulumi.Input[str]:
"""
The name of the Stream Analytics Job. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "stream_analytics_job_name")
@stream_analytics_job_name.setter
def stream_analytics_job_name(self, value: pulumi.Input[str]):
pulumi.set(self, "stream_analytics_job_name", value)
@property
@pulumi.getter
def username(self) -> pulumi.Input[str]:
"""
The username to connect to the MS SQL database.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: pulumi.Input[str]):
pulumi.set(self, "username", value)
@property
@pulumi.getter(name="deltaSnapshotQuery")
def delta_snapshot_query(self) -> Optional[pulumi.Input[str]]:
"""
The query used to retrieve incremental changes in the reference data from the MS SQL database. Cannot be set when `refresh_type` is `Static`.
"""
return pulumi.get(self, "delta_snapshot_query")
@delta_snapshot_query.setter
def delta_snapshot_query(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "delta_snapshot_query", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Reference Input MS SQL data. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="refreshIntervalDuration")
def refresh_interval_duration(self) -> Optional[pulumi.Input[str]]:
"""
The frequency in `hh:mm:ss` with which the reference data should be retrieved from the MS SQL database e.g. `00:20:00` for every 20 minutes. Must be set when `refresh_type` is `RefreshPeriodicallyWithFull` or `RefreshPeriodicallyWithDelta`.
"""
return pulumi.get(self, "refresh_interval_duration")
@refresh_interval_duration.setter
def refresh_interval_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "refresh_interval_duration", value)
@pulumi.input_type
class _ReferenceInputMssqlState:
def __init__(__self__, *,
database: Optional[pulumi.Input[str]] = None,
delta_snapshot_query: Optional[pulumi.Input[str]] = None,
full_snapshot_query: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
refresh_interval_duration: Optional[pulumi.Input[str]] = None,
refresh_type: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
stream_analytics_job_name: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering ReferenceInputMssql resources.
:param pulumi.Input[str] database: The MS SQL database name where the reference data exists.
:param pulumi.Input[str] delta_snapshot_query: The query used to retrieve incremental changes in the reference data from the MS SQL database. Cannot be set when `refresh_type` is `Static`.
:param pulumi.Input[str] full_snapshot_query: The query used to retrieve the reference data from the MS SQL database.
:param pulumi.Input[str] name: The name of the Reference Input MS SQL data. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The password to connect to the MS SQL database.
:param pulumi.Input[str] refresh_interval_duration: The frequency in `hh:mm:ss` with which the reference data should be retrieved from the MS SQL database e.g. `00:20:00` for every 20 minutes. Must be set when `refresh_type` is `RefreshPeriodicallyWithFull` or `RefreshPeriodicallyWithDelta`.
:param pulumi.Input[str] refresh_type: Defines whether and how the reference data should be refreshed. Accepted values are `Static`, `RefreshPeriodicallyWithFull` and `RefreshPeriodicallyWithDelta`.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Stream Analytics Job should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] server: The fully qualified domain name of the MS SQL server.
:param pulumi.Input[str] stream_analytics_job_name: The name of the Stream Analytics Job. Changing this forces a new resource to be created.
:param pulumi.Input[str] username: The username to connect to the MS SQL database.
"""
if database is not None:
pulumi.set(__self__, "database", database)
if delta_snapshot_query is not None:
pulumi.set(__self__, "delta_snapshot_query", delta_snapshot_query)
if full_snapshot_query is not None:
pulumi.set(__self__, "full_snapshot_query", full_snapshot_query)
if name is not None:
pulumi.set(__self__, "name", name)
if password is not None:
pulumi.set(__self__, "password", password)
if refresh_interval_duration is not None:
pulumi.set(__self__, "refresh_interval_duration", refresh_interval_duration)
if refresh_type is not None:
pulumi.set(__self__, "refresh_type", refresh_type)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if server is not None:
pulumi.set(__self__, "server", server)
if stream_analytics_job_name is not None:
pulumi.set(__self__, "stream_analytics_job_name", stream_analytics_job_name)
if username is not None:
pulumi.set(__self__, "username", username)
@property
@pulumi.getter
def database(self) -> Optional[pulumi.Input[str]]:
"""
The MS SQL database name where the reference data exists.
"""
return pulumi.get(self, "database")
@database.setter
def database(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "database", value)
@property
@pulumi.getter(name="deltaSnapshotQuery")
def delta_snapshot_query(self) -> Optional[pulumi.Input[str]]:
"""
The query used to retrieve incremental changes in the reference data from the MS SQL database. Cannot be set when `refresh_type` is `Static`.
"""
return pulumi.get(self, "delta_snapshot_query")
@delta_snapshot_query.setter
def delta_snapshot_query(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "delta_snapshot_query", value)
@property
@pulumi.getter(name="fullSnapshotQuery")
def full_snapshot_query(self) -> Optional[pulumi.Input[str]]:
"""
The query used to retrieve the reference data from the MS SQL database.
"""
return pulumi.get(self, "full_snapshot_query")
@full_snapshot_query.setter
def full_snapshot_query(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "full_snapshot_query", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Reference Input MS SQL data. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def password(self) -> Optional[pulumi.Input[str]]:
"""
        The password to connect to the MS SQL database.
"""
return pulumi.get(self, "password")
@password.setter
def password(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "password", value)
@property
@pulumi.getter(name="refreshIntervalDuration")
def refresh_interval_duration(self) -> Optional[pulumi.Input[str]]:
"""
The frequency in `hh:mm:ss` with which the reference data should be retrieved from the MS SQL database e.g. `00:20:00` for every 20 minutes. Must be set when `refresh_type` is `RefreshPeriodicallyWithFull` or `RefreshPeriodicallyWithDelta`.
"""
return pulumi.get(self, "refresh_interval_duration")
@refresh_interval_duration.setter
def refresh_interval_duration(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "refresh_interval_duration", value)
@property
@pulumi.getter(name="refreshType")
def refresh_type(self) -> Optional[pulumi.Input[str]]:
"""
Defines whether and how the reference data should be refreshed. Accepted values are `Static`, `RefreshPeriodicallyWithFull` and `RefreshPeriodicallyWithDelta`.
"""
return pulumi.get(self, "refresh_type")
@refresh_type.setter
def refresh_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "refresh_type", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the Stream Analytics Job should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def server(self) -> Optional[pulumi.Input[str]]:
"""
The fully qualified domain name of the MS SQL server.
"""
return pulumi.get(self, "server")
@server.setter
def server(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "server", value)
@property
@pulumi.getter(name="streamAnalyticsJobName")
def stream_analytics_job_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Stream Analytics Job. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "stream_analytics_job_name")
@stream_analytics_job_name.setter
def stream_analytics_job_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stream_analytics_job_name", value)
@property
@pulumi.getter
def username(self) -> Optional[pulumi.Input[str]]:
"""
The username to connect to the MS SQL database.
"""
return pulumi.get(self, "username")
@username.setter
def username(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "username", value)
class ReferenceInputMssql(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
database: Optional[pulumi.Input[str]] = None,
delta_snapshot_query: Optional[pulumi.Input[str]] = None,
full_snapshot_query: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
refresh_interval_duration: Optional[pulumi.Input[str]] = None,
refresh_type: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
stream_analytics_job_name: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Manages a Stream Analytics Reference Input from MS SQL. Reference data (also known as a lookup table) is a finite data set that is static or slowly changing in nature, used to perform a lookup or to correlate with your data stream. Learn more [here](https://docs.microsoft.com/en-us/azure/stream-analytics/stream-analytics-use-reference-data#azure-sql-database).
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.get_resource_group(name="example-resources")
example_job = azure.streamanalytics.get_job(name="example-job",
resource_group_name=azurerm_resource_group["example"]["name"])
example_server = azure.mssql.Server("exampleServer",
resource_group_name=azurerm_resource_group["example"]["name"],
location=azurerm_resource_group["example"]["location"],
version="12.0",
administrator_login="admin",
administrator_login_password="password")
example_database = azure.mssql.Database("exampleDatabase", server_id=example_server.id)
example_reference_input_mssql = azure.streamanalytics.ReferenceInputMssql("exampleReferenceInputMssql",
resource_group_name=azurerm_stream_analytics_job["example"]["resource_group_name"],
stream_analytics_job_name=azurerm_stream_analytics_job["example"]["name"],
server=example_server.fully_qualified_domain_name,
database=example_database.name,
username="exampleuser",
password="examplepassword",
refresh_type="RefreshPeriodicallyWithFull",
refresh_interval_duration="00:20:00",
full_snapshot_query=\"\"\" SELECT *
INTO [YourOutputAlias]
FROM [YourInputAlias]
\"\"\")
```
## Import
        Stream Analytics Reference Inputs can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:streamanalytics/referenceInputMssql:ReferenceInputMssql example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.StreamAnalytics/streamingjobs/job1/inputs/input1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] database: The MS SQL database name where the reference data exists.
:param pulumi.Input[str] delta_snapshot_query: The query used to retrieve incremental changes in the reference data from the MS SQL database. Cannot be set when `refresh_type` is `Static`.
:param pulumi.Input[str] full_snapshot_query: The query used to retrieve the reference data from the MS SQL database.
:param pulumi.Input[str] name: The name of the Reference Input MS SQL data. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The password to connect to the MS SQL database.
        :param pulumi.Input[str] refresh_interval_duration: The frequency in `hh:mm:ss` with which the reference data should be retrieved from the MS SQL database, e.g. `00:20:00` for every 20 minutes. Must be set when `refresh_type` is `RefreshPeriodicallyWithFull` or `RefreshPeriodicallyWithDelta`.
:param pulumi.Input[str] refresh_type: Defines whether and how the reference data should be refreshed. Accepted values are `Static`, `RefreshPeriodicallyWithFull` and `RefreshPeriodicallyWithDelta`.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Stream Analytics Job should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] server: The fully qualified domain name of the MS SQL server.
:param pulumi.Input[str] stream_analytics_job_name: The name of the Stream Analytics Job. Changing this forces a new resource to be created.
:param pulumi.Input[str] username: The username to connect to the MS SQL database.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ReferenceInputMssqlArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Stream Analytics Reference Input from MS SQL. Reference data (also known as a lookup table) is a finite data set that is static or slowly changing in nature, used to perform a lookup or to correlate with your data stream. Learn more [here](https://docs.microsoft.com/en-us/azure/stream-analytics/stream-analytics-use-reference-data#azure-sql-database).
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.get_resource_group(name="example-resources")
example_job = azure.streamanalytics.get_job(name="example-job",
resource_group_name=azurerm_resource_group["example"]["name"])
example_server = azure.mssql.Server("exampleServer",
resource_group_name=azurerm_resource_group["example"]["name"],
location=azurerm_resource_group["example"]["location"],
version="12.0",
administrator_login="admin",
administrator_login_password="password")
example_database = azure.mssql.Database("exampleDatabase", server_id=example_server.id)
example_reference_input_mssql = azure.streamanalytics.ReferenceInputMssql("exampleReferenceInputMssql",
resource_group_name=azurerm_stream_analytics_job["example"]["resource_group_name"],
stream_analytics_job_name=azurerm_stream_analytics_job["example"]["name"],
server=example_server.fully_qualified_domain_name,
database=example_database.name,
username="exampleuser",
password="examplepassword",
refresh_type="RefreshPeriodicallyWithFull",
refresh_interval_duration="00:20:00",
full_snapshot_query=\"\"\" SELECT *
INTO [YourOutputAlias]
FROM [YourInputAlias]
\"\"\")
```
## Import
Stream Analytics can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:streamanalytics/referenceInputMssql:ReferenceInputMssql example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.StreamAnalytics/streamingjobs/job1/inputs/input1
```
:param str resource_name: The name of the resource.
:param ReferenceInputMssqlArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ReferenceInputMssqlArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
database: Optional[pulumi.Input[str]] = None,
delta_snapshot_query: Optional[pulumi.Input[str]] = None,
full_snapshot_query: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
refresh_interval_duration: Optional[pulumi.Input[str]] = None,
refresh_type: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
stream_analytics_job_name: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ReferenceInputMssqlArgs.__new__(ReferenceInputMssqlArgs)
if database is None and not opts.urn:
raise TypeError("Missing required property 'database'")
__props__.__dict__["database"] = database
__props__.__dict__["delta_snapshot_query"] = delta_snapshot_query
if full_snapshot_query is None and not opts.urn:
raise TypeError("Missing required property 'full_snapshot_query'")
__props__.__dict__["full_snapshot_query"] = full_snapshot_query
__props__.__dict__["name"] = name
if password is None and not opts.urn:
raise TypeError("Missing required property 'password'")
__props__.__dict__["password"] = password
__props__.__dict__["refresh_interval_duration"] = refresh_interval_duration
if refresh_type is None and not opts.urn:
raise TypeError("Missing required property 'refresh_type'")
__props__.__dict__["refresh_type"] = refresh_type
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if server is None and not opts.urn:
raise TypeError("Missing required property 'server'")
__props__.__dict__["server"] = server
if stream_analytics_job_name is None and not opts.urn:
raise TypeError("Missing required property 'stream_analytics_job_name'")
__props__.__dict__["stream_analytics_job_name"] = stream_analytics_job_name
if username is None and not opts.urn:
raise TypeError("Missing required property 'username'")
__props__.__dict__["username"] = username
super(ReferenceInputMssql, __self__).__init__(
'azure:streamanalytics/referenceInputMssql:ReferenceInputMssql',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
database: Optional[pulumi.Input[str]] = None,
delta_snapshot_query: Optional[pulumi.Input[str]] = None,
full_snapshot_query: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
password: Optional[pulumi.Input[str]] = None,
refresh_interval_duration: Optional[pulumi.Input[str]] = None,
refresh_type: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
server: Optional[pulumi.Input[str]] = None,
stream_analytics_job_name: Optional[pulumi.Input[str]] = None,
username: Optional[pulumi.Input[str]] = None) -> 'ReferenceInputMssql':
"""
Get an existing ReferenceInputMssql resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] database: The MS SQL database name where the reference data exists.
:param pulumi.Input[str] delta_snapshot_query: The query used to retrieve incremental changes in the reference data from the MS SQL database. Cannot be set when `refresh_type` is `Static`.
:param pulumi.Input[str] full_snapshot_query: The query used to retrieve the reference data from the MS SQL database.
:param pulumi.Input[str] name: The name of the Reference Input MS SQL data. Changing this forces a new resource to be created.
        :param pulumi.Input[str] password: The password to connect to the MS SQL database.
        :param pulumi.Input[str] refresh_interval_duration: The frequency in `hh:mm:ss` with which the reference data should be retrieved from the MS SQL database, e.g. `00:20:00` for every 20 minutes. Must be set when `refresh_type` is `RefreshPeriodicallyWithFull` or `RefreshPeriodicallyWithDelta`.
:param pulumi.Input[str] refresh_type: Defines whether and how the reference data should be refreshed. Accepted values are `Static`, `RefreshPeriodicallyWithFull` and `RefreshPeriodicallyWithDelta`.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Stream Analytics Job should exist. Changing this forces a new resource to be created.
:param pulumi.Input[str] server: The fully qualified domain name of the MS SQL server.
:param pulumi.Input[str] stream_analytics_job_name: The name of the Stream Analytics Job. Changing this forces a new resource to be created.
:param pulumi.Input[str] username: The username to connect to the MS SQL database.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ReferenceInputMssqlState.__new__(_ReferenceInputMssqlState)
__props__.__dict__["database"] = database
__props__.__dict__["delta_snapshot_query"] = delta_snapshot_query
__props__.__dict__["full_snapshot_query"] = full_snapshot_query
__props__.__dict__["name"] = name
__props__.__dict__["password"] = password
__props__.__dict__["refresh_interval_duration"] = refresh_interval_duration
__props__.__dict__["refresh_type"] = refresh_type
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["server"] = server
__props__.__dict__["stream_analytics_job_name"] = stream_analytics_job_name
__props__.__dict__["username"] = username
return ReferenceInputMssql(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def database(self) -> pulumi.Output[str]:
"""
The MS SQL database name where the reference data exists.
"""
return pulumi.get(self, "database")
@property
@pulumi.getter(name="deltaSnapshotQuery")
def delta_snapshot_query(self) -> pulumi.Output[Optional[str]]:
"""
The query used to retrieve incremental changes in the reference data from the MS SQL database. Cannot be set when `refresh_type` is `Static`.
"""
return pulumi.get(self, "delta_snapshot_query")
@property
@pulumi.getter(name="fullSnapshotQuery")
def full_snapshot_query(self) -> pulumi.Output[str]:
"""
The query used to retrieve the reference data from the MS SQL database.
"""
return pulumi.get(self, "full_snapshot_query")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the Reference Input MS SQL data. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def password(self) -> pulumi.Output[str]:
"""
        The password to connect to the MS SQL database.
"""
return pulumi.get(self, "password")
@property
@pulumi.getter(name="refreshIntervalDuration")
def refresh_interval_duration(self) -> pulumi.Output[Optional[str]]:
"""
        The frequency in `hh:mm:ss` with which the reference data should be retrieved from the MS SQL database, e.g. `00:20:00` for every 20 minutes. Must be set when `refresh_type` is `RefreshPeriodicallyWithFull` or `RefreshPeriodicallyWithDelta`.
"""
return pulumi.get(self, "refresh_interval_duration")
@property
@pulumi.getter(name="refreshType")
def refresh_type(self) -> pulumi.Output[str]:
"""
Defines whether and how the reference data should be refreshed. Accepted values are `Static`, `RefreshPeriodicallyWithFull` and `RefreshPeriodicallyWithDelta`.
"""
return pulumi.get(self, "refresh_type")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group where the Stream Analytics Job should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def server(self) -> pulumi.Output[str]:
"""
The fully qualified domain name of the MS SQL server.
"""
return pulumi.get(self, "server")
@property
@pulumi.getter(name="streamAnalyticsJobName")
def stream_analytics_job_name(self) -> pulumi.Output[str]:
"""
The name of the Stream Analytics Job. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "stream_analytics_job_name")
@property
@pulumi.getter
def username(self) -> pulumi.Output[str]:
"""
The username to connect to the MS SQL database.
"""
return pulumi.get(self, "username")
|
nilq/baby-python
|
python
|
import unittest
from threading import Lock
from dummyserver.server import (
TornadoServerThread, SocketServerThread,
DEFAULT_CERTS,
)
# TODO: Change ports to auto-allocated?
class SocketDummyServerTestCase(unittest.TestCase):
"""
A simple socket-based server is created for this class that is good for
exactly one request.
"""
scheme = 'http'
host = 'localhost'
port = 18080
@classmethod
def _start_server(cls, socket_handler):
ready_lock = Lock()
ready_lock.acquire()
cls.server_thread = SocketServerThread(socket_handler=socket_handler,
ready_lock=ready_lock,
host=cls.host, port=cls.port)
cls.server_thread.start()
# Lock gets released by thread above
ready_lock.acquire()
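# An illustrative sketch (not part of the original suite) of how the one-shot
# server above is meant to be driven: the handler receives the listening
# socket, serves a single canned response, and exits. The class name, request
# bytes, and response bytes below are assumptions for demonstration only.
class ExampleOneShotTest(SocketDummyServerTestCase):
    def test_single_request(self):
        def socket_handler(listener):
            sock = listener.accept()[0]
            sock.recv(65536)  # read (and ignore) the request
            sock.send(b'HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n')
            sock.close()
        self._start_server(socket_handler)
        import socket
        client = socket.create_connection((self.host, self.port))
        client.send(b'GET / HTTP/1.1\r\nHost: localhost\r\n\r\n')
        reply = client.recv(65536)
        assert reply.startswith(b'HTTP/1.1 200 OK')
        client.close()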
class HTTPDummyServerTestCase(unittest.TestCase):
scheme = 'http'
host = 'localhost'
host_alt = '127.0.0.1' # Some tests need two hosts
port = 18081
certs = DEFAULT_CERTS
@classmethod
def _start_server(cls):
cls.server_thread = TornadoServerThread(host=cls.host, port=cls.port,
scheme=cls.scheme,
certs=cls.certs)
cls.server_thread.start()
# TODO: Loop-check here instead
import time
time.sleep(0.1)
@classmethod
def _stop_server(cls):
cls.server_thread.stop()
@classmethod
def setUpClass(cls):
cls._start_server()
@classmethod
def tearDownClass(cls):
cls._stop_server()
class HTTPSDummyServerTestCase(HTTPDummyServerTestCase):
scheme = 'https'
host = 'localhost'
port = 18082
certs = DEFAULT_CERTS
|
nilq/baby-python
|
python
|
# Copyright 2018 Northwest University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import h5py
def feature_model_choose(data, each_person_sample):
    # input : -data: recognition results of the expert selector training sets
    #         -each_person_sample: the number of samples per user
    # output: -choose_sample: the row indices of the samples that were correctly classified
m=data.shape[0]
person_num=int(m/each_person_sample)
label = []
for k in range(person_num):
label1 = []
for j in range(each_person_sample):
label1.append(k+1)
row_vec = np.array(label1)
dataLabels = np.array([row_vec]).T
dataLabels = np.array(dataLabels)
if k == 0:
label = dataLabels
else:
label = np.vstack((label, dataLabels))
choose_model_sample = []
choose_sample_row = []
for k in range(m):
if int(data[k]) == label[k,0]:
choose_model_sample.append(1)
choose_sample_row.append(k)
else:
choose_model_sample.append(0)
row_vec1 = np.array(choose_sample_row)
choose_sample = np.array([row_vec1]).T
return np.array(choose_sample)
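# Illustrative sketch (not from the original experiments): two users with
# three samples each, where the sample at index 3 is misclassified. A perfect
# classifier would output [1, 1, 1, 2, 2, 2]; the misclassified row is dropped.
def demo_feature_model_choose():
    demo = np.array([1, 1, 1, 1, 2, 2])  # sample 3 predicted as user 1
    rows = feature_model_choose(demo, each_person_sample=3)
    print(rows)  # [[0] [1] [2] [4] [5]]: indices of the correctly classified samples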
def main():
# input : -data: recognition results of the expert selector training sets
# output: -feature_model_sample: the correctly classified samples
# -feature_model_label: The label of experts
# feature_name: Four kinds of wireless signal features used
feature_name = ["Stat", "Comp","Spec","Tran"]
# model_name: Six classification techniques used
model_name = ["NB", "RF", "SVM", "LinearSVM","KNN","Adaboost"]
# pathname: modified according to your path
pathname = "./scene1/"
# each_person_sample: The number of samples per user in training sets
each_person_sample=10
# feature_model_num: Record the number of experts
feature_model_num=0
feature_model_sample = []
for i in range(len(feature_name)):
for j in range(len(model_name)):
f_feature = feature_name[i]
m_model = model_name[j]
# the accuracy is tested on expert selector training sets, it contains the predicted labels
filename = pathname + "accuracy_" + m_model + "_" + f_feature + ".mat"
print(filename)
            f = h5py.File(filename, 'r')
predict = np.transpose(f['predict'])
data = predict[:,0]
data = np.array(data)
choose_sample = feature_model_choose(data, each_person_sample)
feature_model_sample.append(choose_sample)
print(feature_model_num)
feature_model_num = feature_model_num + 1
feature_model_label = []
for i in range(feature_model_num):
feature_model_label.append(i+1)
print(feature_model_sample[i].shape)
print(feature_model_label[i])
np.save('feature_model_sample.npy',feature_model_sample)
np.save('feature_model_label.npy', feature_model_label)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import logging
from rest_framework import decorators, permissions, status
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project, ProjectRelationship
from readthedocs.search.lib import search_file, search_project, search_section
from readthedocs.restapi import utils
log = logging.getLogger(__name__)
@decorators.api_view(['POST'])
@decorators.permission_classes((permissions.IsAdminUser,))
@decorators.renderer_classes((JSONRenderer,))
def index_search(request):
"""Add things to the search index"""
data = request.DATA['data']
version_pk = data['version_pk']
commit = data.get('commit')
version = Version.objects.get(pk=version_pk)
project_scale = 1
page_scale = 1
utils.index_search_request(
version=version, page_list=data['page_list'], commit=commit,
project_scale=project_scale, page_scale=page_scale)
return Response({'indexed': True})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer,))
def search(request):
"""Perform search, supplement links by resolving project domains"""
project_slug = request.GET.get('project', None)
version_slug = request.GET.get('version', LATEST)
query = request.GET.get('q', None)
if project_slug is None or query is None:
return Response({'error': 'Need project and q'},
status=status.HTTP_400_BAD_REQUEST)
try:
project = Project.objects.get(slug=project_slug)
except Project.DoesNotExist:
return Response({'error': 'Project not found'},
status=status.HTTP_404_NOT_FOUND)
log.debug("(API Search) %s", query)
results = search_file(request=request, project_slug=project_slug,
version_slug=version_slug, query=query)
# Supplement result paths with domain information on project
hits = results.get('hits', {}).get('hits', [])
for (n, hit) in enumerate(hits):
fields = hit.get('fields', {})
search_project = fields.get('project')[0]
search_version = fields.get('version')[0]
path = fields.get('path')[0]
canonical_url = project.get_docs_url(version_slug=version_slug)
if search_project != project_slug:
try:
subproject = project.subprojects.get(child__slug=search_project)
canonical_url = subproject.child.get_docs_url(
version_slug=search_version
)
except ProjectRelationship.DoesNotExist:
pass
results['hits']['hits'][n]['fields']['link'] = (
canonical_url + path
)
return Response({'results': results})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer,))
def project_search(request):
query = request.GET.get('q', None)
if query is None:
        return Response({'error': 'Need q'}, status=status.HTTP_400_BAD_REQUEST)
log.debug("(API Project Search) %s", (query))
results = search_project(request=request, query=query)
return Response({'results': results})
@decorators.api_view(['GET'])
@decorators.permission_classes((permissions.AllowAny,))
@decorators.renderer_classes((JSONRenderer,))
def section_search(request):
"""Section search
Queries with query ``q`` across all documents and projects. Queries can be
limited to a single project or version by using the ``project`` and
``version`` GET arguments in your request.
When you search, you will have a ``project`` facet, which includes the
number of matching sections per project. When you search inside a project,
the ``path`` facet will show the number of matching sections per page.
Possible GET args
-----------------
q **(required)**
The query string **Required**
project
A project slug
version
A version slug
path
A file path slug
Example::
GET /api/v2/search/section/?q=virtualenv&project=django
"""
query = request.GET.get('q', None)
if not query:
return Response(
            {'error': 'Search term required. Use the "q" GET arg to search.'},
status=status.HTTP_400_BAD_REQUEST)
project_slug = request.GET.get('project', None)
version_slug = request.GET.get('version', LATEST)
path = request.GET.get('path', None)
log.debug("(API Section Search) [%s:%s] %s", project_slug, version_slug,
query)
results = search_section(
request=request,
query=query,
project_slug=project_slug,
version_slug=version_slug,
path=path,
)
return Response({'results': results})
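# Illustrative client-side sketch (not part of this module) for the
# section-search endpoint documented above; the host name is an assumption and
# ``requests`` is not a dependency of this file.
def example_section_search_client():
    import requests
    resp = requests.get(
        'https://readthedocs.org/api/v2/search/section/',
        params={'q': 'virtualenv', 'project': 'django'},
    )
    resp.raise_for_status()
    # The payload mirrors the Elasticsearch-style structure used above.
    return resp.json()['results']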
|
nilq/baby-python
|
python
|
import luigi
from exaslct_src.lib.build_config import build_config
from exaslct_src.lib.stoppable_task import StoppableTask
# This task is needed because ExportContainerTask and SpawnTestContainer
# require the releases directory which stores the exported containers.
# However, we wanted to avoid SpawnTestContainer depending on ExportContainerTask,
# because ExportContainerTask has a high runtime and SpawnTestContainer is part of SpawnTestEnvironment,
# which has a long runtime, too.
class CreateExportDirectory(StoppableTask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def output(self):
self.directory = "%s/exports/" % build_config().output_directory
release_directory = luigi.LocalTarget(self.directory + ".created")
return release_directory
def run(self):
with self.output().open("w") as f:
f.write(self.directory)
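# Illustrative sketch (not part of exaslct): a downstream task can depend on
# CreateExportDirectory via requires() so the exports directory is guaranteed
# to exist before it runs. The task and file names here are assumptions.
class ExampleExportConsumer(StoppableTask):
    def requires(self):
        return CreateExportDirectory()
    def output(self):
        return luigi.LocalTarget(
            "%s/exports/example.txt" % build_config().output_directory)
    def run(self):
        with self.output().open("w") as f:
            f.write("runs only after the exports directory was created")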
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import random
def compare(*args, width=3, height=3, dataset=None) -> None:
"""
    Used to compare matplotlib images to each other.
    Args:
        *args: All the images given to the function.
        width: The maximum number of images you want on one row.
        height: The maximum number of rows in the subplot grid.
        dataset(x,y): If the user wants to compare their image
            to a bunch of dataset images, they can feed in the whole
            dataset and the remaining image spots will be filled.
    Return:
        Displays a subplot of all the images.
    Example:
        >>> compare(image1,image2,image3)
        Displays a subplot of image1,image2,image3
        >>> compare(image, dataset = dataset)
        Displays a subplot of the user's image, with the rest filled in
        from the dataset's images.
"""
    # TODO: Check that all the images are the same size; if not, raise an
    # error such as "Images are not in the same format or size". This check
    # could also live in a per-model function, since each model changes its
    # requirements based on the type of the image.
args_count = len(args)
for i in range(args_count):
# define subplot
        plt.subplot(height, width, 1 + i)
# plot raw pixel data
plt.imshow(args[i].reshape([28, 28]), cmap=plt.get_cmap('gray'))
    if dataset is not None:
for i in range(args_count, width * height):
# define subplot
            plt.subplot(height, width, 1 + i)
# plot raw pixel data
plt.imshow(random.choice(dataset[0]).reshape([28, 28]), cmap=plt.get_cmap('gray'))
# show the figure
plt.show()
|
nilq/baby-python
|
python
|
import pytest
import numpy.testing as npt
from xbout.load import _auto_open_mfboutdataset
class TestAccuracyAgainstOldCollect:
@pytest.mark.skip
def test_single_file(self):
from boutdata import collect
var = 'n'
expected = collect(var, path='./tests/data/dump_files/single',
prefix='equilibrium', xguards=False)
ds, metadata = _auto_open_mfboutdataset('./tests/data/dump_files/single/equilibrium.nc')
print(ds)
actual = ds[var].values
assert expected.shape == actual.shape
npt.assert_equal(actual, expected)
@pytest.mark.skip
def test_multiple_files_along_x(self):
from boutdata import collect
var = 'n'
expected = collect(var, path='./tests/data/dump_files/',
prefix='BOUT.dmp', xguards=False)
ds, metadata = _auto_open_mfboutdataset('./tests/data/dump_files/BOUT.dmp.*.nc')
actual = ds[var].values
assert expected.shape == actual.shape
npt.assert_equal(actual, expected)
@pytest.mark.skip
    def test_multiple_files_along_y(self):
...
@pytest.mark.skip
def test_metadata(self):
...
@pytest.mark.skip
class test_speed_against_old_collect:
...
|
nilq/baby-python
|
python
|
import numpy as np
import logging
logger = logging.getLogger(__name__)
def create_model(args, initial_mean_value, overal_maxlen, vocab):
import keras.backend as K
from keras.layers.embeddings import Embedding
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Activation
from nea.my_layers import Attention, MeanOverTime, Conv1DWithMasking
###############################################################################################################################
## Recurrence unit type
#
if args.recurrent_unit == 'lstm':
from keras.layers.recurrent import LSTM as RNN
elif args.recurrent_unit == 'gru':
from keras.layers.recurrent import GRU as RNN
elif args.recurrent_unit == 'simple':
from keras.layers.recurrent import SimpleRNN as RNN
###############################################################################################################################
## Create Model
#
dropout_W = 0.5 # default=0.5
dropout_U = 0.1 # default=0.1
cnn_border_mode='same'
if initial_mean_value.ndim == 0:
initial_mean_value = np.expand_dims(initial_mean_value, axis=1)
num_outputs = len(initial_mean_value)
if args.model_type == 'cls':
raise NotImplementedError
elif args.model_type == 'reg':
logger.info('Building a REGRESSION model')
model = Sequential()
model.add(Embedding(args.vocab_size, args.emb_dim, mask_zero=True))
if args.cnn_dim > 0:
model.add(Conv1DWithMasking(nb_filter=args.cnn_dim, filter_length=args.cnn_window_size, border_mode=cnn_border_mode, subsample_length=1))
if args.rnn_dim > 0:
model.add(RNN(args.rnn_dim, return_sequences=False, dropout_W=dropout_W, dropout_U=dropout_U))
if args.dropout_prob > 0:
model.add(Dropout(args.dropout_prob))
model.add(Dense(num_outputs))
if not args.skip_init_bias:
bias_value = (np.log(initial_mean_value) - np.log(1 - initial_mean_value)).astype(K.floatx())
model.layers[-1].b.set_value(bias_value)
model.add(Activation('sigmoid'))
model.emb_index = 0
elif args.model_type == 'regp':
logger.info('Building a REGRESSION model with POOLING')
model = Sequential()
model.add(Embedding(args.vocab_size, args.emb_dim, mask_zero=True))
if args.cnn_dim > 0:
model.add(Conv1DWithMasking(nb_filter=args.cnn_dim, filter_length=args.cnn_window_size, border_mode=cnn_border_mode, subsample_length=1))
if args.rnn_dim > 0:
model.add(RNN(args.rnn_dim, return_sequences=True, dropout_W=dropout_W, dropout_U=dropout_U))
if args.dropout_prob > 0:
model.add(Dropout(args.dropout_prob))
if args.aggregation == 'mot':
model.add(MeanOverTime(mask_zero=True))
elif args.aggregation.startswith('att'):
model.add(Attention(op=args.aggregation, activation='tanh', init_stdev=0.01))
model.add(Dense(num_outputs))
if not args.skip_init_bias:
bias_value = (np.log(initial_mean_value) - np.log(1 - initial_mean_value)).astype(K.floatx())
model.layers[-1].b.set_value(bias_value)
model.add(Activation('sigmoid'))
model.emb_index = 0
elif args.model_type == 'breg':
logger.info('Building a BIDIRECTIONAL REGRESSION model')
from keras.layers import Dense, Dropout, Embedding, LSTM, Input, merge
sequence = Input(shape=(overal_maxlen,), dtype='int32')
output = Embedding(args.vocab_size, args.emb_dim, mask_zero=True)(sequence)
if args.cnn_dim > 0:
output = Conv1DWithMasking(nb_filter=args.cnn_dim, filter_length=args.cnn_window_size, border_mode=cnn_border_mode, subsample_length=1)(output)
if args.rnn_dim > 0:
forwards = RNN(args.rnn_dim, return_sequences=False, dropout_W=dropout_W, dropout_U=dropout_U)(output)
backwards = RNN(args.rnn_dim, return_sequences=False, dropout_W=dropout_W, dropout_U=dropout_U, go_backwards=True)(output)
if args.dropout_prob > 0:
forwards = Dropout(args.dropout_prob)(forwards)
backwards = Dropout(args.dropout_prob)(backwards)
merged = merge([forwards, backwards], mode='concat', concat_axis=-1)
densed = Dense(num_outputs)(merged)
if not args.skip_init_bias:
raise NotImplementedError
score = Activation('sigmoid')(densed)
model = Model(input=sequence, output=score)
model.emb_index = 1
elif args.model_type == 'bregp':
logger.info('Building a BIDIRECTIONAL REGRESSION model with POOLING')
from keras.layers import Dense, Dropout, Embedding, LSTM, Input, merge
sequence = Input(shape=(overal_maxlen,), dtype='int32')
output = Embedding(args.vocab_size, args.emb_dim, mask_zero=True)(sequence)
if args.cnn_dim > 0:
output = Conv1DWithMasking(nb_filter=args.cnn_dim, filter_length=args.cnn_window_size, border_mode=cnn_border_mode, subsample_length=1)(output)
if args.rnn_dim > 0:
forwards = RNN(args.rnn_dim, return_sequences=True, dropout_W=dropout_W, dropout_U=dropout_U)(output)
backwards = RNN(args.rnn_dim, return_sequences=True, dropout_W=dropout_W, dropout_U=dropout_U, go_backwards=True)(output)
if args.dropout_prob > 0:
forwards = Dropout(args.dropout_prob)(forwards)
backwards = Dropout(args.dropout_prob)(backwards)
forwards_mean = MeanOverTime(mask_zero=True)(forwards)
backwards_mean = MeanOverTime(mask_zero=True)(backwards)
merged = merge([forwards_mean, backwards_mean], mode='concat', concat_axis=-1)
densed = Dense(num_outputs)(merged)
if not args.skip_init_bias:
raise NotImplementedError
score = Activation('sigmoid')(densed)
model = Model(input=sequence, output=score)
model.emb_index = 1
logger.info(' Done')
###############################################################################################################################
## Initialize embeddings if requested
#
if args.emb_path:
from w2vEmbReader import W2VEmbReader as EmbReader
logger.info('Initializing lookup table')
emb_reader = EmbReader(args.emb_path, emb_dim=args.emb_dim)
model.layers[model.emb_index].W.set_value(emb_reader.get_emb_matrix_given_vocab(vocab, model.layers[model.emb_index].W.get_value()))
logger.info(' Done')
return model
|
nilq/baby-python
|
python
|
from plugins import * # Importing all the plugins from plugins/ folder
from settings_base import BaseSettings # Importing base settings
class BotSettings(BaseSettings):
# See README.md for details!
USERS = (
("user", "ТУТ ТОКЕН ПОЛЬЗОВАТЕЛЯ",),
)
# Default settings for plugins
DEFAULTS["PREFIXES"] = DEFAULT_PREFIXES = ("/",)
DEFAULTS["ADMINS"] = DEFAULT_ADMINS = (87641997, )
# You can setup plugins any way you like. See plugins's classes and README.md.
# All available plugins can be found in folder `plugins` or in file `PLUGINS.md`.
# Bot will use all plugins inside PLUGINS variable.
help_plugin = HelpPlugin("помощь", "команды", "?", prefixes=DEFAULT_PREFIXES)
# List of active plugins
PLUGINS = (
StoragePlugin(in_memory=True, save_to_file=True),
StaffControlPlugin(prefixes=DEFAULT_PREFIXES, admins=DEFAULT_ADMINS, set_admins=True),
ChatMetaPlugin(),
UserMetaPlugin(),
StatisticsPlugin(),
VoterPlugin(prefixes=DEFAULT_PREFIXES),
FacePlugin("сделай", prefixes=DEFAULT_PREFIXES),
SmileWritePlugin(),
JokePlugin(),
GraffitiPlugin(),
QuoteDoerPlugin(),
WikiPlugin(),
AnagramsPlugin(),
MembersPlugin(),
PairPlugin(),
WhoIsPlugin(),
YandexNewsPlugin(),
AboutPlugin(),
BirthdayPlugin(),
TimePlugin(),
MemeDoerPlugin(),
QRCodePlugin(),
ChatKickerPlugin(admins_only=True),
RandomPostPlugin({"kitties": -145935681, "random": -111759315,
"savehouse": -96322217, "octavia": -36007583}),
CalculatorPlugin(),
VideoPlugin(),
DispatchPlugin(),
NamerPlugin(),
help_plugin,
# Needs tokens (see plugin's codes, some have defaults):
SayerPlugin(),
# Plugins for bot's control
AntifloodPlugin(),
NoQueuePlugin(),
CommandAttacherPlugin(),
ForwardedCheckerPlugin(),
)
help_plugin.add_plugins(PLUGINS)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# !/usr/bin/env python
from abc import ABC
from settings import config
from peewee import SqliteDatabase, MySQLDatabase, Model
class SqliteFKDatabase(SqliteDatabase, ABC):
def initialize_connection(self, conn):
self.execute_sql('PRAGMA foreign_keys=ON;')
db = MySQLDatabase(host=config.DB_HOST, user=config.DB_USER,
passwd=config.DB_PASSWORD, database=config.DB_NAME,
charset='utf8')
class BaseModel(Model):
class Meta:
database = db
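# Illustrative sketch: a concrete model only needs to inherit BaseModel to be
# bound to ``db`` through the Meta class above. The field name here is an
# assumption, not part of the original schema.
from peewee import CharField

class ExampleUser(BaseModel):
    name = CharField(max_length=255)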
|
nilq/baby-python
|
python
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import json
import logging
import sys
from pyflink.table import (EnvironmentSettings, TableEnvironment, DataTypes, TableDescriptor,
Schema)
from pyflink.table.udf import udf
def process_json_data_with_udf():
t_env = TableEnvironment.create(EnvironmentSettings.in_streaming_mode())
# define the source
table = t_env.from_elements(
elements=[
(1, '{"name": "Flink", "tel": 123, "addr": {"country": "Germany", "city": "Berlin"}}'),
(2, '{"name": "hello", "tel": 135, "addr": {"country": "China", "city": "Shanghai"}}'),
(3, '{"name": "world", "tel": 124, "addr": {"country": "USA", "city": "NewYork"}}'),
(4, '{"name": "PyFlink", "tel": 32, "addr": {"country": "China", "city": "Hangzhou"}}')
],
schema=['id', 'data'])
# define the sink
t_env.create_temporary_table(
'sink',
TableDescriptor.for_connector('print')
.schema(Schema.new_builder()
.column('id', DataTypes.BIGINT())
.column('data', DataTypes.STRING())
.build())
.build())
# update json columns
@udf(result_type=DataTypes.STRING())
def update_tel(data):
json_data = json.loads(data)
json_data['tel'] += 1
return json.dumps(json_data)
table = table.select(table.id, update_tel(table.data))
# execute
table.execute_insert('sink') \
.wait()
# remove .wait if submitting to a remote cluster, refer to
# https://nightlies.apache.org/flink/flink-docs-stable/docs/dev/python/faq/#wait-for-jobs-to-finish-when-executing-jobs-in-mini-cluster
# for more details
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format="%(message)s")
process_json_data_with_udf()
|
nilq/baby-python
|
python
|
from astroquery import alfa
# Test Case: A Seyfert 1 galaxy
RA = '0h8m05.63s'
DEC = '14d50m23.3s'
def test_alfa_catalog():
cat = alfa.get_catalog()
def test_alfa_spectrum():
sp = alfa.get_spectrum(ra=RA, dec=DEC, counterpart=True)
if __name__ == '__main__':
test_alfa_catalog()
test_alfa_spectrum()
|
nilq/baby-python
|
python
|
from PyQt5.QtWidgets import QMainWindow,QMessageBox
from PyQt5.QtGui import QImage,QPixmap
from istanbul_city_surveillance_cameras_Gui_python import Ui_MainWindow
from src.camera_list import selected_camera
from src.yolov4_pred import YOLOv4
import os
import time
import cv2
class istanbul_city_surveillance_cameras(QMainWindow):
def __init__(self):
super().__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.ui.pushButton.clicked.connect(self.start_predict)
self.ui.comboBox_2.currentIndexChanged[int].connect(self.select_camera)
self.ui.pushButton_2.clicked.connect(self.page_menu)
self.ui.stackedWidget.setCurrentIndex(0)
def page_menu(self):
self.ui.stackedWidget.setCurrentIndex(0)
self.stop = False
    #==================== The camera region to run detection on is selected ==========================
def select_camera(self,index):
if index != 0:
self.camera_index = index
self.camera_name = self.ui.comboBox_2.itemText(index)
self.url_cam = selected_camera(self.camera_index)
            print('url address', self.url_cam)
    #=========== show the processed image ================================
def show_images_area(self,img):
geo = self.ui.label_mobese.geometry()
w,h = geo.getRect()[2:]
image = cv2.resize(img,(w,h))
frame = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
image = QImage(frame,frame.shape[1],frame.shape[0],frame.strides[0],QImage.Format_RGB888)
self.ui.label_mobese.setPixmap(QPixmap.fromImage(image))
    #=================== Starts making predictions on the selected surveillance camera ============================
def start_predict(self):
self.ui.stackedWidget.setCurrentIndex(1)
self.stop = True
        #======= Loading the YOLOv4 weights ========================
weightsPath = "yolo_weight/yolov4-obj.weights"
configPath = "yolo_weight/yolov4-obj.cfg"
net = cv2.dnn.readNet(weightsPath, configPath)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA_FP16)
cnt=0
frames_to_count=20
st=0
vid = cv2.VideoCapture(self.url_cam)
        if not vid.isOpened():
            QMessageBox.about(self, 'Error', 'Cannot connect to the camera. Go back to the menu page')
        #============ if the camera is opened, stream the frames ===========
while(vid.isOpened()):
ret,frame = vid.read()
predicted_img,car_count,people_count = YOLOv4(net,frame)
if car_count>20 :
car_crowd = 'Crowded'
else:
car_crowd = 'Normal'
if people_count>20 :
people_crowd = 'Crowded'
else:
people_crowd = 'Normal'
text = f'Predictions on {self.camera_name} camera People count: {people_count} -- {people_crowd} , Car count: {car_count} -- {car_crowd} '
self.ui.label.setText(text)
if cnt == frames_to_count:
try:
print(frames_to_count/(time.time()-st),'FPS')
fps = round(frames_to_count/(time.time()-st))
st = time.time()
cnt=0
except:
pass
cnt+=1
self.show_images_area(predicted_img)
            # cv2.namedWindow('predict_video', cv2.WINDOW_NORMAL)
            # cv2.resizeWindow('predict_video', 1400, 750)
            # cv2.imshow("predict_video", frame)
            cv2.waitKey(1)
            if not self.stop:  # page_menu() sets stop to False to end the stream
                self.ui.label_mobese.clear()
                break
vid.release()
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
# Copyright 2018 The trfl Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Unit tests for discrete-action Policy Gradient functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from absl.testing import parameterized
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow.compat.v1 as tf
import tree as nest
from trfl import discrete_policy_gradient_ops as pg_ops
class EntropyCostTest(parameterized.TestCase, tf.test.TestCase):
"""Tests for discrete_policy_entropy op."""
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testEntropy(self, is_multi_actions):
with self.test_session() as sess:
# Large values check numerical stability through the logs
policy_logits_np = np.array([[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000],
[0, 1000]])
if is_multi_actions:
num_action_components = 3
policy_logits_nest = [tf.constant(policy_logits_np, dtype=tf.float32)
for _ in xrange(num_action_components)]
else:
num_action_components = 1
policy_logits_nest = tf.constant(policy_logits_np, dtype=tf.float32)
entropy_op = pg_ops.discrete_policy_entropy_loss(policy_logits_nest)
entropy = entropy_op.extra.entropy
self.assertEqual(entropy.get_shape(), tf.TensorShape(6))
# Get these reference values in Torch with:
# c = nnd.EntropyCriterion()
# s = nn.LogSoftMax()
# result = c:forward(s:forward(logits))
expected_entropy = num_action_components * np.array(
[0.58220309, 0.58220309, 0.36533386, 0.69314718, 0, 0])
self.assertAllClose(sess.run(entropy),
expected_entropy,
atol=1e-4)
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testGradient(self, is_multi_actions):
with self.test_session() as sess:
policy_logits_np = np.array([[0, 1], [1, 2], [0, 2], [1, 1], [0, -1000],
[0, 1000]])
if is_multi_actions:
num_action_components = 3
policy_logits_nest = [tf.constant(policy_logits_np, dtype=tf.float32)
for _ in xrange(num_action_components)]
else:
num_action_components = 1
policy_logits_nest = tf.constant(policy_logits_np, dtype=tf.float32)
entropy_op = pg_ops.discrete_policy_entropy_loss(policy_logits_nest)
entropy = entropy_op.extra.entropy
# Counterintuitively, the gradient->0 as policy->deterministic, that's why
# the gradients for the large logit cases are `[0, 0]`. They should
# strictly be >0, but they get truncated when we run out of precision.
expected_gradients = np.array([[0.1966119, -0.1966119],
[0.1966119, -0.1966119],
[0.2099872, -0.2099872],
[0, 0],
[0, 0],
[0, 0]])
for policy_logits in nest.flatten(policy_logits_nest):
gradients = tf.gradients(entropy, policy_logits)
grad_policy_logits = sess.run(gradients[0])
self.assertAllClose(grad_policy_logits,
expected_gradients,
atol=1e-4)
@parameterized.named_parameters(('TwoActions', 2),
('FiveActions', 5),
('TenActions', 10),
('MixedMultiActions', [2, 5, 10]))
def testNormalisation(self, num_actions):
with self.test_session() as sess:
if isinstance(num_actions, list):
policy_logits = [tf.constant([[1.0] * n], dtype=tf.float32)
for n in num_actions]
else:
policy_logits = tf.constant(
[[1.0] * num_actions], dtype=tf.float32)
entropy_op = pg_ops.discrete_policy_entropy_loss(
policy_logits, normalise=True)
self.assertAllClose(sess.run(entropy_op.loss), [-1.0])
@parameterized.named_parameters(
('Fixed', 5, 4, 3, False),
('DynamicLength', None, 4, 3, False),
('DynamicBatch', 5, None, 3, False),
('DynamicBatchAndLength', None, None, 3, False),
('DynamicAll', None, None, None, False),
('NormFixed', 5, 4, 3, True),
('NormDynamicLength', None, 4, 3, True),
('NormDynamicBatch', 5, None, 3, True),
('NormDynamicBatchAndLength', None, None, 3, True),
('NormDynamicAll', None, None, None, True))
def testShapeInference3D(self, sequence_length, batch_size, num_actions,
normalise):
T, B, A = sequence_length, batch_size, num_actions # pylint: disable=invalid-name
op = pg_ops.discrete_policy_entropy_loss(
policy_logits=tf.placeholder(tf.float32, shape=[T, B, A]),
normalise=normalise)
op.extra.entropy.get_shape().assert_is_compatible_with([T, B])
op.loss.get_shape().assert_is_compatible_with([T, B])
@parameterized.named_parameters(
('Fixed2D', 4, 3, False),
('DynamicBatch2D', None, 3, False),
('DynamicAll2D', None, None, False),
('NormFixed2D', 4, 3, True),
('NormDynamicBatch2D', None, 3, True),
('NormDynamicAll2D', None, None, True))
def testShapeInference2D(self, batch_size, num_actions, normalise):
policy_logits = tf.placeholder(tf.float32, shape=[batch_size, num_actions])
op = pg_ops.discrete_policy_entropy_loss(policy_logits, normalise=normalise)
op.extra.entropy.get_shape().assert_is_compatible_with([batch_size])
op.loss.get_shape().assert_is_compatible_with([batch_size])
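# Illustrative numpy cross-check (not part of trfl) of the reference entropy
# values used in testEntropy above:
#   H(logits) = -sum_a softmax(logits)[a] * log_softmax(logits)[a]
def numpy_entropy_sketch():
    logits = np.array([[0., 1.], [1., 2.], [0., 2.], [1., 1.],
                       [0., -1000.], [0., 1000.]])
    shifted = logits - logits.max(axis=1, keepdims=True)  # numerical stability
    log_z = np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    log_p = shifted - log_z
    p = np.exp(log_p)
    # ~[0.5822, 0.5822, 0.3653, 0.6931, 0, 0], matching expected_entropy above
    return -(p * log_p).sum(axis=1)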
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
class DiscretePolicyGradientLossTest(parameterized.TestCase, tf.test.TestCase):
"""Tests for discrete_policy_gradient_loss op."""
def _setUpLoss(self, is_multi_actions):
policy_logits_np = np.array([[[0, 1], [0, 1]],
[[1, 1], [0, 100]]])
actions_np = np.array([[0, 0],
[1, 1]], dtype=np.int32)
if is_multi_actions:
self._num_action_components = 3
self._policy_logits_nest = [
tf.constant(policy_logits_np, dtype=tf.float32)
for _ in xrange(self._num_action_components)]
self._actions_nest = [tf.constant(actions_np, dtype=tf.int32)
for _ in xrange(self._num_action_components)]
else:
self._num_action_components = 1
self._policy_logits_nest = tf.constant(policy_logits_np, dtype=tf.float32)
self._actions_nest = tf.constant(actions_np, dtype=tf.int32)
self._action_values = tf.constant([[0, 1], [2, 1]], dtype=tf.float32)
self._loss = pg_ops.discrete_policy_gradient_loss(
self._policy_logits_nest, self._actions_nest, self._action_values)
def testLoss(self, is_multi_actions):
self._setUpLoss(is_multi_actions)
with self.test_session() as sess:
self.assertEqual(self._loss.get_shape(), tf.TensorShape(2)) # [B]
self.assertAllClose(
sess.run(self._loss),
# computed by summing expected losses from DiscretePolicyGradientTest
# over the two sequences of length two which I've split the batch
# into:
self._num_action_components * np.array([1.386294, 1.313262]))
def testGradients(self, is_multi_actions):
self._setUpLoss(is_multi_actions)
with self.test_session() as sess:
total_loss = tf.reduce_sum(self._loss)
gradients = tf.gradients(
[total_loss], nest.flatten(self._policy_logits_nest))
grad_policy_logits_nest = sess.run(gradients)
for grad_policy_logits in grad_policy_logits_nest:
self.assertAllClose(grad_policy_logits,
[[[0, 0], [-0.731, 0.731]],
[[1, -1], [0, 0]]], atol=1e-4)
dead_grads = tf.gradients(
[total_loss],
nest.flatten(self._actions_nest) + [self._action_values])
for grad in dead_grads:
self.assertIsNone(grad)
class DiscretePolicyGradientTest(tf.test.TestCase):
"""Tests for discrete_policy_gradient op."""
def testLoss(self):
with self.test_session() as sess:
policy_logits = tf.constant([[0, 1], [0, 1], [1, 1], [0, 100]],
dtype=tf.float32)
action_values = tf.constant([0, 1, 2, 1], dtype=tf.float32)
actions = tf.constant([0, 0, 1, 1], dtype=tf.int32)
loss = pg_ops.discrete_policy_gradient(policy_logits, actions,
action_values)
self.assertEqual(loss.get_shape(), tf.TensorShape(4))
# Calculate the targets with:
# loss = action_value*(-logits[action] + log(sum_a(exp(logits[a]))))
# The final case (with large logits), runs out of precision and gets
# truncated to 0, but isn't `nan`.
self.assertAllClose(sess.run(loss), [0, 1.313262, 1.386294, 0])
def testGradients(self):
with self.test_session() as sess:
policy_logits = tf.constant([[0, 1], [0, 1], [1, 1], [0, 100]],
dtype=tf.float32)
action_values = tf.constant([0, 1, 2, 1], dtype=tf.float32)
actions = tf.constant([0, 0, 1, 1], dtype=tf.int32)
loss = pg_ops.discrete_policy_gradient(policy_logits, actions,
action_values)
total_loss = tf.reduce_sum(loss)
gradients = tf.gradients([total_loss], [policy_logits])
grad_policy_logits = sess.run(gradients[0])
# The final case (with large logits), runs out of precision and gets
# truncated to 0, but isn't `nan`.
self.assertAllClose(grad_policy_logits,
[[0, 0], [-0.731, 0.731], [1, -1], [0, 0]], atol=1e-4)
self.assertAllEqual(tf.gradients([total_loss], [actions, action_values]),
[None, None])
def testDynamicBatchSize(self):
policy_logits = tf.placeholder(tf.float32, shape=[None, 3])
action_values = tf.placeholder(tf.float32, shape=[None])
actions = tf.placeholder(tf.int32, shape=[None])
loss = pg_ops.discrete_policy_gradient(policy_logits, actions,
action_values)
self.assertEqual(loss.get_shape().as_list(), [None])
gradients = tf.gradients(tf.reduce_sum(loss), [policy_logits])
self.assertAllEqual(gradients[0].get_shape().as_list(), [None, 3])
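# Illustrative numpy cross-check (not part of trfl) of the formula quoted in
# DiscretePolicyGradientTest.testLoss above:
#   loss = action_value * (-logits[action] + log(sum_a(exp(logits[a]))))
def numpy_policy_gradient_loss_sketch():
    logits = np.array([[0., 1.], [0., 1.], [1., 1.], [0., 100.]])
    action_values = np.array([0., 1., 2., 1.])
    actions = np.array([0, 0, 1, 1])
    log_sum_exp = np.log(np.exp(logits).sum(axis=1))
    chosen_logits = logits[np.arange(len(actions)), actions]
    # ~[0, 1.313262, 1.386294, 0], matching the expected values in testLoss
    return action_values * (log_sum_exp - chosen_logits)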
class SequenceAdvantageActorCriticLossTest(parameterized.TestCase,
tf.test.TestCase):
@parameterized.named_parameters(
('SingleActionEntropyNormalise', False, True),
('SingleActionNoEntropyNormalise', False, False),
('MultiActionsEntropyNormalise', True, True),
('MultiActionsNoEntropyNormalise', True, False),
)
def testLossSequence(self, is_multi_actions, normalise_entropy):
# A sequence of length 2, batch size 1, 3 possible actions.
num_actions = 3
policy_logits = [[[0., 0., 1.]], [[0., 1., 0.]]]
actions = [[0], [1]]
baseline_values = [[0.2], [0.3]]
rewards = [[0.4], [0.5]]
pcontinues = [[0.9], [0.8]]
bootstrap_value = [0.1]
baseline_cost = 0.15
entropy_cost = 0.25
if is_multi_actions:
num_action_components = 3
policy_logits_nest = [tf.constant(policy_logits, dtype=tf.float32)
for _ in xrange(num_action_components)]
actions_nest = [tf.constant(actions, dtype=tf.int32)
for _ in xrange(num_action_components)]
else:
num_action_components = 1
policy_logits_nest = tf.constant(policy_logits, dtype=tf.float32)
actions_nest = tf.constant(actions, dtype=tf.int32)
loss, extra = pg_ops.sequence_advantage_actor_critic_loss(
policy_logits_nest,
tf.constant(baseline_values, dtype=tf.float32),
actions_nest,
tf.constant(rewards, dtype=tf.float32),
tf.constant(pcontinues, dtype=tf.float32),
tf.constant(bootstrap_value, dtype=tf.float32),
baseline_cost=baseline_cost,
entropy_cost=entropy_cost,
normalise_entropy=normalise_entropy)
# Manually calculate the discounted returns.
return1 = 0.5 + 0.8 * 0.1
return0 = 0.4 + 0.9 * return1
with self.test_session() as sess:
# Discounted returns
self.assertAllClose(sess.run(extra.discounted_returns),
[[return0], [return1]])
# Advantages
advantages = [return0 - baseline_values[0][0],
return1 - baseline_values[1][0]]
self.assertAllClose(sess.run(extra.advantages),
[[adv] for adv in advantages])
# Baseline
expected_baseline_loss = baseline_cost*sum([0.5 * adv**2 for adv in
advantages])
self.assertAllClose(
sess.run(extra.baseline_loss), [expected_baseline_loss])
# Policy Gradient loss
# loss = sum_t(action_value*(-logits[action] +
# log(sum_a(exp(logits[a])))))
#
# The below takes advantage of there only being one minibatch dim.
normalise = lambda logits: np.log(np.exp(logits).sum())
batch = 0
expected_policy_gradient_loss = num_action_components * sum([
advantages[0]*(-(policy_logits[0][batch][actions[0][batch]]) +
normalise(policy_logits[0])),
advantages[1]*(-(policy_logits[1][batch][actions[1][batch]]) +
normalise(policy_logits[1])),
])
self.assertAllClose(sess.run(extra.policy_gradient_loss),
[expected_policy_gradient_loss])
# Entropy, calculated as per discrete_policy_entropy tests.
expected_entropy = num_action_components*0.97533*2
expected_entropy_loss = -entropy_cost*expected_entropy
if normalise_entropy:
expected_entropy_loss /= (num_action_components * np.log(num_actions))
self.assertAllClose(sess.run(extra.entropy),
[expected_entropy], atol=1e-4)
self.assertAllClose(sess.run(extra.entropy_loss), [expected_entropy_loss],
atol=1e-4)
# Total loss
expected_loss = [expected_entropy_loss + expected_policy_gradient_loss +
expected_baseline_loss]
self.assertAllClose(sess.run(loss), expected_loss, atol=1e-4)
@parameterized.named_parameters(('Fixed', 5, 4, 3),
('DynamicLength', None, 4, 3),
('DynamicBatch', 5, None, 3),
('DynamicBatchAndLength', None, None, 3),
('DynamicAll', None, None, None))
def testShapeInference(self, sequence_length, batch_size, num_actions):
T, B, A = sequence_length, batch_size, num_actions # pylint: disable=invalid-name
loss, extra = pg_ops.sequence_advantage_actor_critic_loss(
policy_logits=tf.placeholder(tf.float32, shape=[T, B, A]),
baseline_values=tf.placeholder(tf.float32, shape=[T, B]),
actions=tf.placeholder(tf.int32, shape=[T, B]),
rewards=tf.placeholder(tf.float32, shape=[T, B]),
pcontinues=tf.placeholder(tf.float32, shape=[T, B]),
bootstrap_value=tf.placeholder(tf.float32, shape=[B]),
entropy_cost=1)
extra.discounted_returns.get_shape().assert_is_compatible_with([T, B])
extra.advantages.get_shape().assert_is_compatible_with([T, B])
extra.baseline_loss.get_shape().assert_is_compatible_with([B])
extra.policy_gradient_loss.get_shape().assert_is_compatible_with([B])
extra.entropy.get_shape().assert_is_compatible_with([B])
extra.entropy_loss.get_shape().assert_is_compatible_with([B])
loss.get_shape().assert_is_compatible_with([B])
@parameterized.named_parameters(('Fixed', 5, 4, 3),
('DynamicLength', None, 4, 3),
('DynamicBatch', 5, None, 3),
('DynamicBatchAndLength', None, None, 3),
('DynamicAll', None, None, None))
def testShapeInferenceGAE(self, sequence_length, batch_size, num_actions):
T, B, A = sequence_length, batch_size, num_actions # pylint: disable=invalid-name
loss, extra = pg_ops.sequence_advantage_actor_critic_loss(
policy_logits=tf.placeholder(tf.float32, shape=[T, B, A]),
baseline_values=tf.placeholder(tf.float32, shape=[T, B]),
actions=tf.placeholder(tf.int32, shape=[T, B]),
rewards=tf.placeholder(tf.float32, shape=[T, B]),
pcontinues=tf.placeholder(tf.float32, shape=[T, B]),
bootstrap_value=tf.placeholder(tf.float32, shape=[B]),
lambda_=0.9,
entropy_cost=1)
extra.discounted_returns.get_shape().assert_is_compatible_with([T, B])
extra.advantages.get_shape().assert_is_compatible_with([T, B])
extra.baseline_loss.get_shape().assert_is_compatible_with([B])
extra.policy_gradient_loss.get_shape().assert_is_compatible_with([B])
extra.entropy.get_shape().assert_is_compatible_with([B])
extra.entropy_loss.get_shape().assert_is_compatible_with([B])
loss.get_shape().assert_is_compatible_with([B])
class SequenceAdvantageActorCriticLossGradientTest(parameterized.TestCase,
tf.test.TestCase):
def setUp(self):
super(SequenceAdvantageActorCriticLossGradientTest, self).setUp()
self.num_actions = 3
self.num_action_components = 5
policy_logits_np = np.array([[[0., 0., 1.]], [[0., 1., 0.]]])
self.policy_logits = tf.constant(policy_logits_np, dtype=tf.float32)
self.multi_policy_logits = [tf.constant(policy_logits_np, dtype=tf.float32)
for _ in xrange(self.num_action_components)]
self.baseline_values = tf.constant([[0.2], [0.3]])
actions_np = np.array([[0], [1]])
actions = tf.constant(actions_np)
multi_actions = [tf.constant(actions_np)
for _ in xrange(self.num_action_components)]
rewards = tf.constant([[0.4], [0.5]])
pcontinues = tf.constant([[0.9], [0.8]])
bootstrap_value = tf.constant([0.1])
baseline_cost = 0.15
entropy_cost = 0.25
self.op = pg_ops.sequence_advantage_actor_critic_loss(
self.policy_logits, self.baseline_values, actions, rewards, pcontinues,
bootstrap_value, baseline_cost=baseline_cost, entropy_cost=entropy_cost)
self.multi_op = pg_ops.sequence_advantage_actor_critic_loss(
self.multi_policy_logits, self.baseline_values, multi_actions, rewards,
pcontinues, bootstrap_value, baseline_cost=baseline_cost,
entropy_cost=entropy_cost)
self.invalid_grad_inputs = [actions, rewards, pcontinues, bootstrap_value]
self.invalid_grad_outputs = [None]*len(self.invalid_grad_inputs)
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testPolicyGradients(self, is_multi_actions):
if is_multi_actions:
loss = self.multi_op.extra.policy_gradient_loss
policy_logits_nest = self.multi_policy_logits
else:
loss = self.op.extra.policy_gradient_loss
policy_logits_nest = self.policy_logits
grad_policy_list = [
tf.gradients(loss, policy_logits)[0] * self.num_actions
for policy_logits in nest.flatten(policy_logits_nest)]
for grad_policy in grad_policy_list:
self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))
self.assertAllEqual(tf.gradients(loss, self.baseline_values), [None])
self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
self.invalid_grad_outputs)
def testNonDifferentiableDiscountedReturns(self):
self.assertAllEqual(tf.gradients(self.op.extra.discounted_returns,
self.invalid_grad_inputs),
self.invalid_grad_outputs)
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testEntropyGradients(self, is_multi_actions):
if is_multi_actions:
loss = self.multi_op.extra.entropy_loss
policy_logits_nest = self.multi_policy_logits
else:
loss = self.op.extra.entropy_loss
policy_logits_nest = self.policy_logits
grad_policy_list = [
tf.gradients(loss, policy_logits)[0] * self.num_actions
for policy_logits in nest.flatten(policy_logits_nest)]
for grad_policy in grad_policy_list:
self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))
self.assertAllEqual(tf.gradients(loss, self.baseline_values), [None])
self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
self.invalid_grad_outputs)
def testBaselineGradients(self):
loss = self.op.extra.baseline_loss
grad_baseline = tf.gradients(loss, self.baseline_values)[0]
self.assertEqual(grad_baseline.get_shape(), tf.TensorShape([2, 1]))
self.assertAllEqual(tf.gradients(loss, self.policy_logits), [None])
self.assertAllEqual(tf.gradients(loss, self.invalid_grad_inputs),
self.invalid_grad_outputs)
@parameterized.named_parameters(('SingleAction', False),
('MultiActions', True))
def testTotalLossGradients(self, is_multi_actions):
with self.test_session() as sess:
if is_multi_actions:
total_loss = tf.reduce_sum(self.multi_op.loss)
policy_logits_nest = self.multi_policy_logits
else:
total_loss = tf.reduce_sum(self.op.loss)
policy_logits_nest = self.policy_logits
grad_policy_list = [
tf.gradients(total_loss, policy_logits)[0]
for policy_logits in nest.flatten(policy_logits_nest)]
grad_baseline = tf.gradients(total_loss, self.baseline_values)[0]
for grad_policy in grad_policy_list:
self.assertEqual(grad_policy.get_shape(), tf.TensorShape([2, 1, 3]))
# These values were just generated once and hard-coded here to check for
# regressions. Calculating by hand would be too time-consuming,
# error-prone and unreadable.
self.assertAllClose(sess.run(grad_policy),
[[[-0.5995, 0.1224, 0.4770]],
[[0.0288, -0.0576, 0.0288]]],
atol=1e-4)
self.assertEqual(grad_baseline.get_shape(), tf.TensorShape([2, 1]))
self.assertAllClose(sess.run(grad_baseline), [[-0.1083], [-0.0420]],
atol=1e-4)
self.assertAllEqual(tf.gradients(total_loss, self.invalid_grad_inputs),
self.invalid_grad_outputs)
if __name__ == '__main__':
tf.test.main()
|
nilq/baby-python
|
python
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CacheExpirationActionParameters(Model):
"""Defines the parameters for the cache expiration action.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar odatatype: Required. Default value:
"Microsoft.Azure.Cdn.Models.DeliveryRuleCacheExpirationActionParameters" .
:vartype odatatype: str
:param cache_behavior: Required. Caching behavior for the requests that
include query strings. Possible values include: 'BypassCache', 'Override',
'SetIfMissing'
:type cache_behavior: str or ~azure.mgmt.cdn.models.enum
:ivar cache_type: Required. The level at which the content needs to be
cached. Default value: "All" .
:vartype cache_type: str
    :param cache_duration: The duration for which the content needs to be
     cached. Allowed format is [d.]hh:mm:ss
:type cache_duration: str
"""
_validation = {
'odatatype': {'required': True, 'constant': True},
'cache_behavior': {'required': True},
'cache_type': {'required': True, 'constant': True},
}
_attribute_map = {
'odatatype': {'key': '@odata\\.type', 'type': 'str'},
'cache_behavior': {'key': 'cacheBehavior', 'type': 'str'},
'cache_type': {'key': 'cacheType', 'type': 'str'},
'cache_duration': {'key': 'cacheDuration', 'type': 'str'},
}
odatatype = "Microsoft.Azure.Cdn.Models.DeliveryRuleCacheExpirationActionParameters"
cache_type = "All"
def __init__(self, *, cache_behavior, cache_duration: str=None, **kwargs) -> None:
super(CacheExpirationActionParameters, self).__init__(**kwargs)
self.cache_behavior = cache_behavior
self.cache_duration = cache_duration
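# Illustrative usage sketch (not part of the AutoRest-generated code): build
# the parameters for a cache-expiration delivery rule action. The concrete
# values below are made-up examples; 'odatatype' and 'cache_type' are class
# constants populated by the model itself.
if __name__ == '__main__':
    params = CacheExpirationActionParameters(
        cache_behavior='Override',    # or 'BypassCache' / 'SetIfMissing'
        cache_duration='1.12:00:00',  # [d.]hh:mm:ss -> 1 day, 12 hours
    )
    print(params.odatatype, params.cache_behavior, params.cache_duration)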
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os
from pathlib import Path
import flash
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy as sp
datadir = Path("/kaggle/input")
from flash.core.utilities.imports import _ICEVISION_AVAILABLE
from flash.image.data import IMG_EXTENSIONS, NP_EXTENSIONS, image_loader
if _ICEVISION_AVAILABLE:
from icevision.core.record import BaseRecord
from icevision.core.record_components import ClassMapRecordComponent, FilepathRecordComponent, tasks
from icevision.data.data_splitter import SingleSplitSplitter
from icevision.parsers.parser import Parser
else:
assert 0, "with ice please"
# In[108]:
from pathlib import Path
from icevision.all import *
from icevision.core.record_defaults import KeypointsRecord
class CustomBIWIKeypointsMetadata(KeypointsMetadata):
labels = ["center"] # , "apex", "root"]
class BiwiPNG:
# TODO cache calibration for each subject (avoid loading for every frame)
def load_keypoints(self, impath):
name = str(impath)[:-8]
pose = np.loadtxt(name + "_pose.txt")
R = pose[:3, :3] # pose rotation from standard pose to this
centre_biwi = pose[3, :]
cal_rgb = os.path.join(os.path.split(name)[0], "rgb.cal")
cal_rgb_P = np.eye(4)
cal_rgb_P[:3, :3] = np.genfromtxt(cal_rgb, skip_header=5, skip_footer=2)
cal_rgb_P[:3, 3] = np.genfromtxt(cal_rgb, skip_header=9, skip_footer=1)
cal_rgb = np.genfromtxt(cal_rgb, skip_footer=6)
def biwi2img(vec, camera_cal=True):
if camera_cal: # RGB camera calibration
x, y, z = cal_rgb_P[:3, :3] @ vec + cal_rgb_P[:3, 3]
else:
x, y, z = vec
# BIWI world to image conversion
# x <--> v
# y <--> u
# z == d
v = x * cal_rgb[0, 0] / z + cal_rgb[0, 2]
u = y * cal_rgb[1, 1] / z + cal_rgb[1, 2]
return u, v
centre = biwi2img(centre_biwi)
# assuming the standard orientation of the nose is frontal upright, apex and root distance and directions are guesses
dist = 50.0
apex = biwi2img(centre_biwi + dist * R @ np.array([0, 0, -1.0]))
root = biwi2img(
centre_biwi + dist / np.sqrt(2) * R @ np.array([0, -1.0, -1.0])
) # guessed 45 degree angle towards root
return {"center": centre, "apex": apex, "root": root}
class CustomParser(Parser):
def __init__(self, img_dir: Union[str, Path], imgID_annotations: Dict, idmap=None):
super().__init__(template_record=self.template_record(), idmap=idmap)
self.img_dir = Path(img_dir)
self.class_map = ClassMap(CustomBIWIKeypointsMetadata().labels)
self.annotations_dict = imgID_annotations
def __iter__(self):
yield from self.annotations_dict.items()
def __len__(self):
return len(self.annotations_dict)
def template_record(self) -> BaseRecord:
return KeypointsRecord()
def record_id(self, o):
return o[0]
def filepath(self, o):
return self.img_dir / o[0]
def keypoints(self, o):
return [
KeyPoints.from_xyv([x, y, 1], CustomBIWIKeypointsMetadata) for y, x in o[1]
] # TODO check coordinate flip
def image_width_height(self, o) -> Tuple[int, int]:
return get_img_size(self.filepath(o))
def labels(self, o) -> List[Hashable]:
return list(range(1, len(CustomBIWIKeypointsMetadata().labels) + 1))
def bboxes(self, o) -> List[BBox]:
w, h = get_img_size(self.filepath(o))
return [BBox.from_xywh(0, 0, w, h)] * (len(CustomBIWIKeypointsMetadata().labels))
def parse_fields(self, o, record, is_new):
if is_new:
record.set_filepath(self.filepath(o))
record.set_img_size(self.image_width_height(o))
record.detection.set_class_map(self.class_map)
record.detection.add_labels_by_id(self.labels(o))
record.detection.add_bboxes(self.bboxes(o))
record.detection.add_keypoints(self.keypoints(o))
def parser(data_dir: Path):
images = sorted(Path(data_dir).glob("??/frame_*_rgb.png"))[:100] # TODO remove truncation
imgID_annotations = {}
biwi = BiwiPNG()
for im in images:
keypoints = biwi.load_keypoints(im)
imgID_annotations[str(im.relative_to(data_dir))] = [keypoints["center"]] # TODO add other keypoints
return CustomParser(img_dir=data_dir, imgID_annotations=imgID_annotations)
if True:
p = parser(datadir)
p.parse()
for s in p:
break
r = KeypointsRecord()
p.parse_fields(s, r, True)
for kp in p.keypoints(s):
print(kp.xyv)
print(s, r)
# In[109]:
from flash.image import KeypointDetectionData, KeypointDetector
datamodule = KeypointDetectionData.from_icedata(parser=parser, train_folder=datadir, batch_size=8)
model = KeypointDetector(
head="keypoint_rcnn",
backbone="resnet18_fpn",
num_keypoints=3,
num_classes=3,
)
trainer = flash.Trainer(max_epochs=2, gpus=1)
trainer.finetune(model, datamodule=datamodule, strategy="freeze")
# In[110]:
sample = datamodule.train_dataset[0]
sample
# In[111]:
from flash.core.data.io.input import DataKeys
plt.imshow(sample[DataKeys.INPUT])
plt.scatter(
sample[DataKeys.TARGET]["keypoints"][0][0]["x"], sample[DataKeys.TARGET]["keypoints"][0][0]["y"], marker="+"
)
sample
# In[ ]:
# In[ ]:
# In[ ]:
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''
The actual starting of the app is handled via run
'''
__author__ = "R. Bauer"
__copyright__ = "MedPhyDO - Machbarkeitsstudien des Instituts für Medizinische Strahlenphysik und Strahlenschutz am Klinikum Dortmund im Rahmen von Bachelor und Masterarbeiten an der TU-Dortmund / FH-Dortmund"
__credits__ = ["R.Bauer", "K.Loot"]
__license__ = "MIT"
__version__ = "0.1.5"
__status__ = "Prototype"
import logging
from isp.config import ispConfig
from app.db import gqadb
from app.api import gqa
from isp.webapp import ispBaseWebApp
from isp.safrs import db, system
class system( system ):
@classmethod
def _extendedSystemCheck(self):
"""filled Stub Function for api_list (Systeminformationen)
Returns
-------
dict, string
"""
import os
import json
def checkPath( path, info ):
html = ""
if os.path.exists( path ):
info_class = "success"
info_text = "{} ist vorhanden.".format( info )
else:
info_class = "danger"
info_text = "{} ist nicht vorhanden.".format( info )
html += '<div class="alert alert-{} ">{}</div>'.format( info_class, info_text )
if os.path.isdir( path ):
info_class = "success"
info_text = "{} ist ein Verzeichnis.".format( info )
else:
info_class = "danger"
info_text = "{} ist kein Verzeichnis.".format( info )
html += '<div class="alert alert-{} ">{}</div>'.format( info_class, info_text )
if os.access(path, os.W_OK):
info_class = "success"
info_text = "{} ist beschreibbar.".format( info )
else:
info_class = "danger"
info_text = "{} ist nicht beschreibbar.".format( info )
html += '<div class="alert alert-{} ">{}</div>'.format( info_class, info_text )
return html
config = ispConfig()
html = "<h4>System Check</h4>"
from isp.config import dict_merge
        # --------------- Aria database
from app.aria import ariaClass
_database_key = config.get( "database.servername", "" )
connect = None
aria = None
loaded_config = config.get( ["database", _database_key] )
db_config = {
"dbname":"notset",
"host":"notset/unused",
"user":"notset",
"password":"notset",
"engine":"notset/unused",
"dsn":"notset/unused"
}
if loaded_config:
db_config = dict_merge( db_config, loaded_config.toDict() )
aria = ariaClass( _database_key, config )
connect = aria.openDatabase( _database_key )
else:
html += '<div class="alert alert-danger">Kein passender Eintrag für <b>server.database.servername</b></div>'
html += '<div class="alert alert-dark" >Prüfe Datenbankzugriff <span class="badge badge-info">database.servername</span>: <b>{}</b> - Konfiguration:'.format( _database_key )
db_config_copy = db_config.copy()
db_config_copy["password"] = "******"
html += '</br> <pre>{}</pre>'.format( json.dumps( db_config_copy, indent=2 ) )
info_text = "Der Zugriff auf die Datenbank dbname:<b>{dbname}</b>, user:<b>{user}</b>".format( **db_config )
if not connect:
info_class = "danger"
info_text = "{} ist nicht möglich.".format( info_text )
else:
info_class = "success"
info_text = "{} ist möglich.".format( info_text )
db_version = aria.getDbVersion( )
info_text += "<br>{}".format( db_version.replace( "\\n", "<br>").replace('\\t', '') )
html += '<div class="alert alert-{} ">{}</div>'.format( info_class, info_text )
if connect:
html += '<div class="alert alert-dark" >Prüfe Patienten für <span class="badge badge-info">units</span> - Konfiguration:'
html += '</br> <pre>{}</pre>'.format( json.dumps( config.get( "units" ).toDict(), indent=2 ) )
for name, unit in config.get( "units" ).items():
sql = "SELECT PatientSer, PatientId, FirstName, LastName FROM [{dbname}].[dbo].[Patient] [Patient]"
sql = sql + " WHERE [PatientId] = '{}' ".format( name )
result = aria.execute( sql )
html += aria.lastExecuteSql
info_text = "PatientId: <b>{}</b>".format( name )
if len( result ) > 0:
info_text = "{} ist vorhanden.".format( info_text )
info_text += '</br> <pre>{}</pre>'.format( json.dumps( result[0], indent=2 ) )
info_class = "success"
else:
info_text = "{} ist nicht vorhanden.".format( info_text )
info_class = "danger"
html += '<div class="alert alert-{} ">{}'.format( info_class, info_text )
if len( result ) > 0:
html += "<br>Prüfe Tags im Datenbankfeld '[Radiation].[Comment]' für PatientId: <b>{}</b> ".format( name )
tags = aria.getTags( name )
if tags and len( tags ) > 0:
info_text = "<b>{}</b> Tags sind vorhanden.".format( len( tags ) )
info_class = "success"
else:
info_text = "Keine Tags vorhanden."
info_class = "danger"
html += '<div class="alert alert-{} ">{}</div>'.format( info_class, info_text )
html += "</div>"
html += "</div>"
html += "</div>"
# --------------- DICOM
from app.ariadicom import ariaDicomClass
_dicom_key = config.get( "dicom.servername", "" )
adc = ariaDicomClass( _database_key, _dicom_key, config )
loaded_config = config.get( ["dicom", _dicom_key] )
dicom_config = {
"aec" : "notset",
"server_ip": "notset",
"server_port": "notset",
"aet": "notset",
"listen_port": "notset"
}
if loaded_config:
dicom_config = dict_merge(dicom_config, loaded_config.toDict() )
else:
html += '<div class="alert alert-danger">Kein passender Eintrag für <b>server.dicom.servername</b></div>'
html += '<div class="alert alert-dark" >Prüfe Dicom <span class="badge badge-info">dicom.servername</span>: <b>{}</b> - Konfiguration:'.format( _dicom_key )
html += '<pre>{}</pre>'.format( json.dumps( dicom_config, indent=2 ) )
html += '<br>Server Settings - AE Title (aec): <b>{aec}</b> - IP (server_ip): <b>{server_ip}</b> - Port (server_port): <b>{server_port}</b><br>'.format( **dicom_config )
html += '<br>Application Entity Map Entry - AE Title (aet): <b>{aet}</b> - Port (listen_port): <b>{listen_port}</b>'.format( **dicom_config )
html += '<div class="alert alert-dark" >Prüfe Verzeichnis: <span class="badge badge-info">dicom.{}.local_dir</span>'.format( _dicom_key )
html += checkPath( dicom_config.get("local_dir", "notset" ) , '<span class="badge badge-info">dicom.{}.local_dir</span>'.format(_dicom_key))
html += "</div>"
status = adc.initAE()
dicom_info = adc.getInfo()
adc.closeAE()
if status == 0x0000:
info_class = "success"
info_text = "Dicom Zugriff ist möglich. Associations: "
for association in dicom_info["associations"]:
association["ae_title"] = association["ae_title"].decode().strip()
info_text += '</br> <pre>{}</pre>'.format( json.dumps( association, indent=2 ) )
else:
info_class = "danger"
info_text = "Dicom Zugriff ist nicht möglich. ErrorCode: 0x{0:04x}.".format( status )
html += '<div class="alert alert-{} ">{}</div>'.format( info_class, info_text )
html += "</div>"
# --------------- resultsPath
resultsPath = adc.initResultsPath()
html += '<div class="alert alert-dark" >Prüfe <span class="badge badge-info">resultsPath</span>: <b>{}</b>'.format( resultsPath )
html += checkPath(resultsPath, '<span class="badge badge-info">resultsPath</span>')
html += "</div>"
# --------------- MQTT
mqtt_config = config.get( "server.mqtt" )
mqtt_config_copy = mqtt_config.copy()
mqtt_config_copy.password = "********"
if mqtt_config_copy.get("host", "") == "":
html += '<div class="alert alert-info" >MQTT deaktiviert'
else:
html += '<div class="alert alert-dark" >Prüfe <span class="badge badge-info">server.mqtt</span> - Konfiguration:'
html += '<pre>{}</pre>'.format( json.dumps( mqtt_config_copy.toDict(), indent=2 ) )
mqtt = config.mqttGetHandler()
if not mqtt:
info_class = "danger"
info_text = "MQTT Zugriff ist nicht möglich."
else:
info_class = "info"
info_text = 'MQTT Zugriff ist eingerichtet. <button type="button" class="btn btn-primary" onClick="mqttTest( this )">Prüfen</button>'
html += '<div id="MQTT-checkline" class="alert alert-{} ">{}<div id="MQTT-results" class"alert"></div></div>'.format( info_class, info_text )
html += "</div>"
html += '''
<script>
var box = document.querySelector("#MQTT-checkline");
var result_box = document.querySelector("#MQTT-results");
if ( typeof app.clientMqtt === "object" ) {
app.clientMqtt.subscribe( "MQTT/test", function( msg ) {
box.className = "alert alert-success";
result_box.className = "alert alert-success";
result_box.innerHTML = "MQTT Test erfolgreich";
} );
}
function mqttTest( btn ){
box.className = "alert alert-info";
result_box.className = "";
if ( typeof app.clientMqtt === "object" ) {
result_box.className = "alert alert-danger";
result_box.innerHTML = "MQTT Test nicht erfolgreich.";
app.clientMqtt.publish( "MQTT/test", { "test":"MQTT" } );
} else {
result_box.className = "alert alert-warning";
result_box.innerHTML = "kein clientMqtt vorhanden";
}
}
</script>
'''
return {}, html
# -----------------------------------------------------------------------------
def run( overlay:dict={}, load_tests_db:bool=False ):
    ''' Starts ispBaseWebApp with additional config settings
    Parameters
    ----------
    overlay : dict, optional
        Overlay settings for the config. The default is {}.
    load_tests_db: bool, optional
        Also load the test database.
    Returns
    -------
    webApp : ispBaseWebApp
        The started web application
    '''
    # Open the configuration
_config = ispConfig( mqttlevel=logging.WARNING )
_apiConfig = {
"models": [ gqa, gqadb, system ],
}
if load_tests_db: # pragma: no cover
import tests.testdb as testdb
_apiConfig["models"].append( testdb.dbtests )
    # Start the web server
webApp = ispBaseWebApp( _config, db, apiconfig=_apiConfig, overlay=overlay )
    # Close mqtt in the config
_config.mqttCleanup( )
return webApp
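# Minimal usage sketch: start the web application directly. The overlay keys
# below are an assumption for illustration only (they are not project
# defaults); any valid ispConfig overlay dict can be passed instead.
if __name__ == '__main__':  # pragma: no cover
    webApp = run( overlay={ "server": { "webserver": { "port": 5000 } } } )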
|
nilq/baby-python
|
python
|
import open3d as o3d
import numpy as np
import random
import copy
from aux import *
import aux.aux_ekf as a_ekf
from aux.aux_octree import *
from aux.qhull_2d import *
from aux.min_bounding_rect import *
from aux.aux_voxel_grid import *
import matplotlib.pyplot as plt
import pickle
from timeit import default_timer as timer
import settings
class Plane:
def __init__(self):
self.inliers = []
self.inliersId = []
self.equation = []
self.color = []
self.nPoints = 0
self.centroid = []
self.store_point_bucket = settings.get_setting('save_point_cloud')
self.store_octree_model = settings.get_setting('save_octree')
self.store_voxel_grid_model = settings.get_setting('save_voxel_grid')
self.bucket_octree = []
self.bucket_voxel_grid = []
self.inlier_bucket = o3d.geometry.PointCloud()
self.inlier_bucket.points = o3d.utility.Vector3dVector([])
self.bucket = o3d.geometry.PointCloud()
self.bucket.points = o3d.utility.Vector3dVector([])
self.bucket_pos = o3d.geometry.PointCloud()
self.bucket_pos.points = o3d.utility.Vector3dVector([])
self.bucket_odom = o3d.geometry.PointCloud()
self.bucket_odom.points = o3d.utility.Vector3dVector([])
self.t__bucket =0
self.t__bucket_debug =0
def findPlane(self, pts, thresh=0.05, minPoints=3, maxIteration=1000):
n_points = np.asarray(pts.points).shape[0]
self.nPoints = n_points
#print(n_points)
best_eq = []
best_inliers = []
valid = False
pcd = pts
plane_model, inliers = pcd.segment_plane(distance_threshold=thresh,ransac_n=3,num_iterations=maxIteration)
[a, b, c, d] = plane_model
best_eq = [a, b, c, d]
print(f"Plane equation: {a:.2f}x + {b:.2f}y + {c:.2f}z + {d:.2f} = 0")
if self.store_point_bucket or self.store_octree_model or self.store_voxel_grid_model:
self.inlier_bucket = pcd.select_by_index(inliers)
self.inliers = np.asarray(pcd.select_by_index(inliers).points)
self.inliersId = np.asarray(inliers)
self.equation = [a, b, c, d]
self.centroid = np.mean(self.inliers, axis=0)
#print("Plano tem esse número de pontos como inliers: ", self.inliers.shape[0])
if(int(self.inliers.shape[0]) > 2000):
# pcd = o3d.geometry.PointCloud()
# pcd.points = o3d.utility.Vector3dVector(self.inliers)
# with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:
# labels = np.array(pcd.cluster_dbscan(eps=0.5, min_points=int(self.inliers.shape[0]/400), print_progress=False))
# max_label = labels.max()
# colors = plt.get_cmap("tab20")(labels / (max_label if max_label > 0 else 1))
# colors[labels < 0] = 0
# pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])
# o3d.visualization.draw_geometries([pcd])
# if(max_label > 1):
# self.equation = []
# self.best_inliers = []
pcd = pcd.select_by_index(inliers)
#o3d.visualization.draw_geometries([pcd])
pcd = pcd.voxel_down_sample(voxel_size=settings.get_setting('plane_density_voxel_filter'))
cl, ind = pcd.remove_statistical_outlier(nb_neighbors=int(50*0.1/settings.get_setting('plane_density_voxel_filter')), std_ratio=0.1)
# pcd = pcd.voxel_down_sample(voxel_size=0.1)
# cl, ind = pcd.remove_statistical_outlier(nb_neighbors=50, std_ratio=0.1)
pcd = pcd.select_by_index(ind)
#o3d.visualization.draw_geometries([pcd])
            #aux.display_inlier_outlier(pcd, ind)
if self.store_octree_model or self.store_point_bucket or self.store_voxel_grid_model:
self.inlier_bucket = pcd
self.inliers = np.asarray(pcd.points)
#self.inliersId = ind
self.equation = best_eq
self.centroid = np.mean(self.inliers, axis=0)
if(self.equation):
if self.equation[3] < 0:
self.equation[0] = -self.equation[0]
self.equation[1] = -self.equation[1]
self.equation[2] = -self.equation[2]
self.equation[3] = -self.equation[3]
            # # Simplification onto the xy plane or the z plane
            # print("eq: ", self.equation)
            # vec_eq = [self.equation[0], self.equation[1], self.equation[2]]
            # imin = vec_eq.index(min(vec_eq))
            # vec_eq[imin] = 0
            # vec_eq = vec_eq / np.linalg.norm(vec_eq)
            # self.equation[0], self.equation[1], self.equation[2] = vec_eq[0], vec_eq[1], vec_eq[2]
            # print("new eq: ", self.equation)
centroid_pontos = np.mean(self.inliers, axis=0)
center_point, rot_angle, width, height, inliers_plano_desrotacionado = self.update_geometry(self.inliers)
centroid_retangulo = np.mean(inliers_plano_desrotacionado, axis=0)
dimin = np.amin([width, height])
# pcd = o3d.geometry.PointCloud()
# pcd.points = o3d.utility.Vector3dVector(self.inliers)
# mesh_frame1 = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0]).translate(centroid_pontos)
# mesh_frame2 = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.5, origin=[0, 0, 0]).translate(centroid_retangulo)
# o3d.visualization.draw_geometries([mesh_frame1, mesh_frame2, pcd])
if(np.linalg.norm(centroid_pontos-centroid_retangulo)<dimin*0.3):
print("GATE DE VALIDAÇÃO DE SIMETRIA OK")
self.center2d = center_point
self.rot_angle = rot_angle
self.width = width
self.height = height
self.points_main = inliers_plano_desrotacionado
self.centroid = np.mean(self.points_main, axis=0)
valid = True
min_area_plane = settings.get_setting('min_area_plane')
                # Area validation gate (only if it is not the floor)
if np.abs(self.equation[2]) < 0.90:
                    if self.width * self.height < min_area_plane:
                        valid = False
                        print("PLANE DID NOT PASS THE AREA VALIDATION GATE: ", self.width * self.height, " m2")
                    else:
                        print("AREA VALIDATION GATE OK")
                # # Density validation gate
                # densidade = len(self.inliersId)/(self.width*self.height)
                # if not densidade > settings.get_setting('min_density'):
                #     valid = False
                #     print("PLANE DID NOT PASS THE DENSITY VALIDATION GATE: ", densidade, " points per m2")
                # else:
                #     print("DENSITY VALIDATION GATE OK ", densidade, "")
else:
print("PLANO NÃO PASSOU NA GATE DE VALIDAÇÃO DE CENTROIDE GEOMÉTRICA")
valid = False
if valid:
print("Saiu do plano: ", self.equation)
return self.equation, self.inliersId, valid
def move(self, ekf):
ekf = copy.deepcopy(ekf)
atual_loc = [ekf.x_m[0,0], ekf.x_m[1,0], 0]
atual_angulo = [0, 0, ekf.x_m[2,0]]
rotMatrix = aux.get_rotation_matrix_bti(atual_angulo)
tranlation = atual_loc
# inlin = np.dot(self.inliers, rotMatrix.T) + tranlation
# pmain = np.dot(self.points_main, rotMatrix.T) + tranlation
# cent = np.mean(inlin, axis=0)
# vec = np.dot(rotMatrix, [self.equation[0], self.equation[1], self.equation[2]])
# d = -np.sum(np.multiply(vec, cent))
# eqcerta = [vec[0], vec[1],vec[2], d]
# print("EQUAÇÃO CERTAAAAAAA: ", eqcerta)
# uv = d*np.asarray([[vec[0]], [vec[1]],[vec[2]]])
#for point in self.points_main:
# print("USANDO G: ",a_ekf.apply_g_point(ekf.x_m, np.asarray([point]).T).T)
self.inliers = np.dot(self.inliers, rotMatrix.T) + tranlation
if self.store_octree_model or self.store_point_bucket or self.store_voxel_grid_model:
self.inlier_bucket.points = o3d.utility.Vector3dVector(np.asarray(self.inliers))
if self.store_voxel_grid_model:
self.bucket_voxel_grid = pcd_to_voxel_grid(copy.deepcopy(self.inlier_bucket), 0.2)
if self.store_octree_model:
self.bucket_octree = pcd_to_octree(copy.deepcopy(self.inlier_bucket), 0.2)
if self.store_point_bucket:
t__start = timer()
self.bucket = copy.deepcopy(self.inlier_bucket)
self.t__bucket = timer() - t__start
t__start = timer()
self.bucket_pos = copy.deepcopy(self.inlier_bucket)
inliers_local = np.dot(self.inliers- tranlation, rotMatrix)
ekf_odom_x = copy.deepcopy(ekf.x_errado)
atual_loc_odom = [ekf_odom_x[0,0], ekf_odom_x[1,0], 0]
atual_angulo_odom = [0, 0, ekf_odom_x[2,0]]
rotMatrix_odom = aux.get_rotation_matrix_bti(atual_angulo_odom)
tranlation_odom = atual_loc_odom
inlier_move_odom = np.dot(np.asarray(inliers_local), rotMatrix_odom.T) + tranlation_odom
self.bucket_odom.points = o3d.utility.Vector3dVector(np.asarray(inlier_move_odom))
self.bucket_odom.colors = self.inlier_bucket.colors
self.t__bucket_debug = timer() - t__start
self.points_main = np.dot(self.points_main, rotMatrix.T)
        #print('points_main before: ', self.points_main)
self.points_main = self.points_main + tranlation
        #print('points_main after: ', self.points_main)
self.centroid = np.mean(self.inliers, axis=0)
Z = np.asarray([[self.equation[0]],[self.equation[1]],[self.equation[2]], [self.equation[3]]])
N = a_ekf.apply_g_plane(ekf.x_m, Z)
# Z2 = a_ekf.apply_h_plane(ekf.x_m, N)
# N2 = a_ekf.apply_g_plane(ekf.x_m, Z2)
# Z3 = a_ekf.apply_h_plane(ekf.x_m, N2)
# print("Z1: ", Z.T)
# print("Z2: ", Z2.T)
# print("Z3: ", Z3.T)
#print("USANDO GGGGGGGG: ", N.T)
self.equation = [N[0,0], N[1,0], N[2,0], N[3,0]]#[eqcerta[0],eqcerta[1],eqcerta[2],eqcerta[3]] # #
# if self.equation[3] < 0:
# self.equation[0] = self.equation[0]*-1
# self.equation[1] = self.equation[1]*-1
# self.equation[2] = self.equation[2]*-1
# self.equation[3] = self.equation[3]*-1
#print("EQUAÇÃO USAAAAAADAAAAA: ", self.equation)
center_point, rot_angle, width, height, inliers_plano_desrotacionado = self.update_geometry(self.points_main)
self.center2d = center_point
self.rot_angle = rot_angle
self.width = width
self.height = height
#self.points_main = inliers_plano_desrotacionado
self.centroid = np.mean(self.points_main, axis=0)
def getProrieties(self):
return {"equation": self.equation,"nPoints":self.inliers.shape[0], "color": self.color, "centroid":self.centroid,
"height": self.height, "width": self.width, "center2d": self.center2d, "rot_angle":self.rot_angle}
def get_height(self, ground_normal):
pts_Z = aux.rodrigues_rot(self.points_main, ground_normal, [0,0,1])
center_Z = aux.rodrigues_rot(self.points_main[4], ground_normal, [0,0,1])[0]
centered_pts_Z = pts_Z[:, 2] - center_Z[2]
height = np.max(centered_pts_Z) - np.min(centered_pts_Z)
return height
def get_geometry(self):
center_point = np.asarray([self.center2d[0], self.center2d[1], 0])
dep = 0.1
mesh_box = o3d.geometry.TriangleMesh.create_box(width=self.width, height=self.height, depth=dep)
mesh_box = mesh_box.translate(np.asarray([-self.width/2, -self.height/2, -dep/2]))
mesh_box = mesh_box.rotate(aux.get_rotation_matrix_bti([0, 0, self.rot_angle]), center=np.asarray([0, 0, 0]))
mesh_box.compute_vertex_normals()
mesh_box.paint_uniform_color(self.color)
# center the box on the frame
# move to the plane location
mesh_box = mesh_box.translate(np.asarray(center_point))
mesh_box = mesh_box.translate(np.asarray([0, 0, -self.equation[3]]))
#mesh_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1, origin=[0, 0, 0])
#o3d.visualization.draw_geometries([mesh_frame, mesh_box])
mesh_box = mesh_box.rotate(aux.get_rotationMatrix_from_vectors([0, 0, 1], [self.equation[0], self.equation[1], self.equation[2]]), center=np.asarray([0, 0, 0]))
#pcd = o3d.geometry.PointCloud()
#pcd.points = o3d.utility.Vector3dVector(inliers_plano_desrotacionado)
# pcd.voxel_down_sample(voxel_size=0.1)
#pcd.paint_uniform_color(self.color)
#obb = pcd.get_oriented_bounding_box()
#obb.color = (self.color[0], self.color[1], self.color[2])
# estimate radius for rolling ball
#o3d.visualization.draw_geometries([pcd, mesh_box])
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(self.points_main)
return [mesh_box, pcd]
def get_octree(self, depth=5, expantion=0):
octree = o3d.geometry.Octree(max_depth=depth)
octree.convert_from_point_cloud(copy.deepcopy(self.bucket), size_expand=expantion)
return octree
def getVoxelStructure(self, voxel_size=0.2):
return o3d.geometry.VoxelGrid.create_from_point_cloud(copy.deepcopy(self.bucket), voxel_size=voxel_size)
def append_plane(self, plano, neweq = [], nvezes=0, is_cobaia = False):
#print("Shape antes de append: "+str(self.inliers.shape[0]))
# #print("Shape depois de append: "+str(self.inliers.shape[0]))
# centroid_pontos = np.mean(points, axis=0)
# center_point, rot_angle, width, height, inliers_plano_desrotacionado = self.update_geometry(points)
# centroid_retangulo = np.mean(inliers_plano_desrotacionado, axis=0)
# dimin = np.amin([width, height])
# if(np.linalg.norm(centroid_pontos-centroid_retangulo)<dimin*0.1):
plano = copy.deepcopy(plano)
neweq = copy.deepcopy(neweq)
usa_media = False
points = plano.feat.points_main
if is_cobaia == False:
if self.store_voxel_grid_model:
pcd_voxel_grid = voxel_grid_to_pcd(copy.deepcopy(self.bucket_voxel_grid), 10)
corrected_points = aux.projected_point_into_plane(np.append(pcd_voxel_grid.points, plano.feat.inlier_bucket.points, axis=0), self.equation)
pcd_voxel_grid.points = o3d.utility.Vector3dVector(corrected_points)
pcd_voxel_grid.colors = o3d.utility.Vector3dVector(np.append(pcd_voxel_grid.colors, plano.feat.inlier_bucket.colors, axis=0))
self.bucket_voxel_grid = pcd_to_voxel_grid(pcd_voxel_grid, 0.2)
if self.store_octree_model:
pcd_octree = octree_to_pcd(copy.deepcopy(self.bucket_octree), 3)
corrected_points = aux.projected_point_into_plane(np.append(pcd_octree.points, plano.feat.inlier_bucket.points, axis=0), self.equation)
pcd_octree.points = o3d.utility.Vector3dVector(corrected_points)
pcd_octree.colors = o3d.utility.Vector3dVector(np.append(pcd_octree.colors, plano.feat.inlier_bucket.colors, axis=0))
self.bucket_octree = pcd_to_octree(pcd_octree, 0.2)
# Add points to point bucket
if self.store_point_bucket:
t__start = timer()
self.bucket_pos.points = o3d.utility.Vector3dVector(np.append(self.bucket_pos.points, plano.feat.inlier_bucket.points, axis=0))
self.bucket_pos.colors = o3d.utility.Vector3dVector(np.append(self.bucket_pos.colors, plano.feat.inlier_bucket.colors, axis=0))
self.bucket_pos = self.bucket_pos.voxel_down_sample(voxel_size=settings.get_setting('plane_density_voxel_filter'))
self.t__bucket_debug = timer() - t__start
t__start = timer()
corrected_points = aux.projected_point_into_plane(np.append(self.bucket.points, plano.feat.inlier_bucket.points, axis=0), self.equation)
self.bucket.points = o3d.utility.Vector3dVector(corrected_points)
self.bucket.colors = o3d.utility.Vector3dVector(np.append(self.bucket.colors, plano.feat.inlier_bucket.colors, axis=0))
self.bucket = self.bucket.voxel_down_sample(voxel_size=settings.get_setting('plane_density_voxel_filter'))
self.t__bucket = timer() - t__start
t__start = timer()
self.bucket_odom.points = o3d.utility.Vector3dVector(np.append(self.bucket_odom.points, plano.feat.bucket_odom.points, axis=0))
self.bucket_odom.colors = o3d.utility.Vector3dVector(np.append(self.bucket_odom.colors, plano.feat.bucket_odom.colors, axis=0))
self.bucket_odom = self.bucket_odom.voxel_down_sample(voxel_size=settings.get_setting('plane_density_voxel_filter'))
self.t__bucket_debug = timer() - t__start + self.t__bucket_debug
if(usa_media):
eqplano2 = plano.feat.equation
nvezes_plano2 = plano.running_geo["total"]
eqplano1 = copy.deepcopy(self.equation)
            # New plane equation:
            # weighted average over the number of times each plane was already detected and its area
# print('eqplano1: ', eqplano1, ' nvezes: ', nvezes+1)
# print('eqplano2: ', eqplano2, 'nvezes_plano2: ', nvezes_plano2)
area1 = self.width*self.height
area2 = plano.feat.width*plano.feat.height
self.equation = (np.asarray(eqplano1)*nvezes*area1 + np.asarray(eqplano2)*nvezes_plano2*area2)/((nvezes*area1+nvezes_plano2*area2))
#print("JUNTANDO AS EQUAÇÃO TUDO: ",self.equation)
            # Move both planes to this orientation and position:
#self.points_main = aux.rodrigues_rot(self.points_main, [eqplano1[0], eqplano1[1], eqplano1[2]], [self.equation[0], self.equation[1], self.equation[2]])
#points = aux.rodrigues_rot(points, [eqplano2[0], eqplano2[1], eqplano2[2]], [self.equation[0], self.equation[1], self.equation[2]])
else:
self.equation = neweq
provisorio = copy.deepcopy(np.append(self.points_main, points, axis=0))
center_point, rot_angle, width, height, inliers_plano_desrotacionado = self.update_geometry(provisorio)
self.center2d = center_point
self.rot_angle = rot_angle
self.width = width
self.height = height
self.points_main = inliers_plano_desrotacionado
centroidantes = self.centroid
self.centroid = np.mean(self.points_main, axis=0)
centroiddepois = self.centroid
#print("DIFERENÇA DE CENTROIDES: ", np.linalg.norm(centroidantes-centroiddepois))
discentnormal = np.dot((centroidantes-centroiddepois),np.asarray([self.equation[0], self.equation[1], self.equation[2]]))
# O que me interessa mesmo aqui é mudança da centroide mas em direção a normal do plano. Não tem problema a centroide mudar na direção da superfície do plano
#print("DIFERENÇA DE CENTROIDES na direção do plano: ",discentnormal)
if(np.abs(discentnormal) > 0.8):
self.color = (1, 0, 0)
return False
return True
# else:
# return False
def update_geometry(self, points):
        # Find the parameters of the bounded plane patch
inlier_planez = points
        # Find the 2D representation of the projection along the plane normal
inliers_plano = aux.rodrigues_rot(copy.deepcopy(inlier_planez), [self.equation[0], self.equation[1], self.equation[2]], [0, 0, 1])- np.asarray([0, 0, -self.equation[3]])
dd_plano = np.delete(inliers_plano, 2, 1)
        # Fit the minimum-area rectangle
# print('dd_plano: ',dd_plano.shape)
# filename = 'pontos.pckl'
# outfile = open(filename,'wb')
# pickle.dump(dd_plano,outfile)
# outfile.close()
hull_points = qhull2D(dd_plano)
hull_points = hull_points[::-1]
(rot_angle, area, width, height, center_point, corner_points) = minBoundingRect(hull_points)
        # Back to 3D space
p = np.vstack((np.asarray(corner_points), np.asarray(center_point)))
ddd_plano= np.c_[ p, np.zeros(p.shape[0]) ] + np.asarray([0, 0, -self.equation[3]])
inliers_plano_desrotacionado = aux.rodrigues_rot(ddd_plano, [0, 0, 1], [self.equation[0], self.equation[1], self.equation[2]])
return center_point, rot_angle, width, height, inliers_plano_desrotacionado
# # Load a saved point cloud, fit a plane and visualize it
# # (findPlane takes a point cloud and returns equation, inlier ids and a validity flag)
# pcd_load = o3d.io.read_point_cloud("caixa.ply")
# #o3d.visualization.draw_geometries([pcd_load])
# plano1 = Plane()
# best_eq, best_inliers, valid = plano1.findPlane(pcd_load, 0.01)
# plane = pcd_load.select_by_index(best_inliers).paint_uniform_color([1, 0, 0])
# obb = plane.get_oriented_bounding_box()
# obb2 = plane.get_axis_aligned_bounding_box()
# obb.color = [0, 0, 1]
# obb2.color = [0, 1, 0]
# not_plane = pcd_load.select_by_index(best_inliers, invert=True)
# #mesh = o3d.geometry.TriangleMesh.create_coordinate_frame(origin=[0, 0, 0])
# o3d.visualization.draw_geometries([not_plane, plane, obb, obb2])
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains definitions for tags in Artella
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "tpovedatd@gmail.com"
import ast
import string
import tpDcc as tp
import artellapipe.register
class ArtellaTagNode(object):
def __init__(self, project, node, tag_info=None):
super(ArtellaTagNode, self).__init__()
self._project = project
self._node = node
self._tag_info_dict = tag_info
if tag_info:
self._tag_info_dict = ast.literal_eval(tag_info)
short_node = tp.Dcc.node_short_name(node)
if short_node in self._tag_info_dict.keys():
self._tag_info_dict = self._tag_info_dict[short_node]
else:
short_node_strip = short_node.rstrip(string.digits)
if short_node_strip in self._tag_info_dict.keys():
self._tag_info_dict = self._tag_info_dict[short_node_strip]
@property
def node(self):
"""
        Returns the node linked to this tag
:return: str
"""
return self._node
@property
def tag_info(self):
"""
Returns tag info data stored in this node
:return: dict
"""
return self._tag_info_dict
def get_clean_node(self):
"""
Returns current node with the short name and with ids removed
:return: str
"""
return tp.Dcc.node_short_name(self._node).rstrip(string.digits)
def get_asset_node(self):
"""
Returns asset node linked to this tag node
:return: ArtellaAssetNode
"""
if not self._node or not tp.Dcc.object_exists(self._node):
return None
if self._tag_info_dict:
return artellapipe.AssetsMgr().get_asset_node_in_scene(node_id=self._node)
else:
if not tp.Dcc.attribute_exists(
node=self._node, attribute_name=artellapipe.TagsMgr().TagDefinitions.NODE_ATTRIBUTE_NAME):
return None
connections = tp.Dcc.list_connections(
node=self._node, attribute_name=artellapipe.TagsMgr().TagDefinitions.NODE_ATTRIBUTE_NAME)
if connections:
node = connections[0]
return artellapipe.AssetsMgr().get_asset_node_in_scene(node_id=node)
return None
def get_tag_type(self):
"""
Returns the type of the tag
:return: str
"""
return self._get_attribute(attribute_name=artellapipe.TagsMgr().TagDefinitions.TAG_TYPE_ATTRIBUTE_NAME)
def _get_attribute(self, attribute_name):
"""
Internal function that retrieves attribute from wrapped TagData node
:param attribute_name: str, attribute name to retrieve from TagData node
:return: variant
"""
if self._tag_info_dict:
return self._tag_info_dict.get(attribute_name)
else:
if not self._node or not tp.Dcc.object_exists(self._node):
return None
if not tp.Dcc.attribute_exists(node=self._node, attribute_name=attribute_name):
return None
return tp.Dcc.get_attribute_value(node=self._node, attribute_name=attribute_name)
artellapipe.register.register_class('TagNode', ArtellaTagNode)
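# Hedged usage sketch (comment only, since a live DCC session with tpDcc and
# an artellapipe project are required; 'my_project' and 'tagdata_node' are
# hypothetical names):
#
# tag_node = ArtellaTagNode(project=my_project, node='tagdata_node')
# print(tag_node.get_clean_node())   # short name with trailing digits removed
# print(tag_node.get_tag_type())     # value of the tag type attribute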
|
nilq/baby-python
|
python
|
#!/bin/env python
#
# Copyright (C) 2014 eNovance SAS <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import config
from utils import Base
from utils import ManageSfUtils
from utils import skipIfIssueTrackerMissing
from pysflib.sfgerrit import GerritUtils
from requests.auth import HTTPBasicAuth
from requests.exceptions import HTTPError
import requests
class TestGateway(Base):
def _auth_required(self, url):
resp = requests.get(url, allow_redirects=False)
self.assertEqual(resp.status_code, 307)
self.assertTrue("/auth/login" in resp.headers['Location'])
@skipIfIssueTrackerMissing()
def test_redmine_root_url_for_404(self):
""" Test if redmine yield RoutingError
"""
url = "%s/redmine/" % config.GATEWAY_URL
for i in xrange(11):
resp = requests.get(url)
self.assertNotEquals(resp.status_code, 404)
def _url_is_not_world_readable(self, url):
"""Utility function to make sure a url is not accessible"""
resp = requests.get(url)
self.assertTrue(resp.status_code > 399, resp.status_code)
def test_managesf_is_secure(self):
"""Test if managesf config.py file is not world readable"""
url = "%s/managesf/config.py" % config.GATEWAY_URL
self._url_is_not_world_readable(url)
def test_cauth_is_secure(self):
"""Test if managesf config.py file is not world readable"""
url = "%s/cauth/config.py" % config.GATEWAY_URL
self._url_is_not_world_readable(url)
@skipIfIssueTrackerMissing()
# TODO(XXX) this is not up to date and can change with config
def test_topmenu_links_shown(self):
""" Test if all service links are shown in topmenu
"""
subpaths = ["/r/", "/jenkins/", "/redmine/",
"/zuul/", "/etherpad/", "/paste/", "/docs/"]
url = config.GATEWAY_URL + "/topmenu.html"
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
for subpath in subpaths:
self.assertTrue(('href="%s"' % subpath) in resp.text)
def test_gerrit_accessible(self):
""" Test if Gerrit is accessible on gateway hosts
"""
# Unauthenticated calls
urls = [config.GATEWAY_URL + "/r/",
config.GATEWAY_URL + "/r/#/"]
for url in urls:
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTrue('<title>Gerrit Code Review</title>' in resp.text)
# URL that requires login - shows login page
url = config.GATEWAY_URL + "/r/a/projects/?"
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTrue('form-signin' in resp.text)
# Authenticated URL that requires login
url = config.GATEWAY_URL + "/r/a/projects/?"
self._auth_required(url)
resp = requests.get(
url,
cookies=dict(
auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))
self.assertEqual(resp.status_code, 200)
# /r/a/projects returns JSON list of projects
self.assertTrue('All-Users' in resp.text)
def test_gerrit_projectnames(self):
""" Test if projectnames similar to LocationMatch settings work
"""
# Unauthenticated calls, unknown projects. Must return 404, not 30x
urls = [config.GATEWAY_URL + "/r/dashboard",
config.GATEWAY_URL + "/r/grafana",
config.GATEWAY_URL + "/r/jenkinslogs"]
for url in urls:
resp = requests.get(url, allow_redirects=False)
self.assertEqual(resp.status_code, 404)
def test_gerrit_api_accessible(self):
""" Test if Gerrit API is accessible on gateway hosts
"""
m = ManageSfUtils(config.GATEWAY_URL)
url = config.GATEWAY_URL + "/api/"
a = GerritUtils(url)
a.g.url = "%s/" % a.g.url.rstrip('a/')
self.assertRaises(HTTPError, a.get_account, config.USER_1)
api_passwd = m.create_gerrit_api_password(config.USER_1)
auth = HTTPBasicAuth(config.USER_1, api_passwd)
a = GerritUtils(url, auth=auth)
self.assertTrue(a.get_account(config.USER_1))
m.delete_gerrit_api_password(config.USER_1)
a = GerritUtils(url, auth=auth)
self.assertRaises(HTTPError, a.get_account, config.USER_1)
a = GerritUtils(url)
a.g.url = "%s/" % a.g.url.rstrip('a/')
self.assertRaises(HTTPError, a.get_account, 'john')
def test_jenkins_accessible(self):
""" Test if Jenkins is accessible on gateway host
"""
url = config.GATEWAY_URL + "/jenkins/"
# Without SSO cookie. Note that auth is no longer enforced
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTrue('<title>Dashboard [Jenkins]</title>' in resp.text)
# With SSO cookie
resp = requests.get(
url, cookies=dict(
auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))
self.assertEqual(resp.status_code, 200)
self.assertTrue('<title>Dashboard [Jenkins]</title>' in resp.text)
# User should be known in Jenkins if logged in with SSO
self.assertTrue(config.USER_1 in resp.text)
def test_zuul_accessible(self):
""" Test if Zuul is accessible on gateway host
"""
url = config.GATEWAY_URL + "/zuul/"
resp = requests.get(
url,
cookies=dict(
auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))
self.assertEqual(resp.status_code, 200)
self.assertTrue('<title>Zuul Status</title>' in resp.text)
@skipIfIssueTrackerMissing()
def test_redmine_accessible(self):
""" Test if Redmine is accessible on gateway host
"""
url = config.GATEWAY_URL + "/redmine/"
# Without SSO cookie. Note that auth is no longer enforced
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTrue('<title>Redmine</title>' in resp.text)
# With SSO cookie
resp = requests.get(
url,
cookies=dict(
auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))
self.assertEqual(resp.status_code, 200)
self.assertTrue('<title>Redmine</title>' in resp.text)
# User should be known in Redmine if logged in with SSO
self.assertTrue(config.USER_1 in resp.text)
# Check one of the CSS files to ensure static files are accessible
css_file = "plugin_assets/redmine_backlogs/stylesheets/global.css"
url = config.GATEWAY_URL + "/redmine/%s" % css_file
resp = requests.get(
url,
cookies=dict(
auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))
self.assertEqual(resp.status_code, 200)
self.assertTrue('GLOBAL' in resp.text)
def test_etherpad_accessible(self):
""" Test if Etherpad is accessible on gateway host
"""
url = config.GATEWAY_URL + "/etherpad/"
resp = requests.get(
url,
cookies=dict(
auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))
self.assertEqual(resp.status_code, 200)
self.assertTrue('<title>SF - Etherpad</title>' in resp.text)
def test_paste_accessible(self):
""" Test if Paste is accessible on gateway host
"""
url = config.GATEWAY_URL + "/paste/"
resp = requests.get(
url,
cookies=dict(
auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))
self.assertEqual(resp.status_code, 200)
self.assertTrue('<title>New Paste | LodgeIt!</title>' in resp.text)
def test_css_js_for_topmenu_accessible(self):
""" Test if css/js for topmenu are accessible on gateway host
"""
url = config.GATEWAY_URL + "/static/js/jquery.min.js"
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTrue("jQuery v2.1.1" in resp.content)
paths = ('js/bootstrap.min.js', 'css/bootstrap.min.css')
for p in paths:
url = config.GATEWAY_URL + "/static/bootstrap/%s" % p
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTrue("Bootstrap v3.2.0" in resp.content)
def test_static_dir_for_paste_accessible(self):
""" Test if static dir for paste is accessible on gateway host
"""
url = config.GATEWAY_URL + "/static/lodgeit/jquery.js"
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
self.assertTrue("jQuery 1.2.6" in resp.content)
def test_docs_accessible(self):
""" Test if Sphinx docs are accessible on gateway host
"""
url = config.GATEWAY_URL + "/docs/index.html"
resp = requests.get(url)
self.assertEqual(resp.status_code, 200)
def test_dashboard_accessible(self):
""" Test if Dashboard is accessible on gateway host
"""
url = config.GATEWAY_URL + "/dashboard/"
self._auth_required(url)
resp = requests.get(
url,
cookies=dict(
auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))
self.assertEqual(resp.status_code, 200)
self.assertTrue('<body ng-controller="mainController">' in resp.text)
def test_jenkinslogs_accessible(self):
""" Test if Jenkins logs are accessible on gateway host
"""
url = "http://%s/jenkinslogs/127.0.0.1/dashboard/" % (
config.GATEWAY_HOST)
resp = requests.get(url, allow_redirects=False)
self.assertEqual(resp.status_code, 307)
self._auth_required(url)
resp = requests.get(
url,
cookies=dict(
auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))
self.assertEqual(resp.status_code, 200)
url = "http://%s/jenkinslogs/127.0.0.2/dashboard/" % (
config.GATEWAY_HOST)
resp = requests.get(
url,
cookies=dict(
auth_pubtkt=config.USERS[config.USER_1]['auth_cookie']))
self.assertEqual(resp.status_code, 404)
|
nilq/baby-python
|
python
|
import argparse
from transcribeUtils import *
from webvttUtils import *
import requests
from videoUtils import *
from audioUtils import *
# Get the command line arguments and parse them
parser = argparse.ArgumentParser(
prog="testWebVTT.py",
description="Process a video found in the input file, process it, and write tit out to the output file",
)
parser.add_argument(
"-region", required=True, help="The AWS region containing the S3 buckets"
)
parser.add_argument(
"-inbucket", required=True, help="The S3 bucket containing the input file"
)
parser.add_argument("-infile", required=True, help="The input file to process")
parser.add_argument(
"-outbucket", required=True, help="The S3 bucket containing the input file"
)
parser.add_argument(
"-outfilename", required=True, help="The file name without the extension"
)
parser.add_argument(
"-outfiletype", required=True, help="The output file type. E.g. mp4, mov"
)
parser.add_argument(
"-outlang",
required=True,
nargs="+",
help="The language codes for the desired output. E.g. en = English, de = German",
)
parser.add_argument(
"-TranscriptJob",
required=True,
help="The URI resulting from the transcript job",
)
args = parser.parse_args()
job = getTranscriptionJobStatus(args.TranscriptJob)
# print( job )
# Now get the transcript JSON from AWS Transcribe
transcript = getTranscript(
str(job["TranscriptionJob"]["Transcript"]["TranscriptFileUri"])
)
# print( "\n==> Transcript: \n" + transcript)
# Create the WebVTT File for the original transcript and write it out.
writeTranscriptToWebVTT(transcript, "en", "subtitles-en.vtt")
# createVideo( args.infile, "subtitles-en.vtt", args.outfilename + "-en." + args.outfiletype, "audio-en.mp3", True)
# Now write out the translation to the transcript for each of the target languages
for lang in args.outlang:
writeTranslationToWebVTT(
transcript, "en", lang, "subtitles-" + lang + ".vtt"
)
# Now that we have the subtitle files, let's create the audio track
# createAudioTrackFromTranslation( args.region, transcript, 'en', lang, "audio-" + lang + ".mp3" )
# Finally, create the composited video
# createVideo( args.infile, "subtitles-" + lang + ".WebVTT", args.outfilename + "-" + lang + "." + args.outfiletype, "audio-" + lang + ".mp3", False)
|
nilq/baby-python
|
python
|
from basis.setting import PERIODS
from basis.assistant import getID
import progressbar
ALL_PERIODS = []
for i in range(len(PERIODS)-1):
ALL_PERIODS.append({"from":{"hour":PERIODS[i][0],"minute":PERIODS[i][1]},"to":{"hour":PERIODS[i+1][0],"minute":PERIODS[i+1][1]}})
def getperiodsIndex(all_periods):
time_dics = {}
for i,period in enumerate(all_periods):
for hour in range(period["from"]["hour"],period["to"]["hour"]+1):
min_minute,max_minute = 0,60
if hour == period["from"]["hour"]: min_minute = period["from"]["minute"]
if hour == period["to"]["hour"]: max_minute = period["to"]["minute"]
for minute in range(min_minute,max_minute):
time_dics[getID(hour,minute)] = i
return time_dics
def getMinutes(all_periods):
for i,period in enumerate(all_periods):
period["minutes"] = (period["to"]["hour"]-period["from"]["hour"])*60 + period["to"]["minute"] - period["from"]["minute"]
return all_periods
ALL_PERIODS = getMinutes(ALL_PERIODS)
TIME_DICS = getperiodsIndex(ALL_PERIODS)
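# Small illustrative check, assuming getID encodes an (hour, minute) pair the
# same way it is used in getperiodsIndex above: the start of the first period
# should map to period index 0.
if __name__ == "__main__":
    probe = getID(PERIODS[0][0], PERIODS[0][1])
    print("period index:", TIME_DICS.get(probe))
    print("period lengths in minutes:", [p["minutes"] for p in ALL_PERIODS])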
|
nilq/baby-python
|
python
|
import logging
logging.basicConfig()
logger = logging.getLogger('led_detection')
logger.setLevel(logging.DEBUG)
from .api import *
from .unit_tests import *
from .algorithms import *
|
nilq/baby-python
|
python
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
def transform(logdata):
headers = logdata['httpRequest']['headers']
if len(headers) > 0:
logdata['httpRequest']['header'] = {}
for header in headers:
key = header['name'].lower().replace('-', '_')
logdata['httpRequest']['header'][key] = header['value']
if key == 'host':
logdata['url']['domain'] = header['value']
elif key == 'user_agent':
logdata['user_agent'] = {}
logdata['user_agent']['original'] = header['value']
elif key == 'referer':
logdata['http']['request']['referrer'] = header['value']
elif key == 'authorization':
del logdata['httpRequest']['header'][key]
try:
# WAFv2
logdata['rule']['ruleset'] = logdata['webaclId'].split('/')[2]
region_type = logdata['webaclId'].split(':')[5].split('/')[0]
if region_type == 'global':
logdata['cloud']['region'] = 'global'
else:
logdata['cloud']['region'] = logdata['webaclId'].split(':')[3]
logdata['cloud']['account'] = {'id': logdata['webaclId'].split(':')[4]}
except IndexError:
# WAFv1
logdata['rule']['ruleset'] = logdata['webaclId']
logdata['http']['version'] = (logdata['httpRequest']
['httpVersion'].split('/')[1])
# action = logdata.get('action')
# if 'ALLOW' in action:
# logdata['event']['outcome'] = 'success'
# elif 'BLOCK' in action:
# logdata['event']['outcome'] = 'failure'
# else:
# logdata['event']['outcome'] = 'unknown'
return logdata
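# Illustrative self-test with a synthetic WAFv2 record (all values made up):
# exercises the header flattening and the webaclId parsing above.
if __name__ == '__main__':
    sample = {
        'webaclId': ('arn:aws:wafv2:us-east-1:123456789012:'
                     'regional/webacl/example-acl/0000'),
        'httpRequest': {
            'httpVersion': 'HTTP/1.1',
            'headers': [{'name': 'Host', 'value': 'example.com'},
                        {'name': 'User-Agent', 'value': 'curl/7.79'}],
        },
        'url': {},
        'http': {'request': {}},
        'rule': {},
        'cloud': {},
    }
    out = transform(sample)
    # expected output: example.com example-acl us-east-1
    print(out['url']['domain'], out['rule']['ruleset'], out['cloud']['region'])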
|
nilq/baby-python
|
python
|
from django.contrib import admin
from app.models import UserProfileInfo
admin.site.register(UserProfileInfo)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Tag'
db.create_table(u'tagging_tag', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('title', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'], null=True, blank=True)),
))
db.send_create_signal(u'tagging', ['Tag'])
# Adding unique constraint on 'Tag', fields ['title', 'site']
db.create_unique(u'tagging_tag', ['title', 'site_id'])
# Adding model 'ContentObjectTag'
db.create_table(u'tagging_contentobjecttag', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('content_type', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_type_set_for_contentobjecttag', to=orm['contenttypes.ContentType'])),
('object_pk', self.gf('django.db.models.fields.PositiveIntegerField')()),
('site', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'])),
('tag', self.gf('django.db.models.fields.related.ForeignKey')(related_name='content_object_tags', to=orm['tagging.Tag'])),
))
db.send_create_signal(u'tagging', ['ContentObjectTag'])
def backwards(self, orm):
# Removing unique constraint on 'Tag', fields ['title', 'site']
db.delete_unique(u'tagging_tag', ['title', 'site_id'])
# Deleting model 'Tag'
db.delete_table(u'tagging_tag')
# Deleting model 'ContentObjectTag'
db.delete_table(u'tagging_contentobjecttag')
models = {
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "(u'domain',)", 'object_name': 'Site', 'db_table': "u'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'tagging.contentobjecttag': {
'Meta': {'object_name': 'ContentObjectTag'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_contentobjecttag'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.PositiveIntegerField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_object_tags'", 'to': u"orm['tagging.Tag']"})
},
u'tagging.tag': {
'Meta': {'unique_together': "[('title', 'site')]", 'object_name': 'Tag'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'})
}
}
complete_apps = ['tagging']
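# A South-era migration like this is typically applied with
#   python manage.py migrate tagging
# (assuming 'south' and the 'tagging' app are listed in INSTALLED_APPS).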
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2011-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: khmer-project@idyll.org
import sys
import khmer
import os
try:
import matplotlib
matplotlib.use('Agg')
from pylab import *
except ImportError:
pass
def main():
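    # Expected invocation (inferred from the argv usage below):
    #   script.py <countgraph> <sequence-file> <figure-basename>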
hashfile = sys.argv[1]
filename = sys.argv[2]
figure = sys.argv[3]
ht = khmer.load_countgraph(hashfile)
outabund = open(os.path.basename(filename) + '.counts', 'w')
counts = []
d = {}
    for sequence in open(filename):
sequence = sequence.strip()
count = ht.get(sequence)
counts.append(count)
d[count] = d.get(count, 0) + 1
if count > 1000:
print(sequence, count, file=outabund)
outfp = open(figure + '.countshist', 'w')
sofar = 0
sofar_cumu = 0
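    # Each .countshist line: count, #sequences with that count,
    # cumulative #sequences, and cumulative sum of counts.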
for k in sorted(d.keys()):
sofar += d[k]
sofar_cumu += k * d[k]
print(k, d[k], sofar, sofar_cumu, file=outfp)
    hist(counts, density=True, cumulative=True, bins=100, range=(1, 1000))  # 'normed' was removed in newer matplotlib; 'density' is the equivalent
savefig(figure)
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
import re
from isic_archive.models.dataset_helpers import matchFilenameRegex
def assertMatch(originalFilename, csvFilename):
"""Assert that the filename in the CSV matches the original filename."""
regex = matchFilenameRegex(csvFilename)
assert re.match(regex, originalFilename) is not None
def assertNotMatch(originalFilename, csvFilename):
"""Assert that the filename in the CSV doesn't match the original filename."""
regex = matchFilenameRegex(csvFilename)
assert re.match(regex, originalFilename) is None
def testMatchFilenameRegex():
"""
Test matchFilenameRegex.
The matchFilenameRegex function generates a regular expression to match image
filenames in a metadata CSV file to original image filenames in the database.
"""
originalFilename = 'ABC-6D.JPG'
assertMatch(originalFilename, 'ABC-6D')
assertMatch(originalFilename, 'ABC-6D.JPG')
assertMatch(originalFilename, 'ABC-6D.jpg')
assertMatch(originalFilename, 'abc-6D.jpg')
assertMatch(originalFilename, 'abc-6d.jpg')
assertNotMatch(originalFilename, 'ABC-6D.png')
assertNotMatch(originalFilename, 'ABC-6D.PNG')
originalFilename = '20010425124238356.jpg'
assertMatch(originalFilename, '20010425124238356')
assertMatch(originalFilename, '20010425124238356.jpg')
assertMatch(originalFilename, '20010425124238356.JPG')
assertNotMatch(originalFilename, '20010425124238356.png')
assertNotMatch(originalFilename, '20010425124238356.PNG')
originalFilename = 'AbcDef00598.jpg'
assertMatch(originalFilename, 'AbcDef00598')
assertMatch(originalFilename, 'AbcDef00598.jpg')
assertMatch(originalFilename, 'AbcDef00598.JPG')
assertMatch(originalFilename, 'abcdef00598.JPG')
assertNotMatch(originalFilename, 'AbcDef00598.png')
assertNotMatch(originalFilename, 'AbcDef00598.PNG')
originalFilename = 'test-20010425124238356.jpg'
assertMatch(originalFilename, 'test-20010425124238356')
assertMatch(originalFilename, 'test-20010425124238356.jpg')
assertMatch(originalFilename, 'TEST-20010425124238356.jpg')
assertMatch(originalFilename, 'TEST-20010425124238356.JPG')
assertNotMatch(originalFilename, 'TEST-20010425124238356.png')
assertNotMatch(originalFilename, 'TEST-20010425124238356.PNG')
originalFilename = 'AEOU3014, (20020901020318037) 20010425124238356.jpg'
assertMatch(originalFilename, 'AEOU3014, (20020901020318037) 20010425124238356')
assertMatch(originalFilename, 'AEOU3014, (20020901020318037) 20010425124238356.jpg')
assertMatch(originalFilename, 'AEOU3014, (20020901020318037) 20010425124238356.JPG')
assertMatch(originalFilename, 'aeou3014, (20020901020318037) 20010425124238356.JPG')
assertMatch(originalFilename, 'aeou3014, (20020901020318037) 20010425124238356.jpg')
assertNotMatch(originalFilename, 'AEOU3014, (20020901020318037) 20010425124238356.png')
assertNotMatch(originalFilename, 'AEOU3014, (20020901020318037) 20010425124238356.PNG')
originalFilename = '20020901020318037_30445187_2002-0901_Null_ 001.jpg'
assertMatch(originalFilename, '20020901020318037_30445187_2002-0901_Null_ 001')
assertMatch(originalFilename, '20020901020318037_30445187_2002-0901_Null_ 001.jpg')
assertMatch(originalFilename, '20020901020318037_30445187_2002-0901_Null_ 001.JPG')
assertMatch(originalFilename, '20020901020318037_30445187_2002-0901_NULL_ 001.jpg')
assertMatch(originalFilename, '20020901020318037_30445187_2002-0901_NULL_ 001.JPG')
assertNotMatch(originalFilename, '20020901020318037_30445187_2002-0901_NULL_ 001.png')
assertNotMatch(originalFilename, '20020901020318037_30445187_2002-0901_NULL_ 001.PNG')
# Filename that contains a period
originalFilename = 'test.315704d.jpg'
assertMatch(originalFilename, 'test.315704d')
assertMatch(originalFilename, 'test.315704d.jpg')
assertNotMatch(originalFilename, 'test.315704d.PNG')
# Filename that contains multiple periods
originalFilename = 'test.315704d.4e95e3d.png'
assertMatch(originalFilename, 'test.315704d.4e95e3d')
assertMatch(originalFilename, 'test.315704d.4e95e3d.png')
assertNotMatch(originalFilename, 'test.315704d')
assertNotMatch(originalFilename, 'test.315704d.4e95e3d.')
assertNotMatch(originalFilename, 'test.315704d.4e95e3d.jpg')
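# A minimal sketch of the behavior these assertions imply; the actual
# matchFilenameRegex in isic_archive may differ (hypothetical helper):
# matching is case-insensitive, and a CSV value that lacks a recognized image
# extension may match the original filename with one extra extension appended.
def sketchMatchFilenameRegex(csvFilename):
    if re.search(r'\.(jpe?g|png|gif|bmp|tiff?)$', csvFilename, re.IGNORECASE):
        # Extension present in the CSV value: require it verbatim.
        return '(?i)^' + re.escape(csvFilename) + '$'
    # No extension in the CSV value: allow exactly one optional extension.
    return '(?i)^' + re.escape(csvFilename) + r'(\.[^.]+)?$'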
|
nilq/baby-python
|
python
|
# ------------------------------------------------------------------------------
# pose.pytorch
# Copyright (c) 2018-present Microsoft
# Licensed under The Apache-2.0 License [see LICENSE for details]
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision
import _init_paths
from config import cfg
from config import update_config
from core.loss import JointsMSELoss
from core.function import validate
from utils.utils import create_logger
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog
from models.pose_hrnet import get_pose_net
from dataset.coco_realtime import COCODataset
from utils.vis import save_batch_heatmaps
import cv2
import glob
import time
def parse_args():
parser = argparse.ArgumentParser(description='Train keypoints network')
# general
parser.add_argument('--cfg',
help='experiment configure file name',
default='experiments/coco/hrnet/w32_256x192_adam_lr1e-3.yaml',
required=True,
type=str)
parser.add_argument('opts',
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--modelDir',
help='model directory',
type=str,
default='models/pytorch/pose_coco')
parser.add_argument('--logDir',
help='log directory',
type=str,
default='')
parser.add_argument('--dataDir',
help='data directory',
type=str,
default='')
parser.add_argument('--prevModelDir',
help='prev Model directory',
type=str,
default='')
args = parser.parse_args()
return args
def detectron_validate(config, val_dataset, model, criterion, output_dir,
tb_log_dir, writer_dict=None):
# switch to evaluate mode
model.eval()
with torch.no_grad():
outputs = model(val_dataset.cuda())
def detectron_save_image(crop, model, index, final_output_dir):
    # 'index' is the image index, used only to name the saved files.
    model.eval()
    with torch.no_grad():
        outputs = model(crop.cuda())
    grid_img = torchvision.utils.make_grid(crop, padding=0)
    prefix = '{}_{:05d}'.format(
        os.path.join(final_output_dir, 'val'), index
    )
torchvision.utils.save_image(grid_img, prefix + '_im.jpg', normalize=True)
save_batch_heatmaps(crop, outputs, prefix + '_heat.jpg')
def main():
args = parse_args()
update_config(cfg, args)
logger, final_output_dir, tb_log_dir = create_logger(
cfg, args.cfg, 'valid')
logger.info(pprint.pformat(args))
logger.info(cfg)
# cudnn related setting
cudnn.benchmark = cfg.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = cfg.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = cfg.CUDNN.ENABLED
# Create config
detection_cfg = get_cfg()
detection_cfg.DATASETS.TRAIN = (os.getcwd() + "/data/coco/images/train2017",)
    detection_cfg.DATASETS.TEST = (os.getcwd() + "/data/coco/images/val2017",)
detection_cfg.merge_from_file("../detectron2/configs/COCO-Detection/faster_rcnn_R_50_FPN_3x.yaml")
detection_cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
detection_cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_50_FPN_3x/137849458/model_final_280758.pkl"
# Create predictor
predictor = DefaultPredictor(detection_cfg)
# Create detector
model = get_pose_net(cfg, is_train=False)
'''
model = eval('models.' + cfg.MODEL.NAME + '.get_pose_net')(
cfg, is_train=False
)
'''
# print(model)
if cfg.TEST.MODEL_FILE:
logger.info('=> loading model from {}'.format(cfg.TEST.MODEL_FILE))
model.load_state_dict(torch.load(cfg.TEST.MODEL_FILE), strict=False)
else:
model_state_file = os.path.join(
final_output_dir, 'final_state.pth'
)
logger.info('=> loading model from {}'.format(model_state_file))
model.load_state_dict(torch.load(model_state_file))
model = torch.nn.DataParallel(model, device_ids=cfg.GPUS).cuda()
# define loss function (criterion) and optimizer
criterion = JointsMSELoss(
use_target_weight=cfg.LOSS.USE_TARGET_WEIGHT
).cuda()
    test_list = glob.glob("{}/{}/*".format(os.getcwd(), 'data/coco/images/val2017'))
tic = time.time()
total_image = len(test_list)
total_person = 0
detect_time = 0
estimate_time = 0
for i in range(len(test_list)):
inputs = cv2.imread(test_list[i])
det_start = time.time()
outputs = predictor(inputs)
detect_time = detect_time + time.time() - det_start
human_boxes = outputs['instances'].pred_boxes[outputs['instances'].pred_classes == 0]
# human_boxes = [i for i in human_boxes if abs(int(boxes[i, 1])-int(boxes[i, 3])) * abs(int(boxes[i, 0])-int(boxes[i, 2])) >= 32*32]
boxes = human_boxes.tensor
total_person = total_person + boxes.shape[0]
if boxes.shape[0] > 0:
for j in range(boxes.shape[0]):
cropped_img = cv2.resize(inputs[int(boxes[j, 1]): int(boxes[j, 3]),
int(boxes[j, 0]): int(boxes[j, 2])], dsize=(192, 256))
                if j == 0:
crop = torch.unsqueeze(torch.from_numpy(cropped_img), 0)
else:
crop = torch.cat((crop, torch.unsqueeze(torch.from_numpy(cropped_img), 0)), 0)
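            # The stack is NHWC uint8 at this point; convert to NCHW float and
            # apply the standard ImageNet mean/std normalization the pose model expects.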
crop = torch.transpose(torch.transpose(crop, -1, -2), -2, -3).float() # NCHW
crop = ((crop/255.) - torch.tensor([[[[0.485]],[[0.456]],[[0.406]]]]))/torch.tensor([[[[0.229]],[[0.224]],[[0.225]]]])
est_start = time.time()
detectron_validate(cfg, crop, model, criterion,
final_output_dir, tb_log_dir)
estimate_time = estimate_time + time.time() - est_start
detectron_save_image(crop, model, i, final_output_dir)
else:
total_image -= 1
total_time = time.time()-tic
print('-[only detection]-')
print('[*] Total elapsed time: {}'.format(detect_time))
print('[*] image per second: {}'.format(total_image / detect_time))
print('[*] person per second: {}'.format(total_person / detect_time))
    print('-[only estimation]-')
print('[*] Total elapsed time: {}'.format(estimate_time))
print('[*] image per second: {}'.format(total_image / estimate_time))
print('[*] person per second: {}'.format(total_person / estimate_time))
    print('-[detection+estimation]-')
print('[*] Total elapsed time: {}'.format(total_time))
print('[*] image per second: {}'.format(total_image/total_time))
print('[*] person per second: {}'.format(total_person / total_time))
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
from django.urls import path
from .views import (
FollowAPIView,
FollowersListAPIView,
FollowingListAPIView,
UserListAPIView,
UserProfileAPIView,
UserRetrieveUpdateDeleteAPIView,
UserRegisterAPIView,
UserLoginAPIView, confirm_email,
password_reset_request, password_reset_confirm,
NotificationListAPIView,
set_notifications_as_read,
set_notifications_as_unread,
)
urlpatterns = [
path("", UserListAPIView.as_view()),
path("<str:username>/", UserRetrieveUpdateDeleteAPIView.as_view()),
path("<str:username>/profile/", UserProfileAPIView.as_view()),
path("<str:username>/follow/", FollowAPIView.as_view()),
path("<str:username>/followers/", FollowersListAPIView.as_view()),
path("<str:username>/following/", FollowingListAPIView.as_view()),
path("<str:username>/notifications/", NotificationListAPIView.as_view()),
path("<str:username>/notifications/<int:pk>/read/", set_notifications_as_read),
path("<str:username>/notifications/<int:pk>/unread/", set_notifications_as_unread),
path("auth/register/", UserRegisterAPIView.as_view()),
path("auth/login/", UserLoginAPIView.as_view()),
path("auth/email/confirm/<str:token>/", confirm_email),
path("auth/password/reset/", password_reset_request),
path("auth/password/reset/confirm/<str:token>/", password_reset_confirm),
]
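# Example resolutions under these patterns (hypothetical usernames):
#   /alice/followers/             -> FollowersListAPIView
#   /alice/notifications/3/read/  -> set_notifications_as_read
#   /auth/login/                  -> UserLoginAPIView (the "auth/..." routes use two
#       literal segments, so the one-segment "<str:username>/" does not shadow them)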
|
nilq/baby-python
|
python
|
import ast
import sys
from pyflakes import checker
from pyflakes.test.harness import TestCase, skipIf
class TypeableVisitorTests(TestCase):
"""
Tests of L{_TypeableVisitor}
"""
@staticmethod
def _run_visitor(s):
"""
Run L{_TypeableVisitor} on the parsed source and return the visitor.
"""
tree = ast.parse(s)
visitor = checker._TypeableVisitor()
visitor.visit(tree)
return visitor
def test_node_types(self):
"""
Test that the typeable node types are collected
"""
visitor = self._run_visitor(
"""\
x = 1 # assignment
for x in range(1): pass # for loop
def f(): pass # function definition
with a as b: pass # with statement
"""
)
self.assertEqual(visitor.typeable_lines, [1, 2, 3, 4])
self.assertIsInstance(visitor.typeable_nodes[1], ast.Assign)
self.assertIsInstance(visitor.typeable_nodes[2], ast.For)
self.assertIsInstance(visitor.typeable_nodes[3], ast.FunctionDef)
self.assertIsInstance(visitor.typeable_nodes[4], ast.With)
def test_visitor_recurses(self):
"""
Test the common pitfall of missing `generic_visit` in visitors by
ensuring that nested nodes are reported
"""
visitor = self._run_visitor(
"""\
def f():
x = 1
"""
)
self.assertEqual(visitor.typeable_lines, [1, 2])
self.assertIsInstance(visitor.typeable_nodes[1], ast.FunctionDef)
self.assertIsInstance(visitor.typeable_nodes[2], ast.Assign)
@skipIf(sys.version_info < (3, 5), 'async syntax introduced in py35')
def test_py35_node_types(self):
"""
Test that the PEP 492 node types are collected
"""
visitor = self._run_visitor(
"""\
async def f(): # async def
async for x in y: pass # async for
async with a as b: pass # async with
"""
)
self.assertEqual(visitor.typeable_lines, [1, 2, 3])
self.assertIsInstance(visitor.typeable_nodes[1], ast.AsyncFunctionDef)
self.assertIsInstance(visitor.typeable_nodes[2], ast.AsyncFor)
self.assertIsInstance(visitor.typeable_nodes[3], ast.AsyncWith)
def test_last_node_wins(self):
"""
Test that when two typeable nodes are present on a line, the last
typeable one wins.
"""
visitor = self._run_visitor('x = 1; y = 1')
# detected both assignable nodes
self.assertEqual(visitor.typeable_lines, [1, 1])
# but the assignment to `y` wins
self.assertEqual(visitor.typeable_nodes[1].targets[0].id, 'y')
class CollectTypeCommentsTests(TestCase):
"""
Tests of L{_collect_type_comments}
"""
@staticmethod
def _collect(s):
"""
Run L{_collect_type_comments} on the parsed source and return the
mapping from nodes to comments. The return value is converted to
a set: {(node_type, tuple of comments), ...}
"""
tree = ast.parse(s)
tokens = checker.make_tokens(s)
ret = checker._collect_type_comments(tree, tokens)
return {(type(k), tuple(s for _, s in v)) for k, v in ret.items()}
def test_bytes(self):
"""
Test that the function works for binary source
"""
ret = self._collect(b'x = 1 # type: int')
self.assertSetEqual(ret, {(ast.Assign, ('# type: int',))})
def test_text(self):
"""
Test that the function works for text source
"""
ret = self._collect(u'x = 1 # type: int')
self.assertEqual(ret, {(ast.Assign, ('# type: int',))})
def test_non_type_comment_ignored(self):
"""
Test that a non-type comment is ignored
"""
ret = self._collect('x = 1 # noqa')
self.assertSetEqual(ret, set())
def test_type_comment_before_typeable(self):
"""
Test that a type comment before something typeable is ignored.
"""
ret = self._collect('# type: int\nx = 1')
self.assertSetEqual(ret, set())
def test_type_ignore_comment_ignored(self):
"""
Test that `# type: ignore` comments are not collected.
"""
ret = self._collect('x = 1 # type: ignore')
self.assertSetEqual(ret, set())
def test_type_ignore_with_other_things_ignored(self):
"""
Test that `# type: ignore` comments with more content are also not
collected.
"""
ret = self._collect('x = 1 # type: ignore # noqa')
self.assertSetEqual(ret, set())
ret = self._collect('x = 1 #type:ignore#noqa')
self.assertSetEqual(ret, set())
def test_type_comment_with_extra_still_collected(self):
ret = self._collect('x = 1 # type: int # noqa')
self.assertSetEqual(ret, {(ast.Assign, ('# type: int # noqa',))})
def test_type_comment_without_whitespace(self):
ret = self._collect('x = 1 #type:int')
self.assertSetEqual(ret, {(ast.Assign, ('#type:int',))})
def test_type_comment_starts_with_word_ignore(self):
ret = self._collect('x = 1 # type: ignore[T]')
self.assertSetEqual(ret, {(ast.Assign, ('# type: ignore[T]',))})
def test_last_node_wins(self):
"""
Test that when two typeable nodes are present on a line, the last
typeable one wins.
"""
ret = self._collect('def f(): x = 1 # type: int')
self.assertSetEqual(ret, {(ast.Assign, ('# type: int',))})
def test_function_def_assigned_comments(self):
"""
Test that type comments for function arguments are all attributed to
the function definition.
"""
ret = self._collect(
"""\
def f(
a, # type: int
b, # type: str
):
# type: (...) -> None
pass
"""
)
expected = {(
ast.FunctionDef,
('# type: int', '# type: str', '# type: (...) -> None'),
)}
self.assertSetEqual(ret, expected)
|
nilq/baby-python
|
python
|
from dotenv import load_dotenv
import os
load_dotenv()
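# Assumes a .env file next to this script, e.g. (hypothetical values):
#   CLIENT_ID=my-client-id
#   CLIENT_SECRET=my-client-secret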
client = os.getenv("CLIENT_ID")
secret = os.getenv("CLIENT_SECRET")
def printenvironment():
print(f'The client id is: {client}.')
    print(f'The client secret is: {secret}.')
if __name__ == "__main__":
printenvironment()
|
nilq/baby-python
|
python
|