hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
34c2cfd6b9e37608df44ff0e6379934f1861f606 | 2,038 | py | Python | date-conversion/models/transformer/attention.py | AndreMaz/dnn-attention | 86e834b90bd419646fd00c6ff4df910ab7874910 | [
"MIT"
] | 1 | 2020-03-11T22:52:19.000Z | 2020-03-11T22:52:19.000Z | date-conversion/models/transformer/attention.py | AndreMaz/dnn-attention | 86e834b90bd419646fd00c6ff4df910ab7874910 | [
"MIT"
] | 3 | 2021-05-21T16:15:18.000Z | 2022-02-10T01:11:23.000Z | sorting-numbers/models/transformer/attention.py | AndreMaz/dnn-attention | 86e834b90bd419646fd00c6ff4df910ab7874910 | [
"MIT"
] | null | null | null | import tensorflow as tf
from models.transformer.utils import scaled_dot_product_attention
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention layer.

    Projects queries, keys and values into `num_heads` subspaces of size
    `depth = d_model // num_heads`, runs scaled dot-product attention per
    head (via the project's `scaled_dot_product_attention` helper), then
    concatenates the heads and applies a final linear projection.
    """

    def __init__(self, d_model, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model

        # d_model must divide evenly across the heads
        assert d_model % self.num_heads == 0

        self.depth = d_model // self.num_heads

        # Per-stream linear projections for query, key and value
        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)

        # Final output projection applied after head concatenation
        self.dense = tf.keras.layers.Dense(d_model)

    def split_heads(self, x, batch_size):
        """Split the last (features) dimension into (num_heads, depth).

        Transpose the result such that the shape is
        (batch_size, num_heads, seq_len, depth).
        """
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask):
        """Run multi-head attention; returns (output, attention_weights)."""
        batch_size = tf.shape(q)[0]

        q = self.wq(q)  # (batch_size, seq_len, d_model)
        k = self.wk(k)  # (batch_size, seq_len, d_model)
        v = self.wv(v)  # (batch_size, seq_len, d_model)

        q = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
        k = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
        v = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)

        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = scaled_dot_product_attention(
            q, k, v, mask)

        # Move heads back next to depth before flattening them together
        scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3])  # (batch_size, seq_len_q, num_heads, depth)

        concat_attention = tf.reshape(scaled_attention,
                                      (batch_size, -1, self.d_model))  # (batch_size, seq_len_q, d_model)

        output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)

        return output, attention_weights
4848170d260b8e4b3152cf82c09cbb3563bcd577 | 1,823 | py | Python | examples/example4.py | mainqueg/steeldesign | e1250f93af0b156f5820e83e5925d2d991d43519 | [
"MIT"
] | null | null | null | examples/example4.py | mainqueg/steeldesign | e1250f93af0b156f5820e83e5925d2d991d43519 | [
"MIT"
] | null | null | null | examples/example4.py | mainqueg/steeldesign | e1250f93af0b156f5820e83e5925d2d991d43519 | [
"MIT"
] | null | null | null | '''Example 17.1 I-Section (LRFD).
Lin, Shin-Hua; Yu, Wei-wen; and Galambos, Theodore V., "Illustrative examples based on the ASCE
standard specifcation for the design of cold-formed stainless steel structural members" (1991). Center
for Cold-Formed Steel Structures Library. p221
'''
import steeldesign as sd

# Build a built-up I-profile from two lipped C profiles back to back
p1 = sd.I_builtup_c_profile(H= 6, B= 1.5, t= 0.135, r_out= (0.135+3/16) )
p2 = sd.c_profile(H= 6, B= 1.5, t= 0.135, r_out= (0.135+3/16) )
p2.calculate(loadProfileFromDB=True)
# Define the steel material
s = sd.steel(FY= 30, E0= 27000, nu= 0.3, n= 9.7, offset= 0.002, name= 'SA409_long')
# Define the design parameters
dp = sd.designParameters(Lx= 14*12, Ly= 7*12, Lz= 7*12)
# Create a member
m = sd.member(L= 14*12, profile= p1, steel= s, designParameters= dp)
# Torsion constant of the built-up section is twice that of a single C
p1.J = 2*p2.J
# Create the analysis
analysis = sd.ASCE_8_02(m)
# Compute design strengths
fiPn, midC = analysis.s3_4()
print('fiPn =', round(fiPn,2),'| Pn =', round(midC['Fn_FBy']*midC['Ae'],2))
print('Esbeltez de', m.profile.elements[1]['name'],'=', round(m.profile.elements[1]['sec 3.4-fiPn']['esbeltez'],2))
print('Esbeltez de', m.profile.elements[2]['name'],'=', round(m.profile.elements[2]['sec 3.4-fiPn']['esbeltez'],2))
# Reference values:    fiPn = 19.53 | Pn = 22.98
# steeldesign values:  fiPn = 19.78 | Pn = 23.27
# NOTE: all of the error comes from the difference between the computed ry and the one used in the reference. ry_ref=0.515 ry_calc= 0.518
m.profile.ry= 0.515
# Re-create the analysis with the reference radius of gyration
analysis = sd.ASCE_8_02(m)
# Compute design strengths
fiPn, midC = analysis.s3_4()
print('\n Valores con ry= 0.515 segun referencia:')
print('fiPn =', round(fiPn,2),'| Pn =', round(midC['Fn_FBy']*midC['Ae'],2))
# Reference values:   fiPn = 19.53 | Pn = 22.98
# Valores de steeldesign: fiPn = 19.53 | Pn = 22.97 | 38.787234 | 130 | 0.6791 |
2d12b87ed251a452aba94525fea768763ab47bf5 | 17,068 | py | Python | cdsdashboards/hubextension/spawners/variablemixin.py | pierrotsmnrd/cdsdashboards | e1d3ddb1999e7c77d4ef3c659c948e923825d6d2 | [
"BSD-3-Clause"
] | 153 | 2020-04-28T17:46:47.000Z | 2022-03-31T15:19:12.000Z | cdsdashboards/hubextension/spawners/variablemixin.py | pierrotsmnrd/cdsdashboards | e1d3ddb1999e7c77d4ef3c659c948e923825d6d2 | [
"BSD-3-Clause"
] | 94 | 2020-05-26T14:16:14.000Z | 2022-03-24T06:00:05.000Z | cdsdashboards/hubextension/spawners/variablemixin.py | pierrotsmnrd/cdsdashboards | e1d3ddb1999e7c77d4ef3c659c948e923825d6d2 | [
"BSD-3-Clause"
] | 32 | 2020-06-17T09:38:18.000Z | 2022-03-18T12:17:27.000Z | import os.path
import re
from copy import deepcopy
from traitlets import Unicode, Integer, Dict, Bool, validate, default
from traitlets.config import Configurable
from jupyterhub.spawner import _quote_safe
from jupyterhub.traitlets import Command
from ..base import SpawnPermissionsController, CDSConfigStore
def _get_voila_template(args, spawner):
voila_template = getattr(spawner, 'voila_template', '')
if voila_template != '':
args.append('='.join(('{--}template', voila_template)))
return args
def _get_streamlit_debug(args, spawner):
try:
if spawner.debug:
args.insert(args.index('streamlit')+1, '{--}log_level=debug')
except ValueError:
pass
return args
def _fixed_format(s, **kwargs):
for k,v in kwargs.items():
s = s.replace(''.join(('{',k,'}')), v)
return s
class VariableMixin(Configurable):
    """Spawner mixin that launches dashboard 'presentations' (Voila,
    Streamlit, Bokeh, etc.) through jhsingle-native-proxy.

    Launcher configs are dicts keyed by framework name; builtin entries are
    merged with any `extra_presentation_launchers` config at init time.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        # Merge extra_presentation_launchers config into a copy of builtin_presentation_launchers
        self.merged_presentation_launchers = deepcopy(self.builtin_presentation_launchers)
        for frameworkname, launcher in self.extra_presentation_launchers.items():
            if frameworkname not in self.merged_presentation_launchers:
                self.merged_presentation_launchers[frameworkname] = {}
            for k,v in launcher.items():
                # Per-key overwrite: extra config replaces only matching keys
                self.merged_presentation_launchers[frameworkname][k] = v

    # Placeholders like {--}, {port}, {base_url} are substituted later by
    # jhsingle-native-proxy / _fixed_format, not by Python str.format.
    builtin_presentation_launchers = {
        'voila': {
            #'cmd': ['python3', '-m', 'jhsingle_native_proxy.main'], # This is the default cmd anyway
            'args': ['--destport=0', 'python3', '{-}m','voila', '{presentation_path}',
                '{--}port={port}',
                '{--}no-browser',
                '{--}Voila.base_url={base_url}/',
                '{--}Voila.server_url=/',
                '--progressive'],
            'extra_args_fn': _get_voila_template
        },
        'streamlit': {
            'args': ['--destport=0', 'streamlit', 'run', '{presentation_path}',
                '{--}server.port={port}',
                '{--}server.headless=True',
                '{--}browser.serverAddress={origin_host}',
                '{--}browser.gatherUsageStats=false'],
            'debug_args': [], # The default is {--}debug, we don't want that
            'extra_args_fn': _get_streamlit_debug # But --log_level=debug has to come earlier in the cmdline
        },
        'plotlydash': {
            'args': ['--destport=0', 'python3', '{-}m','plotlydash_tornado_cmd.main', '{presentation_path}',
                '{--}port={port}'],
            'env': {'DASH_REQUESTS_PATHNAME_PREFIX': '{base_url}/'}
        },
        'bokeh': {
            'args': ['--destport=0', 'python3', '{-}m','bokeh_root_cmd.main', '{presentation_path}',
                '{--}port={port}',
                '{--}allow-websocket-origin={origin_host}',
                '{--}prefix={base_url}',
                '--ready-check-path=/ready-check']
        },
        'panel': {
            'args': ['--destport=0', 'python3', '{-}m','bokeh_root_cmd.main', '{presentation_path}',
                '{--}port={port}',
                '{--}allow-websocket-origin={origin_host}',
                '{--}server=panel',
                '{--}prefix={base_url}',
                '--ready-check-path=/ready-check']
        },
        'rshiny': {
            'args': ['--destport=0', 'python3', '{-}m','rshiny_server_cmd.main', '{presentation_path}',
                '{--}port={port}']
        }
    }

    extra_presentation_launchers = Dict(
        {},
        help="""
        Configuration dict containing details of any custom frameworks that should be made available to Dashboard creators.
        Any new keys added here also need to be added to the c.CDSDashboardsConfig.presentation_types list.

        See cdsdashboards/hubextension/spawners/variablemixin.py in the https://github.com/ideonate/cdsdashboards source code
        for details of the builtin_presentation_launchers dict which shows some examples. This extra_presentation_launchers
        config takes the same format.

        Any keys in extra_presentation_launchersthat also belong to builtin_presentation_launchers will be merged into the
        builtin config, e.g. {'streamlit':{'env':{'STREAMLIT_ENV_VAR':'TEST'}}} will overwrite only the env section of the
        builting streamlit launcher.
        """
    ).tag(config=True)

    default_presentation_cmd = Command(
        ['python3', '-m', 'jhsingle_native_proxy.main'],
        allow_none=False,
        help="""
        The command to run presentations through jhsingle_native_proxy, can be a string or list.

        Default is ['python3', '-m', 'jhsingle_native_proxy.main']

        Change to e.g. ['start.sh', 'python3', '-m', 'jhsingle_native_proxy.main'] to ensure start hooks are
        run in the singleuser Docker images.
        """
    ).tag(config=True)

    voila_template = Unicode(
        '',
        help="""
        --template argument to pass to Voila. Default is blank (empty string) to not pass any template to Voila command line.
        """,
    ).tag(config=True)

    proxy_request_timeout = Integer(
        0,
        help="""
        Request timeout in seconds that jhsingle-native-proxy should allow when proxying to the underlying process (e.g. Voila).
        The default of 0 means that no --request-timeout flag will be passed to jhsingle-native-proxy so it will use its own default.
        """,
    ).tag(config=True)

    proxy_ready_timeout = Integer(
        0,
        help="""
        Ready timeout in seconds that jhsingle-native-proxy should allow the underlying process to wait during startup before failing if still
        unable to respond at the --ready-check-path URL.
        The default of 0 means that no --ready-timeout flag will be passed to jhsingle-native-proxy so it will use its own default.
        """,
    ).tag(config=True)

    proxy_force_alive = Bool(
        True,
        help="""
        Whether or not jhsingle-native-proxy should fake activity on its subprocess, always reporting to the hub that activity has happened.
        The default of True means that no flag will be passed to jhsingle-native-proxy so it will use its own default (expected to be --force-alive).
        If False is specified, --no-force-alive will be passed to jhsingle-native-proxy.
        """,
    ).tag(config=True)

    proxy_last_activity_interval = Integer(
        300,
        help="""
        Frequency in seconds that jhsingle-native-proxy should send any recent activity timestamp to the hub.
        If the default of 300 is specified, no --last-activity-interval flag will be passed to jhsingle-native-proxy so it will use its default.
        Otherwise the specified value will be passed to --last-activity-interval.
        """,
    ).tag(config=True)

    proxy_websocket_max_message_size = Integer(
        0,
        help="""
        Max websocket message size allowed by jhsingle-native-proxy, passed as --websocket-max-message-size on the command line.
        The default of 0 means that no --websocket-max-message-size flag will be passed to jhsingle-native-proxy so it will use its own default.
        """,
    ).tag(config=True)

    async def start(self):
        """
        Copy trait values from user_options into the trait attrs of the spawner object
        """
        if self.user_options:
            trait_names = set(self.trait_names()) - {'user_options'}
            for k in trait_names.intersection(self.user_options.keys()):
                merged_trait = self.user_options[k]
                if type(getattr(self, k, None)) == dict:
                    # Merge dicts if one already exists for this trait
                    merged_trait = {**getattr(self, k), **merged_trait}
                setattr(self, k, merged_trait)

        # Any update for cmd needs to be set here (args and env have their own overridden functions)
        presentation_type = self._get_presentation_type()
        if presentation_type != '':
            launcher = self.merged_presentation_launchers[presentation_type]
            if 'cmd' in launcher:
                self.cmd = launcher['cmd']
            else:
                self.cmd = self.default_presentation_cmd

        return await super().start()

    def get_args(self):
        """Return the arguments to be passed after self.cmd.

        Doesn't expect shell expansion to happen. Builds the
        jhsingle-native-proxy command line for the selected presentation
        framework; falls back to the parent spawner's args when no
        presentation is configured. Also adds self.args at the end in case
        specified by the config.
        """
        presentation_type = self._get_presentation_type()

        if presentation_type == '':
            return super().get_args()

        launcher = self.merged_presentation_launchers[presentation_type]

        presentation_path = self.user_options.get('presentation_path', '')

        args = []

        # jhsingle-native-proxy --destport $destport --authtype oauth voila `pwd` {--}port={port} {--}no-browser {--}Voila.base_url={base_url}/ {--}Voila.server_url=/ --port $port
        notebook_dir = '.'
        if self.notebook_dir:
            notebook_dir = self.format_string(self.notebook_dir)

        git_repo = self.user_options.get('git_repo', '')
        git_repo_branch = self.user_options.get('git_repo_branch', '')

        repofolder = ''
        if git_repo != '':
            # A git-sourced dashboard: clone into a folder derived from the URL
            repofolder = self._calc_repo_folder(git_repo)
            args.append('--repo={}'.format(_quote_safe(git_repo)))
            notebook_dir = os.path.join(notebook_dir, repofolder)
            args.append('--repofolder={}'.format(_quote_safe(notebook_dir)))
            if git_repo_branch:
                args.append('--repobranch={}'.format(_quote_safe(git_repo_branch)))

        if presentation_path != '' and not '..' in presentation_path:
            # Should have been validated when dashboard created, but .. is particularly dangerous
            presentation_path = re.sub('^/+', '', presentation_path) # Remove leading slash(es) to ensure it is relative to home folder
            notebook_dir = os.path.join(notebook_dir, presentation_path)

        if 'args' in launcher:
            args.extend(launcher['args'])

        args.append('--presentation-path={}'.format(_quote_safe(notebook_dir)))

        conda_env = self.user_options.get('conda_env', '')
        if conda_env != '':
            args.append('--conda-env=%s' % _quote_safe(conda_env))

        if self.ip:
            args.append('--ip=%s' % _quote_safe(self.ip))

        if self.port:
            args.append('--port=%i' % self.port)

        if self.debug:
            if 'debug_args' in launcher:
                # Launcher-specific debug flags (may be empty to suppress {--}debug)
                args.extend(launcher['debug_args'])
            else:
                args.append('{--}debug')
            args.append('--debug') # For jhsingle-native-proxy itself

        # Only emit proxy flags when they differ from jhsingle-native-proxy's defaults
        proxy_request_timeout = getattr(self, 'proxy_request_timeout', 0)
        if proxy_request_timeout:
            args.append('--request-timeout={}'.format(proxy_request_timeout))

        proxy_ready_timeout = getattr(self, 'proxy_ready_timeout', 0)
        if proxy_ready_timeout:
            args.append('--ready-timeout={}'.format(proxy_ready_timeout))

        proxy_force_alive = getattr(self, 'proxy_force_alive', True)
        if proxy_force_alive == False:
            args.append('--no-force-alive')

        proxy_last_activity_interval = getattr(self, 'proxy_last_activity_interval', 300)
        if proxy_last_activity_interval != 300:
            args.append('--last-activity-interval={}'.format(proxy_last_activity_interval))

        proxy_websocket_max_message_size = getattr(self, 'proxy_websocket_max_message_size', 0)
        if proxy_websocket_max_message_size:
            args.append('--websocket-max-message-size={}'.format(proxy_websocket_max_message_size))

        args.extend(self.args)

        if 'extra_args_fn' in launcher and callable(launcher['extra_args_fn']): # Last chance for launcher config to change everything and anything
            args = launcher['extra_args_fn'](args, self)

        return args

    def _get_presentation_type(self):
        """
        Returns the presentation_type (e.g. '' for standard spawner, 'voila', 'streamlit' for named presentation frameworks).
        Throws an exception if the presentation_type doesn't have a launcher configuration in either extra_presentation_launchers
        or builtin_presentation_launchers.
        """
        if self.user_options and 'presentation_type' in self.user_options:
            presentation_type = self.user_options['presentation_type']
            if presentation_type not in self.merged_presentation_launchers:
                raise Exception('presentation type {} has not been registered with the spawner'.format(presentation_type))
            return presentation_type
        return ''

    def get_env(self):
        """Extend the parent env with launcher-specific variables, with
        {placeholders} substituted via _fixed_format."""
        env = super().get_env()
        presentation_type = self._get_presentation_type()
        if presentation_type != '':
            launcher = self.merged_presentation_launchers[presentation_type]
            if 'env' in launcher:
                presentation_dirname = '.'
                presentation_path = ''
                if self.user_options and 'presentation_path' in self.user_options:
                    presentation_path = self.user_options['presentation_path']
                    presentation_dirname = os.path.dirname(presentation_path)
                self.log.info('presentation_dirname: {}'.format(presentation_dirname))
                for k,v in launcher['env'].items():
                    env[k] = _fixed_format(v,
                        base_url=self.server.base_url,
                        presentation_dirname=presentation_dirname,
                        presentation_path=presentation_path,
                        username=self.user.name
                        )
        return env

    def _calc_repo_folder(self, git_repo):
        """Derive a filesystem-safe folder name from a git repo URL."""
        s = re.sub('^https?', '', git_repo.lower()) # Remove https and convert to lower case
        s = re.sub('[^a-z0-9]', '-', s) # Replace any non-alphanumeric chars with dash
        s = re.sub('^-+|-+$|-(?=-)', '', s) # Remove dashes from start/end and reduce multiple dashes to just one dash
        return s

    def run_pre_spawn_hook(self):
        """Enforce CDS Dashboards spawn permissions before the normal hook."""
        if not SpawnPermissionsController.get_instance(CDSConfigStore.get_instance(self.config), self.db).can_user_spawn(self.user.orm_user):
            raise Exception('User {} is not allowed to spawn a server'.format(self.user.name))
        return super().run_pre_spawn_hook()

    def _wrap_options_from_form(outerself, realfn):
        # NOTE: first arg is deliberately named `outerself` so the inner
        # closure can reference the spawner while taking only `options`.
        def inner_options_from_form(options):
            """
            If there is an options_form present on a spawner, then when it is submitted by the user,
            it clobbers any existing user_options - which may include 'presentation_*' etc data
            from the dashboard. This wrapper re-merges the stored user_options
            after the real options_from_form has run.
            """
            formdata = realfn(options)
            return outerself._postprocess_options_from_form(formdata)
        return inner_options_from_form

    def _postprocess_options_from_form(self, formdata):
        """
        If there is an options_form present on a spawner, then when it is submitted by the user,
        it clobbers any existing user_options - which may include 'presentation_*' etc data
        from the dashboard. Merge the stored ORM user_options underneath the
        submitted form data so dashboard metadata survives a form submit.
        """
        if hasattr(self, 'orm_spawner') and self.orm_spawner and hasattr(self.orm_spawner, 'user_options') \
            and isinstance(self.orm_spawner.user_options, dict):
            existing_user_options = self.orm_spawner.user_options.copy()
            existing_user_options.update(formdata)
            formdata = existing_user_options
        return formdata

    @validate('options_from_form')
    def _validate_options_from_form(self, proposal):
        """
        Make sure we wrap this custom options_from_form in a function that will preserve dashboard metadata
        """
        return self._wrap_options_from_form(proposal['value'])

    @default("options_from_form")
    def _varmix_options_from_form(self):
        """
        Make sure we wrap the default options_from_form in a function that will preserve dashboard metadata
        May need to override this to something more specific, e.g. in KubeSpawner
        """
        return self._wrap_options_from_form(self._default_options_from_form) # Spawner class' default
class MetaVariableMixin(type(Configurable)):
    """
    Use this metaclass to ensure VariableMixin occurs earlier in the MRO, so all traits are accessible at the right time.
    """

    def mro(cls):
        mro = super().mro()
        # Take VariableMixin (normally item 4) and put it at item 1
        try:
            vm_index = mro.index(VariableMixin)
            # Only reorder when VariableMixin is neither already early nor last
            if vm_index > 1 and vm_index < len(mro)-1:
                mro = [mro[0], mro[vm_index]]+ mro[1:vm_index] + mro[vm_index+1:]
        except ValueError:
            # VariableMixin not in this class's MRO - nothing to do
            pass
        return mro
| 43.210127 | 179 | 0.628486 |
abaa9d2669d521f4ec737354d1c2f4c402f41db0 | 843 | py | Python | twitch_project/geo/views.py | tejesh95/livecoding | 327114d67d613ef7300528d62b5165cd215bc1b4 | [
"MIT"
] | null | null | null | twitch_project/geo/views.py | tejesh95/livecoding | 327114d67d613ef7300528d62b5165cd215bc1b4 | [
"MIT"
] | null | null | null | twitch_project/geo/views.py | tejesh95/livecoding | 327114d67d613ef7300528d62b5165cd215bc1b4 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from .models import Geo
# Create your views here.
# import googlemaps
from geopy.geocoders import Nominatim
@csrf_exempt
def fetch_location(request):
    """Geocode a POSTed ``address`` with Nominatim and store the result.

    On a successful POST, creates a ``Geo`` row with the resolved
    latitude/longitude and returns a plain-text confirmation. On any other
    request (or a POST without an address) renders the geo_coding page.
    """
    if request.method == 'POST':
        address = request.POST.get('address')
        if address:
            # NOTE(review): newer geopy versions require a user_agent kwarg
            # for Nominatim - confirm against the pinned geopy version.
            geolocator = Nominatim()
            try:
                location = geolocator.geocode(address)
            except Exception:
                # Was a bare `except:` which also swallowed SystemExit /
                # KeyboardInterrupt; narrowed to Exception.
                return HttpResponse('There is some problem decoding to lat long, Please try again!')
            if location is None:
                # geocode() returns None for an unresolvable address; the old
                # code crashed with AttributeError on location.latitude here.
                return HttpResponse('There is some problem decoding to lat long, Please try again!')
            Geo.objects.create(latitude=location.latitude, longitude=location.longitude, address=location.address)
            return HttpResponse("Check console for fetched address ")
    return render(request, 'geo_coding.html', context=None)
| 36.652174 | 114 | 0.70344 |
ca2ce18a715b8d605fa375afd51b5f9e4d2b311e | 2,317 | py | Python | anndata/tests/test_get_vector.py | michalk8/anndata | 664e32b0aa6625fe593370d37174384c05abfd4e | [
"BSD-3-Clause"
] | 262 | 2017-11-10T11:43:19.000Z | 2022-03-31T20:24:24.000Z | anndata/tests/test_get_vector.py | michalk8/anndata | 664e32b0aa6625fe593370d37174384c05abfd4e | [
"BSD-3-Clause"
] | 654 | 2017-11-22T13:26:02.000Z | 2022-03-30T04:31:15.000Z | anndata/tests/test_get_vector.py | michalk8/anndata | 664e32b0aa6625fe593370d37174384c05abfd4e | [
"BSD-3-Clause"
] | 137 | 2017-12-28T14:33:11.000Z | 2022-03-24T02:39:53.000Z | import numpy as np
import pandas as pd
from scipy import sparse
import pytest
import anndata as ad
def test_amgibuous_keys():
    """Tests that an error is raised if obs_vector or var_vector is ambiguous."""
    # NOTE(review): name has a typo ("amgibuous" -> "ambiguous"); left as-is so
    # existing -k selections and CI references keep matching.
    # Keys deliberately appear BOTH as axis names and as column names on the
    # opposite axis, making *_vector lookups ambiguous.
    var_keys = ["The", "quick", "brown", "fox", "jumps", "over", "the", "lazy", "dog"]
    obs_keys = [
        "Lorem",
        "ipsum",
        "dolor",
        "sit",
        "amet",
        "consectetur",
        "adipiscing",
        "elit",
    ]
    adata = ad.AnnData(
        X=sparse.random(len(obs_keys), len(var_keys), format="csr"),
        layers={"layer": sparse.random(len(obs_keys), len(var_keys), format="csr")},
        obs=pd.DataFrame(
            np.random.randn(len(obs_keys), len(obs_keys) + len(var_keys)),
            index=obs_keys,
            columns=obs_keys + var_keys,
        ),
        var=pd.DataFrame(
            np.random.randn(len(var_keys), len(obs_keys) + len(var_keys)),
            index=var_keys,
            columns=var_keys + obs_keys,
        ),
    )
    adata.raw = adata

    for k in var_keys:
        # These are mostly to check that the test is working
        assert k in adata.var_names
        assert k in adata.obs.columns

        # Now the actual checks:
        with pytest.raises(ValueError, match=r".*var_names.*obs\.columns.*"):
            adata.obs_vector(k)
        with pytest.raises(ValueError, match=r".*var_names.*obs\.columns.*"):
            adata.obs_vector(k, layer="layer")

        # Should uniquely select column from in adata.var
        assert list(adata.var[k]) == list(adata.var_vector(k))
        assert list(adata.var[k]) == list(adata.var_vector(k, layer="layer"))
        assert list(adata.raw.var[k]) == list(adata.raw.var_vector(k))

    for k in obs_keys:
        assert k in adata.obs_names
        assert k in adata.var.columns

        with pytest.raises(ValueError, match=r".*obs_names.*var\.columns"):
            adata.var_vector(k)
        with pytest.raises(ValueError, match=r".*obs_names.*var\.columns"):
            adata.var_vector(k, layer="layer")

        assert list(adata.obs[k]) == list(adata.obs_vector(k))
        assert list(adata.obs[k]) == list(adata.obs_vector(k, layer="layer"))
        with pytest.raises(ValueError, match=r".*obs_names.*var\.columns*"):
            adata.raw.var_vector(k)
| 34.073529 | 86 | 0.594303 |
f31268cdc035fdbef6ff21ed29a8804685b5971a | 345 | py | Python | code/2-example-solution.py | jrperlic/data-structure-tutorial | 5e2376556e2df413517b46895a3657da8b706f1b | [
"MIT"
] | null | null | null | code/2-example-solution.py | jrperlic/data-structure-tutorial | 5e2376556e2df413517b46895a3657da8b706f1b | [
"MIT"
] | null | null | null | code/2-example-solution.py | jrperlic/data-structure-tutorial | 5e2376556e2df413517b46895a3657da8b706f1b | [
"MIT"
] | null | null | null | from collections import deque
def mystery1(ll):
    """Print the elements of *ll* from last to first, space-separated,
    finishing with a newline."""
    for item in reversed(ll):
        print(item, end=" ")
    print()
def mystery2(ll):
    """Print the even-indexed elements of *ll*, then the odd-indexed ones,
    all space-separated with no trailing newline."""
    size = len(ll)
    ordered = [ll[idx] for idx in range(size) if idx % 2 == 0]
    ordered += [ll[idx] for idx in range(size) if idx % 2 == 1]
    for item in ordered:
        print(item, end=" ")
# Demo: a deque supports reversed() and indexing just like a list.
ll = deque([1, 2, 3, 4, 5])
mystery1(ll) # 5 4 3 2 1
mystery2(ll) # 1 3 5 2 4
dc3a6f0af5f60a946b5a9a3c95bbba90bfb0f311 | 80 | py | Python | project/project/settings/prod/settings.py | identitynr8/nlp | 0d8436ebef6692f472f81082265aade9c3052088 | [
"MIT"
] | null | null | null | project/project/settings/prod/settings.py | identitynr8/nlp | 0d8436ebef6692f472f81082265aade9c3052088 | [
"MIT"
] | null | null | null | project/project/settings/prod/settings.py | identitynr8/nlp | 0d8436ebef6692f472f81082265aade9c3052088 | [
"MIT"
] | null | null | null | from .._base import *
# Production overrides of the shared base settings.

# Never run with DEBUG enabled in production.
DEBUG = False

# Hosts this deployment may serve; the leading dot matches all subdomains.
# Replace the placeholder with the real domain before deploying.
ALLOWED_HOSTS = [
    '.YOURHOST.COM',
]
| 8.888889 | 21 | 0.6125 |
e95af8103f67a07ee4ac370c0c33e1e1a8c93967 | 1,492 | py | Python | tests/sentry/utils/locking/backends/test_redis.py | E-LLP/sentry | 83d97a0ca45cdaac1d5f3026058131a3aeae0068 | [
"BSD-3-Clause"
] | 4 | 2016-03-16T07:21:36.000Z | 2017-09-04T07:29:56.000Z | tests/sentry/utils/locking/backends/test_redis.py | mitsuhiko/sentry | cddc3b643a13b52ac6d07ff22e4bd5d69ecbad90 | [
"BSD-3-Clause"
] | null | null | null | tests/sentry/utils/locking/backends/test_redis.py | mitsuhiko/sentry | cddc3b643a13b52ac6d07ff22e4bd5d69ecbad90 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from exam import fixture
from sentry.testutils import TestCase
from sentry.utils.locking.backends.redis import RedisLockBackend
from sentry.utils.redis import clusters
class RedisLockBackendTestCase(TestCase):
    """Exercises RedisLockBackend acquire/release semantics against the
    default Redis cluster."""

    @fixture
    def cluster(self):
        # Default cluster from sentry's Redis cluster registry.
        return clusters.get('default')

    @fixture
    def backend(self):
        return RedisLockBackend(self.cluster)

    def test_success(self):
        # Non-ASCII key to exercise key prefixing/encoding.
        key = u"\U0001F4A9"
        duration = 60

        full_key = self.backend.prefix_key(key)
        client = self.backend.get_client(key)

        self.backend.acquire(key, duration)
        # The lock value is the backend's own uuid; TTL should be ~duration.
        assert client.get(full_key) == self.backend.uuid
        assert duration - 2 < float(client.ttl(full_key)) <= duration

        self.backend.release(key)
        # NOTE(review): some redis-py versions return an int (0/1) from
        # EXISTS, in which case `is False` would always be False and this
        # assertion would fail even when the key is gone - verify against
        # the pinned redis-py version.
        assert client.exists(full_key) is False

    def test_acquire_fail_on_conflict(self):
        key = 'lock'
        duration = 60

        # A second backend instance (different uuid) holds the lock first.
        other_cluster = RedisLockBackend(self.cluster)
        other_cluster.acquire(key, duration)
        with pytest.raises(Exception):
            self.backend.acquire(key, duration)

    def test_release_fail_on_missing(self):
        with pytest.raises(Exception):
            self.backend.release('missing-key')

    def test_release_fail_on_conflict(self):
        key = 'lock'
        duration = 60

        # Simulate another owner's lock by writing a foreign uuid directly.
        self.backend.get_client(key).set(self.backend.prefix_key(key), 'someone-elses-uuid')
        with pytest.raises(Exception):
            self.backend.acquire(key, duration)
70f7fe2d81a0141c98d5fb2994865d4afd536fd0 | 1,667 | py | Python | tests/post/test_prometheus.py | ssalaues/metalk8s | cca4a4c64fe9cd4d7b87717aa3fda1642144da4b | [
"Apache-2.0"
] | null | null | null | tests/post/test_prometheus.py | ssalaues/metalk8s | cca4a4c64fe9cd4d7b87717aa3fda1642144da4b | [
"Apache-2.0"
] | null | null | null | tests/post/test_prometheus.py | ssalaues/metalk8s | cca4a4c64fe9cd4d7b87717aa3fda1642144da4b | [
"Apache-2.0"
] | null | null | null | import logging
import pytest
import requests
from pytest_bdd import parsers
from pytest_bdd import scenarios
from pytest_bdd import then
from pytest_bdd import when
@pytest.fixture
def pytestbdd_strict_gherkin():
    # Disable strict Gherkin so steps can be shared between scenarios.
    return False


# Bind every scenario in the feature file to test functions in this module.
scenarios('features/prometheus.feature')
@when(parsers.parse(
    "I list the prometheus '{prometheus_endpoints}' job endpoints"))
def get_prometheus_endpoint(request, kubectl_proxy, prometheus_endpoints):
    """Fetch Prometheus active targets via the kubectl proxy and keep only
    those whose `job` label matches *prometheus_endpoints*.

    The filtered list is stashed on the pytest `request` object so the
    following `then` step can read it.
    """
    # kubectl_proxy fixture is required so the 127.0.0.1:8001 proxy is up.
    prometheus_endpoints_res = requests.get(
        'http://127.0.0.1:8001/api/v1/namespaces/kube-ops/services/'
        'kube-prometheus:http/proxy/api/v1/targets')
    prometheus_endpoints_res.raise_for_status()

    def filter_endpoints(endpoints_result, job_label):
        # Yield active targets whose job label equals job_label.
        for endpoint in endpoints_result['data']['activeTargets']:
            logging.debug('Prometheus Endpoint found {}'.format(endpoint))
            try:
                if endpoint['labels']['job'] == job_label:
                    yield endpoint
            except KeyError:
                # Target without a job label: log and skip.
                logging.warning(
                    'Endpoints {} has no job label'.format(endpoint))

    endpoints_list = list(filter_endpoints(
        prometheus_endpoints_res.json(),
        prometheus_endpoints))
    request.prometheus_endpoints = endpoints_list
    return endpoints_list
@then(parsers.parse('I should count as many endpoints as {groups_name} hosts'))
def count_prometheus_endpoint(request, groups_name, inventory_obj):
    """Assert one scraped endpoint per unique host across the given
    colon-separated inventory groups."""
    num_endpoints = len(request.prometheus_endpoints)
    # Use a set so hosts in several groups are only counted once.
    nodes = set()
    for group_name in groups_name.split(":"):
        nodes.update(inventory_obj.get_groups_dict()[group_name])
    assert num_endpoints == len(nodes)
9a31132cdc9874d5ed3d28e05454a7cc043e3a52 | 356 | py | Python | setup.py | cosmic-tichy/geogateway-django-app-develop | c14853e5cbb56edd0643615e522b5476e0afca64 | [
"Apache-2.0"
] | null | null | null | setup.py | cosmic-tichy/geogateway-django-app-develop | c14853e5cbb56edd0643615e522b5476e0afca64 | [
"Apache-2.0"
] | null | null | null | setup.py | cosmic-tichy/geogateway-django-app-develop | c14853e5cbb56edd0643615e522b5476e0afca64 | [
"Apache-2.0"
] | null | null | null | import setuptools
setuptools.setup(
name="geogateway-django-app",
version="0.0.1",
description="GeoGateway Django app",
packages=setuptools.find_packages(),
install_requires=[
'django>=1.11.16'
],
entry_points="""
[airavata.djangoapp]
geogateway_django_app = geogateway_django_app.apps:GeogatewayDjangoAppConfig
""",
)
| 22.25 | 76 | 0.707865 |
6d8f70dae6a60bb7c2a373fd579066d77af1ff0e | 850 | py | Python | angr/procedures/definitions/win32_mapi32.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/win32_mapi32.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | angr/procedures/definitions/win32_mapi32.py | r4b3rt/angr | c133cfd4f83ffea2a1d9e064241e9459eaabc55f | [
"BSD-2-Clause"
] | null | null | null | # pylint:disable=line-too-long
import logging
from ...sim_type import SimTypeFunction, SimTypeShort, SimTypeInt, SimTypeLong, SimTypeLongLong, SimTypeDouble, SimTypeFloat, SimTypePointer, SimTypeChar, SimStruct, SimTypeFixedSizeArray, SimTypeBottom, SimUnion, SimTypeBool
from ...calling_conventions import SimCCStdcall, SimCCMicrosoftAMD64
from .. import SIM_PROCEDURES as P
from . import SimLibrary
_l = logging.getLogger(name=__name__)

# SimLibrary definition for mapi32.dll; this file follows the layout of
# angr's auto-generated win32 definition modules.
lib = SimLibrary()
lib.set_default_cc('X86', SimCCStdcall)
lib.set_default_cc('AMD64', SimCCMicrosoftAMD64)
lib.set_library_names("mapi32.dll")
prototypes = \
    {
        #
        'MAPIFreeBuffer': SimTypeFunction([SimTypePointer(SimTypeBottom(label="Void"), offset=0)], SimTypeInt(signed=False, label="UInt32"), arg_names=["pv"]),
    }

lib.set_prototypes(prototypes)
| 35.416667 | 257 | 0.745882 |
5d13b3177916ec01e1d0752560c1ba0c93b3a8e7 | 4,654 | py | Python | buildpack/stage.py | huibin-coder/cf-mendix-buildpack | 8c376e9852d107ba3773ae88c9cdaee4dd0c2e2f | [
"Apache-2.0"
] | 36 | 2015-01-22T16:28:55.000Z | 2021-12-28T10:26:10.000Z | buildpack/stage.py | huibin-coder/cf-mendix-buildpack | 8c376e9852d107ba3773ae88c9cdaee4dd0c2e2f | [
"Apache-2.0"
] | 208 | 2015-06-01T13:39:17.000Z | 2022-03-24T14:16:09.000Z | buildpack/stage.py | huibin-coder/cf-mendix-buildpack | 8c376e9852d107ba3773ae88c9cdaee4dd0c2e2f | [
"Apache-2.0"
] | 135 | 2015-01-17T14:47:22.000Z | 2022-03-07T08:20:18.000Z | #!/usr/bin/env python3
import logging
import os
import shutil
import sys
from buildpack import (
appdynamics,
dynatrace,
databroker,
datadog,
java,
metering,
mx_java_agent,
mxbuild,
newrelic,
nginx,
runtime,
telegraf,
util,
)
from buildpack.runtime_components import database
BUILDPACK_DIR = os.path.dirname(
os.path.dirname(os.path.join(os.path.dirname(__file__), ".."))
)
BUILD_DIR = sys.argv[1]
CACHE_DIR = os.path.join(sys.argv[2], "bust")
DOT_LOCAL_LOCATION = os.path.join(BUILD_DIR, ".local")
SUPPORTED_STACKS = [
"cflinuxfs3",
None,
] # None is allowed, but not supported
def check_database_environment():
    """Return True when a database service is bound; log guidance and return False otherwise."""
    try:
        database.get_config()
    except RuntimeError as ex:
        # No DATABASE_URL / bound service: tell the operator how to fix it.
        logging.error(
            "You should provide a DATABASE_URL by adding a database service "
            "to this application, it can be either MySQL or Postgres "
            "If this is the first push of a new app, "
            "set up a database service "
            "and push again afterwards: %s",
            ex,
        )
        return False
    return True
def preflight_check(version):
    """Validate the environment before staging begins.

    Args:
        version: the Mendix runtime version detected in the build directory.

    Raises:
        ValueError: if no database service is configured.
        NotImplementedError: if the CF stack or the Mendix version is
            not supported by this buildpack.
    """
    if not check_database_environment():
        raise ValueError("Missing database configuration")

    stack = os.getenv("CF_STACK")
    logging.info(
        "Preflight check on Mendix version [%s] and stack [%s]...",
        version,
        stack,
    )

    # Fixed `not stack in ...` to the idiomatic `stack not in ...`.
    if stack not in SUPPORTED_STACKS:
        raise NotImplementedError(
            "Stack [{}] is not supported by this buildpack".format(stack)
        )
    if not runtime.is_version_supported(version):
        raise NotImplementedError(
            "Mendix version [{}] is not supported by this buildpack".format(
                version
            )
        )
    if runtime.is_version_end_of_support(version):
        # Lazy %-style args, consistent with the logging.info call above:
        # the message is only interpolated if the record is emitted.
        logging.warning(
            "Mendix version [%s] is end-of-support. Please upgrade to a "
            "supported Mendix version "
            "(https://docs.mendix.com/releasenotes/studio-pro/lts-mts).",
            version,
        )

    logging.info("Preflight check completed")
def set_up_directory_structure():
    """Create the .local directory inside the build dir where staged components are installed."""
    logging.debug("Creating buildpack directory structure...")
    util.mkdir_p(DOT_LOCAL_LOCATION)
def copy_buildpack_resources():
    """Copy the buildpack's own code and metadata into the app build directory."""
    # The buildpack/ and lib/ trees are copied wholesale.
    for tree in ("buildpack", "lib"):
        shutil.copytree(
            os.path.join(BUILDPACK_DIR, tree),
            os.path.join(BUILD_DIR, tree),
        )
    # .commit only exists in packaged buildpack releases, so copy it
    # conditionally.
    commit_file_path = os.path.join(BUILDPACK_DIR, ".commit")
    if os.path.isfile(commit_file_path):
        shutil.copy(commit_file_path, os.path.join(BUILD_DIR, ".commit"))
    shutil.copy(
        os.path.join(BUILDPACK_DIR, "VERSION"),
        os.path.join(BUILD_DIR, "VERSION"),
    )
def get_mpr_file():
    # The .mpr Mendix project file is only present when the app is pushed
    # as source (as opposed to a pre-built deployment package).
    return util.get_mpr_file_from_dir(BUILD_DIR)
def is_source_push():
    """Return True when the push contains Mendix project source (an .mpr
    file is present), False for a pre-built deployment package."""
    # Collapsed the redundant if/else over a boolean condition.
    return get_mpr_file() is not None
if __name__ == "__main__":
    logging.basicConfig(
        level=util.get_buildpack_loglevel(),
        stream=sys.stdout,
        format="%(levelname)s: %(message)s",
    )
    runtime_version = runtime.get_version(BUILD_DIR)
    # Abort staging early (exit 1) if the database, stack, or runtime
    # version is unusable.
    try:
        preflight_check(runtime_version)
    except (ValueError, NotImplementedError) as error:
        logging.error(error)
        exit(1)
    # Source pushes are compiled with MxBuild first; package pushes skip this.
    if is_source_push():
        try:
            mxbuild.build_from_source(
                BUILD_DIR,
                CACHE_DIR,
                DOT_LOCAL_LOCATION,
                runtime_version,
                runtime.get_java_version(runtime_version),
            )
        except RuntimeError as error:
            logging.error(error)
            exit(1)
    set_up_directory_structure()
    copy_buildpack_resources()
    # Stage each component into the droplet. Order matters to the extent
    # that java/runtime are prerequisites for the agents layered on top.
    java.stage(
        BUILDPACK_DIR,
        CACHE_DIR,
        DOT_LOCAL_LOCATION,
        runtime.get_java_version(runtime_version),
    )
    appdynamics.stage(DOT_LOCAL_LOCATION, CACHE_DIR)
    dynatrace.stage(DOT_LOCAL_LOCATION, CACHE_DIR)
    newrelic.stage(DOT_LOCAL_LOCATION, CACHE_DIR)
    mx_java_agent.stage(
        BUILDPACK_DIR, DOT_LOCAL_LOCATION, CACHE_DIR, runtime_version
    )
    telegraf.stage(BUILDPACK_DIR, DOT_LOCAL_LOCATION, CACHE_DIR)
    datadog.stage(BUILDPACK_DIR, DOT_LOCAL_LOCATION, CACHE_DIR)
    metering.stage(BUILDPACK_DIR, BUILD_DIR, CACHE_DIR)
    runtime.stage(BUILDPACK_DIR, BUILD_DIR, CACHE_DIR)
    databroker.stage(DOT_LOCAL_LOCATION, CACHE_DIR)
    nginx.stage(BUILDPACK_DIR, BUILD_DIR, CACHE_DIR)
    logging.info("Mendix Cloud Foundry Buildpack staging completed")
| 27.702381 | 164 | 0.643532 |
1639467cb178639c2d99c49777f2c770ef12c01e | 1,723 | py | Python | Chapter18/hw/run.py | haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 1cbdff216fdc5cec02cc0da8664b788941f025c1 | [
"MIT"
] | 621 | 2019-07-27T19:24:56.000Z | 2022-03-31T14:19:52.000Z | Chapter18/hw/run.py | haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 1cbdff216fdc5cec02cc0da8664b788941f025c1 | [
"MIT"
] | 40 | 2019-09-01T09:45:22.000Z | 2022-03-24T13:13:00.000Z | Chapter18/hw/run.py | haohaoxiao/Deep-Reinforcement-Learning-Hands-On-Second-Edition | 1cbdff216fdc5cec02cc0da8664b788941f025c1 | [
"MIT"
] | 346 | 2019-07-26T15:16:56.000Z | 2022-03-30T15:33:20.000Z | """
Simple orientation calculation from Accelerometer
"""
import pyb
import utime
from machine import I2C
from libhw.hw_sensors import lis331dlh as lis
from libhw.sensor_buffer import SensorsBuffer
from libhw.postproc import PostPitchRoll
from libhw import servo
SDA = 'X12'
SCL = 'Y11'
PINS = ["B6", "B7", "B10", "B11"]
INV = [True, False, True, False]
STACK_OBS = 4
def do_import(module_name):
    """Dynamically import and return the ``libhw.<module_name>`` submodule."""
    full_name = "libhw.%s" % module_name
    # __import__ with a non-empty fromlist returns the submodule itself
    # rather than the top-level libhw package.
    return __import__(full_name, globals(), locals(), [module_name])
def run(model_name):
    """Drive the servos from accelerometer pitch/roll through the given model.

    model_name: name of a module under libhw/ exposing forward(obs) -> positions.
    Runs forever; sensors are stopped in the finally block on interrupt.
    """
    model = do_import(model_name)
    # Accelerometer on I2C, buffered at 100 Hz in batches of 10 samples.
    i2c = I2C(freq=400000, scl=SCL, sda=SDA)
    acc = lis.Lis331DLH(i2c)
    buf = SensorsBuffer([acc], timer_index=1, freq=100,
                        batch_size=10, buffer_size=100)
    post = PostPitchRoll(buf, pad_yaw=True)
    buf.start()
    # Four servos on the configured pins, some direction-inverted.
    ch = servo.pins_to_timer_channels(PINS)
    brain = servo.ServoBrain()
    brain.init(ch, inversions=INV)
    obs = []
    # Each frame contributes 4 servo positions + 3 pitch/roll/yaw values,
    # stacked over STACK_OBS frames.
    obs_len = STACK_OBS*(3+4)
    frames = 0
    frame_time = 0
    ts = utime.ticks_ms()
    try:
        while True:
            for v in post:
                # Observation = current servo positions followed by the
                # post-processed orientation values, one element per row.
                for n in brain.positions:
                    obs.append([n])
                for n in v:
                    obs.append([n])
                # Keep only the most recent obs_len entries (sliding window).
                obs = obs[-obs_len:]
                if len(obs) == obs_len:
                    frames += 1
                    frame_time += utime.ticks_diff(utime.ticks_ms(), ts)
                    ts = utime.ticks_ms()
                    res = model.forward(obs)
                    # NOTE(review): this `v` shadows the outer loop variable;
                    # harmless here because obs was already consumed above.
                    pos = [v[0] for v in res]
                    print("%s, FPS: %.3f" % (pos, frames*1000/frame_time))
                    brain.positions = pos
    finally:
        buf.stop()
brain.deinit()
| 26.921875 | 74 | 0.551944 |
886f576a45759632492bab1da51b2e9bc62ae421 | 2,796 | py | Python | src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/restore_point_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 207 | 2017-11-29T06:59:41.000Z | 2022-03-31T10:00:53.000Z | src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/restore_point_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 4,061 | 2017-10-27T23:19:56.000Z | 2022-03-31T23:18:30.000Z | src/db-up/azext_db_up/vendored_sdks/azure_mgmt_sql/sql/models/restore_point_py3.py | Mannan2812/azure-cli-extensions | e2b34efe23795f6db9c59100534a40f0813c3d95 | [
"MIT"
] | 802 | 2017-10-11T17:36:26.000Z | 2022-03-31T22:24:32.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource_py3 import ProxyResource
class RestorePoint(ProxyResource):
    """Database restore points.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :ivar id: Resource ID.
    :vartype id: str
    :ivar name: Resource name.
    :vartype name: str
    :ivar type: Resource type.
    :vartype type: str
    :ivar location: Resource location.
    :vartype location: str
    :ivar restore_point_type: The type of restore point. Possible values
     include: 'CONTINUOUS', 'DISCRETE'
    :vartype restore_point_type: str or
     ~azure.mgmt.sql.models.RestorePointType
    :ivar earliest_restore_date: The earliest time to which this database can
     be restored
    :vartype earliest_restore_date: datetime
    :ivar restore_point_creation_date: The time the backup was taken
    :vartype restore_point_creation_date: datetime
    :ivar restore_point_label: The label of restore point for backup request
     by user
    :vartype restore_point_label: str
    """
    # Every property is server-populated: marked readonly so the msrest
    # serializer never sends them in a request body.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'location': {'readonly': True},
        'restore_point_type': {'readonly': True},
        'earliest_restore_date': {'readonly': True},
        'restore_point_creation_date': {'readonly': True},
        'restore_point_label': {'readonly': True},
    }
    # Maps Python attribute names to wire-format keys and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'restore_point_type': {'key': 'properties.restorePointType', 'type': 'RestorePointType'},
        'earliest_restore_date': {'key': 'properties.earliestRestoreDate', 'type': 'iso-8601'},
        'restore_point_creation_date': {'key': 'properties.restorePointCreationDate', 'type': 'iso-8601'},
        'restore_point_label': {'key': 'properties.restorePointLabel', 'type': 'str'},
    }
    def __init__(self, **kwargs) -> None:
        """Initialize with all server-populated fields set to None."""
        super(RestorePoint, self).__init__(**kwargs)
        self.location = None
        self.restore_point_type = None
        self.earliest_restore_date = None
        self.restore_point_creation_date = None
self.restore_point_label = None
| 38.833333 | 106 | 0.626967 |
c9c4785a603223f77ae99b074f9bc0891f470b4d | 5,271 | py | Python | deep_learning/DeepLearningWithPython/chapter6-seq-text-rnn-cnn/6.3-advanced-usage-of-recurrent-neural-networks.py | keepangry/ai_algorithm | 21d8024296a2f2d2797448ed34eb383359259684 | [
"Apache-2.0"
] | 2 | 2018-08-29T11:09:36.000Z | 2018-10-22T11:46:36.000Z | deep_learning/DeepLearningWithPython/chapter6-seq-text-rnn-cnn/6.3-advanced-usage-of-recurrent-neural-networks.py | keepangry/ai_algorithm | 21d8024296a2f2d2797448ed34eb383359259684 | [
"Apache-2.0"
] | null | null | null | deep_learning/DeepLearningWithPython/chapter6-seq-text-rnn-cnn/6.3-advanced-usage-of-recurrent-neural-networks.py | keepangry/ai_algorithm | 21d8024296a2f2d2797448ed34eb383359259684 | [
"Apache-2.0"
] | null | null | null | import os
data_dir = '/home/yangsen/workspace/ai_algorithm/data/'
fname = os.path.join(data_dir, 'jena_climate_2009_2016.csv')
f = open(fname)
data = f.read()
f.close()
lines = data.split('\n')
header = lines[0].split(',')
lines = lines[1:]
print(header)
print(len(lines))
import numpy as np
float_data = np.zeros((len(lines), len(header) - 1))
for i, line in enumerate(lines):
values = [float(x) for x in line.split(',')[1:]]
float_data[i, :] = values
from matplotlib import pyplot as plt
temp = float_data[:, 1] # temperature (in degrees Celsius)
plt.plot(range(len(temp)), temp)
plt.show()
plt.plot(range(1440), temp[:1440])
plt.show()
mean = float_data[:200000].mean(axis=0)
float_data -= mean
std = float_data[:200000].std(axis=0)
float_data /= std
def generator(data, lookback, delay, min_index, max_index,
              shuffle=False, batch_size=128, step=6):
    """Yield (samples, targets) batches of timeseries windows forever.

    samples has shape (batch, lookback // step, n_features): for each
    sampled row, the previous `lookback` timesteps thinned to every
    `step`-th observation. targets is column 1 of the row `delay`
    timesteps ahead. Rows are drawn from [min_index + lookback, max_index),
    randomly when shuffle is True, otherwise sequentially with wrap-around.
    """
    if max_index is None:
        max_index = len(data) - delay - 1
    cursor = min_index + lookback
    while True:
        if shuffle:
            rows = np.random.randint(
                min_index + lookback, max_index, size=batch_size)
        else:
            # Sequential mode: restart from the beginning once exhausted.
            if cursor + batch_size >= max_index:
                cursor = min_index + lookback
            rows = np.arange(cursor, min(cursor + batch_size, max_index))
            cursor += len(rows)

        n_timesteps = lookback // step
        samples = np.zeros((len(rows), n_timesteps, data.shape[-1]))
        targets = np.zeros((len(rows),))
        for j, row in enumerate(rows):
            samples[j] = data[range(row - lookback, row, step)]
            targets[j] = data[row + delay][1]
        yield samples, targets
# Sampling configuration shared by all three generators:
#   lookback - how many past timesteps each sample covers,
#   step     - keep one observation out of every `step`,
#   delay    - predict the target this many timesteps ahead.
lookback = 1440
step = 6
delay = 144
batch_size = 128
# Train / validation / test generators split by row-index ranges.
train_gen = generator(float_data,
                      lookback=lookback,
                      delay=delay,
                      min_index=0,
                      max_index=200000,
                      shuffle=True,
                      step=step,
                      batch_size=batch_size)
val_gen = generator(float_data,
                    lookback=lookback,
                    delay=delay,
                    min_index=200001,
                    max_index=300000,
                    step=step,
                    batch_size=batch_size)
test_gen = generator(float_data,
                     lookback=lookback,
                     delay=delay,
                     min_index=300001,
                     max_index=None,
                     step=step,
                     batch_size=batch_size)
# This is how many steps to draw from `val_gen`
# in order to see the whole validation set:
val_steps = (300000 - 200001 - lookback) // batch_size
# This is how many steps to draw from `test_gen`
# in order to see the whole test set:
test_steps = (len(float_data) - 300001 - lookback) // batch_size
def evaluate_naive_method():
    """Baseline MAE (in normalized units): predict that the temperature
    `delay` steps ahead equals the last observed temperature.

    Prints and returns the mean MAE over the whole validation set.
    """
    batch_maes = []
    # Loop variable renamed from `step`, which shadowed the module-level
    # sampling constant of the same name.
    for _ in range(val_steps):
        samples, targets = next(val_gen)
        # Last timestep of the window, temperature column (index 1).
        preds = samples[:, -1, 1]
        mae = np.mean(np.abs(preds - targets))
        batch_maes.append(mae)
    naive_mae = np.mean(batch_maes)
    print(naive_mae)
    return naive_mae
evaluate_naive_method()
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
# Baseline 1: densely connected model on the flattened window.
model = Sequential()
model.add(layers.Flatten(input_shape=(lookback // step, float_data.shape[-1])))
model.add(layers.Dense(32, activation='relu'))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
                              steps_per_epoch=500,
                              epochs=20,
                              validation_data=val_gen,
                              validation_steps=val_steps)
from keras.models import Sequential
from keras import layers
from keras.optimizers import RMSprop
# Baseline 2: recurrent (GRU) model over the raw sequence.
model = Sequential()
model.add(layers.GRU(32, input_shape=(None, float_data.shape[-1])))
model.add(layers.Dense(1))
model.compile(optimizer=RMSprop(), loss='mae')
history = model.fit_generator(train_gen,
                              steps_per_epoch=500,
                              epochs=10,
                              validation_data=val_gen,
                              validation_steps=val_steps)
# IMDB reversed-sequence LSTM. (Originally labelled "b-lstm", but no
# Bidirectional wrapper is used — the inputs are simply reversed.)
from keras.datasets import imdb
from keras.preprocessing import sequence
from keras import layers
from keras.models import Sequential
# Number of words to consider as features
max_features = 10000
# Cut texts after this number of words (among top max_features most common words)
maxlen = 500
# Load data
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
# Reverse sequences
x_train = [x[::-1] for x in x_train]
x_test = [x[::-1] for x in x_test]
# Pad sequences
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
model = Sequential()
model.add(layers.Embedding(max_features, 128))
model.add(layers.LSTM(32))
model.add(layers.Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['acc'])
history = model.fit(x_train, y_train,
                    epochs=10,
                    batch_size=128,
validation_split=0.2) | 29.283333 | 81 | 0.600266 |
15bc5f87e9e8ef5e403c1977b69ea04ee3312910 | 3,253 | py | Python | robosuite/models/arenas/table_cabinet_arena.py | quantumiracle/robolite | b3166a1c51a1118706177f4a4e7401e7c2c6c404 | [
"MIT"
] | null | null | null | robosuite/models/arenas/table_cabinet_arena.py | quantumiracle/robolite | b3166a1c51a1118706177f4a4e7401e7c2c6c404 | [
"MIT"
] | null | null | null | robosuite/models/arenas/table_cabinet_arena.py | quantumiracle/robolite | b3166a1c51a1118706177f4a4e7401e7c2c6c404 | [
"MIT"
] | null | null | null | import numpy as np
from robosuite.models.arenas import Arena
from robosuite.utils.mjcf_utils import xml_path_completion
from robosuite.utils.mjcf_utils import array_to_string, string_to_array
class TableCabinetArena(Arena):
    """Workspace containing a table and a cabinet with a hinged, knobbed door."""
    def __init__(
        self, table_full_size=(0.8, 0.8, 0.8), table_friction=(1, 0.005, 0.0001)
    ):
        """
        Args:
            table_full_size: full (x, y, z) dimensions of the table
            table_friction: friction parameters of the table
        """
        super().__init__(xml_path_completion("arenas/table_cabinet_arena.xml"))
        self.table_full_size = np.array(table_full_size)
        self.table_half_size = self.table_full_size / 2
        self.table_friction = table_friction
        # Look up the MJCF elements this class manipulates.
        self.floor = self.worldbody.find("./geom[@name='floor']")
        self.table_body = self.worldbody.find("./body[@name='table']")
        self.table_collision = self.table_body.find("./geom[@name='table_collision']")
        self.table_visual = self.table_body.find("./geom[@name='table_visual']")
        self.table_top = self.table_body.find("./site[@name='table_top']")
        # Cabinet door: frame body -> door link -> knob link.
        self.door_body = self.worldbody.find("./body[@name='frame_link']")
        self.door_inertial = self.door_body.find("./inertial") # since the mass cannot be directly set to body, and the inertial in xml does not have a name, the inertial needs to be referred here
        self.door_link = self.door_body.find(("./body[@name='door_link']"))
        self.door_hinge = self.door_link.find(("./joint[@name='hinge0']"))
        # self.door_hinge = self.door_body.find("./body[@name='door_link2']").find(("./joint[@name='base_to_door2']"))
        self.knob_link_body = self.door_link.find("./body[@name='knob_link']")
        self.knob_link_inertial = self.knob_link_body.find("./inertial")
        self.knob_geom = self.knob_link_body.find("./geom[@name='cabinet_knob']")
        # Fail fast if the XML layout does not match the expected element names.
        assert self.floor is not None
        assert self.table_body is not None
        assert self.door_body is not None
        assert self.door_inertial is not None
        assert self.door_link is not None
        assert self.door_hinge is not None
        assert self.knob_link_body is not None
        assert self.knob_link_inertial is not None
        assert self.knob_geom is not None
        self.configure_location()
    def configure_location(self):
        """Write table position/size/friction back into the MJCF elements."""
        self.bottom_pos = np.array([0, 0, 0])
        self.floor.set("pos", array_to_string(self.bottom_pos))
        # Table body sits so its top face is table_full_size[2] above the floor.
        self.center_pos = self.bottom_pos + np.array([0, 0, self.table_half_size[2]])
        self.table_body.set("pos", array_to_string(self.center_pos))
        self.table_collision.set("size", array_to_string(self.table_half_size))
        self.table_collision.set("friction", array_to_string(self.table_friction))
        self.table_visual.set("size", array_to_string(self.table_half_size))
        self.table_top.set(
            "pos", array_to_string(np.array([0, 0, self.table_half_size[2]]))
        )
    @property
    def table_top_abs(self):
        """Returns the absolute position of table top"""
        table_height = np.array([0, 0, self.table_full_size[2]])
return string_to_array(self.floor.get("pos")) + table_height
| 47.838235 | 197 | 0.673225 |
08d327a3739b669f6660f3b2b81dea3366c9f8e4 | 227 | py | Python | ivf/core/sfs/normal_from_gradient.py | tody411/ImageViewerFramework | 5c183c34e65494b6af1287e70152b995a868c6ac | [
"MIT"
] | null | null | null | ivf/core/sfs/normal_from_gradient.py | tody411/ImageViewerFramework | 5c183c34e65494b6af1287e70152b995a868c6ac | [
"MIT"
] | null | null | null | ivf/core/sfs/normal_from_gradient.py | tody411/ImageViewerFramework | 5c183c34e65494b6af1287e70152b995a868c6ac | [
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
## @package ivf.core.sfs.normal_from_gradient
#
# ivf.core.sfs.normal_from_gradient utility package.
# @author tody
# @date 2016/02/08
import numpy as np
import scipy.sparse
import cv2
| 17.461538 | 53 | 0.678414 |
56d5195f1d8562743e9ddcdfd69805a38e2ae4ed | 5,178 | py | Python | lambda/functions/createDCV/lambda_function_createDCV.py | xinlaoda/DCVCluster | fdc77fdd5bbef17e56c4477fc89d09cadc504757 | [
"Apache-2.0"
] | null | null | null | lambda/functions/createDCV/lambda_function_createDCV.py | xinlaoda/DCVCluster | fdc77fdd5bbef17e56c4477fc89d09cadc504757 | [
"Apache-2.0"
] | null | null | null | lambda/functions/createDCV/lambda_function_createDCV.py | xinlaoda/DCVCluster | fdc77fdd5bbef17e56c4477fc89d09cadc504757 | [
"Apache-2.0"
] | null | null | null | import boto3
import json
import logging
import os
from uuid import uuid4
uuidChars = ("a", "b", "c", "d", "e", "f",
"g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s",
"t", "u", "v", "w", "x", "y", "z", "0", "1", "2", "3", "4", "5",
"6", "7", "8", "9", "A", "B", "C", "D", "E", "F", "G", "H", "I",
"J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V",
"W", "X", "Y", "Z")
print('Loading function...')
cfn = boto3.client('cloudformation')
dynamodb = boto3.resource('dynamodb')
with open('createDCV.yml', 'r') as f:
cf_temp = f.read()
def short_uuid():
    """Return an 8-character random id.

    A random UUID's 32 hex digits are split into eight 4-digit groups;
    each group is folded into one symbol of the 62-character alphabet
    in ``uuidChars`` (0x3E == len(uuidChars)).
    """
    hex_digits = uuid4().hex
    picked = [
        uuidChars[int(hex_digits[k * 4:k * 4 + 4], 16) % 0x3E]
        for k in range(8)
    ]
    return ''.join(picked)
def respond(err, res=None):
    """Build an API Gateway proxy response.

    A truthy ``err`` produces a 400 with the error as the raw body;
    otherwise ``res`` is JSON-serialized into a 200 response.
    """
    if err:
        status, body = '400', err
    else:
        status, body = '200', json.dumps(res)
    return {
        'statusCode': status,
        'body': body,
        'headers': {'Content-Type': 'application/json'},
    }
def lambda_handler(event, context):
    '''Invoke cloudformation template to create DCV instance, target group
    and ALB rule.
    '''
    # cf_temp is read from createDCV.yml at module import time.
    if not cf_temp:
        return respond("Failed to get CloudFormation template file.")
    ### get parameters from environment.
    env_KeyPair = os.environ['KeyPair']
    env_PublicSubnet = os.environ['PublicSubnetId']
    env_VPCId = os.environ['VPCId']
    env_SecurityGroupId = os.environ['DCVSecurityGroupId']
    env_DCVELBListener = os.environ['DCVELBListener']
    env_DCVDynamoDBTable = os.environ['DCVDynamoDBTable']
    print("Received event: " + json.dumps(event, indent=2))
    ''' post body format:
    {
        user: <user>
        bundle: <bundle>
        instanceSize: <instanceSize>
    }
    '''
    print("Post method, create DCV session.")
    # API Gateway may deliver the body as a JSON string or a dict.
    _body = event["body"]
    print(type(_body))
    if isinstance(_body,str):
        body = json.loads(_body)
    else:
        body = _body
    print("post body: " + json.dumps(body))
    # Validate required fields; each missing field returns a 400.
    if "user" in body.keys():
        dcv_user = body["user"]
    else:
        err = "Can not get user from request body."
        return respond(err)
    if "bundle" in body.keys():
        # NOTE(review): dcv_bundle is validated but never used below —
        # the DynamoDB item hard-codes Bundle='ALinux2'.
        dcv_bundle = body["bundle"]
    else:
        return respond("Can not get bundle from request body.")
    if "instanceSize" in body.keys():
        dcv_instanceSize = body["instanceSize"]
    else:
        return respond("Can not get instanceSize from request body.")
    # One CloudFormation stack per session, named by a fresh short id.
    dcv_id = short_uuid()
    stackName = 'dcv-node-' + dcv_id
    response = cfn.create_stack(
        StackName=stackName,
        TemplateBody=cf_temp,
        Parameters=[
            {
                'ParameterKey': 'AMI',
                'ParameterValue': 'ami-058c6c4192619f214'
            },
            {
                'ParameterKey': 'DCVELBListener',
                'ParameterValue': env_DCVELBListener
            },
            {
                'ParameterKey': 'DCVUser',
                'ParameterValue': dcv_user
            },
            {
                'ParameterKey': 'InstanceType',
                'ParameterValue': dcv_instanceSize
            },
            {
                'ParameterKey': 'KeyPair',
                'ParameterValue': env_KeyPair
            },
            {
                'ParameterKey': 'NISDomainname',
                'ParameterValue': 'hpc'
            },
            {
                'ParameterKey': 'NISServer',
                'ParameterValue': '10.0.10.49'
            },
            {
                'ParameterKey': 'PublicSubnet',
                'ParameterValue': env_PublicSubnet
            },
            {
                'ParameterKey': 'VPC',
                'ParameterValue': env_VPCId
            },
            {
                'ParameterKey': 'SecurityGroupId',
                'ParameterValue': env_SecurityGroupId
            }
        ],
    )
    stackId = response["StackId"]
    logging.info("Stack ID = " + stackId)
    print("reponse event: " + json.dumps(response, indent=2))
    ''' add this item into dynamodb, formation:
    {
        User: // user name
        Id: // uuid session id
        DCVState: // overall state, which will map stack state and instance state
        Bundle: // bundle name
        CFStackName: //CloudFormation stack name
        EC2instanceId: //DCV EC2 instance Id
        CFState: //CF stack state
        EC2State: //DCV EC2 instance state
    }
    '''
    table = dynamodb.Table(env_DCVDynamoDBTable)
    response = table.put_item(
        Item={
            'User': dcv_user,
            'Id': dcv_id,
            # NOTE(review): 'Proversioning' looks like a typo for
            # 'Provisioning' — check consumers before changing the value.
            'DCVState': 'Proversioning',
            'CFStackName': stackName,
            'CFState': 'CREATE_IN_PROGRESS',
            'Bundle': 'ALinux2',
            'InstanceSize': dcv_instanceSize
        }
    )
    reponse_body = {
        'User': dcv_user,
        'Id': dcv_id,
        'DCVState': 'CREATE_IN_PROGRESS'
    }
    print("reponse event: " + json.dumps(reponse_body, indent=2))
return respond(None, reponse_body)
| 30.280702 | 85 | 0.50618 |
4991e8babb4d9edf060d9d0f7a6248f9cfbb07e5 | 8,016 | py | Python | python_modules/dagster/dagster/core/execution/context/compute.py | alexismanuel/dagster | b2cdf8cc985ad48ff203b44b664ff3cb4aded9a3 | [
"Apache-2.0"
] | 4,606 | 2018-06-21T17:45:20.000Z | 2022-03-31T23:39:42.000Z | python_modules/dagster/dagster/core/execution/context/compute.py | alexismanuel/dagster | b2cdf8cc985ad48ff203b44b664ff3cb4aded9a3 | [
"Apache-2.0"
] | 6,221 | 2018-06-12T04:36:01.000Z | 2022-03-31T21:43:05.000Z | python_modules/dagster/dagster/core/execution/context/compute.py | alexismanuel/dagster | b2cdf8cc985ad48ff203b44b664ff3cb4aded9a3 | [
"Apache-2.0"
] | 619 | 2018-08-22T22:43:09.000Z | 2022-03-31T22:48:06.000Z | from abc import ABC, abstractmethod, abstractproperty
from typing import Any, Optional
from dagster import check
from dagster.core.definitions.dependency import Node, NodeHandle
from dagster.core.definitions.mode import ModeDefinition
from dagster.core.definitions.pipeline import PipelineDefinition
from dagster.core.definitions.solid import SolidDefinition
from dagster.core.definitions.step_launcher import StepLauncher
from dagster.core.errors import DagsterInvalidPropertyError
from dagster.core.instance import DagsterInstance
from dagster.core.log_manager import DagsterLogManager
from dagster.core.storage.pipeline_run import PipelineRun
from dagster.utils.forked_pdb import ForkedPdb
from .system import StepExecutionContext
class AbstractComputeExecutionContext(ABC):  # pylint: disable=no-init
    """Base class for solid context implemented by SolidExecutionContext and DagstermillExecutionContext"""
    @abstractmethod
    def has_tag(self, key) -> bool:
        """Implement this method to check if a logging tag is set."""
    @abstractmethod
    def get_tag(self, key: str) -> Optional[str]:
        """Implement this method to get a logging tag."""
    @abstractproperty
    def run_id(self) -> str:
        """The run id for the context."""
    @abstractproperty
    def solid_def(self) -> SolidDefinition:
        """The solid definition corresponding to the execution step being executed."""
    @abstractproperty
    def solid(self) -> Node:
        """The solid corresponding to the execution step being executed."""
    @abstractproperty
    def pipeline_def(self) -> PipelineDefinition:
        """The pipeline being executed."""
    @abstractproperty
    def pipeline_run(self) -> PipelineRun:
        """The PipelineRun object corresponding to the execution."""
    @abstractproperty
    def resources(self) -> Any:
        """Resources available in the execution context."""
    @abstractproperty
    def log(self) -> DagsterLogManager:
        """The log manager available in the execution context."""
    @abstractproperty
    def solid_config(self) -> Any:
        """The parsed config specific to this solid."""
    @property
    def op_config(self) -> Any:
        """Alias for ``solid_config`` under op-based terminology."""
        return self.solid_config
class SolidExecutionContext(AbstractComputeExecutionContext):
    """The ``context`` object that can be made available as the first argument to a solid's compute
    function.
    The context object provides system information such as resources, config, and logging to a
    solid's compute function. Users should not instantiate this object directly.
    Example:
    .. code-block:: python
        @solid
        def hello_world(context: SolidExecutionContext):
            context.log.info("Hello, world!")
    """
    # NOTE(review): _pdb is assigned in __init__ but is not listed in
    # __slots__; this works only because the ABC base class defines no
    # __slots__, so instances still carry a __dict__.
    __slots__ = ["_step_execution_context"]
    def __init__(self, step_execution_context: StepExecutionContext):
        self._step_execution_context = check.inst_param(
            step_execution_context,
            "step_execution_context",
            StepExecutionContext,
        )
        # Lazily created by the `pdb` property on first access.
        self._pdb: Optional[ForkedPdb] = None
    @property
    def solid_config(self) -> Any:
        """The parsed config for this solid, or None when no config was provided."""
        solid_config = self._step_execution_context.resolved_run_config.solids.get(
            str(self.solid_handle)
        )
        return solid_config.config if solid_config else None
    @property
    def pipeline_run(self) -> PipelineRun:
        """PipelineRun: The current pipeline run"""
        return self._step_execution_context.pipeline_run
    @property
    def instance(self) -> DagsterInstance:
        """DagsterInstance: The current Dagster instance"""
        return self._step_execution_context.instance
    @property
    def pdb(self) -> ForkedPdb:
        """dagster.utils.forked_pdb.ForkedPdb: Gives access to pdb debugging from within the solid.
        Example:
        .. code-block:: python
            @solid
            def debug_solid(context):
                context.pdb.set_trace()
        """
        if self._pdb is None:
            self._pdb = ForkedPdb()
        return self._pdb
    @property
    def file_manager(self):
        """Deprecated access to the file manager.
        :meta private:
        """
        raise DagsterInvalidPropertyError(
            "You have attempted to access the file manager which has been moved to resources in 0.10.0. "
            "Please access it via `context.resources.file_manager` instead."
        )
    @property
    def resources(self) -> Any:
        """Resources: The currently available resources."""
        return self._step_execution_context.resources
    @property
    def step_launcher(self) -> Optional[StepLauncher]:
        """Optional[StepLauncher]: The current step launcher, if any."""
        return self._step_execution_context.step_launcher
    @property
    def run_id(self) -> str:
        """str: The id of the current execution's run."""
        return self._step_execution_context.run_id
    @property
    def run_config(self) -> dict:
        """dict: The run config for the current execution."""
        return self._step_execution_context.run_config
    @property
    def pipeline_def(self) -> PipelineDefinition:
        """PipelineDefinition: The currently executing pipeline."""
        return self._step_execution_context.pipeline_def
    @property
    def pipeline_name(self) -> str:
        """str: The name of the currently executing pipeline."""
        return self._step_execution_context.pipeline_name
    @property
    def mode_def(self) -> ModeDefinition:
        """ModeDefinition: The mode of the current execution."""
        return self._step_execution_context.mode_def
    @property
    def log(self) -> DagsterLogManager:
        """DagsterLogManager: The log manager available in the execution context."""
        return self._step_execution_context.log
    @property
    def solid_handle(self) -> NodeHandle:
        """NodeHandle: The current solid's handle.
        :meta private:
        """
        return self._step_execution_context.solid_handle
    @property
    def solid(self) -> Node:
        """Solid: The current solid object.
        :meta private:
        """
        return self._step_execution_context.pipeline_def.get_solid(self.solid_handle)
    @property
    def solid_def(self) -> SolidDefinition:
        """SolidDefinition: The current solid definition."""
        return self._step_execution_context.pipeline_def.get_solid(self.solid_handle).definition
    def has_tag(self, key: str) -> bool:
        """Check if a logging tag is set.
        Args:
            key (str): The tag to check.
        Returns:
            bool: Whether the tag is set.
        """
        return self._step_execution_context.has_tag(key)
    def get_tag(self, key: str) -> Optional[str]:
        """Get a logging tag.
        Args:
            key (tag): The tag to get.
        Returns:
            Optional[str]: The value of the tag, if present.
        """
        return self._step_execution_context.get_tag(key)
    def get_step_execution_context(self) -> StepExecutionContext:
        """Allows advanced users (e.g. framework authors) to punch through to the underlying
        step execution context.
        :meta private:
        Returns:
            StepExecutionContext: The underlying system context.
        """
        return self._step_execution_context
    @property
    def retry_number(self) -> int:
        """
        Which retry attempt is currently executing i.e. 0 for initial attempt, 1 for first retry, etc.
        """
        return self._step_execution_context.previous_attempt_count
    def describe_op(self):
        """Return a human-readable description of the current op/solid step."""
        return self._step_execution_context.describe_op()
    def get_mapping_key(self) -> Optional[str]:
        """
        Which mapping_key this execution is for if downstream of a DynamicOutput, otherwise None.
        """
        return self._step_execution_context.step.get_mapping_key()
class OpExecutionContext(SolidExecutionContext):
    """Op-based alias of ``SolidExecutionContext``; adds no behavior of its own."""
    pass
| 31.435294 | 107 | 0.6749 |
fd58922a43e028e481fb3c6f671df6f2176d75da | 5,068 | py | Python | wagtail/core/templatetags/wagtailcore_tags.py | mukesh5/wagtail | 857a4fe18bb7bb0a43019bc1a678f90bf3fce01b | [
"BSD-3-Clause"
] | null | null | null | wagtail/core/templatetags/wagtailcore_tags.py | mukesh5/wagtail | 857a4fe18bb7bb0a43019bc1a678f90bf3fce01b | [
"BSD-3-Clause"
] | null | null | null | wagtail/core/templatetags/wagtailcore_tags.py | mukesh5/wagtail | 857a4fe18bb7bb0a43019bc1a678f90bf3fce01b | [
"BSD-3-Clause"
] | null | null | null | from django import template
from django.template.defaulttags import token_kwargs
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from wagtail import VERSION, __version__
from wagtail.core.models import Page
from wagtail.core.rich_text import RichText, expand_db_html
from wagtail.utils.version import get_main_version
register = template.Library()
@register.simple_tag(takes_context=True)
def pageurl(context, page):
    """
    Output a page's URL: relative (/foo/bar/) when the page belongs to the
    current request's site, absolute (http://example.com/foo/bar/) otherwise.
    """
    if not hasattr(page, 'relative_url'):
        raise ValueError("pageurl tag expected a Page object, got %r" % page)
    try:
        site = context['request'].site
    except (KeyError, AttributeError):
        # No request (or no site attached to it) in the template context,
        # so the page's own url property is the best available answer.
        return page.url
    # Hand the request through so relative_url can reuse any cached copy of
    # Site.get_site_root_paths() instead of re-fetching it from the
    # database/cache on every call.
    return page.relative_url(site, request=context.get('request'))
@register.simple_tag(takes_context=True)
def slugurl(context, slug):
"""
Returns the URL for the page that has the given slug.
First tries to find a page on the current site. If that fails or a request
is not available in the context, then returns the URL for the first page
that matches the slug on any site.
"""
try:
current_site = context['request'].site
except (KeyError, AttributeError):
# No site object found - allow the fallback below to take place.
page = None
else:
page = Page.objects.in_site(current_site).filter(slug=slug).first()
# If no page is found, fall back to searching the whole tree.
if page is None:
page = Page.objects.filter(slug=slug).first()
if page:
# call pageurl() instead of page.relative_url() here so we get the ``accepts_kwarg`` logic
return pageurl(context, page)
@register.simple_tag
def wagtail_version():
    """Return the full Wagtail version string (e.g. "2.5.1")."""
    return __version__
@register.simple_tag
def wagtail_documentation_path():
    """
    Return the base URL of the Wagtail documentation for this build:
    the versioned docs for a final release, otherwise the "latest" docs.
    """
    # VERSION is a (major, minor, patch, release, num) tuple; only the
    # release level is needed here (the previous tuple-unpack left four
    # unused locals).
    release = VERSION[3]
    if release == 'final':
        return 'https://docs.wagtail.io/en/v%s' % __version__
    return 'https://docs.wagtail.io/en/latest'
@register.simple_tag
def wagtail_release_notes_path():
    """Return the release-notes filename for the current major.minor version."""
    main_version = get_main_version(VERSION)
    return "%s.html" % main_version
@register.filter
def richtext(value):
    """
    Expand a rich-text database representation into frontend HTML, wrapped in
    a ``<div class="rich-text">`` container.

    RichText values are already renderable and pass through unchanged; None is
    treated as empty content; any other non string/bytes value raises TypeError.
    """
    if isinstance(value, RichText):
        # Passing a RichText value through the |richtext filter should have no effect.
        return value
    if value is None:
        html = ''
    elif isinstance(value, (str, bytes)):
        html = expand_db_html(value)
    else:
        raise TypeError("'richtext' template filter received an invalid value; expected string or bytes type, got {}.".format(type(value)))
    return mark_safe('<div class="rich-text">' + html + '</div>')
class IncludeBlockNode(template.Node):
    """Template node backing the ``{% include_block %}`` tag."""

    def __init__(self, block_var, extra_context, use_parent_context):
        self.block_var = block_var
        self.extra_context = extra_context
        self.use_parent_context = use_parent_context

    def render(self, context):
        """Render the resolved value, preferring its render_as_block() hook."""
        try:
            value = self.block_var.resolve(context)
        except template.VariableDoesNotExist:
            # Mirror Django's {% include %}: an unresolvable variable renders as nothing.
            return ''

        if not hasattr(value, 'render_as_block'):
            # Plain values are simply coerced to text.
            return force_text(value)

        new_context = context.flatten() if self.use_parent_context else {}
        if self.extra_context:
            for var_name, var_value in self.extra_context.items():
                new_context[var_name] = var_value.resolve(context)
        return value.render_as_block(context=new_context)
@register.tag
def include_block(parser, token):
    """
    Render the given StreamField content item, passing the current template
    context through when the value exposes a ``render_as_block`` method.

    Syntax: ``{% include_block my_block [with foo=bar] [only] %}``
    """
    tokens = token.split_contents()
    try:
        tag_name = tokens.pop(0)
        block_var_token = tokens.pop(0)
    except IndexError:
        raise template.TemplateSyntaxError("%r tag requires at least one argument" % tag_name)

    block_var = parser.compile_filter(block_var_token)

    extra_context = None
    if tokens and tokens[0] == 'with':
        tokens.pop(0)
        extra_context = token_kwargs(tokens, parser)

    use_parent_context = True
    if tokens and tokens[0] == 'only':
        tokens.pop(0)
        use_parent_context = False

    if tokens:
        raise template.TemplateSyntaxError("Unexpected argument to %r tag: %r" % (tag_name, tokens[0]))

    return IncludeBlockNode(block_var, extra_context, use_parent_context)
| 32.280255 | 143 | 0.677585 |
dfea38cb9efdc6220c4a8b82e93e22d2e98bcd64 | 2,651 | py | Python | other_functions.py | Ariel96cs/SimpleMapReduceFramework | 7cf482f94b1ed1e6436a7b229dfe09c3c0906d56 | [
"MIT"
] | null | null | null | other_functions.py | Ariel96cs/SimpleMapReduceFramework | 7cf482f94b1ed1e6436a7b229dfe09c3c0906d56 | [
"MIT"
] | null | null | null | other_functions.py | Ariel96cs/SimpleMapReduceFramework | 7cf482f94b1ed1e6436a7b229dfe09c3c0906d56 | [
"MIT"
] | null | null | null | # def _get_str_data(self, data_file_name='map_data', data_url=None):
# if data_url is None:
# file_url = "{}/{}".format(self.job_url, data_file_name)
# else:
# file_url = data_url
#
# message = mt.Message()
# message.set_message("GETDATA", {'file_url': file_url,
# 'byte': False})
# data = None
#
# answer = mt.loop_tool(mt.try_to_send_message,mt.send_message, message, self.file_sys_addr,
# lambda: print('JobTracker: ',"File system didn't respond"))
#
# self._verify_if_errors_in_fs(answer)
#
# new_addr = (answer.payload['ip'], answer.payload['port'])
#
# message = mt.Message()
# message.set_message("OK", {})
#
# answer = mt.loop_tool(mt.try_to_send_message,mt.send_message, message, new_addr,
# lambda: print('JobTracker: ',"File SYS didn't respond"))
#
# self._verify_if_errors_in_fs(answer)
#
# if answer.message_name == 'DATA':
# data = answer.payload['data']
#
# is_byte_data = False
# print('JobTracker: ',"this is the data: ", data)
# return data, is_byte_data
#
# def _distribute_text(self, data, workers_status):
# '''
# este metodo por ahora el criterio de distribucion es: coge un texto y pícalo en slices de igual tamaño
# :param data: el dato en si
# :param workers_status: ahora mismo es un diccionario que tiene nada mas que ip y puerto como llaves
# :return: cada uno de los slices
# '''
#
# max_cnt_slices = len(workers_status)
# max_data_slice_len = int(len(data) / max_cnt_slices)
# print('JobTracker: ',max_data_slice_len)
# start_i = 0
# slices = []
# continue_reading = True
# delimiters = self.delimiters
# while continue_reading:
#
# j = start_i + max_data_slice_len
# print('JobTracker: ','tomamos a j:{}'.format(j))
#
# while True:
#
# if j < len(data):
# if data[j] not in delimiters:
# j += 1
# print('JobTracker: ','incremento j ', j)
# else:
# slices.append((start_i, j))
# start_i = j + 1
# print('JobTracker: ','encontre un slice ', slices[-1], data[slices[-1][0]:slices[-1][1]])
# break
# else:
# continue_reading = False
# slices.append((start_i, j))
# print('JobTracker: ','encontre un slice ', slices[-1], data[slices[-1][0]:slices[-1][1]])
# break
#
# return [data[i:j] for i, j in slices] | 37.338028 | 111 | 0.556017 |
99facefe330edd0f5164c8fe263efe6848024a5c | 1,198 | py | Python | patternai/charcounter1.py | uogbuji/techwriting | 5c4610fa508f050a7923cd9bbd28b44566417978 | [
"Apache-2.0"
] | 6 | 2019-04-13T20:19:00.000Z | 2020-09-18T07:54:50.000Z | patternai/charcounter1.py | uogbuji/techwriting | 5c4610fa508f050a7923cd9bbd28b44566417978 | [
"Apache-2.0"
] | 1 | 2019-11-15T14:54:46.000Z | 2019-11-22T04:02:05.000Z | patternai/charcounter1.py | uogbuji/techwriting | 5c4610fa508f050a7923cd9bbd28b44566417978 | [
"Apache-2.0"
] | 4 | 2019-09-06T19:23:12.000Z | 2019-09-06T22:43:43.000Z | import sys
import pprint
def count_chars(input_fp, frequencies, buffer_size=1024):
    '''Read the text content of a file and keep a running count of how often
    each character appears.

    Arguments:
        input_fp -- file pointer with input text
        frequencies -- mapping from each character to its counted frequency,
            updated in place
        buffer_size -- incremental quantity of text to be read at a time,
            in bytes (1024 if not otherwise specified)

    Returns:
        nothing
    '''
    chunk = input_fp.read(buffer_size)
    # An empty read means end-of-file, which ends the loop
    while chunk:
        for ch in chunk:
            # get() supplies 0 the first time a character is seen
            frequencies[ch] = frequencies.get(ch, 0) + 1
        chunk = input_fp.read(buffer_size)
if __name__ == '__main__':
    #Initialize the mapping
    frequencies = {}
    #Pull the input data from standard input (blocks until EOF, e.g. Ctrl-D)
    count_chars(sys.stdin, frequencies)
    #Display the resulting frequencies in readable format
    pprint.pprint(frequencies)
| 30.717949 | 76 | 0.66611 |
c9ecf4407aa22d1eae118aacbc04eb3260cca2fa | 2,914 | py | Python | bumblebee_status/modules/core/memory.py | spxtr/bumblebee-status | 45125f39af8323775aeabf809ae5ae80cfe3ccd9 | [
"MIT"
] | 1,089 | 2016-11-06T10:02:53.000Z | 2022-03-26T12:53:30.000Z | bumblebee_status/modules/core/memory.py | spxtr/bumblebee-status | 45125f39af8323775aeabf809ae5ae80cfe3ccd9 | [
"MIT"
] | 817 | 2016-11-05T05:42:39.000Z | 2022-03-25T19:43:52.000Z | bumblebee_status/modules/core/memory.py | spxtr/bumblebee-status | 45125f39af8323775aeabf809ae5ae80cfe3ccd9 | [
"MIT"
] | 317 | 2016-11-05T00:35:06.000Z | 2022-03-24T13:35:03.000Z | # pylint: disable=C0111,R0903
"""Displays available RAM, total amount of RAM and percentage available.
By default, opens `gnome-system-monitor` on left mouse click.
Requirements:
* gnome-system-monitor for default mouse click action
Parameters:
* memory.warning : Warning threshold in % of memory used (defaults to 80%)
* memory.critical: Critical threshold in % of memory used (defaults to 90%)
* memory.format: Format string (defaults to '{used}/{total} ({percent:05.02f}%)')
* memory.usedonly: Only show the amount of RAM in use (defaults to False). Same as memory.format='{used}'
"""
import re
import core.module
import core.widget
import core.input
import util.format
class Module(core.module.Module):
    """bumblebee-status widget displaying RAM usage parsed from /proc/meminfo."""

    def __init__(self, config, theme):
        super().__init__(config, theme, core.widget.Widget(self.memory_usage))
        # Left mouse click opens a graphical resource monitor.
        core.input.register(
            self, button=core.input.LEFT_MOUSE, cmd="gnome-system-monitor"
        )

    @property
    def _format(self):
        # Format string for the widget text; "usedonly" short-circuits to
        # just the amount of memory in use.
        if util.format.asbool(self.parameter("usedonly", False)):
            return "{used}"
        else:
            return self.parameter("format", "{used}/{total} ({percent:05.02f}%)")

    def memory_usage(self, widget):
        # Render the most recent sample gathered by update().
        return self._format.format(**self._mem)

    def update(self):
        data = self.__parse_meminfo()
        if "MemAvailable" in data:
            available = data["MemAvailable"]
            used = data["MemTotal"] - available
        else:
            # Older kernels lack MemAvailable; approximate by subtracting
            # free memory and reclaimable caches/buffers from the total.
            used = (
                data["MemTotal"]
                - data["MemFree"]
                - data["Buffers"]
                - data["Cached"]
                - data["Slab"]
            )
            # Fix: previously this path still read data["MemAvailable"]
            # below, raising KeyError on exactly the kernels the fallback
            # exists for. Derive the value instead.
            available = data["MemTotal"] - used
        # All values from __parse_meminfo() are in bytes at this point.
        self._mem = {
            "total": util.format.byte(data["MemTotal"]),
            "available": util.format.byte(available),
            "free": util.format.byte(data["MemFree"]),
            "used": util.format.byte(used),
            "percent": float(used) / float(data["MemTotal"]) * 100.0,
        }

    def state(self, widget):
        # Thresholds are percentages of total memory in use.
        if self._mem["percent"] > float(self.parameter("critical", 90)):
            return "critical"
        if self._mem["percent"] > float(self.parameter("warning", 80)):
            return "warning"
        return None

    def __parse_meminfo(self):
        # Parse /proc/meminfo into {field name: size in bytes}.
        data = {}
        with open("/proc/meminfo", "r") as f:
            # https://bugs.python.org/issue32933
            for line in f.readlines():
                tmp = re.split(r"[:\s]+", line)
                value = self.__parse_value(tmp)
                data[tmp[0]] = value
        return data

    def __parse_value(self, data):
        # data is [name, number, unit, ...]; scale the number to bytes.
        # NOTE(review): /proc/meminfo appears to report "kB" only; the
        # "mB"/"gB" branches look defensive/unused -- confirm.
        value = int(data[1])
        if data[2] == "kB":
            value = value * 1024
        if data[2] == "mB":
            value = value * 1024 * 1024
        if data[2] == "gB":
            value = value * 1024 * 1024 * 1024
        return value
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| 30.354167 | 109 | 0.569321 |
cd52d3b903b646c3311dce5bd5f9d51f27e367cd | 129 | py | Python | tests/expr3.py | Smit-create/lpython | 6a731170ba9628eadc0430815c52c175a8c2e767 | [
"MIT"
] | null | null | null | tests/expr3.py | Smit-create/lpython | 6a731170ba9628eadc0430815c52c175a8c2e767 | [
"MIT"
] | null | null | null | tests/expr3.py | Smit-create/lpython | 6a731170ba9628eadc0430815c52c175a8c2e767 | [
"MIT"
] | null | null | null | def test_cast():
a: i32
b: f32
a = 2
b = 4.2
a *= b
b += 1
a = 5
a -= 3.9
a /= b
b = 3/4
| 10.75 | 16 | 0.302326 |
fbf86a69499f3e439a69df68d666fff99080d19b | 244 | py | Python | src/arc_utilities/numpy_conversions.py | ToyotaResearchInstitute/arc_utilities | f15a1dfd9fba5ca83296354bae8b97628895c1c2 | [
"BSD-2-Clause"
] | null | null | null | src/arc_utilities/numpy_conversions.py | ToyotaResearchInstitute/arc_utilities | f15a1dfd9fba5ca83296354bae8b97628895c1c2 | [
"BSD-2-Clause"
] | null | null | null | src/arc_utilities/numpy_conversions.py | ToyotaResearchInstitute/arc_utilities | f15a1dfd9fba5ca83296354bae8b97628895c1c2 | [
"BSD-2-Clause"
] | 1 | 2017-11-06T21:38:23.000Z | 2017-11-06T21:38:23.000Z | import numpy as np
from geometry_msgs.msg import *
def ListPointsToNpArray(points):
    """Pack a sequence of point-like objects (with .x/.y/.z) into a 3xN array,
    one column per point."""
    arr = np.empty(shape=(3, len(points)))
    for ind, point in enumerate(points):
        arr[:, ind] = [point.x, point.y, point.z]
    return arr
f79edc8435293faed732243134ab0e4447f6b5d9 | 14,381 | py | Python | tools/manifest/manifest.py | janvenmans/wpt | c063a8c9acc637e3bd72b719b46e6f2fb627da4a | [
"BSD-3-Clause"
] | 2 | 2020-04-16T18:41:05.000Z | 2021-01-30T04:33:07.000Z | tools/manifest/manifest.py | janvenmans/wpt | c063a8c9acc637e3bd72b719b46e6f2fb627da4a | [
"BSD-3-Clause"
] | null | null | null | tools/manifest/manifest.py | janvenmans/wpt | c063a8c9acc637e3bd72b719b46e6f2fb627da4a | [
"BSD-3-Clause"
] | 1 | 2020-05-04T05:26:16.000Z | 2020-05-04T05:26:16.000Z | import itertools
import json
import os
from copy import deepcopy
from multiprocessing import Pool, cpu_count
from six import PY3, iteritems, itervalues, string_types, binary_type, text_type
from . import vcs
from .item import (ConformanceCheckerTest, ManifestItem, ManualTest, RefTest, SupportFile,
TestharnessTest, VisualTest, WebDriverSpecTest, CrashTest)
from .log import get_logger
from .sourcefile import SourceFile
from .typedata import TypeData
MYPY = False
if MYPY:
# MYPY is set to True when run under Mypy.
from logging import Logger
from typing import Any
from typing import Container
from typing import Dict
from typing import IO
from typing import Iterator
from typing import Iterable
from typing import Optional
from typing import Set
from typing import Text
from typing import Tuple
from typing import Type
from typing import Union
try:
import ujson
fast_json = ujson
except ImportError:
fast_json = json # type: ignore
CURRENT_VERSION = 8 # type: int
class ManifestError(Exception):
    """Base class for errors raised while loading or parsing a manifest."""
    pass
class ManifestVersionMismatch(ManifestError):
    """Raised when a serialized manifest's version differs from CURRENT_VERSION."""
    pass
# Maps each test type name (as stored in the manifest JSON) to the
# ManifestItem subclass that represents it.
item_classes = {"testharness": TestharnessTest,
                "reftest": RefTest,
                "crashtest": CrashTest,
                "manual": ManualTest,
                "wdspec": WebDriverSpecTest,
                "conformancechecker": ConformanceCheckerTest,
                "visual": VisualTest,
                "support": SupportFile}  # type: Dict[str, Type[ManifestItem]]
def compute_manifest_items(source_file):
    # type: (SourceFile) -> Tuple[Tuple[Text, ...], Text, Set[ManifestItem], Text]
    """Extract the (path parts, test type, items, file hash) tuple for one
    source file; kept at module level so it can run in a Pool worker."""
    item_type, items = source_file.manifest_items()
    return (source_file.rel_path_parts,
            item_type,
            set(items),
            source_file.hash)
# Give ManifestData a typed base only when type-checking; at runtime it is a
# plain dict so there is no generic-subscription cost.
if MYPY:
    ManifestDataType = Dict[Any, TypeData]
else:
    ManifestDataType = dict
class ManifestData(ManifestDataType):
    def __init__(self, manifest):
        # type: (Manifest) -> None
        """Dictionary subclass containing a TypeData instance for each test type,
        keyed by type name."""
        # While initialized is False, __setitem__ accepts writes; afterwards
        # the set of keys (one per item class) is frozen.
        self.initialized = False  # type: bool
        for key, value in iteritems(item_classes):
            self[key] = TypeData(manifest, value)
        self.initialized = True
        self.json_obj = None  # type: None

    def __setitem__(self, key, value):
        # type: (str, TypeData) -> None
        # Reject any key mutation once construction has finished.
        if self.initialized:
            raise AttributeError
        dict.__setitem__(self, key, value)

    def paths(self):
        # type: () -> Set[Text]
        """Get a list of all paths containing test items
        without actually constructing all the items."""
        rv = set()  # type: Set[Text]
        for item_data in itervalues(self):
            for item in item_data:
                # item is a tuple of path components; join into an OS path.
                rv.add(os.path.sep.join(item))
        return rv

    def type_by_path(self):
        # type: () -> Dict[Tuple[Text, ...], str]
        """Map each known path (as a tuple of components) to its test type."""
        rv = {}
        for item_type, item_data in iteritems(self):
            for item in item_data:
                rv[item] = item_type
        return rv
class Manifest(object):
    """In-memory representation of the test manifest: per-type collections of
    test items keyed by source path, serializable to/from JSON."""

    def __init__(self, tests_root=None, url_base="/"):
        # type: (Optional[str], Text) -> None
        assert url_base is not None
        self._data = ManifestData(self)  # type: ManifestData
        self.tests_root = tests_root  # type: Optional[str]
        self.url_base = url_base  # type: Text

    def __iter__(self):
        # type: () -> Iterator[Tuple[str, Text, Set[ManifestItem]]]
        # Iterating a manifest yields (type, path, tests) across all types.
        return self.itertypes()

    def itertypes(self, *types):
        # type: (*str) -> Iterator[Tuple[str, Text, Set[ManifestItem]]]
        """Yield (item_type, path, tests) for the given types (all by default)."""
        for item_type in (types or sorted(self._data.keys())):
            for path in self._data[item_type]:
                str_path = os.sep.join(path)
                tests = self._data[item_type][path]
                yield item_type, str_path, tests

    def iterpath(self, path):
        # type: (Text) -> Iterable[ManifestItem]
        """Yield every test item defined by the file at *path*, across types."""
        tpath = tuple(path.split(os.path.sep))
        for type_tests in self._data.values():
            i = type_tests.get(tpath, set())
            assert i is not None
            for test in i:
                yield test

    def iterdir(self, dir_name):
        # type: (Text) -> Iterable[ManifestItem]
        """Yield every test item under the directory *dir_name*."""
        tpath = tuple(dir_name.split(os.path.sep))
        tpath_len = len(tpath)
        for type_tests in self._data.values():
            for path, tests in iteritems(type_tests):
                # Prefix match on path components selects the subtree.
                if path[:tpath_len] == tpath:
                    for test in tests:
                        yield test

    def update(self, tree, parallel=True):
        # type: (Iterable[Tuple[Union[SourceFile, bytes], bool]], bool) -> bool
        """Update the manifest given an iterable of items that make up the updated manifest.

        The iterable must either generate tuples of the form (SourceFile, True) for paths
        that are to be updated, or (path, False) for items that are not to be updated. This
        unusual API is designed as an optimisation meaning that SourceFile items need not be
        constructed in the case we are not updating a path, but the absence of an item from
        the iterator may be used to remove defunct entries from the manifest.

        Returns True when the manifest changed."""
        changed = False

        # Create local variable references to these dicts so we avoid the
        # attribute access in the hot loop below
        data = self._data

        types = data.type_by_path()
        # Start by assuming every known path is gone; paths seen in the tree
        # are removed from this set, so whatever remains really was deleted.
        deleted = set(types)

        to_update = []

        for source_file, update in tree:
            if not update:
                assert isinstance(source_file, (binary_type, text_type))
                deleted.remove(tuple(source_file.split(os.path.sep)))
            else:
                assert not isinstance(source_file, bytes)
                rel_path_parts = source_file.rel_path_parts
                assert isinstance(rel_path_parts, tuple)

                is_new = rel_path_parts not in deleted  # type: bool
                hash_changed = False  # type: bool

                if not is_new:
                    deleted.remove(rel_path_parts)
                    old_type = types[rel_path_parts]
                    old_hash = data[old_type].hashes[rel_path_parts]
                    file_hash = source_file.hash  # type: Text
                    if old_hash != file_hash:
                        hash_changed = True
                        # Drop the stale entry; it is re-added from the fresh
                        # parse results below.
                        del data[old_type][rel_path_parts]

                if is_new or hash_changed:
                    to_update.append(source_file)

        if to_update:
            changed = True

            if parallel and len(to_update) > 25 and cpu_count() > 1:
                # 25 derived experimentally (2020-01) to be approximately
                # the point at which it is quicker to create Pool and
                # parallelize this
                pool = Pool()

                # chunksize set > 1 when more than 10000 tests, because
                # chunking is a net-gain once we get to very large numbers
                # of items (again, experimentally, 2020-01)
                results = pool.imap_unordered(compute_manifest_items,
                                              to_update,
                                              chunksize=max(1, len(to_update) // 10000)
                                              )  # type: Iterator[Tuple[Tuple[Text, ...], Text, Set[ManifestItem], Text]]
            elif PY3:
                results = map(compute_manifest_items, to_update)
            else:
                results = itertools.imap(compute_manifest_items, to_update)

            for result in results:
                rel_path_parts, new_type, manifest_items, file_hash = result
                data[new_type][rel_path_parts] = manifest_items
                data[new_type].hashes[rel_path_parts] = file_hash

        if deleted:
            changed = True
            for rel_path_parts in deleted:
                for test_data in itervalues(data):
                    if rel_path_parts in test_data:
                        del test_data[rel_path_parts]

        return changed

    def to_json(self, caller_owns_obj=True):
        # type: (bool) -> Dict[Text, Any]
        """Dump a manifest into a object which can be serialized as JSON

        If caller_owns_obj is False, then the return value remains
        owned by the manifest; it is _vitally important_ that _no_
        (even read) operation is done on the manifest, as otherwise
        objects within the object graph rooted at the return value can
        be mutated. This essentially makes this mode very dangerous
        and only to be used under extreme care.
        """
        out_items = {
            test_type: type_paths.to_json()
            for test_type, type_paths in iteritems(self._data) if type_paths
        }

        if caller_owns_obj:
            out_items = deepcopy(out_items)

        rv = {"url_base": self.url_base,
              "items": out_items,
              "version": CURRENT_VERSION}  # type: Dict[Text, Any]
        return rv

    @classmethod
    def from_json(cls, tests_root, obj, types=None, callee_owns_obj=False):
        # type: (str, Dict[Text, Any], Optional[Container[Text]], bool) -> Manifest
        """Load a manifest from a JSON object

        This loads a manifest for a given local test_root path from an
        object obj, potentially partially loading it to only load the
        types given by types.

        If callee_owns_obj is True, then ownership of obj transfers
        to this function when called, and the caller must never mutate
        the obj or anything referred to in the object graph rooted at
        obj.
        """
        version = obj.get("version")
        if version != CURRENT_VERSION:
            raise ManifestVersionMismatch

        self = cls(tests_root, url_base=obj.get("url_base", "/"))
        if not hasattr(obj, "items"):
            raise ManifestError

        for test_type, type_paths in iteritems(obj["items"]):
            if test_type not in item_classes:
                raise ManifestError

            if types and test_type not in types:
                continue

            if not callee_owns_obj:
                # Defensive copy so later caller mutations of obj cannot
                # corrupt the manifest's lazily-parsed data.
                type_paths = deepcopy(type_paths)

            self._data[test_type].set_json(type_paths)

        return self
def load(tests_root, manifest, types=None):
    # type: (str, Union[IO[bytes], str], Optional[Container[Text]]) -> Optional[Manifest]
    """Deprecated shim around _load(); prefer load_and_update()."""
    logger = get_logger()
    logger.warning("Prefer load_and_update instead")
    return _load(logger, tests_root, manifest, types)
__load_cache = {} # type: Dict[str, Manifest]
def _load(logger,  # type: Logger
          tests_root,  # type: str
          manifest,  # type: Union[IO[bytes], str]
          types=None,  # type: Optional[Container[Text]]
          allow_cached=True  # type: bool
          ):
    # type: (...) -> Optional[Manifest]
    """Load a Manifest from a path or an open file object.

    Returns None when the file is missing or cannot be parsed (path input
    only); consults/populates the module-level __load_cache keyed by path
    when allow_cached is True.
    """
    # A path string is its own cache key; a file object is keyed by its name.
    manifest_path = (manifest if isinstance(manifest, string_types)
                     else manifest.name)
    if allow_cached and manifest_path in __load_cache:
        return __load_cache[manifest_path]

    if isinstance(manifest, string_types):
        if os.path.exists(manifest):
            logger.debug("Opening manifest at %s" % manifest)
        else:
            logger.debug("Creating new manifest at %s" % manifest)
        try:
            with open(manifest, "rb") as f:
                rv = Manifest.from_json(tests_root,
                                        fast_json.load(f),
                                        types=types,
                                        callee_owns_obj=True)
        except IOError:
            # Missing/unreadable file: signal "no manifest" to the caller.
            return None
        except ValueError:
            # JSON parse failure: treat as corruption rather than crash.
            logger.warning("%r may be corrupted", manifest)
            return None
    else:
        # File-object input: parse errors propagate to the caller here.
        rv = Manifest.from_json(tests_root,
                                fast_json.load(manifest),
                                types=types,
                                callee_owns_obj=True)

    if allow_cached:
        __load_cache[manifest_path] = rv

    return rv
def load_and_update(tests_root,  # type: bytes
                    manifest_path,  # type: bytes
                    url_base,  # type: Text
                    update=True,  # type: bool
                    rebuild=False,  # type: bool
                    metadata_path=None,  # type: Optional[bytes]
                    cache_root=None,  # type: Optional[bytes]
                    working_copy=True,  # type: bool
                    types=None,  # type: Optional[Container[Text]]
                    write_manifest=True,  # type: bool
                    allow_cached=True,  # type: bool
                    parallel=True  # type: bool
                    ):
    # type: (...) -> Manifest
    """Load the manifest at *manifest_path*, rebuilding/updating it as needed,
    and optionally write the result back to disk.

    A rebuild is forced when the stored version or url_base does not match.
    NOTE(review): metadata_path is accepted but not used in this body --
    confirm whether callers rely on it.
    """
    logger = get_logger()

    manifest = None
    if not rebuild:
        try:
            manifest = _load(logger,
                             tests_root,
                             manifest_path,
                             types=types,
                             allow_cached=allow_cached)
        except ManifestVersionMismatch:
            logger.info("Manifest version changed, rebuilding")

        if manifest is not None and manifest.url_base != url_base:
            logger.info("Manifest url base did not match, rebuilding")
            manifest = None

    if manifest is None:
        # No usable manifest on disk: start fresh and force a full update.
        manifest = Manifest(tests_root, url_base)
        rebuild = True
        update = True

    if rebuild or update:
        tree = vcs.get_tree(tests_root, manifest, manifest_path, cache_root,
                            working_copy, rebuild)
        changed = manifest.update(tree, parallel)
        # Only touch the file when something actually changed.
        if write_manifest and changed:
            write(manifest, manifest_path)
        tree.dump_caches()

    return manifest
def write(manifest, manifest_path):
    # type: (Manifest, bytes) -> None
    """Serialize *manifest* as pretty-printed JSON to *manifest_path*,
    creating the parent directory if it does not exist."""
    parent = os.path.dirname(manifest_path)
    if not os.path.exists(parent):
        os.makedirs(parent)
    json_obj = manifest.to_json(caller_owns_obj=True)
    with open(manifest_path, "w") as f:
        # Use ',' instead of the default ', ' separator to prevent trailing
        # spaces: https://docs.python.org/2/library/json.html#json.dump
        json.dump(json_obj, f,
                  sort_keys=True, indent=1, separators=(',', ': '))
        f.write("\n")
| 35.9525 | 117 | 0.59064 |
da69d4c44e0b6052d4dbfe66b869f5d5b90fe639 | 3,350 | py | Python | src/adafruit-circuitpython-bundle-4.x-mpy-20190713/examples/rfm69_simpletest.py | mbaaba/solar_panel | 42059d8c61320494ad1298065dbc50cd9b3bd51e | [
"MIT"
] | 1 | 2020-04-13T16:10:53.000Z | 2020-04-13T16:10:53.000Z | infra/libs-400rc2-20190512/examples/rfm69_simpletest.py | jadudm/feather-isa | b7419e6698c3f64be4d8122656eb8124631ca859 | [
"MIT"
] | null | null | null | infra/libs-400rc2-20190512/examples/rfm69_simpletest.py | jadudm/feather-isa | b7419e6698c3f64be4d8122656eb8124631ca859 | [
"MIT"
] | null | null | null | # Simple example to send a message and then wait indefinitely for messages
# to be received. This uses the default RadioHead compatible GFSK_Rb250_Fd250
# modulation and packet format for the radio.
# Author: Tony DiCola
import board
import busio
import digitalio
import adafruit_rfm69
# Define radio parameters.
RADIO_FREQ_MHZ = 915.0  # Frequency of the radio in Mhz. Must match your
                        # module! Can be a value like 915.0, 433.0, etc.

# Define pins connected to the chip, use these if wiring up the breakout according to the guide:
CS = digitalio.DigitalInOut(board.D5)
RESET = digitalio.DigitalInOut(board.D6)
# Or uncomment and instead use these if using a Feather M0 RFM69 board
# and the appropriate CircuitPython build:
#CS = digitalio.DigitalInOut(board.RFM69_CS)
#RESET = digitalio.DigitalInOut(board.RFM69_RST)

# Define the onboard LED (used below as a "packet received" indicator).
LED = digitalio.DigitalInOut(board.D13)
LED.direction = digitalio.Direction.OUTPUT

# Initialize SPI bus.
spi = busio.SPI(board.SCK, MOSI=board.MOSI, MISO=board.MISO)

# Initialze RFM radio
rfm69 = adafruit_rfm69.RFM69(spi, CS, RESET, RADIO_FREQ_MHZ)

# Optionally set an encryption key (16 byte AES key). MUST match both
# on the transmitter and receiver (or be set to None to disable/the default).
rfm69.encryption_key = b'\x01\x02\x03\x04\x05\x06\x07\x08\x01\x02\x03\x04\x05\x06\x07\x08'

# Print out some chip state:
print('Temperature: {0}C'.format(rfm69.temperature))
print('Frequency: {0}mhz'.format(rfm69.frequency_mhz))
print('Bit rate: {0}kbit/s'.format(rfm69.bitrate/1000))
print('Frequency deviation: {0}hz'.format(rfm69.frequency_deviation))

# Send a packet. Note you can only send a packet up to 60 bytes in length.
# This is a limitation of the radio packet size, so if you need to send larger
# amounts of data you will need to break it into smaller send calls. Each send
# call will wait for the previous one to finish before continuing.
rfm69.send(bytes('Hello world!\r\n',"utf-8"))
print('Sent hello world message!')

# Wait to receive packets. Note that this library can't receive data at a fast
# rate, in fact it can only receive and process one 60 byte packet at a time.
# This means you should only use this for low bandwidth scenarios, like sending
# and receiving a single message at a time.
print('Waiting for packets...')
while True:
    packet = rfm69.receive()
    # Optionally change the receive timeout from its default of 0.5 seconds:
    #packet = rfm69.receive(timeout=5.0)
    # If no packet was received during the timeout then None is returned.
    if packet is None:
        # Packet has not been received; turn the indicator LED off.
        LED.value = False
        print('Received nothing! Listening again...')
    else:
        # Received a packet! Light the indicator LED.
        LED.value = True
        # Print out the raw bytes of the packet:
        print('Received (raw bytes): {0}'.format(packet))
        # And decode to ASCII text and print it too. Note that you always
        # receive raw bytes and need to convert to a text format like ASCII
        # if you intend to do string processing on your data. Make sure the
        # sending side is sending ASCII data before you try to decode!
        packet_text = str(packet, 'ascii')
        print('Received (ASCII): {0}'.format(packet_text))
b10e7ac7baeb3ca610a27258dc048c131589e525 | 91 | py | Python | enthought/graphcanvas/graph_view.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/graphcanvas/graph_view.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/graphcanvas/graph_view.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | null | null | null | # proxy module
from __future__ import absolute_import
from graphcanvas.graph_view import *
| 22.75 | 38 | 0.846154 |
c2eb890202de9ebb96af99e358bf2e900e1dcc2f | 21,549 | py | Python | PythonNetwork/venv/Lib/site-packages/scipy/sparse/linalg/_expm_multiply.py | Moldovandreii/RepetitionCount | b5715b0948b609fde0ce05d45023b7d4cfd635e7 | [
"FTL"
] | 6,989 | 2017-07-18T06:23:18.000Z | 2022-03-31T15:58:36.000Z | PythonNetwork/venv/Lib/site-packages/scipy/sparse/linalg/_expm_multiply.py | Moldovandreii/RepetitionCount | b5715b0948b609fde0ce05d45023b7d4cfd635e7 | [
"FTL"
] | 1,978 | 2017-07-18T09:17:58.000Z | 2022-03-31T14:28:43.000Z | PythonNetwork/venv/Lib/site-packages/scipy/sparse/linalg/_expm_multiply.py | Moldovandreii/RepetitionCount | b5715b0948b609fde0ce05d45023b7d4cfd635e7 | [
"FTL"
] | 1,228 | 2017-07-18T09:03:13.000Z | 2022-03-29T05:57:40.000Z | """Compute the action of the matrix exponential.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
import scipy.linalg
import scipy.sparse.linalg
from scipy.sparse.linalg import aslinearoperator
__all__ = ['expm_multiply']
def _exact_inf_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=1).flat)
else:
return np.linalg.norm(A, np.inf)
def _exact_1_norm(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return max(abs(A).sum(axis=0).flat)
else:
return np.linalg.norm(A, 1)
def _trace(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return A.diagonal().sum()
else:
return np.trace(A)
def _ident_like(A):
# A compatibility function which should eventually disappear.
if scipy.sparse.isspmatrix(A):
return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
dtype=A.dtype, format=A.format)
else:
return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)
def expm_multiply(A, B, start=None, stop=None, num=None, endpoint=None):
"""
Compute the action of the matrix exponential of A on B.
Parameters
----------
A : transposable linear operator
The operator whose exponential is of interest.
B : ndarray
The matrix or vector to be multiplied by the matrix exponential of A.
start : scalar, optional
The starting time point of the sequence.
stop : scalar, optional
The end time point of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced time points, so that `stop` is excluded.
Note that the step size changes when `endpoint` is False.
num : int, optional
Number of time points to use.
endpoint : bool, optional
If True, `stop` is the last time point. Otherwise, it is not included.
Returns
-------
expm_A_B : ndarray
The result of the action :math:`e^{t_k A} B`.
Notes
-----
The optional arguments defining the sequence of evenly spaced time points
are compatible with the arguments of `numpy.linspace`.
The output ndarray shape is somewhat complicated so I explain it here.
The ndim of the output could be either 1, 2, or 3.
It would be 1 if you are computing the expm action on a single vector
at a single time point.
It would be 2 if you are computing the expm action on a vector
at multiple time points, or if you are computing the expm action
on a matrix at a single time point.
It would be 3 if you want the action on a matrix with multiple
columns at multiple time points.
If multiple time points are requested, expm_A_B[0] will always
be the action of the expm at the first time point,
regardless of whether the action is on a vector or a matrix.
References
----------
.. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
"Computing the Action of the Matrix Exponential,
with an Application to Exponential Integrators."
SIAM Journal on Scientific Computing,
33 (2). pp. 488-511. ISSN 1064-8275
http://eprints.ma.man.ac.uk/1591/
.. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
"Computing Matrix Functions."
Acta Numerica,
19. 159-208. ISSN 0962-4929
http://eprints.ma.man.ac.uk/1451/
Examples
--------
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import expm, expm_multiply
>>> A = csc_matrix([[1, 0], [0, 1]])
>>> A.todense()
matrix([[1, 0],
[0, 1]], dtype=int64)
>>> B = np.array([np.exp(-1.), np.exp(-2.)])
>>> B
array([ 0.36787944, 0.13533528])
>>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
array([[ 1. , 0.36787944],
[ 1.64872127, 0.60653066],
[ 2.71828183, 1. ]])
>>> expm(A).dot(B) # Verify 1st timestep
array([ 1. , 0.36787944])
>>> expm(1.5*A).dot(B) # Verify 2nd timestep
array([ 1.64872127, 0.60653066])
>>> expm(2*A).dot(B) # Verify 3rd timestep
array([ 2.71828183, 1. ])
"""
if all(arg is None for arg in (start, stop, num, endpoint)):
X = _expm_multiply_simple(A, B)
else:
X, status = _expm_multiply_interval(A, B, start, stop, num, endpoint)
return X
def _expm_multiply_simple(A, B, t=1.0, balance=False):
    """
    Compute the action of the matrix exponential at a single time point.
    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    t : float
        A time point.
    balance : bool
        Indicates whether or not to apply balancing.
    Returns
    -------
    F : ndarray
        :math:`e^{t A} B`
    Notes
    -----
    This is algorithm (3.2) in Al-Mohy and Higham (2011).
    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('the matrices A and B have incompatible shapes')
    ident = _ident_like(A)
    n = A.shape[0]
    # n0 is the number of columns of B (1 for a vector); it feeds the
    # cost model in _fragment_3_1.
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    # u_d: unit roundoff of IEEE double precision, used as the tolerance.
    u_d = 2**-53
    tol = u_d
    # Shift A by mu*I (mu = mean of the trace); the shift is undone by the
    # exp(t*mu) factor inside _expm_multiply_simple_core.
    mu = _trace(A) / float(n)
    A = A - mu * ident
    A_1_norm = _exact_1_norm(A)
    if t*A_1_norm == 0:
        # Shifted operator is (numerically) zero: one step, degree-0 series.
        m_star, s = 0, 1
    else:
        ell = 2
        norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
        # m_star: Taylor truncation degree; s: number of scaling sub-steps.
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
    return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)
def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
    """
    A helper function.

    Evaluates exp(t*(A + mu*I)) @ B by applying a truncated Taylor series
    of the (shifted) operator s times, each over a sub-interval t/s, with
    an early-termination test based on successive term norms.
    """
    if balance:
        raise NotImplementedError
    if tol is None:
        # Default tolerance: unit roundoff of double precision.
        u_d = 2 ** -53
        tol = u_d
    F = B
    # eta undoes the mu*I trace shift, one sub-step at a time.
    eta = np.exp(t*mu / float(s))
    for i in range(s):
        c1 = _exact_inf_norm(B)
        for j in range(m_star):
            # B becomes the next Taylor term ((t/s)*A)^(j+1)/(j+1)! applied
            # to the sub-step's starting vector; F accumulates the series.
            coeff = t / float(s*(j+1))
            B = coeff * A.dot(B)
            c2 = _exact_inf_norm(B)
            F = F + B
            # Stop once two consecutive terms are negligible relative to F.
            if c1 + c2 <= tol * _exact_inf_norm(F):
                break
            c1 = c2
        F = eta * F
        # The sub-step result seeds the next sub-step.
        B = F
    return F
# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
# _theta[m] is the largest (scaled) 1-norm for which an order-m truncated
# Taylor series still achieves double-precision accuracy.  Note the keys
# are sparse above 30 (steps of 5), so callers must test membership
# (`m in _theta`) rather than assume consecutive keys.
_theta = {
    # The first 30 values are from table A.3 of Computing Matrix Functions.
    1: 2.29e-16,
    2: 2.58e-8,
    3: 1.39e-5,
    4: 3.40e-4,
    5: 2.40e-3,
    6: 9.07e-3,
    7: 2.38e-2,
    8: 5.00e-2,
    9: 8.96e-2,
    10: 1.44e-1,
    # 11
    11: 2.14e-1,
    12: 3.00e-1,
    13: 4.00e-1,
    14: 5.14e-1,
    15: 6.41e-1,
    16: 7.81e-1,
    17: 9.31e-1,
    18: 1.09,
    19: 1.26,
    20: 1.44,
    # 21
    21: 1.62,
    22: 1.82,
    23: 2.01,
    24: 2.22,
    25: 2.43,
    26: 2.64,
    27: 2.86,
    28: 3.08,
    29: 3.31,
    30: 3.54,
    # The rest are from table 3.1 of
    # Computing the Action of the Matrix Exponential.
    35: 4.7,
    40: 6.0,
    45: 7.2,
    50: 8.5,
    55: 9.9,
    }
def _onenormest_matrix_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Efficiently estimate the 1-norm of A^p.
    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.
    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.
    """
    #XXX Eventually turn this into an API function in the _onenormest module,
    #XXX and remove its underscore,
    #XXX but wait until expm_multiply goes into scipy.
    # NOTE(review): t, itmax, compute_v and compute_w are accepted but NOT
    # forwarded to onenormest, so onenormest's own defaults are used and
    # no (v, w) vectors are ever returned.  Callers in this module pass
    # `ell` as the third positional argument, which lands here as `t` and
    # is silently ignored — confirm whether forwarding is intended.
    return scipy.sparse.linalg.onenormest(aslinearoperator(A) ** p)
class LazyOperatorNormInfo:
    """
    Information about an operator is lazily computed.
    The information includes the exact 1-norm of the operator,
    in addition to estimates of 1-norms of powers of the operator.
    This uses the notation of Computing the Action (2011).
    This class is specialized enough to probably not be of general interest
    outside of this module.
    """
    def __init__(self, A, A_1_norm=None, ell=2, scale=1):
        """
        Provide the operator and some norm-related information.
        Parameters
        ----------
        A : linear operator
            The operator of interest.
        A_1_norm : float, optional
            The exact 1-norm of A.
        ell : int, optional
            A technical parameter controlling norm estimation quality.
        scale : int, optional
            If specified, return the norms of scale*A instead of A.
        """
        self._A = A
        # Exact 1-norm; None means "not computed yet" (computed on demand).
        self._A_1_norm = A_1_norm
        self._ell = ell
        # Cache mapping p -> estimate of d_p(A) = ||A^p||^(1/p).
        self._d = {}
        self._scale = scale
    def set_scale(self,scale):
        """
        Set the scale parameter.
        """
        # Note: only the returned norms are rescaled; cached estimates of
        # the unscaled operator are kept and reused.
        self._scale = scale
    def onenorm(self):
        """
        Compute the exact 1-norm.
        """
        if self._A_1_norm is None:
            self._A_1_norm = _exact_1_norm(self._A)
        return self._scale*self._A_1_norm
    def d(self, p):
        """
        Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
        """
        if p not in self._d:
            est = _onenormest_matrix_power(self._A, p, self._ell)
            self._d[p] = est ** (1.0 / p)
        return self._scale*self._d[p]
    def alpha(self, p):
        """
        Lazily compute max(d(p), d(p+1)).
        """
        return max(self.d(p), self.d(p+1))
def _compute_cost_div_m(m, p, norm_info):
    """
    A helper function for computing bounds.
    This is equation (3.10).
    It measures cost in terms of the number of required matrix products.
    Parameters
    ----------
    m : int
        A valid key of _theta.
    p : int
        A matrix power.
    norm_info : LazyOperatorNormInfo
        Information about 1-norms of related operators.
    Returns
    -------
    cost_div_m : int
        Required number of matrix products divided by m.
    """
    # ceil(alpha_p(A) / theta_m): the scaling count s needed so each
    # sub-step stays inside the accuracy region of a degree-m series.
    return int(np.ceil(norm_info.alpha(p) / _theta[m]))
def _compute_p_max(m_max):
"""
Compute the largest positive integer p such that p*(p-1) <= m_max + 1.
Do this in a slightly dumb way, but safe and not too slow.
Parameters
----------
m_max : int
A count related to bounds.
"""
sqrt_m_max = np.sqrt(m_max)
p_low = int(np.floor(sqrt_m_max))
p_high = int(np.ceil(sqrt_m_max + 1))
return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
    """
    A helper function for the _expm_multiply_* functions.
    Parameters
    ----------
    norm_info : LazyOperatorNormInfo
        Information about norms of certain linear operators of interest.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    tol : float
        Expected to be
        :math:`2^{-24}` for single precision or
        :math:`2^{-53}` for double precision.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.
    Returns
    -------
    best_m : int
        Related to bounds for error control.
    best_s : int
        Amount of scaling.
    Notes
    -----
    This is code fragment (3.1) in Al-Mohy and Higham (2011).
    The discussion of default values for m_max and ell
    is given between the definitions of equation (3.11)
    and the definition of equation (3.12).
    """
    if ell < 1:
        raise ValueError('expected ell to be a positive integer')
    best_m = None
    best_s = None
    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
        # Small-norm regime: the cheap bound based on the exact 1-norm
        # suffices; minimize the total work m*s over the theta table.
        for m, theta in _theta.items():
            s = int(np.ceil(norm_info.onenorm() / theta))
            if best_m is None or m * s < best_m * best_s:
                best_m = m
                best_s = s
    else:
        # Equation (3.11).
        # General regime: use the sharper alpha_p(A) based cost estimates,
        # which require 1-norm estimates of powers of A.
        for p in range(2, _compute_p_max(m_max) + 1):
            for m in range(p*(p-1)-1, m_max+1):
                if m in _theta:
                    s = _compute_cost_div_m(m, p, norm_info)
                    if best_m is None or m * s < best_m * best_s:
                        best_m = m
                        best_s = s
    # At least one scaling step is always required.
    best_s = max(best_s, 1)
    return best_m, best_s
def _condition_3_13(A_1_norm, n0, m_max, ell):
    """
    A helper function for the _expm_multiply_* functions.
    Parameters
    ----------
    A_1_norm : float
        The precomputed 1-norm of A.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.
    Returns
    -------
    value : bool
        Indicates whether or not the condition has been met.
    Notes
    -----
    This is condition (3.13) in Al-Mohy and Higham (2011).
    When True, the exact 1-norm alone is good enough for parameter
    selection and the more expensive norm-of-power estimates are skipped.
    """
    # This is the rhs of equation (3.12).
    p_max = _compute_p_max(m_max)
    a = 2 * ell * p_max * (p_max + 3)
    # Evaluate the condition (3.13).
    b = _theta[m_max] / float(n0 * m_max)
    return A_1_norm <= a * b
def _expm_multiply_interval(A, B, start=None, stop=None,
        num=None, endpoint=None, balance=False, status_only=False):
    """
    Compute the action of the matrix exponential at multiple time points.
    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.
    balance : bool
        Indicates whether or not to apply balancing.
    status_only : bool
        A flag that is set to True for some debugging and testing operations.
    Returns
    -------
    F : ndarray
        :math:`e^{t_k A} B`
    status : int
        An integer status for testing and debugging.
    Notes
    -----
    This is algorithm (5.2) in Al-Mohy and Higham (2011).
    There seems to be a typo, where line 15 of the algorithm should be
    moved to line 6.5 (between lines 6 and 7).
    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('the matrices A and B have incompatible shapes')
    ident = _ident_like(A)
    n = A.shape[0]
    # n0: number of columns in B (1 for a vector), used by the cost model.
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    # Tolerance: unit roundoff of double precision.
    u_d = 2**-53
    tol = u_d
    # Trace shift (see _expm_multiply_simple); undone via exp(k*h*mu) later.
    mu = _trace(A) / float(n)
    # Get the linspace samples, attempting to preserve the linspace defaults.
    linspace_kwargs = {'retstep': True}
    if num is not None:
        linspace_kwargs['num'] = num
    if endpoint is not None:
        linspace_kwargs['endpoint'] = endpoint
    samples, step = np.linspace(start, stop, **linspace_kwargs)
    # Convert the linspace output to the notation used by the publication.
    nsamples = len(samples)
    if nsamples < 2:
        raise ValueError('at least two time points are required')
    q = nsamples - 1
    h = step
    t_0 = samples[0]
    t_q = samples[q]
    # Define the output ndarray.
    # Use an ndim=3 shape, such that the last two indices
    # are the ones that may be involved in level 3 BLAS operations.
    X_shape = (nsamples,) + B.shape
    X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
    t = t_q - t_0
    A = A - mu * ident
    A_1_norm = _exact_1_norm(A)
    ell = 2
    norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
    if t*A_1_norm == 0:
        m_star, s = 0, 1
    else:
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
    # Compute the expm action up to the initial time point.
    X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)
    # Compute the expm action at the rest of the time points.
    # Dispatch on how the q remaining steps relate to the scaling count s;
    # the returned status int (0/1/2) identifies the chosen code path.
    if q <= s:
        if status_only:
            return 0
        else:
            return _expm_multiply_interval_core_0(A, X,
                    h, mu, q, norm_info, tol, ell,n0)
    elif not (q % s):
        if status_only:
            return 1
        else:
            return _expm_multiply_interval_core_1(A, X,
                    h, mu, m_star, s, q, tol)
    elif (q % s):
        if status_only:
            return 2
        else:
            return _expm_multiply_interval_core_2(A, X,
                    h, mu, m_star, s, q, tol)
    else:
        raise Exception('internal error')
def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
    """
    A helper function, for the case q <= s.

    Each of the q time steps is computed independently with
    _expm_multiply_simple_core, using parameters re-derived for the
    smaller sub-interval of size t/q.
    """
    # Compute the new values of m_star and s which should be applied
    # over intervals of size t/q
    if norm_info.onenorm() == 0:
        m_star, s = 0, 1
    else:
        # Temporarily rescale the lazily-cached norms for the t/q interval.
        norm_info.set_scale(1./q)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
        norm_info.set_scale(1)
    for k in range(q):
        # Advance one step of size h from the previously computed sample.
        X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
    return X, 0
def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s == 0.

    The q time steps are covered in s strides of d = q // s small steps.
    Within one stride the terms K[p] = ((h*A)^p / p!) @ Z depend only on
    the stride's starting sample Z, so each K[p] is computed once and
    reused for every small step k; ``high_p`` tracks the highest term
    computed so far.
    """
    d = q // s
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(s):
        Z = X[i*d]
        K[0] = Z
        high_p = 0
        for k in range(1, d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p > high_p:
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    # Record that K[p] is now available; it is invariant in
                    # k, so later small steps reuse it.  (Mirrors the
                    # caching in _expm_multiply_interval_core_2; previously
                    # high_p was never advanced, making the guard dead and
                    # recomputing every term for each k.)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                # Early termination once two consecutive contributions are
                # negligible relative to the accumulated result.
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            # Undo the mu*I trace shift for this time point.
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 1
def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s > 0.

    Like _expm_multiply_interval_core_1, but the last stride is shorter:
    the q steps split into j full strides of d = q // s steps plus a
    remainder of r steps.
    """
    d = q // s
    j = q // d
    r = q - d * j
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(j + 1):
        Z = X[i*d]
        K[0] = Z
        high_p = 0
        # Full strides take d small steps; the final stride takes r.
        if i < j:
            effective_d = d
        else:
            effective_d = r
        for k in range(1, effective_d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                # K[p] depends only on Z, so compute each term once per
                # stride and reuse it for every small step k.
                if p == high_p + 1:
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            # Undo the mu*I trace shift for this time point.
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 2 | 30.609375 | 80 | 0.575943 |
| 30.609375 | 80 | 0.575943 |
befde76b358b87eaef2855cf6086659fa3e7cfa0 | 15,591 | py | Python | rlcard/agents/human_agents/gin_rummy_human_agent/gui_gin_rummy/game_canvas_post_doing_action.py | randombenj/rlcard | 0948035d26e1b619c068360326f12451f5d28f8b | [
"MIT"
] | 1,735 | 2019-09-05T12:49:43.000Z | 2022-03-30T12:02:07.000Z | rlcard/agents/human_agents/gin_rummy_human_agent/gui_gin_rummy/game_canvas_post_doing_action.py | randombenj/rlcard | 0948035d26e1b619c068360326f12451f5d28f8b | [
"MIT"
] | 197 | 2019-09-14T05:59:02.000Z | 2022-03-03T19:21:19.000Z | rlcard/agents/human_agents/gin_rummy_human_agent/gui_gin_rummy/game_canvas_post_doing_action.py | randombenj/rlcard | 0948035d26e1b619c068360326f12451f5d28f8b | [
"MIT"
] | 476 | 2019-09-13T15:25:32.000Z | 2022-03-29T01:41:29.000Z | '''
Project: Gui Gin Rummy
File name: game_canvas_post_doing_action.py
Author: William Hale
Date created: 3/14/2020
'''
# from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .game_canvas import GameCanvas
from typing import List, Tuple
from rlcard.games.base import Card
import rlcard.games.gin_rummy.judge as judge
import rlcard.games.gin_rummy.utils.utils as gin_rummy_utils
from rlcard.games.gin_rummy.utils.melding import get_best_meld_clusters
from . import configurations
from . import status_messaging
from . import utils
from .configurations import DRAW_CARD_ACTION_ID, PICK_UP_DISCARD_ACTION_ID
from .configurations import DISCARD_ACTION_ID, KNOCK_ACTION_ID, GIN_ACTION_ID
class GameCanvasPostDoingAction(object):
    """
    Performs the GUI follow-up after a player action has been decided:
    animates the affected card item, re-tags it into its new pile, and
    then submits the corresponding action id(s) to the
    game_canvas_updater via after_idle.
    """
    def __init__(self, game_canvas: 'GameCanvas'):
        # Back-reference to the canvas whose card items are animated.
        self.game_canvas = game_canvas
    def post_do_get_card_action(self,
                                player_id: int,
                                drawn_card_item_id: int,
                                hit_item_id: int,
                                drawn_card_item_tag: int):
        """Finish a draw (from stock or discard pile, per the item tag)."""
        game_canvas = self.game_canvas
        drawn_card_id = game_canvas.getter.get_card_id(card_item_id=drawn_card_item_id)
        drawn_card = game_canvas.card_items[drawn_card_id]
        if not drawn_card.is_face_up() and game_canvas.is_treating_as_human(player_id=player_id):
            drawn_card.set_card_id_face_up(face_up=True)
        selected_held_pile_item_ids = game_canvas.getter.get_selected_held_pile_item_ids(player_id=player_id)
        if selected_held_pile_item_ids:
            # Cards were pre-selected: drop them together with the drawn
            # card onto the hit target, no movement animation.
            item_ids = selected_held_pile_item_ids + [drawn_card_item_id]
            utils.drop_item_ids(item_ids=item_ids, on_item_id=hit_item_id,
                                player_id=player_id, game_canvas=game_canvas)
            status_messaging.show_put_card_message(player_id=player_id, game_canvas=game_canvas)
            if drawn_card_item_tag == configurations.DISCARD_PILE_TAG:
                action = PICK_UP_DISCARD_ACTION_ID
            else:
                action = DRAW_CARD_ACTION_ID
            game_canvas.after_idle(game_canvas.game_canvas_updater.did_perform_actions, [action])
        else:
            # No selection: animate the drawn card into the held pile,
            # then re-tag it and submit the action from the completion.
            to_location = game_canvas.bbox(hit_item_id)[:2]
            dx = game_canvas.held_pile_tab
            to_location = utils.translated_by(dx=dx, dy=0, location=to_location)
            def loop_completion():
                # Runs after the animation finishes.
                game_canvas.dtag(drawn_card_item_id, configurations.DRAWN_TAG)
                game_canvas.dtag(drawn_card_item_id, drawn_card_item_tag)
                game_canvas.addtag_withtag(game_canvas.held_pile_tags[player_id], drawn_card_item_id)
                if not game_canvas.is_treating_as_human(player_id=player_id):
                    utils.set_card_item_id_face_up(card_item_id=drawn_card_item_id,
                                                   face_up=False,
                                                   game_canvas=game_canvas)
                utils.held_pile_insert(card_item_id=drawn_card_item_id, above_hit_item_id=hit_item_id,
                                       player_id=player_id, game_canvas=game_canvas)
                if drawn_card_item_tag == configurations.DISCARD_PILE_TAG:
                    action = PICK_UP_DISCARD_ACTION_ID
                else:
                    action = DRAW_CARD_ACTION_ID
                game_canvas.after_idle(game_canvas.game_canvas_updater.did_perform_actions, [action])
            self._move_loop(drawn_card_item_id, to_location, completion=loop_completion)
    def post_do_discard_action(self, player_id: int, selected_held_pile_item_id: int):
        """Animate the selected card onto the discard pile and submit DISCARD."""
        # The selected held_pile_item is discarded.
        # The remaining held_pile_items are fanned.
        game_canvas = self.game_canvas
        top_discard_pile_item_id = game_canvas.getter.get_top_discard_pile_item_id()
        if top_discard_pile_item_id is None:
            to_location = game_canvas.discard_pile_anchor
        else:
            dx = game_canvas.discard_pile_tab
            to_location = game_canvas.coords(top_discard_pile_item_id)
            to_location = utils.translated_by(dx=dx, dy=0, location=to_location)
        utils.set_card_item_id_face_up(card_item_id=selected_held_pile_item_id, face_up=True, game_canvas=game_canvas)
        if not game_canvas.query.is_human(player_id=player_id): # turn card over immediately
            selected_card_id = game_canvas.getter.get_card_id(selected_held_pile_item_id)
            utils.set_card_id_face_up(selected_card_id, face_up=True, game_canvas=game_canvas)
        def loop_completion():
            # Runs after the animation: re-tag into discard pile, re-fan
            # the hand, then submit the discard action for this card.
            game_canvas.dtag(selected_held_pile_item_id, configurations.SELECTED_TAG)
            game_canvas.dtag(selected_held_pile_item_id, configurations.JOGGED_TAG)
            game_canvas.dtag(selected_held_pile_item_id, game_canvas.held_pile_tags[player_id])
            game_canvas.addtag_withtag(configurations.DISCARD_PILE_TAG, selected_held_pile_item_id)
            utils.fan_held_pile(player_id=player_id, game_canvas=game_canvas)
            card_id = game_canvas.card_item_ids.index(selected_held_pile_item_id)
            action = DISCARD_ACTION_ID + card_id
            game_canvas.after_idle(game_canvas.game_canvas_updater.did_perform_actions, [action])
        self._move_loop(selected_held_pile_item_id, to_location, completion=loop_completion)
    def post_do_discard_card_drawn_from_stock_pile_action(self, top_stock_pile_item_id: int):
        """Animate a just-drawn stock card straight onto the discard pile."""
        game_canvas = self.game_canvas
        top_discard_pile_item_id = game_canvas.getter.get_top_discard_pile_item_id()
        dx = game_canvas.discard_pile_tab
        to_location = game_canvas.coords(top_discard_pile_item_id)
        to_location = utils.translated_by(dx=dx, dy=0, location=to_location)
        def loop_completion():
            # Two actions are submitted: the draw and the discard.
            game_canvas.dtag(top_stock_pile_item_id, configurations.DRAWN_TAG)
            game_canvas.dtag(top_stock_pile_item_id, configurations.STOCK_PILE_TAG)
            game_canvas.addtag_withtag(configurations.DISCARD_PILE_TAG, top_stock_pile_item_id)
            card_id = game_canvas.card_item_ids.index(top_stock_pile_item_id)
            actions = [DRAW_CARD_ACTION_ID, DISCARD_ACTION_ID + card_id]
            game_canvas.after_idle(game_canvas.game_canvas_updater.did_perform_actions, actions)
        self._move_loop(top_stock_pile_item_id, to_location, completion=loop_completion)
    def post_do_knock_action(self, selected_held_pile_item_id: int):
        """Animate the knock discard, reveal melds, and submit KNOCK."""
        # The selected held_pile_item is discarded.
        # The remaining held_pile_items are fanned.
        game_canvas = self.game_canvas
        game_canvas.going_out_button.place_forget()
        current_player_id = game_canvas.current_player_id
        top_discard_pile_item_id = game_canvas.getter.get_top_discard_pile_item_id()
        if top_discard_pile_item_id is None:
            to_location = game_canvas.discard_pile_anchor
        else:
            dx = game_canvas.discard_pile_tab
            to_location = game_canvas.coords(top_discard_pile_item_id)
            to_location = utils.translated_by(dx=dx, dy=0, location=to_location)
        utils.set_card_item_id_face_up(card_item_id=selected_held_pile_item_id, face_up=True, game_canvas=game_canvas)
        def loop_completion():
            game_canvas.dtag(selected_held_pile_item_id, configurations.SELECTED_TAG)
            game_canvas.dtag(selected_held_pile_item_id, configurations.JOGGED_TAG)
            game_canvas.dtag(selected_held_pile_item_id, game_canvas.held_pile_tags[current_player_id])
            game_canvas.addtag_withtag(configurations.DISCARD_PILE_TAG, selected_held_pile_item_id)
            utils.fan_held_pile(player_id=current_player_id, game_canvas=game_canvas)
            # show meld piles for both players
            self._show_meld_piles()
            # submit action to game_canvas_updater
            card_id = game_canvas.card_item_ids.index(selected_held_pile_item_id)
            action = KNOCK_ACTION_ID + card_id
            game_canvas.after_idle(game_canvas.game_canvas_updater.did_perform_actions, [action])
        self._move_loop(selected_held_pile_item_id, to_location, completion=loop_completion)
    def post_do_gin_action(self):
        """Pick the gin card via the judge, animate it out, and submit GIN."""
        game_canvas = self.game_canvas
        game_canvas.going_out_button.place_forget()
        current_player_id = game_canvas.current_player_id
        current_hand = game_canvas.getter.get_held_pile_cards(player_id=current_player_id)
        going_out_deadwood_count = self.game_canvas.game_canvas_updater.env.game.settings.going_out_deadwood_count
        _, gin_cards = judge.get_going_out_cards(hand=current_hand, going_out_deadwood_count=going_out_deadwood_count)
        # The first legal gin card is the one discarded.
        card = gin_cards[0]
        card_id = gin_rummy_utils.get_card_id(card=card)
        card_item = game_canvas.card_items[card_id]
        selected_held_pile_item_id = card_item.item_id
        top_discard_pile_item_id = game_canvas.getter.get_top_discard_pile_item_id()
        if top_discard_pile_item_id is None:
            to_location = game_canvas.discard_pile_anchor
        else:
            dx = game_canvas.discard_pile_tab
            to_location = game_canvas.coords(top_discard_pile_item_id)
            to_location = utils.translated_by(dx=dx, dy=0, location=to_location)
        utils.set_card_item_id_face_up(card_item_id=selected_held_pile_item_id, face_up=True, game_canvas=game_canvas)
        def loop_completion():
            game_canvas.dtag(selected_held_pile_item_id, configurations.SELECTED_TAG)
            game_canvas.dtag(selected_held_pile_item_id, configurations.JOGGED_TAG)
            game_canvas.dtag(selected_held_pile_item_id, game_canvas.held_pile_tags[current_player_id])
            game_canvas.addtag_withtag(configurations.DISCARD_PILE_TAG, selected_held_pile_item_id)
            utils.fan_held_pile(player_id=current_player_id, game_canvas=game_canvas)
            # show meld piles for both players
            self._show_meld_piles()
            # submit action to game_canvas_updater
            action = GIN_ACTION_ID
            game_canvas.after_idle(game_canvas.game_canvas_updater.did_perform_actions, [action])
        self._move_loop(selected_held_pile_item_id, to_location, completion=loop_completion)
    def post_do_declare_dead_hand_action(self, player_id: int):
        """Hide the going-out buttons and reveal both players' melds."""
        game_canvas = self.game_canvas
        status_messaging.show_epilog_message_on_declare_dead_hand(game_canvas=game_canvas)
        game_canvas.going_out_button.place_forget()
        game_canvas.dead_hand_button.place_forget()
        # show meld piles for both players
        self._show_meld_piles()
    # =========================================
    #   Private methods
    # =========================================
    def _show_meld_piles(self):
        """Lay down the best meld cluster for both players."""
        game_canvas = self.game_canvas
        current_player_id = game_canvas.current_player_id
        opponent_player_id = (current_player_id + 1) % 2
        utils.fan_held_pile(player_id=current_player_id, game_canvas=game_canvas)
        # do current_player_id melding
        best_meld_cluster = self._get_best_meld_cluster(player_id=current_player_id)
        self.put_down_meld_cluster(best_meld_cluster, player_id=current_player_id)
        # do opponent_player_id melding
        opponent_best_meld_cluster = self._get_best_meld_cluster(player_id=opponent_player_id)
        self.put_down_meld_cluster(opponent_best_meld_cluster, player_id=opponent_player_id)
    def _get_best_meld_cluster(self, player_id: int) -> List[List[Card]]:
        """Return the first best meld cluster for the player's hand ([] if none)."""
        game_canvas = self.game_canvas
        hand = game_canvas.getter.get_held_pile_cards(player_id=player_id)
        best_meld_clusters = get_best_meld_clusters(hand=hand)
        best_meld_cluster = [] if not best_meld_clusters else best_meld_clusters[0]
        return best_meld_cluster
    def put_down_meld_cluster(self, meld_cluster, player_id: int):
        """Place each meld pile in the player's pane, staggered down-right."""
        game_canvas = self.game_canvas
        card_width = game_canvas.card_width
        card_height = game_canvas.card_height
        player_pane = game_canvas.player_panes[player_id]
        y_tab = int(card_height * 0.15)
        anchor_x = int(card_width * 0.5) # type: int
        anchor_y = int(game_canvas.coords(player_pane.item_id)[1]) + y_tab # type: int
        for meld_pile in meld_cluster:
            self.put_down_meld_pile(meld_pile, anchor=(anchor_x, anchor_y), player_id=player_id)
            utils.fan_held_pile(player_id=player_id, game_canvas=game_canvas)
            anchor_x += len(meld_pile) * game_canvas.held_pile_tab
            anchor_y += y_tab
        held_pile_item_ids = game_canvas.getter.get_held_pile_item_ids(player_id=player_id)
        if not game_canvas.query.is_human(player_id):
            # sort deadwood cards of computer player
            held_pile_cards = game_canvas.getter.get_held_pile_cards(player_id=player_id)
            card_ids = [gin_rummy_utils.get_card_id(card) for card in held_pile_cards]
            sorted_card_ids = sorted(card_ids, reverse=True, key=utils.gin_rummy_sort_order_id)
            for sorted_card_id in sorted_card_ids:
                card_item_id = game_canvas.card_item_ids[sorted_card_id]
                game_canvas.tag_raise(card_item_id)
            utils.fan_held_pile(player_id, game_canvas=game_canvas)
            # face up deadwood cards of computer player
            for held_pile_item_id in held_pile_item_ids:
                utils.set_card_item_id_face_up(card_item_id=held_pile_item_id, face_up=True, game_canvas=game_canvas)
    def put_down_meld_pile(self, meld_pile: List[Card], anchor: Tuple[int, int], player_id: int):
        """Fan one meld pile face up at the given anchor, removed from the hand."""
        game_canvas = self.game_canvas
        held_pile_tag = game_canvas.held_pile_tags[player_id]
        x, y = anchor
        card_ids = [gin_rummy_utils.get_card_id(card=card) for card in meld_pile]
        sorted_card_ids = sorted(card_ids, reverse=True, key=utils.gin_rummy_sort_order_id)
        for sorted_card_id in sorted_card_ids:
            card_item_id = game_canvas.card_item_ids[sorted_card_id]
            game_canvas.tag_raise(card_item_id)
            game_canvas.dtag(card_item_id, held_pile_tag)
            utils.move_to(card_item_id, x, y, game_canvas)
            utils.set_card_item_id_face_up(card_item_id, face_up=True, game_canvas=game_canvas)
            x += game_canvas.held_pile_tab
    # =========================================
    #   Utility Methods
    # =========================================
    def _move_loop(self, item_id, to_location, index=0, dx=0, dy=0, completion=None):
        """Animate item_id toward to_location in step_size moves via after()."""
        game_canvas = self.game_canvas
        step_size = 10
        time_in_milli_seconds = 10 # need to figure out the relationship between step_size and time_in_milli_seconds
        if index == 0:
            # First call: compute the per-step displacement.
            item_location = game_canvas.coords(item_id)
            dx = (to_location[0] - item_location[0]) / step_size
            dy = (to_location[1] - item_location[1]) / step_size
        if index == 1: # Note: need to put 1 rather than 0. Should I just do tag_raise every iteration ???
            game_canvas.tag_raise(item_id)
            game_canvas.update_idletasks() # Note: is this necessary ??? Probably not.
        game_canvas.move(item_id, dx, dy)
        index += 1
        if index < step_size:
            # Re-schedule self until all steps are done.
            game_canvas.after(time_in_milli_seconds, self._move_loop, item_id, to_location, index, dx, dy, completion)
        else:
            # Snap exactly onto the target, then run the completion callback.
            game_canvas.coords(item_id, to_location)
            if completion:
                completion() | 54.324042 | 118 | 0.70701 |
| 54.324042 | 118 | 0.70701 |
27d8b1d588dccab4aad8ef551ae24c87eb66e95f | 2,975 | py | Python | dhtxmpp_componentd/dhtxmpp_componentd.py | pendleto/dhtxmpp_component | f7b5f018b74d5d1bf34d175b6766677de9eaa987 | [
"MIT"
] | 3 | 2018-10-24T07:07:44.000Z | 2021-12-24T20:25:24.000Z | dhtxmpp_componentd/dhtxmpp_componentd.py | pendleto/dhtxmpp_component | f7b5f018b74d5d1bf34d175b6766677de9eaa987 | [
"MIT"
] | null | null | null | dhtxmpp_componentd/dhtxmpp_componentd.py | pendleto/dhtxmpp_component | f7b5f018b74d5d1bf34d175b6766677de9eaa987 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import logging
from optparse import OptionParser
from dhtxmpp_component.component import dhtxmpp_component
class dhtxmpp_componentd:
    """Daemon wrapper that configures and runs a dhtxmpp_component."""
    def run(self, opts):
        """
        Build the XMPP component from the parsed CLI options, register
        its plugins and event handlers, connect, and block until the
        component's run loop finishes.

        NOTE(review): opts.port comes from optparse as a string — confirm
        dhtxmpp_component accepts a string port or convert to int here.
        """
        # Setup the dhtxmpp_component and register plugins. Note that while plugins
        # may have interdependencies, the order in which you register them does
        # not matter.
        xmpp = dhtxmpp_component(opts.jid, opts.password, opts.server, opts.port, opts.bootstrapip)
        xmpp.registerPlugin('xep_0030') # Service Discovery
        xmpp.registerPlugin('xep_0004') # Data Forms
        xmpp.registerPlugin('xep_0060') # PubSub
        xmpp.registerPlugin('xep_0199') # XMPP Ping
        xmpp.registerPlugin('xep_0184') # receipts
        # Automatically accept/request presence subscriptions.
        xmpp.auto_authorize = True
        xmpp.auto_subscribe = True
        xmpp.register_event_handlers()
        # Connect to the XMPP server and start processing XMPP stanzas.
        logging.debug("Connecting to XMPP server...")
        if xmpp.connect():
            logging.debug("Connected")
            xmpp.use_message_ids = True
            xmpp.use_presence_ids = True
            # Process stanzas in the background; run() blocks until done.
            xmpp.process(block=False)
            xmpp.run()
            xmpp.disconnect()
            logging.debug("Done")
        else:
            logging.debug("Unable to connect.")
def main():
    """
    Parse command-line options, configure file logging, and run the
    daemon; exits the process with status 0 when the daemon returns.

    NOTE(review): optparse has been deprecated since Python 2.7/3.2 in
    favour of argparse — consider migrating while keeping the same flags.
    """
    # Setup the command line arguments.
    optp = OptionParser()
    # Output verbosity options.
    optp.add_option('-q', '--quiet', help='set logging to ERROR',
                    action='store_const', dest='loglevel',
                    const=logging.ERROR, default=logging.INFO)
    optp.add_option('-d', '--debug', help='set logging to DEBUG',
                    action='store_const', dest='loglevel',
                    const=logging.DEBUG, default=logging.INFO)
    optp.add_option('-v', '--verbose', help='set logging to COMM',
                    action='store_const', dest='loglevel',
                    const=5, default=logging.INFO)
    # JID and password options.
    optp.add_option("-j", "--jid", dest="jid",
                    help="JID to use")
    optp.add_option("-p", "--password", dest="password",
                    help="password to use")
    optp.add_option("-s", "--server", dest="server",
                    help="server to connect to")
    optp.add_option("-b", "--bootstrapip", dest="bootstrapip",
                    help="bootstrap ip to connect to")
    optp.add_option("-P", "--port", dest="port",
                    help="port to connect to")
    opts, args = optp.parse_args()
    # Setup logging.
    # Log file is truncated (filemode='w') on each daemon start.
    logging.basicConfig(level=opts.loglevel,
                        format='%(pathname)s %(asctime)s %(levelname)s %(message)s',
                        filename='/var/log/dhtxmpp_componentd/dhtxmpp_componentd.log',
                        filemode='w',
                        )
    daemon = dhtxmpp_componentd()
    daemon.run(opts)
    sys.exit(0)
main()
| 36.280488 | 99 | 0.590252 |
d5f11f0054abb2926a4c0c789681ad79a83a425b | 3,300 | py | Python | alexber/utils/importer.py | AndreyRub/AlexBerUtils | b2d79c98c083533534470b62632a36dfd730be48 | [
"BSD-2-Clause"
] | null | null | null | alexber/utils/importer.py | AndreyRub/AlexBerUtils | b2d79c98c083533534470b62632a36dfd730be48 | [
"BSD-2-Clause"
] | 8 | 2019-12-15T05:13:27.000Z | 2021-02-16T20:03:40.000Z | alexber/utils/importer.py | AndreyRub/AlexBerUtils | b2d79c98c083533534470b62632a36dfd730be48 | [
"BSD-2-Clause"
] | 2 | 2019-12-12T03:52:37.000Z | 2021-05-21T21:14:34.000Z | """
See here https://medium.com/analytics-vidhya/how-to-write-easily-customizable-code-8b00b43406b2 for documentation.
"""
import importlib
import logging
import inspect
import pkgutil
logger = logging.getLogger(__name__)
#adopted from scrapy
def _walk_modules(path):
"""
Loads a module and all its submodules from the given module path and
returns them. If *any* module throws an exception while importing, that
exception is thrown back.
"""
# Support for namespace packages is added. See PEP 420.
# Namespace packages are a mechanism for splitting a single Python package across multiple directories on disk.
# When interpreted encounter with non-empty __path__ attribute it adds modules found in those locations
# to the current package.
mods = []
mod = importlib.import_module(path)
mods.append(mod)
if hasattr(mod, '__path__'):
for _, subpath, ispkg in pkgutil.iter_modules(mod.__path__):
fullpath = path + '.' + subpath
if ispkg:
mods += _walk_modules(fullpath)
else:
submod = importlib.import_module(fullpath)
mods.append(submod)
return mods
#adopted from mock.mock._dot_lookup
def _dot_lookup(thing, comp, import_path):
'''
Recursively import packages (if needed) by dotes.
'''
try:
return getattr(thing, comp)
except AttributeError:
importlib.import_module(import_path)
_walk_modules(import_path)
return getattr(thing, comp)
#adopted from mock.mock._importer
def importer(target):
    '''
    Convert a dotted-path string into the Python construct it names.

    This method recursively imports packages (if needed) while following
    dot notation from left to right. If a component already exists in its
    package (is defined and imported) it is used as-is, otherwise it is
    imported. This method supports PEP 420 (implicit namespace packages).

    Note: only compile-time constructs are supported.
    Note: no instances will be returned from here, only classes/functions/modules.

    :param target: dotted string to look up, e.g. ``"package.module.ClassName"``
    :return: the resolved function/module/class, etc.
    '''
    components = target.split('.')
    import_path = components.pop(0)
    # Import the root package eagerly, then all of its submodules.
    thing = importlib.import_module(import_path)
    _walk_modules(import_path)
    # Walk the remaining dotted components left-to-right, importing lazily.
    for comp in components:
        import_path += f".{comp}"
        thing = _dot_lookup(thing, comp, import_path)
    return thing
def new_instance(target, *args, **kwargs):
    '''
    Resolve *target* (a dotted-path string) and, when it names a class,
    instantiate it.

    Packages along the dotted path are imported recursively as needed.
    A target that does not refer to a class is returned unchanged; for a
    class, ``args``/``kwargs`` are forwarded to the usual construction
    hooks (``__new__()`` / ``__init__()`` / ``__init_subclass__()``).

    :param target: dotted string to look up
    :param args: positional constructor arguments
    :param kwargs: keyword constructor arguments
    :return: a new instance for classes, otherwise the resolved object
    '''
    resolved = importer(target)
    if inspect.isclass(resolved):
        return resolved(*args, **kwargs)
    return resolved
db597cf6e57ff546b3806df5340ecd3c2ddcbe9d | 6,813 | py | Python | tensorboard/plugins/npmi/npmi_plugin_test.py | gbaned/tensorboard | 63bc56bbe36d22fc8f2993e38f53efb6722e5edf | [
"Apache-2.0"
] | 1 | 2021-07-21T15:54:17.000Z | 2021-07-21T15:54:17.000Z | tensorboard/plugins/npmi/npmi_plugin_test.py | rmothukuru/tensorboard | 63bc56bbe36d22fc8f2993e38f53efb6722e5edf | [
"Apache-2.0"
] | null | null | null | tensorboard/plugins/npmi/npmi_plugin_test.py | rmothukuru/tensorboard | 63bc56bbe36d22fc8f2993e38f53efb6722e5edf | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for the nPMI plugin."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections.abc
import os
import numpy as np
import tensorflow as tf
from tensorboard import context
from tensorboard.backend.event_processing import data_provider
from tensorboard.backend.event_processing import (
plugin_event_multiplexer as event_multiplexer,
)
from tensorboard.plugins import base_plugin
from tensorboard.plugins.npmi import npmi_plugin
from tensorboard.plugins.npmi import summary
tf.compat.v1.enable_v2_behavior()
class NpmiPluginTest(tf.test.TestCase):
    """Integration tests: write nPMI summaries with the summary API, then
    read them back through the plugin's ``*_impl`` endpoints."""

    def setUp(self):
        # Fresh temporary log directory for every test case.
        self.logdir = self.get_temp_dir()

    def create_plugin(self, generate_testdata=True):
        # Build an NpmiPlugin wired to a multiplexer-backed data provider
        # that has loaded all runs found under self.logdir.
        if generate_testdata:
            self.generate_testdata()
        multiplexer = event_multiplexer.EventMultiplexer()
        multiplexer.AddRunsFromDirectory(self.logdir)
        multiplexer.Reload()
        provider = data_provider.MultiplexerDataProvider(
            multiplexer, self.logdir
        )
        ctx = base_plugin.TBContext(
            logdir=self.logdir, multiplexer=multiplexer, data_provider=provider
        )
        return npmi_plugin.NpmiPlugin(ctx)

    def generate_testdata(self):
        # Two runs; each table's header row holds the metric names ("A", "B")
        # and each later row holds an annotation name plus its nPMI values.
        run_names = ["run_1", "run_2"]
        ground_truth = {
            "run_1": [
                ["Description", "A", "B"],
                ["name_1", 1.0, -1.0],
                ["name_2", -0.5, 0.5],
            ],
            "run_2": [
                ["Description", "A", "B"],
                ["name_1", 1.0, -1.0],
                ["name_2", -0.5, np.nan],
            ],
        }
        embedding_ground_truth = {
            "run_1": [[1.0, 0.5], [-0.5, 0.5]],
            "run_2": [[1.0, 0.5], [-0.5, 0.5]],
        }
        for run_name in run_names:
            subdir = os.path.join(self.logdir, run_name)
            writer = tf.compat.v2.summary.create_file_writer(subdir)
            data = ground_truth[run_name]
            # Split the table into the value matrix, the annotation names
            # (first column) and the metric names (header row).
            python_result = []
            python_annotations = []
            python_classes = []
            for row_index, row in enumerate(data):
                if row_index > 0:
                    python_result.append([])
                for col_index, column in enumerate(row):
                    if row_index == 0:
                        # Header row: all but the first cell are metric names.
                        if col_index > 0:
                            python_classes.append(column)
                    else:
                        if col_index == 0:
                            python_annotations.append(column)
                        else:
                            python_result[len(python_result) - 1].append(column)
            # Emit all four nPMI summaries at step 1 for this run.
            with writer.as_default():
                tensor_result = tf.convert_to_tensor(python_result)
                tensor_annotations = tf.convert_to_tensor(python_annotations)
                tensor_classes = tf.convert_to_tensor(python_classes)
                tensor_embeddings = tf.convert_to_tensor(
                    embedding_ground_truth[run_name]
                )
                summary.npmi_values(tensor_result, 1)
                summary.npmi_annotations(tensor_annotations, 1)
                summary.npmi_metrics(tensor_classes, 1)
                summary.npmi_embeddings(tensor_embeddings, 1)
            writer.close()

    def testRoutesProvided(self):
        # Every route exposed by the plugin must map to a callable handler.
        plugin = self.create_plugin()
        routes = plugin.get_plugin_apps()
        self.assertIsInstance(routes["/tags"], collections.abc.Callable)
        self.assertIsInstance(routes["/annotations"], collections.abc.Callable)
        self.assertIsInstance(routes["/metrics"], collections.abc.Callable)
        self.assertIsInstance(routes["/values"], collections.abc.Callable)
        self.assertIsInstance(routes["/embeddings"], collections.abc.Callable)

    def testTags(self):
        plugin = self.create_plugin()
        tags = plugin.tags_impl(context.RequestContext(), experiment="exp")
        gt_runs = ["run_1", "run_2"]
        gt_tags = [
            "_npmi_/annotations",
            "_npmi_/metrics",
            "_npmi_/values",
            "_npmi_/embeddings",
        ]
        self.assertItemsEqual(gt_runs, tags.keys())
        self.assertItemsEqual(gt_tags, tags["run_1"])
        self.assertItemsEqual(gt_tags, tags["run_2"])

    def testAnnotations(self):
        plugin = self.create_plugin()
        annotations = plugin.annotations_impl(
            context.RequestContext(), experiment="exp",
        )
        self.assertItemsEqual(["name_1", "name_2"], annotations["run_1"])
        self.assertItemsEqual(["name_1", "name_2"], annotations["run_2"])

    def testMetrics(self):
        plugin = self.create_plugin()
        metrics = plugin.metrics_impl(
            context.RequestContext(), experiment="exp",
        )
        self.assertItemsEqual(["A", "B"], metrics["run_1"])
        self.assertItemsEqual(["A", "B"], metrics["run_2"])

    def testValues(self):
        plugin = self.create_plugin()
        values = plugin.values_impl(context.RequestContext(), experiment="exp")
        self.assertItemsEqual([1.0, -1.0], values["run_1"][0])
        self.assertItemsEqual([0.5, -0.5], values["run_1"][1])
        self.assertItemsEqual([1.0, -1.0], values["run_2"][0])
        # The np.nan written for run_2/name_2 is expected back as None.
        self.assertItemsEqual([-0.5, None], values["run_2"][1])

    def testEmbeddings(self):
        plugin = self.create_plugin()
        embeddings = plugin.embeddings_impl(
            context.RequestContext(), experiment="exp"
        )
        self.assertItemsEqual([1.0, 0.5], embeddings["run_1"][0])
        self.assertItemsEqual([-0.5, 0.5], embeddings["run_1"][1])
        self.assertItemsEqual([1.0, 0.5], embeddings["run_2"][0])
        self.assertItemsEqual([-0.5, 0.5], embeddings["run_2"][1])

    def testIsActiveReturnsFalse(self):
        """The plugin should always return false because this is now handled
        by TensorBoard core."""
        plugin = self.create_plugin(generate_testdata=False)
        self.assertFalse(plugin.is_active())
# Script entry point: run the suite under TensorFlow's test runner.
if __name__ == "__main__":
    tf.test.main()
| 37.85 | 80 | 0.6059 |
176b419243ff0082a1af59b30e4586b41f1e0543 | 3,186 | py | Python | tests/query/stateless/test_if_exists.py | panda-sheep/nebula-graph | 27f832a16413d6dab06ca2bf9e931547306c3819 | [
"Apache-2.0"
] | 1 | 2021-08-23T05:55:55.000Z | 2021-08-23T05:55:55.000Z | tests/query/stateless/test_if_exists.py | panda-sheep/nebula-graph | 27f832a16413d6dab06ca2bf9e931547306c3819 | [
"Apache-2.0"
] | null | null | null | tests/query/stateless/test_if_exists.py | panda-sheep/nebula-graph | 27f832a16413d6dab06ca2bf9e931547306c3819 | [
"Apache-2.0"
] | null | null | null | # --coding:utf-8--
#
# Copyright (c) 2020 vesoft inc. All rights reserved.
#
# This source code is licensed under Apache 2.0 License,
# attached with Common Clause Condition 1.0, found in the LICENSES directory.
import re
import sys
import pytest
import time
from nebula2.graph import ttypes
import nebula2.Client
from tests.common.nebula_test_suite import NebulaTestSuite
class TestDropSpaceIfExists(NebulaTestSuite):
    """Regression tests for IF EXISTS / IF NOT EXISTS on spaces, tags and
    edges (issue 1461)."""

    @classmethod
    def prepare(self):
        print("Nothing to Prepare")

    def _check_ok(self, stmt, as_query=False):
        # Run one statement and assert the server reports success.
        resp = self.execute_query(stmt) if as_query else self.execute(stmt)
        self.check_resp_succeeded(resp)

    # issue 1461
    def test_drop_space(self):
        # Space-level lifecycle: dropping a missing space must succeed with
        # IF EXISTS, and CREATE ... IF NOT EXISTS must tolerate duplicates.
        self._check_ok('drop space IF EXISTS shakespaces', as_query=True)
        self._check_ok('CREATE SPACE shakespaces(partition_num=1024)')
        self._check_ok('CREATE SPACE IF NOT EXISTS shakespaces(partition_num=1024)')
        self._check_ok('drop space shakespaces', as_query=True)
        self._check_ok('drop space IF EXISTS shakespaces', as_query=True)
        self._check_ok('CREATE SPACE IF NOT EXISTS shakespaces(partition_num=1024)')
        # Wait for the schema change to propagate before using the space.
        time.sleep(self.delay)
        self._check_ok('use shakespaces')
        # Tag/edge-level lifecycle inside the freshly created space.
        for schema_stmt in (
            'CREATE TAG person(name string, age int, gender string);',
            'CREATE TAG IF NOT EXISTS person(name string, age int, gender string);',
            'CREATE EDGE IF NOT EXISTS is_schoolmate(start_year int, end_year int);',
            'DROP EDGE is_schoolmate',
            'CREATE EDGE is_schoolmate(start_year int, end_year int);',
            'DROP TAG person',
            'DROP TAG IF EXISTS person',
            'DROP EDGE is_schoolmate',
            'DROP EDGE IF EXISTS is_schoolmate',
            'CREATE TAG person(name string, age int, gender string);',
            'CREATE TAG if not exists person(name string, age int, gender string);',
            'CREATE EDGE is_schoolmate(start_year int, end_year int);',
            'CREATE EDGE if not exists is_schoolmate(start_year int, end_year int);',
        ):
            self._check_ok(schema_stmt)
        self._check_ok('drop space shakespaces', as_query=True)

    @classmethod
    def cleanup(self):
        print("Nothing to cleanup")
| 33.1875 | 101 | 0.686127 |
6e86e01add1b30638f66d7ec85567849a1ca4bc3 | 293 | py | Python | Python3/0343-Integer-Break/soln.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0343-Integer-Break/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/0343-Integer-Break/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | class Solution:
def integerBreak(self, n):
"""
:type n: int
:rtype: int
"""
# get maximum
if n == 2: return 1
if n == 3: return 2
prod = 1
while n > 4:
prod *= 3
n -= 3
return prod * n | 20.928571 | 30 | 0.37884 |
52056b261a95d0dbf9ca620387ed105ffb0e05a0 | 12,471 | py | Python | examples/albert/run_trainer.py | deniskamazur/hm-debug | cf31951504c38a1ea5e868e607ea74691092561a | [
"MIT"
] | 1 | 2021-08-23T01:16:12.000Z | 2021-08-23T01:16:12.000Z | examples/albert/run_trainer.py | deniskamazur/hm-debug | cf31951504c38a1ea5e868e607ea74691092561a | [
"MIT"
] | 1 | 2021-08-23T01:16:21.000Z | 2021-08-23T01:16:21.000Z | examples/albert/run_trainer.py | deniskamazur/hm-debug | cf31951504c38a1ea5e868e607ea74691092561a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
import os
import pickle
from dataclasses import asdict
from pathlib import Path
import torch
import transformers
from datasets import load_from_disk
from torch.utils.data import DataLoader
from torch_optimizer import Lamb
from transformers import DataCollatorForLanguageModeling, HfArgumentParser, TrainingArguments, set_seed
from transformers.models.albert import AlbertConfig, AlbertForPreTraining, AlbertTokenizerFast
from transformers.optimization import get_linear_schedule_with_warmup
from transformers.trainer import Trainer
from transformers.trainer_utils import is_main_process
import hivemind
from hivemind.utils.compression import CompressionType
import utils
from arguments import AlbertTrainingArguments, AveragerArguments, CollaborationArguments, DatasetArguments
logger = logging.getLogger(__name__)
LRSchedulerBase = getattr(torch.optim.lr_scheduler, "_LRScheduler", None)
def setup_logging(training_args):
    """Configure Python logging and transformers' verbosity.

    The root logger is set to INFO on the main process and WARN on every
    other (distributed) process, so detailed output is emitted only once.

    :param training_args: HuggingFace TrainingArguments for this process.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if is_main_process(training_args.local_rank) else logging.WARN,
    )

    # Log on each process the small summary.
    # FIX: the two f-strings were previously concatenated with no separator,
    # producing garbled output like "n_gpu: 1distributed training: ...".
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
def get_model(training_args, config, tokenizer):
    """Restore the newest checkpoint from ``training_args.output_dir`` when
    one exists; otherwise build a fresh AlbertForPreTraining from *config*.
    The embedding matrix is resized to the tokenizer vocabulary either way."""
    output_dir = Path(training_args.output_dir)
    checkpoints = list(output_dir.glob("checkpoint*"))
    logger.info(f"Checkpoint dir {output_dir}, contents {checkpoints}")

    if checkpoints:
        # Pick the checkpoint directory with the most recent creation time.
        newest_checkpoint = max(checkpoints, key=os.path.getctime)
        logger.info(f"Loading model from {newest_checkpoint}")
        model = AlbertForPreTraining.from_pretrained(newest_checkpoint)
    else:
        logger.info(f"Training from scratch")
        model = AlbertForPreTraining(config)
        model.resize_token_embeddings(len(tokenizer))
        return model
def get_optimizer_and_scheduler(training_args, model):
    """Build the LAMB optimizer (weight decay disabled for biases and
    LayerNorm weights) plus a linear warmup/decay LR schedule."""
    no_decay = ["bias", "LayerNorm.weight"]

    def _uses_weight_decay(param_name):
        # Biases and LayerNorm weights are excluded from weight decay.
        return not any(marker in param_name for marker in no_decay)

    grouped_parameters = [
        {
            "params": [p for n, p in model.named_parameters() if _uses_weight_decay(n)],
            "weight_decay": training_args.weight_decay,
        },
        {
            "params": [p for n, p in model.named_parameters() if not _uses_weight_decay(n)],
            "weight_decay": 0.0,
        },
    ]

    optimizer = Lamb(
        grouped_parameters,
        lr=training_args.learning_rate,
        betas=(training_args.adam_beta1, training_args.adam_beta2),
        eps=training_args.adam_epsilon,
        weight_decay=training_args.weight_decay,
        clamp_value=training_args.clamp_value,
        debias=True,
    )

    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=training_args.warmup_steps, num_training_steps=training_args.max_steps
    )

    return optimizer, lr_scheduler
class CollaborativeCallback(transformers.TrainerCallback):
    """
    This callback monitors and reports collaborative training progress.
    In case of a catastrophic failure, it can also revert training to a backup.
    """

    def __init__(
        self,
        dht: hivemind.DHT,
        optimizer: hivemind.CollaborativeOptimizer,
        model: torch.nn.Module,
        local_public_key: bytes,
        statistics_expiration: float,
        backup_every_steps: int,
    ):
        super().__init__()
        self.model = model
        self.dht, self.collaborative_optimizer = dht, optimizer
        # Subkey under which this peer publishes its metrics to the DHT.
        self.local_public_key = local_public_key
        # How long (seconds) published statistics remain valid in the DHT.
        self.statistics_expiration = statistics_expiration
        # Last collaborative step for which statistics were reported.
        self.last_reported_collaboration_step = -1
        # Running counters, reset after each reported collaborative step.
        self.samples = 0
        self.steps = 0
        self.loss = 0
        self.total_samples_processed = 0
        self.backup_every_steps = backup_every_steps
        # Pickled (model, optimizer) snapshot used to recover from bad states.
        self.latest_backup = self.backup_state()

    def on_train_begin(
        self, args: TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs
    ):
        # Start from the collaboration's current state rather than scratch.
        logger.info("Loading state from peers")
        self.collaborative_optimizer.load_state_from_peers()

    def on_step_end(
        self, args: TrainingArguments, state: transformers.TrainerState, control: transformers.TrainerControl, **kwargs
    ):
        control.should_log = True
        # If any parameter went NaN/inf, roll back to the last good backup
        # and skip reporting for this step.
        if not self.params_are_finite():
            self.restore_from_backup(self.latest_backup)
            return control

        if state.log_history:
            self.loss += state.log_history[-1]["loss"]
            self.steps += 1
            # Report once per *collaborative* step (local_step advances only
            # when the collaboration performs a global update).
            if self.collaborative_optimizer.local_step != self.last_reported_collaboration_step:
                self.last_reported_collaboration_step = self.collaborative_optimizer.local_step
                self.total_samples_processed += self.samples
                samples_per_second = self.collaborative_optimizer.performance_ema.samples_per_second
                statistics = utils.LocalMetrics(
                    step=self.collaborative_optimizer.local_step,
                    samples_per_second=samples_per_second,
                    samples_accumulated=self.samples,
                    loss=self.loss,
                    mini_steps=self.steps,
                )
                logger.info(f"Step {self.collaborative_optimizer.local_step}")
                logger.info(f"Your current contribution: {self.total_samples_processed} samples")
                logger.info(f"Performance: {samples_per_second} samples per second.")
                if self.steps:
                    logger.info(f"Local loss: {self.loss / self.steps}")
                # Refresh the recovery snapshot periodically.
                if self.collaborative_optimizer.local_step % self.backup_every_steps == 0:
                    self.latest_backup = self.backup_state()

                self.loss = 0
                self.steps = 0
                # Publish metrics only when local state matches the collaboration.
                if self.collaborative_optimizer.is_synchronized:
                    self.dht.store(
                        key=self.collaborative_optimizer.prefix + "_metrics",
                        subkey=self.local_public_key,
                        value=statistics.dict(),
                        expiration_time=hivemind.get_dht_time() + self.statistics_expiration,
                        return_future=True,
                    )

        self.samples = self.collaborative_optimizer.local_samples_accumulated
        return control

    @torch.no_grad()
    def params_are_finite(self):
        # True iff every model parameter is free of NaN/inf values.
        for param in self.model.parameters():
            if not torch.all(torch.isfinite(param)):
                return False
        return True

    @torch.no_grad()
    def backup_state(self) -> bytes:
        # Serialize model + optimizer state for a later in-memory restore.
        return pickle.dumps(
            {"model": self.model.state_dict(), "optimizer": self.collaborative_optimizer.opt.state_dict()}
        )

    @torch.no_grad()
    def restore_from_backup(self, backup: bytes):
        # Inverse of backup_state(): load both state dicts back in place.
        state = pickle.loads(backup)
        self.model.load_state_dict(state["model"])
        self.collaborative_optimizer.opt.load_state_dict(state["optimizer"])
class NoOpScheduler(LRSchedulerBase):
    """Dummy scheduler for transformers.Trainer. The real scheduler is defined in CollaborativeOptimizer.scheduler"""

    def get_lr(self):
        # Report the optimizer's current learning rates without changing them.
        return [param_group["lr"] for param_group in self.optimizer.param_groups]

    def print_lr(self, *args, **kwargs):
        # Delegate to the real scheduler when the wrapped optimizer has one.
        real_scheduler = self.optimizer.scheduler
        if real_scheduler:
            return real_scheduler.print_lr(*args, **kwargs)

    def step(self):
        logger.debug("Called NoOpScheduler.step")
        self._last_lr = self.get_lr()

    def state_dict(self):
        # Nothing to persist: scheduling state lives in CollaborativeOptimizer.
        return {}

    def load_state_dict(self, *args, **kwargs):
        logger.debug("Called NoOpScheduler.load_state_dict")
def main():
    """Entry point: parse CLI dataclasses, build the ALBERT model and the
    hivemind collaborative optimizer, join the DHT, and run pretraining
    through a (shuffling-independent) HuggingFace Trainer."""
    parser = HfArgumentParser((AlbertTrainingArguments, DatasetArguments, CollaborationArguments, AveragerArguments))
    training_args, dataset_args, collaboration_args, averager_args = parser.parse_args_into_dataclasses()

    logger.info(f"Found {len(collaboration_args.initial_peers)} initial peers: {collaboration_args.initial_peers}")
    if len(collaboration_args.initial_peers) == 0:
        raise ValueError("Please specify at least one network endpoint in initial peers.")

    setup_logging(training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    config = AlbertConfig.from_pretrained(dataset_args.config_path, cache_dir=dataset_args.cache_dir)
    tokenizer = AlbertTokenizerFast.from_pretrained(dataset_args.tokenizer_path, cache_dir=dataset_args.cache_dir)
    model = get_model(training_args, config, tokenizer)
    model.to(training_args.device)

    tokenized_datasets = load_from_disk(Path(dataset_args.dataset_path))
    # This data collator will take care of randomly masking the tokens.
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer)

    opt, scheduler = get_optimizer_and_scheduler(training_args, model)

    # Validators (from utils.make_validators) guard DHT records written under
    # this experiment's prefix; local_public_key identifies this peer.
    validators, local_public_key = utils.make_validators(collaboration_args.experiment_prefix)
    dht = hivemind.DHT(
        start=True,
        initial_peers=collaboration_args.initial_peers,
        client_mode=collaboration_args.client_mode,
        record_validators=validators,
        use_ipfs=collaboration_args.use_ipfs,
        host_maddrs=collaboration_args.host_maddrs,
        announce_maddrs=collaboration_args.announce_maddrs,
    )
    utils.log_visible_maddrs(dht.get_visible_maddrs(), only_p2p=collaboration_args.use_ipfs)

    # Samples this peer contributes per optimizer step (all local GPUs).
    total_batch_size_per_step = training_args.per_device_train_batch_size * training_args.gradient_accumulation_steps
    if torch.cuda.device_count() != 0:
        total_batch_size_per_step *= torch.cuda.device_count()
    # batch_size_lead is subtracted from the target -- presumably a head-start
    # margin for triggering the collaborative step; confirm in arguments.py.
    adjusted_target_batch_size = collaboration_args.target_batch_size - collaboration_args.batch_size_lead

    collaborative_optimizer = hivemind.CollaborativeOptimizer(
        opt=opt,
        dht=dht,
        scheduler=scheduler,
        prefix=collaboration_args.experiment_prefix,
        compression_type=CompressionType.Value(collaboration_args.compression),
        batch_size_per_step=total_batch_size_per_step,
        bandwidth=collaboration_args.bandwidth,
        target_batch_size=adjusted_target_batch_size,
        client_mode=collaboration_args.client_mode,
        verbose=True,
        start=True,
        **asdict(averager_args),
    )

    class TrainerWithIndependentShuffling(Trainer):
        def get_train_dataloader(self) -> DataLoader:
            """Shuffle data independently for each peer to avoid duplicating batches [important for quality]"""
            torch.manual_seed(hash(local_public_key))
            return super().get_train_dataloader()

    trainer = TrainerWithIndependentShuffling(
        model=model,
        args=training_args,
        tokenizer=tokenizer,
        data_collator=data_collator,
        train_dataset=tokenized_datasets["train"] if training_args.do_train else None,
        eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None,
        optimizers=(collaborative_optimizer, NoOpScheduler(collaborative_optimizer)),
        callbacks=[
            CollaborativeCallback(
                dht,
                collaborative_optimizer,
                model,
                local_public_key,
                collaboration_args.statistics_expiration,
                collaboration_args.backup_every_steps,
            )
        ],
    )
    # Default console callbacks are removed; CollaborativeCallback logs instead.
    trainer.remove_callback(transformers.trainer_callback.PrinterCallback)
    trainer.remove_callback(transformers.trainer_callback.ProgressCallback)

    # Training
    if training_args.do_train:
        latest_checkpoint_dir = max(
            Path(training_args.output_dir).glob("checkpoint*"), default=None, key=os.path.getctime
        )
        trainer.train(model_path=latest_checkpoint_dir)
# Script entry point.
if __name__ == "__main__":
    main()
| 39.716561 | 119 | 0.697618 |
6c6d3062d2909b20cb59b5c0b77a7fc98149319d | 1,950 | py | Python | config.py | mjscjj/SCDP | 379d7c52dec58c623c363fc7347a67f269c3c3f3 | [
"MIT"
] | null | null | null | config.py | mjscjj/SCDP | 379d7c52dec58c623c363fc7347a67f269c3c3f3 | [
"MIT"
] | null | null | null | config.py | mjscjj/SCDP | 379d7c52dec58c623c363fc7347a67f269c3c3f3 | [
"MIT"
] | null | null | null | import os
import time
"""
Basic model configuration (模型的基本配置).

BASE_DIR     local project root
train_path   training-data path
test_path    test-data path
"""
# FIX: the docstring, the dataset-path definitions and print(test_path)
# were previously duplicated verbatim (copy-paste); the duplicate block
# was removed.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
train_path = os.path.join(BASE_DIR, "avazu_CTR/train.csv")
test_path = os.path.join(BASE_DIR, "avazu_CTR/test.csv")
data_path = os.path.join(BASE_DIR, "avazu_CTR/sets/")
field2count = os.path.join(BASE_DIR, "avazu_CTR/field2count/")
print(test_path)

# Deep-network configuration (深度网络相关配置)
debug_level = 'INFO'

# Paths and files (路径和文件配置).
# NOTE(review): misspelled names (encod_vaild_path, "vaild_pred.txt",
# model_ouput_dir) are kept as-is -- other modules import them by name.
encod_train_path = os.path.join(BASE_DIR, "output/model_data/train.txt")
encod_vaild_path = os.path.join(BASE_DIR, "output/model_data/valid.txt")
encod_test_path = os.path.join(BASE_DIR, "output/model_data/test.txt")
ffm_train_path = os.path.join(BASE_DIR, "output/model_data/train_pred.txt")
ffm_valid_path = os.path.join(BASE_DIR, "output/model_data/vaild_pred.txt")
ffm_test_path = os.path.join(BASE_DIR, "output/model_data/test_pred.txt")
dictsizefile = os.path.join(BASE_DIR, "output/model_data/dictsize.csv")
model_ouput_dir = os.path.join(BASE_DIR, "DNN/model_output/")
summary_dir = os.path.join(BASE_DIR, "DNN/summary/")
outlog_dir = os.path.join(BASE_DIR, "DNN/log/")
# Log file name is stamped with today's date, e.g. "train_20240101.log".
dnn_log_file = 'train_' + time.strftime('%Y%m%d', time.localtime(time.time())) + '.log'
dnn_log_dir = os.path.join(BASE_DIR, "DNN/log/")
dnn_log_path = os.path.join(dnn_log_dir, dnn_log_file)
# Column range of the categorical features in the encoded data.
encod_cat_index_begin = 6
encod_cat_index_end = 30
valid_switch = 1
model_flag = 'model2'

# Training hyper-parameters (训练参数)
batch_size = 100
keep_prob = 0.8
logfrequency = 10
Max_step = 2000000000
Max_epoch = 50
embed_dim = 128
learning_rate = 0.01
decay_rate = 0.96
decay_steps = 5000
oridata_dim = 23
| 28.676471 | 87 | 0.762051 |
cc9fb91050c2eee0b486f333343bd330c607f7dc | 2,835 | py | Python | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/pause_scaling_policy_option.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-11-03T07:54:50.000Z | 2021-11-03T07:54:50.000Z | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/pause_scaling_policy_option.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-as/huaweicloudsdkas/v1/model/pause_scaling_policy_option.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
import pprint
import re
import six
class PauseScalingPolicyOption:
    """
    Attributes:
        openapi_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'action': 'str'
    }

    attribute_map = {
        'action': 'action'
    }

    def __init__(self, action=None):
        """PauseScalingPolicyOption - a model defined in huaweicloud sdk"""
        self._action = None
        self.discriminator = None
        if action is not None:
            self.action = action

    @property
    def action(self):
        """Gets the action of this PauseScalingPolicyOption.

        Identifier of the scaling-policy operation: ``execute`` runs the
        policy, ``resume`` enables it, and ``pause`` stops it.

        :return: The action of this PauseScalingPolicyOption.
        :rtype: str
        """
        return self._action

    @action.setter
    def action(self, action):
        """Sets the action of this PauseScalingPolicyOption.

        Identifier of the scaling-policy operation: ``execute`` runs the
        policy, ``resume`` enables it, and ``pause`` stops it.

        :param action: The action of this PauseScalingPolicyOption.
        :type: str
        """
        self._action = action

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize nested models held in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize nested models held as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PauseScalingPolicyOption):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 25.540541 | 75 | 0.541446 |
576629e2c91a763478bef85a603bd0f86d4b2ae3 | 633 | py | Python | likelihoods/ACTPol_lite_DR4_onlyEE/__init__.py | s-ilic/ECLAIR | d82e1cf96f4f3676120e94cd46a7ed7734002b0c | [
"MIT"
] | 4 | 2020-04-23T03:30:27.000Z | 2021-08-19T15:59:15.000Z | likelihoods/ACTPol_lite_DR4_onlyEE/__init__.py | s-ilic/ECLAIR | d82e1cf96f4f3676120e94cd46a7ed7734002b0c | [
"MIT"
] | null | null | null | likelihoods/ACTPol_lite_DR4_onlyEE/__init__.py | s-ilic/ECLAIR | d82e1cf96f4f3676120e94cd46a7ed7734002b0c | [
"MIT"
] | null | null | null | import pyactlike
import numpy as np
### Some important variables ###
# Spectrum selection: only the EE polarization band powers are enabled.
use_tt = False
use_te = False
use_ee = True
# Passed straight to pyactlike; presumably the maximum TT multipole and the
# first band-power bin index -- confirm against the pyactlike documentation.
tt_lmax = 5000
bmin = 0
# Likelihood object built once at import time with the choices above.
like = pyactlike.ACTPowerSpectrumData(
    use_tt=use_tt,
    use_te=use_te,
    use_ee=use_ee,
    tt_lmax=tt_lmax,
    bmin=bmin,
)
### ACTPol lite DR4 likelihood
def get_loglike(class_input, likes_input, class_run):
    """Return the ACTPol lite DR4 (EE-only) log-likelihood for the current
    CLASS run, converting lensed C_ell into D_ell scaled by (1e6*T_cmb)^2."""
    lensed = class_run.lensed_cl()
    ell = lensed['ell'][2:]
    prefactor = ell * (ell + 1.) / 2. / np.pi
    # TT and TE are disabled in this likelihood, so pass zero-filled arrays.
    dell_tt = np.zeros_like(prefactor)
    dell_te = np.zeros_like(prefactor)
    dell_ee = prefactor * lensed['ee'][2:] * 1e12 * class_run.T_cmb() ** 2.
    return like.loglike(dell_tt, dell_te, dell_ee, likes_input['yp2'])
| 24.346154 | 80 | 0.655608 |
c6edbb375775ea2bab75e5591ccb559dfbe570ba | 1,451 | py | Python | test_code/aiy_hotword_recording_.py | YeongJunKim/rpi-pyrec | 81ccfce023a877ad6fa8543f199be10742739400 | [
"Apache-2.0"
] | 1 | 2018-08-06T08:28:11.000Z | 2018-08-06T08:28:11.000Z | test_code/aiy_hotword_recording_.py | YeongJunKim/rpi_pyrec | 81ccfce023a877ad6fa8543f199be10742739400 | [
"Apache-2.0"
] | null | null | null | test_code/aiy_hotword_recording_.py | YeongJunKim/rpi_pyrec | 81ccfce023a877ad6fa8543f199be10742739400 | [
"Apache-2.0"
] | null | null | null | # autor : colson (Yeong Jun Kim)
# https://www.github.com/YeongJunKim
from aiy_led import MyLed
from aiy_log import MyLogger
from aiy_play import MyAudio
from aiy_rec import MyRec
import logging
import aiy.voicehat
import datetime
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
logger = MyLogger(level=logging.DEBUG, get="MAIN")
led = MyLed()
audio = MyAudio()
rec = MyRec()
startFlag = 0
stopFlag = 0
def setup():
    # Attach a file handler so log output is also written to "logger.log".
    # NOTE(review): setup() is never invoked by main() in this file --
    # presumably an oversight; confirm whether file logging should be enabled.
    logger.add_file_stream_handler("logger.log")
def button_callback():
    """Button-press handler: arm the record/playback flag unless already set."""
    print("button callback")
    global startFlag
    if startFlag != 0:
        return
    startFlag = 1
def start():
    """Poll for button presses; on each press record a 4-second clip to a
    timestamped WAV file, play it back, and light the LED while busy."""
    import time as _time  # local import: this module does not import "time"
    global startFlag
    button = aiy.voicehat.get_button()
    button.on_press(button_callback)
    # thread_button_checker = ButtonCheck(name="button_checker")
    # thread_button_checker.start()
    while True:
        if startFlag == 1:
            led.set_color(led=(0xFF, 0xFF, 0xFF))  # LED on: busy
            # FIX: local variable renamed from "time" so it no longer shadows
            # the conventional time-module name.
            stamp = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
            path = "/home/pi/hot-word-backup"
            wav_path = path + "/" + stamp + ".wav"
            cmd = "sudo arecord -D sysdefault:CARD=0 -d 4 -r 16000 -f S16_LE " + wav_path
            rec.record_start(cmd)
            audio.play_audio_path(wav_path)
            led.set_color(led=(0x00, 0x00, 0x00))  # LED off: idle
            startFlag = 0
        else:
            # FIX: sleep briefly instead of busy-spinning at 100% CPU.
            _time.sleep(0.05)
def main():
    # NOTE(review): setup() (file logging) is never called here -- confirm
    # whether it should run before start().
    start()

# Script entry point: runs the record/playback loop until interrupted.
if __name__ == '__main__':
    main()
| 23.031746 | 117 | 0.635424 |
377bef86e9852db735d05f95209d58ba65cd2da2 | 464 | py | Python | servicos/migrations/0004_auto_20170201_1153.py | eduardoedson/SCP | 52bb96b35b4f6e40272eb2dc65f304ac72279b3f | [
"MIT"
] | null | null | null | servicos/migrations/0004_auto_20170201_1153.py | eduardoedson/SCP | 52bb96b35b4f6e40272eb2dc65f304ac72279b3f | [
"MIT"
] | null | null | null | servicos/migrations/0004_auto_20170201_1153.py | eduardoedson/SCP | 52bb96b35b4f6e40272eb2dc65f304ac72279b3f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-01 11:53
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django; alters Chamado.descricao to a TextField
    # (verbose name "Descrição"). Applied migrations should not be hand-edited.

    dependencies = [
        ('servicos', '0003_auto_20170201_1143'),
    ]

    operations = [
        migrations.AlterField(
            model_name='chamado',
            name='descricao',
            field=models.TextField(verbose_name='Descrição'),
        ),
    ]
| 22.095238 | 61 | 0.625 |
4c629dc90bf81117ab5d4cc6c7ff028b76debea9 | 4,119 | py | Python | kartothek/io/dask/bag.py | kagharpure/kartothek | 6ddab8fc1376f381730cf4db6ca5cd3df5ac99f6 | [
"MIT"
] | null | null | null | kartothek/io/dask/bag.py | kagharpure/kartothek | 6ddab8fc1376f381730cf4db6ca5cd3df5ac99f6 | [
"MIT"
] | null | null | null | kartothek/io/dask/bag.py | kagharpure/kartothek | 6ddab8fc1376f381730cf4db6ca5cd3df5ac99f6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from functools import partial
import dask.bag as db
from kartothek.core import naming
from kartothek.core.factory import _ensure_factory
from kartothek.core.utils import _check_callable
from kartothek.core.uuid import gen_uuid
from kartothek.io.eager import read_dataset_as_metapartitions
from kartothek.io_components.docs import default_docs
from kartothek.io_components.index import update_indices_from_partitions
from kartothek.io_components.metapartition import (
MetaPartition,
parse_input_to_metapartition,
)
from kartothek.io_components.utils import normalize_args
from kartothek.io_components.write import (
raise_if_dataset_exists,
store_dataset_from_partitions,
)
def _store_dataset_from_partitions_flat(mpss, *args, **kwargs):
    """Flatten the nested metapartition lists produced by the bag reduction
    and forward the flat list to ``store_dataset_from_partitions``."""
    flattened = []
    for partition_group in mpss:
        flattened.extend(partition_group)
    return store_dataset_from_partitions(flattened, *args, **kwargs)
@default_docs
@normalize_args
def store_bag_as_dataset(
    bag,
    store,
    dataset_uuid=None,
    metadata=None,
    df_serializer=None,
    overwrite=False,
    metadata_merger=None,
    metadata_version=naming.DEFAULT_METADATA_VERSION,
    partition_on=None,
    metadata_storage_format=naming.DEFAULT_METADATA_STORAGE_FORMAT,
    secondary_indices=None,
):
    """
    Transform and store a dask.bag of dictionaries containing
    dataframes to a kartothek dataset in store.
    This is the dask.bag-equivalent of
    :func:`store_delayed_as_dataset`. See there
    for more detailed documentation on the different possible input types.
    Parameters
    ----------
    bag: dask.bag
        A dask bag containing dictionaries of dataframes or dataframes.
    Returns
    -------
    A dask.bag.Item dataset object.
    """
    # Validate the store argument up front (the helper's name indicates it
    # must be callable, i.e. a store factory).
    _check_callable(store)
    # Without an explicit UUID a fresh one is generated for the new dataset.
    if dataset_uuid is None:
        dataset_uuid = gen_uuid()
    # Unless overwriting was requested, fail early if the dataset exists.
    if not overwrite:
        raise_if_dataset_exists(dataset_uuid=dataset_uuid, store=store)
    # Normalize each bag item into MetaPartition objects.  Everything below
    # only extends the dask graph; nothing executes until compute().
    input_to_mps = partial(
        parse_input_to_metapartition, metadata_version=metadata_version
    )
    mps = bag.map(input_to_mps)
    # Optionally repartition the data by the given columns ...
    if partition_on:
        mps = mps.map(MetaPartition.partition_on, partition_on=partition_on)
    # ... and build the requested secondary indices per metapartition.
    if secondary_indices:
        mps = mps.map(MetaPartition.build_indices, columns=secondary_indices)
    # Persist the dataframes of every metapartition to the store.
    mps = mps.map(
        MetaPartition.store_dataframes,
        store=store,
        df_serializer=df_serializer,
        dataset_uuid=dataset_uuid,
    )
    # Reduce all partitions into a single item that commits the dataset
    # metadata via _store_dataset_from_partitions_flat.
    aggregate = partial(
        _store_dataset_from_partitions_flat,
        dataset_uuid=dataset_uuid,
        store=store,
        dataset_metadata=metadata,
        metadata_merger=metadata_merger,
        metadata_storage_format=metadata_storage_format,
    )
    return mps.reduction(perpartition=list, aggregate=aggregate, split_every=False)
@default_docs
def build_dataset_indices__bag(
    store, dataset_uuid, columns, partition_size=None, factory=None
):
    """
    Function which builds a :class:`~kartothek.core.index.ExplicitSecondaryIndex`.
    This function loads the dataset, computes the requested indices and writes
    the indices to the dataset. The dataset partitions itself are not mutated.
    Parameters
    ----------
    partition_size: Optional[int]
        Dask bag partition size. Use a larger numbers to decrease scheduler load and overhead, use smaller numbers for a
        fine-grained scheduling and better resilience against worker errors.
    Returns
    -------
    A dask.delayed computation object.
    """
    # Resolve uuid/store/factory into a dataset factory; dataset-level
    # metadata is not needed for index construction.
    ds_factory = _ensure_factory(
        dataset_uuid=dataset_uuid,
        store=store,
        factory=factory,
        load_dataset_metadata=False,
    )
    # Eagerly enumerate the dataset's metapartitions, then process them lazily
    # as a dask bag.
    mps = read_dataset_as_metapartitions(factory=ds_factory)
    return (
        db.from_sequence(seq=mps, partition_size=partition_size)
        # Compute the requested index columns for each metapartition ...
        .map(MetaPartition.build_indices, columns=columns)
        # ... then drop the dataframes so only index data travels further.
        .map(MetaPartition.remove_dataframes)
        .reduction(list, list, split_every=False, out_type=db.Bag)
        .flatten()
        .map_partitions(list)
        # Merge the per-partition indices and write them back to the store.
        .map_partitions(
            update_indices_from_partitions, dataset_metadata_factory=ds_factory
        )
    )
| 29.633094 | 120 | 0.725904 |
e509c4b86ce4c1603c1bb35264489610dc1754d8 | 4,964 | py | Python | qpaint/qpaint.py | Time0o/QPaint | 596d6c1c96f7f448205bf49725f4723a27d80953 | [
"MIT"
] | null | null | null | qpaint/qpaint.py | Time0o/QPaint | 596d6c1c96f7f448205bf49725f4723a27d80953 | [
"MIT"
] | null | null | null | qpaint/qpaint.py | Time0o/QPaint | 596d6c1c96f7f448205bf49725f4723a27d80953 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from sys import argv, exit
from typing import Optional
from PyQt5 import QtCore, QtGui, QtWidgets
class DrawAreaView(QtWidgets.QGraphicsView):
    """Scene view for the drawing canvas with Ctrl+mouse-wheel zooming."""

    ZOOM_FACTOR = 1.25

    def __init__(self):
        super().__init__()
        # Zoom around the cursor position instead of the viewport centre.
        self.setTransformationAnchor(QtWidgets.QGraphicsView.AnchorUnderMouse)

    def wheelEvent(self, event: QtGui.QWheelEvent):
        """Scale the view on Ctrl+wheel; plain wheel events are ignored."""
        if not event.modifiers() & QtCore.Qt.ControlModifier:
            return
        delta_y = event.angleDelta().y()
        factor = self.ZOOM_FACTOR if delta_y > 0 else 1. / self.ZOOM_FACTOR
        self.scale(factor, factor)
class ToolsDockWidget(QtWidgets.QDockWidget):
    """Dock widget listing the available drawing tools."""

    class Tool:
        """Description of a single tool: name, icon, shortcut and tooltip."""

        def __init__(self,
                     name: str,
                     icon: str,
                     shortcut: Optional[str] = None,
                     helptext: Optional[str] = None):
            self._name = name
            self._icon = QtGui.QIcon(icon)
            self._shortcut = QtGui.QKeySequence(shortcut) if shortcut else None
            # Compose the tooltip from name, optional shortcut and help text.
            self._toolTip = self._name
            if shortcut is not None:
                self._toolTip += '\nShortcut key: ' + self._shortcut.toString()
            if helptext is not None:
                self._toolTip += '\n\n' + helptext

        def name(self) -> str:
            return self._name

        def icon(self) -> QtGui.QIcon:
            return self._icon

        def shortcut(self) -> Optional[QtGui.QKeySequence]:
            return self._shortcut

        def toolTip(self) -> str:
            return self._toolTip

    class ToolButtonAreaWidget(QtWidgets.QWidget):
        """Vertical strip of fixed-size tool buttons."""

        BUTTON_SIZE = QtCore.QSize(30, 30)

        def __init__(self):
            super().__init__()
            self._layout = QtWidgets.QVBoxLayout()
            self._layout.setAlignment(QtCore.Qt.AlignTop)
            self.setLayout(self._layout)

        def addTool(self, tool):
            """Append a button for *tool*; icons are not wired up yet."""
            toolButton = QtWidgets.QToolButton()
            toolButton.setText('X') # TODO
            #toolButton.setIcon(tool.icon())
            toolButton.setToolTip(tool.toolTip())
            toolButton.setFixedSize(self.BUTTON_SIZE)
            self._layout.addWidget(toolButton)

    WINDOW_TITLE = 'Tools'

    # Tool registry: keys are internal identifiers, values the Tool metadata.
    AVAILABLE_TOOLS = {
        'brush': Tool('Paintbrush', '', 'B', 'Left click to draw.'),
        'eraser': Tool('Eraser', '', 'E', 'Left click to erase.'),
    }

    def __init__(self):
        super().__init__()
        self.setWindowTitle(self.WINDOW_TITLE)
        # Disable the dock's optional affordances (Qt NoDockWidgetFeatures).
        self.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
        self._toolButtonAreaWidget = self.ToolButtonAreaWidget()
        self.setWidget(self._toolButtonAreaWidget)
        for tool in self.AVAILABLE_TOOLS.values():
            self._toolButtonAreaWidget.addTool(tool)
class PaletteDockWidget(QtWidgets.QDockWidget):
    """Dock widget showing the color palette grid."""

    class ColorGridWidget(QtWidgets.QWidget):
        """Grid of flat, fixed-size color swatch buttons."""

        CELL_WIDTH = 20
        CELL_HEIGHT = 20

        # Rows of hex colors shown in the grid (row-major order).
        DEFAULT_COLORS = [
            ['#FFFFFF', '#000000'],
            ['#808080', '#404040']
        ]

        def __init__(self):
            super().__init__()
            self._layout = QtWidgets.QGridLayout()
            self._layout.setAlignment(QtCore.Qt.AlignTop)
            self._layout.setSpacing(0)
            self.setLayout(self._layout)
            # One borderless button per color cell, styled via a stylesheet.
            for r, row in enumerate(self.DEFAULT_COLORS):
                for c, color in enumerate(row):
                    colorCell = QtWidgets.QPushButton()
                    style = 'background-color: {}; border: 0px;'.format(color)
                    colorCell.setStyleSheet(style)
                    colorCell.setFixedSize(self.CELL_WIDTH, self.CELL_HEIGHT)
                    self._layout.addWidget(colorCell, r, c)

    WINDOW_TITLE = 'Palette'

    def __init__(self):
        super().__init__()
        self.setWindowTitle(self.WINDOW_TITLE)
        self.setFeatures(QtWidgets.QDockWidget.NoDockWidgetFeatures)
        self._colorGridWidget = self.ColorGridWidget()
        self.setWidget(self._colorGridWidget)
class EditorWindow(QtWidgets.QMainWindow):
    """Main window wiring together draw area, tools dock and palette dock."""

    def __init__(self, loadImage: Optional[str] = None):
        """Build the UI; optionally preload the image at path *loadImage*."""
        super().__init__()
        self._drawAreaView = DrawAreaView()
        self.setCentralWidget(self._drawAreaView)
        self._drawAreaScene = QtWidgets.QGraphicsScene()
        self._drawAreaView.setScene(self._drawAreaScene)
        self._toolsDockWidget = ToolsDockWidget()
        self.addDockWidget(QtCore.Qt.LeftDockWidgetArea, self._toolsDockWidget)
        self._paletteDockWidget = PaletteDockWidget()
        self.addDockWidget(QtCore.Qt.RightDockWidgetArea, self._paletteDockWidget)
        if loadImage is not None:
            pixmap = QtGui.QPixmap(loadImage)
            self._drawAreaScene.addPixmap(pixmap)
if __name__ == '__main__':
    app = QtWidgets.QApplication(argv)
    # NOTE(review): the sample image path is hard-coded for local testing.
    editorWindow = EditorWindow('test/images/lenna.png')
    editorWindow.show()
    exit(app.exec())
| 28.693642 | 82 | 0.612409 |
961832becae7fc60131c4c8fdc8422b994814d53 | 839 | py | Python | ped/style.py | anordin95/ped | 6d8110568f9c0461fc4b64053ea5c123328d9b88 | [
"MIT"
] | 45 | 2015-09-06T19:50:40.000Z | 2020-11-25T03:04:37.000Z | ped/style.py | anordin95/ped | 6d8110568f9c0461fc4b64053ea5c123328d9b88 | [
"MIT"
] | 7 | 2015-09-07T01:10:27.000Z | 2020-09-25T13:04:53.000Z | ped/style.py | anordin95/ped | 6d8110568f9c0461fc4b64053ea5c123328d9b88 | [
"MIT"
] | 4 | 2015-09-08T13:48:30.000Z | 2020-03-09T16:25:10.000Z | from typing import Optional, IO, Any
import os
import sys
# ANSI SGR (Select Graphic Rendition) parameter codes.
RED = 31
GREEN = 32
BOLD = 1
RESET_ALL = 0


def style(
    text: str, fg: Optional[int] = None, *, bold: bool = False, file: IO = sys.stdout
) -> str:
    """Wrap *text* in ANSI escape sequences for color/weight.

    Styling is only applied when *file* is a TTY and the ``NO_COLOR``
    environment variable is unset/empty; otherwise *text* is returned
    unchanged.

    :param text: the string to decorate.
    :param fg: an SGR foreground code (e.g. ``RED``), or ``None`` for none.
    :param bold: also emit the bold SGR code.
    :param file: the stream the text is destined for (used for the TTY check).
    """
    # Respect the NO_COLOR convention and never color non-TTY output.
    if os.environ.get("NO_COLOR") or not file.isatty():
        return text
    parts = []
    # Explicit `is not None`: the old `fg and ...` filter silently dropped a
    # falsy-but-valid SGR code such as 0.
    if fg is not None:
        parts.append(f"\033[{fg}m")
    if bold:
        parts.append(f"\033[{BOLD}m")
    parts.append(text)
    parts.append(f"\033[{RESET_ALL}m")
    return "".join(parts)
def sprint(text: str, *args: Any, **kwargs: Any) -> None:
    """Run *text* through :func:`style` and print it to ``kwargs['file']``
    (default: ``sys.stdout``)."""
    out = kwargs.pop("file", sys.stdout)
    styled = style(text, file=out, *args, **kwargs)
    print(styled, file=out)
def print_error(text: str) -> None:
    """Write ``ERROR: <text>`` to stderr, coloring the prefix red on TTYs."""
    tag = style("ERROR", RED, file=sys.stderr)
    sprint(f"{tag}: {text}", file=sys.stderr)
| 23.971429 | 85 | 0.575685 |
1ac0f6999734e14c44c0d5cb0dfb99613e000b14 | 2,444 | py | Python | chunair/kicad-footprint-generator-master/scripts/Capacitors_SMD/corners.py | speedypotato/chuni-lite | c8dda8428723f8c4f99075e7cbaa22a44cbc187d | [
"CC-BY-4.0"
] | 2 | 2022-03-18T23:42:51.000Z | 2022-03-19T15:31:34.000Z | chunair/kicad-footprint-generator-master/scripts/Capacitors_SMD/corners.py | speedypotato/chuni-lite | c8dda8428723f8c4f99075e7cbaa22a44cbc187d | [
"CC-BY-4.0"
] | null | null | null | chunair/kicad-footprint-generator-master/scripts/Capacitors_SMD/corners.py | speedypotato/chuni-lite | c8dda8428723f8c4f99075e7cbaa22a44cbc187d | [
"CC-BY-4.0"
] | null | null | null | from KicadModTree import * # NOQA
def add_corners(m, start_pos, end_pos, size_x, size_y, layer, width, offset=(0, 0), chamfers=[]):
    """Append corner marker lines for the rectangle spanned by ``start_pos``/``end_pos``.

    ``m`` is the footprint node the ``Line`` primitives are appended to and is
    also returned.  Each corner gets a marker of ``size_x`` by ``size_y``;
    corners named in ``chamfers`` (``'topleft'``, ``'bottomleft'``,
    ``'topright'``, ``'bottomright'``) get a diagonal chamfer segment in place
    of the horizontal leg.

    NOTE(review): ``start_pos`` and ``end_pos`` are normalized and offset
    *in place*, so the caller's lists are mutated.  The mutable default
    ``chamfers=[]`` is never mutated here, so it is benign.
    """
    # If specified, an 'offset' can be applied to the corners.
    # For example, creating corners around a given Rect of a specified size
    # offset for the rect line
    # For the offset to work properly, start-pos must be top-left, and end-pos must be bottom-right
    # Normalize so (x1, y1) is the top-left and (x2, y2) the bottom-right.
    x1 = min(start_pos[0], end_pos[0])
    x2 = max(start_pos[0], end_pos[0])
    y1 = min(start_pos[1], end_pos[1])
    y2 = max(start_pos[1], end_pos[1])
    # Put the offset (if any) back in
    start_pos[0] = x1 - offset[0]
    start_pos[1] = y1 - offset[1]
    end_pos[0] = x2 + offset[0]
    end_pos[1] = y2 + offset[1]
    # Top-left corner: diagonal when chamfered, otherwise the horizontal leg;
    # the vertical leg is always drawn (even when chamfered -- verify intent).
    if 'topleft' in chamfers:
        m.append(Line(start=[start_pos[0], start_pos[1] + size_y], end=[start_pos[0] + size_x, start_pos[1]], layer=layer, width=width))
    else:
        m.append(Line(start=[start_pos[0], start_pos[1]], end=[start_pos[0] + size_x, start_pos[1]], layer=layer, width=width))
    m.append(Line(start=[start_pos[0], start_pos[1]], end=[start_pos[0], start_pos[1] + size_y], layer=layer, width=width))
    # Bottom-left corner.
    if 'bottomleft' in chamfers:
        m.append(Line(start=[start_pos[0], end_pos[1] - size_y], end=[start_pos[0] + size_x, end_pos[1]], layer=layer, width=width))
    else:
        m.append(Line(start=[start_pos[0], end_pos[1]], end=[start_pos[0] + size_x, end_pos[1]], layer=layer, width=width))
    m.append(Line(start=[start_pos[0], end_pos[1]], end=[start_pos[0], end_pos[1] - size_y], layer=layer, width=width))
    # Top-right corner.
    if 'topright' in chamfers:
        m.append(Line(start=[end_pos[0], start_pos[1] + size_y], end=[end_pos[0] - size_x, start_pos[1]], layer=layer, width=width))
    else:
        m.append(Line(start=[end_pos[0], start_pos[1]], end=[end_pos[0] - size_x, start_pos[1]], layer=layer, width=width))
    m.append(Line(start=[end_pos[0], start_pos[1]], end=[end_pos[0], start_pos[1] + size_y], layer=layer, width=width))
    # Bottom-right corner.
    if 'bottomright' in chamfers:
        m.append(Line(start=[end_pos[0], end_pos[1] - size_y], end=[end_pos[0] - size_x, end_pos[1]], layer=layer, width=width))
    else:
        m.append(Line(start=[end_pos[0], end_pos[1]], end=[end_pos[0] - size_x, end_pos[1]], layer=layer, width=width))
    m.append(Line(start=[end_pos[0], end_pos[1]], end=[end_pos[0], end_pos[1] - size_y], layer=layer, width=width))
    return m
| 52 | 136 | 0.645663 |
be0d22d48659037d1b8ace53578836797cf44eb1 | 1,487 | py | Python | Others/Source/05/5.1/function_test.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | 1 | 2018-05-30T01:38:23.000Z | 2018-05-30T01:38:23.000Z | Others/Source/05/5.1/function_test.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | null | null | null | Others/Source/05/5.1/function_test.py | silence0201/Learn-Python | 662da7c0e74221cedb445ba17d5cb1cd3af41c86 | [
"MIT"
] | null | null | null | # coding: utf-8
#########################################################################
# 网站: <a href="http://www.crazyit.org">疯狂Java联盟</a> #
# author yeeku.H.lee kongyeeku@163.com #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
## 定义一个函数,声明2个形参
#def my_max(x, y) :
# # 定义一个变量z,该变量等于x、y中较大的值
# z = x if x > y else y
# # 返回变量z的值
# return z
def my_max(x, y):
    """Return the larger of *x* and *y*; on ties, *y* is returned."""
    # Same decision as ``x if x > y else y``, written from y's perspective.
    return y if y >= x else x
# A greeting function taking a single parameter.
def say_hi(name):
    """Print a trace line, then return a Chinese greeting for *name*."""
    print("===正在执行say_hi()函数===")
    return f"{name},您好!"
a = 6
b = 9
# Call my_max() and bind its return value to `result`.
result = my_max(a , b) # ①
print("result:", result)
# Call say_hi() and print its return value directly.
print(say_hi("孙悟空")) # ②
| 40.189189 | 74 | 0.279758 |
b7954d7c8ff17c5a1f81a5ca028ed3619daeae3f | 6,253 | py | Python | ASK/tests/ask1.py | SQAPractical/AssesmentControlePython | ac41cf4bf4df7ef5ca9fa718d26b0333468be6ca | [
"Apache-2.0"
] | null | null | null | ASK/tests/ask1.py | SQAPractical/AssesmentControlePython | ac41cf4bf4df7ef5ca9fa718d26b0333468be6ca | [
"Apache-2.0"
] | null | null | null | ASK/tests/ask1.py | SQAPractical/AssesmentControlePython | ac41cf4bf4df7ef5ca9fa718d26b0333468be6ca | [
"Apache-2.0"
] | null | null | null | import unittest
from selenium import webdriver
#Create User with 5 Alphanumerical & Special characters First name
class MyTestCase(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(
executable_path='/Users/a123/ask_automation/ASK/browsers/chromedriver')
self.driver.get('http://local.school.portnov.com:4520/#/registration')
# def test_ask2(self):
##First name - Alphanumerical & Special char (Happy path)
# driver = self.driver
# driver.find_element_by_id("mat-input-0").send_keys('iV@#7')
# driver.find_element_by_id('mat-input-1').send_keys('Ivanov')
# driver.find_element_by_id('mat-input-2').send_keys('ivanobv@gmail.com')
# driver.find_element_by_id('mat-input-3').send_keys('A007')
# driver.find_element_by_id('mat-input-4').send_keys('12345')
# driver.find_element_by_id('mat-input-5').send_keys('12345')
# driver.find_element_by_css_selector('button[type="submit"]').click()
# registrated = driver.find_element_by_xpath('//mat-card/h4').text
# #print(registrated)
#
#
# self.assertEqual('Registration', registrated)
#
#
# def test_ask13(self):
##First Name - Single char
# driver = self.driver
# driver.find_element_by_id("mat-input-0").send_keys('I')
# driver.find_element_by_id('mat-input-1').send_keys('Ivanov')
# driver.find_element_by_id('mat-input-2').send_keys('ivanobv@gmail.com')
# driver.find_element_by_id('mat-input-3').send_keys('A007')
# driver.find_element_by_id('mat-input-4').send_keys('12345')
# driver.find_element_by_id('mat-input-5').send_keys('12345')
# driver.find_element_by_css_selector('button[type="submit"]').click()
# registrated = driver.find_element_by_xpath('//mat-card/h4').text
# # print(registrated)
#
# self.assertEqual('Registration', registrated)
# def test_ask3(self):
## First Name - Zero character input / Required field
# driver = self.driver
# driver.find_element_by_id("mat-input-0").send_keys('')
# driver.find_element_by_id('mat-input-1').send_keys('Ivanov')
# driver.find_element_by_id('mat-input-2').send_keys('ivanobv@gmail.com')
# driver.find_element_by_id('mat-input-3').send_keys('A007')
# driver.find_element_by_id('mat-input-4').send_keys('12345')
# driver.find_element_by_id('mat-input-5').send_keys('12345')
# driver.find_element_by_css_selector('button[type="submit"]').click()
# error = driver.find_element_by_xpath('//mat-error[@class="mat-error ng-star-inserted"]').text
#
#
# self.assertEqual('This field is required', error)
# def test_ask15(self):
##First Name - Max Character
# driver = self.driver
#
# driver.find_element_by_id("mat-input-0").send_keys('ghaghghghjaggjhjgjgjghahaghghghjaggjhjgjgjghaghghghjaggjhjgjgjghaghghghjaggjhjgjgjkkkhaghghghjaggjhjgjgjghaghghghjaggjhjgjgjghaghghghjaggdfhjsjhjgjgjkkkhaghghghjaggjhjgjgjghaghghghjaggjhjgjgjghaghghghjaggjhjgjgjkkkghghghjaggjhjgjgjghaghghghjaggjhjgjgjkkk')
# driver.find_element_by_id('mat-input-1').send_keys('I')
# driver.find_element_by_id('mat-input-2').send_keys('ivanobv@gmail.com')
# driver.find_element_by_id('mat-input-3').send_keys('A007')
# driver.find_element_by_id('mat-input-4').send_keys('12345')
# driver.find_element_by_id('mat-input-5').send_keys('12345')
# driver.find_element_by_css_selector('button[type="submit"]').click()
# registrated = driver.find_element_by_xpath('//mat-card/h4').text
#
#
# self.assertEqual('Registration', registrated)
# def test_ask16(self):
## First Name - Max character +1
# driver = self.driver
#
#
# driver.find_element_by_id("mat-input-0").send_keys(
# 'ghaghghghjaggjhjgjgjghahaghghghjaggjhjgjgjghaghghghjaggjhjgjgjghaghmghghjaggjhjgjgjkkkhaghghghjaggjhjgjgjghaghghghjaggjhjgjgjghaghghghjaggdfhjsjhjgjgjkkkhaghghghjaggjhjgjgjghaghghghjaggjhjgjgjghaghghghjaggjhjgjgjkkkghghghjaggjhjgjgjghaghghghjaggjhjgjgjkkk')
# driver.find_element_by_id('mat-input-1').send_keys('I')
# driver.find_element_by_css_selector('input[placeholder="Email"]').send_keys('ivanobv@gmail.com')
# driver.find_element_by_id('mat-input-3').send_keys('A007')
# driver.find_element_by_id('mat-input-4').send_keys('12345')
# driver.find_element_by_id('mat-input-5').send_keys('12345')
# driver.find_element_by_css_selector('button[type="submit"]').click()
# driver.implicitly_wait(2)
#
# #error = driver.find_element_by_css_selector('.mat-snack-bar-container.ng-tns-c13-16.ng-trigger.ng-trigger-state.error.mat-snack-bar-center.ng-star-inserted').text
#
# error = driver.find_element_by_css_selector('snack-bar-container>simple-snack-bar').text
#
# self.assertEqual("Data too long for column 'name' at row 1\nX", error)
# def test_ask4(self):
# #First Name - Leading space
# driver = self.driver
# driver.find_element_by_id("mat-input-0").send_keys(' Ivan')
# driver.find_element_by_id('mat-input-1').send_keys('Ivanov')
#
# error = driver.find_element_by_xpath('//div/mat-error').text
# self.assertEqual('Whitespaces are not allowed', error)
#
# def test_ask19(self):
##First Name - Leading space
# driver = self.driver
# driver.find_element_by_id("mat-input-0").send_keys('Ivan ')
# driver.find_element_by_id('mat-input-1').send_keys('Ivanov')
#
# error = driver.find_element_by_xpath('//div/mat-error').text
# self.assertEqual('Whitespaces are not allowed', error)
# def test_ask20(self):
#
# #First Name - Space characters inside
# driver = self.driver
# driver.find_element_by_id("mat-input-0").send_keys('Iv an')
# driver.find_element_by_id('mat-input-1').send_keys('Ivanov')
#
# error = driver.find_element_by_xpath('//div/mat-error').text
# self.assertEqual('Whitespaces are not allowed', error)
#
if __name__ == '__main__':
    # Allow running this test module directly with unittest's CLI runner.
    unittest.main()
| 40.869281 | 318 | 0.682233 |
97ae7958297f3e5dace33fb8102080d950b9216d | 9,079 | py | Python | docs/source/conf.py | oleksiyVeretiuk/openprocurement.auctions.insider | f380caa9b63dadbfe7bd1f9512e21c486a138457 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | oleksiyVeretiuk/openprocurement.auctions.insider | f380caa9b63dadbfe7bd1f9512e21c486a138457 | [
"Apache-2.0"
] | 64 | 2017-09-08T14:06:47.000Z | 2019-02-20T11:55:21.000Z | docs/source/conf.py | oleksiyVeretiuk/openprocurement.auctions.insider | f380caa9b63dadbfe7bd1f9512e21c486a138457 | [
"Apache-2.0"
] | 13 | 2017-08-09T12:04:30.000Z | 2018-09-07T10:46:59.000Z | # -*- coding: utf-8 -*-
#
# openprocurement.auctions.dgf documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 1 02:29:13 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# NOTE(review): `cornice` is imported but its sphinx extension below is
# commented out; the import may be removable.
import cornice
extensions = [
# 'cornice.ext.sphinxext',
    'sphinx.ext.intersphinx',
    'sphinx.ext.graphviz',
    'sphinxcontrib.httpdomain',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
# NOTE(review): the project is 'insider', but several output names below
# still say 'dutch' (htmlhelp_basename, latex_documents, man_pages,
# texinfo_documents) -- apparently copied from a sibling package.
project = u'openprocurement.auctions.insider'
copyright = u'2016, Quintagroup, Ltd'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pkg_resources.get_distribution(project).version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'uk_UA'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# NOTE(review): stale 'dutch' name; see note next to `project` above.
htmlhelp_basename = 'openprocurementauctionsdutchdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'openprocurementauctionsdutch.tex', u'openprocurement.auctions.dutch Documentation',
     u'Myroslav Opyr', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'openprocurementauctionsdutch', u'openprocurement.auctions.dutch Documentation',
     [u'Myroslav Opyr', u'Leits'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    ('index', 'openprocurementauctionsdutch', u'openprocurement.auctions.dutch Documentation',
     u'Myroslav Opyr', 'openprocurementauctionsdutch', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
locale_dirs = ['locale/'] # path is example but recommended.
gettext_compact = False # optional.
intersphinx_mapping = {
    'python': ('http://docs.python.org/dev', None),
    'openprocurementapi': ('http://api-docs.openprocurement.org/en/latest', None),
    'openprocurementtenderlimited': ('http://openprocurementtenderlimited.readthedocs.org/en/latest/', None),
    'documentservice': ('http://documentservice.api-docs.openprocurement.org/en/latest', None),
}
intersphinx_cache_limit = 1
0672aba08fae5f6a1e997cd0e30b7d63d5218c04 | 479 | py | Python | env/lib/python3.8/site-packages/plotly/validators/contour/colorbar/_tickprefix.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 76 | 2020-07-06T14:44:05.000Z | 2022-02-14T15:30:21.000Z | env/lib/python3.8/site-packages/plotly/validators/contour/colorbar/_tickprefix.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-08-09T02:30:14.000Z | 2022-03-12T00:50:14.000Z | env/lib/python3.8/site-packages/plotly/validators/contour/colorbar/_tickprefix.py | acrucetta/Chicago_COVI_WebApp | a37c9f492a20dcd625f8647067394617988de913 | [
"MIT",
"Unlicense"
] | 11 | 2020-07-12T16:18:07.000Z | 2022-02-05T16:48:35.000Z | import _plotly_utils.basevalidators
class TickprefixValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(
self, plotly_name="tickprefix", parent_name="contour.colorbar", **kwargs
):
super(TickprefixValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "style"),
**kwargs
)
| 31.933333 | 80 | 0.649269 |
e48efd218c6e0602337dc785e53cd7549f8a9abd | 2,329 | py | Python | src/examples/stimuli-representation.py | jacksonlli/learn-hippo | 7695d22e73c334b6d9df7e35cb6e30855db187fe | [
"MIT"
] | 13 | 2020-12-06T02:31:02.000Z | 2022-02-16T16:20:31.000Z | src/examples/stimuli-representation.py | jacksonlli/learn-hippo | 7695d22e73c334b6d9df7e35cb6e30855db187fe | [
"MIT"
] | 1 | 2021-09-19T20:55:32.000Z | 2021-09-19T20:55:32.000Z | src/examples/stimuli-representation.py | jacksonlli/learn-hippo | 7695d22e73c334b6d9df7e35cb6e30855db187fe | [
"MIT"
] | 3 | 2020-12-24T00:52:43.000Z | 2021-12-15T02:12:15.000Z | import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from task import SequenceLearning
sns.set(style='white', palette='colorblind', context='poster')
np.random.seed(0)
'''how to use'''
# init
n_param, n_branch = 16, 4
pad_len = 0
n_parts = 2
n_samples = 256
p_rm_ob_enc = 0
p_rm_ob_rcl = 0
n_rm_fixed = False
task = SequenceLearning(
n_param, n_branch, pad_len=pad_len,
p_rm_ob_enc=p_rm_ob_enc,
p_rm_ob_rcl=p_rm_ob_rcl,
n_rm_fixed=n_rm_fixed,
)
# take sample
X, Y = task.sample(n_samples, to_torch=False)
print(f'X shape = {np.shape(X)}, n_example x time x x-dim')
print(f'Y shape = {np.shape(Y)}, n_example x time x y-dim')
'''visualize the sample'''
# pick a sample
i = 0
x, y = X[i], Y[i]
cmap = 'bone'
x_split = np.split(x, (n_param, n_param + n_branch), axis=1)
episodic_sim = False
if episodic_sim:
x_split[0][:n_param] = np.vstack(
[x_split[0][:n_param][0], np.eye(n_param, k=-1)[1:, :]]
)
x_split[0][n_param:] = np.vstack(
[x_split[0][n_param:][0], np.eye(n_param, k=-1)[1:, :]]
)
for t in range(n_param):
obs_feature_id_p1 = np.argmax(x_split[0][:n_param][t])
x_split[1][:n_param][t] = y[obs_feature_id_p1]
obs_feature_id_p2 = np.argmax(x_split[0][n_param:][t])
x_split[1][n_param:][t] = y[obs_feature_id_p2]
mat_list = x_split + [y]
f, axes = plt.subplots(
2, 4, figsize=(12, 9), sharey=True,
gridspec_kw={
'width_ratios': [n_param, n_branch, n_param, n_branch],
'height_ratios': [n_param, n_param]
},
)
title_list = ['Observed feature', 'Observed value',
'Queried feature', 'Queried value']
ylabel_list = ['Part one', 'Part two']
for i, mat in enumerate(mat_list):
[mat_p1, mat_p2] = np.split(mat, [n_param], axis=0)
axes[0, i].imshow(mat[:n_param, :], cmap=cmap)
axes[1, i].imshow(mat[n_param:, :], cmap=cmap)
axes[0, i].set_title(title_list[i], fontname='Helvetica')
axes[0, i].set_xticks([])
for i in [1, 3]:
axes[1, i].set_xticks(range(n_branch))
axes[1, i].set_xticklabels(i for i in np.arange(4) + 1)
for i in range(2):
axes[i, 0].set_yticks(np.arange(0, n_param, 5))
axes[i, 0].set_ylabel(ylabel_list[i], fontname='Helvetica')
f.tight_layout()
f.savefig(f'examples/figs/stimulus-rep.png', dpi=100, bbox_inches='tight')
| 29.481013 | 74 | 0.649206 |
88ed9da2de9d0b2b9ab08d19ecf3c1f7025e6fe1 | 2,714 | py | Python | toolchain/riscv/MSYS/python/Lib/idlelib/window.py | zhiqiang-hu/bl_iot_sdk | 154ee677a8cc6a73e6a42a5ff12a8edc71e6d15d | [
"Apache-2.0"
] | 207 | 2018-10-01T08:53:01.000Z | 2022-03-14T12:15:54.000Z | Thonny/Lib/idlelib/window.py | Pydiderot/pydiderotIDE | a42fcde3ea837ae40c957469f5d87427e8ce46d3 | [
"MIT"
] | 30 | 2019-01-04T10:14:56.000Z | 2020-10-12T14:00:31.000Z | Thonny/Lib/idlelib/window.py | Pydiderot/pydiderotIDE | a42fcde3ea837ae40c957469f5d87427e8ce46d3 | [
"MIT"
] | 53 | 2019-03-12T16:50:21.000Z | 2022-03-15T23:16:18.000Z | from tkinter import Toplevel, TclError
import sys
class WindowList:
    """Registry of open IDLE windows, with change-notification callbacks.

    Windows are keyed by their Tk widget path name (``str(window)``).
    Zero-argument callables registered via register_callback() are invoked
    whenever a window is added or removed, so window menus can stay in sync.
    """

    def __init__(self):
        self.dict = {}        # str(window) -> window
        self.callbacks = []   # zero-arg callables fired on add/delete

    def add(self, window):
        """Register *window*; callbacks fire once the window is idle."""
        # Defer notification until the window is fully constructed.
        window.after_idle(self.call_callbacks)
        self.dict[str(window)] = window

    def delete(self, window):
        """Remove *window* from the registry and notify callbacks."""
        try:
            del self.dict[str(window)]
        except KeyError:
            # Sometimes, destroy() is called twice
            pass
        self.call_callbacks()

    def add_windows_to_menu(self, menu):
        """Append one menu entry per registered window, sorted by title."""
        entries = []  # renamed from 'list' to avoid shadowing the builtin
        for key in self.dict:
            window = self.dict[key]
            try:
                title = window.get_title()
            except TclError:
                # Window is in the middle of being destroyed; skip it.
                continue
            entries.append((title, key, window))
        entries.sort()
        for title, key, window in entries:
            menu.add_command(label=title, command=window.wakeup)

    def register_callback(self, callback):
        """Add a zero-argument callable fired whenever the registry changes."""
        self.callbacks.append(callback)

    def unregister_callback(self, callback):
        """Remove *callback* if registered; silently ignore otherwise."""
        try:
            self.callbacks.remove(callback)
        except ValueError:
            pass

    def call_callbacks(self):
        """Invoke every callback, reporting (not propagating) failures."""
        for callback in self.callbacks:
            try:
                callback()
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; narrow it to Exception.
            except Exception:
                t, v, tb = sys.exc_info()
                print("warning: callback failed in WindowList", t, ":", v)
# Process-wide singleton registry; the module-level aliases let client code
# call these functions without importing or holding the registry itself.
registry = WindowList()
add_windows_to_menu = registry.add_windows_to_menu
register_callback = registry.register_callback
unregister_callback = registry.unregister_callback
class ListedToplevel(Toplevel):
    """A Toplevel window that registers itself in the module-level registry."""

    def __init__(self, master, **kw):
        # NOTE(review): kw is passed positionally (as the cnf dict), not as
        # **kw — presumably intentional for Tk's cnf convention; confirm.
        Toplevel.__init__(self, master, kw)
        registry.add(self)
        # Widget to refocus when the window is raised via wakeup().
        self.focused_widget = self

    def destroy(self):
        """Unregister the window, destroy it, and quit if it was the last."""
        registry.delete(self)
        Toplevel.destroy(self)
        # If this is Idle's last window then quit the mainloop
        # (Needed for clean exit on Windows 98)
        if not registry.dict:
            self.quit()

    def update_windowlist_registry(self, window):
        """Trigger the registry's change callbacks (e.g. after a retitle)."""
        registry.call_callbacks()

    def get_title(self):
        """Return the window title shown in the Windows menu."""
        # Subclass can override
        return self.wm_title()

    def wakeup(self):
        """Deiconify, raise, and focus this window."""
        try:
            if self.wm_state() == "iconic":
                self.wm_withdraw()
                self.wm_deiconify()
            self.tkraise()
            self.focused_widget.focus_set()
        except TclError:
            # This can happen when the Window menu was torn off.
            # Simply ignore it.
            pass
if __name__ == "__main__":
    # Run this module's unit tests when executed directly.
    from unittest import main
    main('idlelib.idle_test.test_window', verbosity=2)
| 27.414141 | 75 | 0.56448 |
67d661450559a83a9af9619bc1b7849253324dd8 | 7,242 | py | Python | modules/pmg_qt/pymol_gl_widget.py | tranas-open/pymolXT | e11cc7994178e439534f15c7b5eac51b4e678c54 | [
"CNRI-Python"
] | 1 | 2021-02-04T19:35:51.000Z | 2021-02-04T19:35:51.000Z | modules/pmg_qt/pymol_gl_widget.py | tranas-open/pymolXT | e11cc7994178e439534f15c7b5eac51b4e678c54 | [
"CNRI-Python"
] | null | null | null | modules/pmg_qt/pymol_gl_widget.py | tranas-open/pymolXT | e11cc7994178e439534f15c7b5eac51b4e678c54 | [
"CNRI-Python"
] | null | null | null | import sys
from pymol2 import SingletonPyMOL as PyMOL
import pymol
from pymol.Qt import QtCore
from pymol.Qt import QtGui
from pymol.Qt import QtWidgets
Gesture = QtCore.QEvent.Gesture
Qt = QtCore.Qt
from .keymapping import get_modifiers
from .keymapping import get_wheel_button
# don't import the heavy OpenGL (PyOpenGL) module
from pymol._cmd import glViewport
# QOpenGLWidget is supposed to supersede QGLWidget, but has issues (e.g.
# no stereo support)
USE_QOPENGLWIDGET = pymol.IS_MACOS and QtCore.QT_VERSION >= 0x50400
# Select the Qt OpenGL widget base class at import time (see the comment on
# USE_QOPENGLWIDGET above for why QGLWidget is still used on most platforms).
if USE_QOPENGLWIDGET:
    BaseGLWidget = QtWidgets.QOpenGLWidget
    AUTO_DETECT_STEREO = False
else:
    from pymol.Qt import QtOpenGL
    BaseGLWidget = QtOpenGL.QGLWidget
    # only attempt stereo detection in Qt <= 5.6 (with 5.9+ on Linux I
    # get GL_DOUBLEBUFFER=0 with flickering when requesting stereo)
    AUTO_DETECT_STEREO = pymol.IS_WINDOWS or QtCore.QT_VERSION < 0x50700
class PyMOLGLWidget(BaseGLWidget):
    '''
    PyMOL OpenGL Widget

    Hosts a singleton PyMOL instance, forwards Qt input events (mouse,
    wheel, pinch gestures, drag-and-drop) to it, and drives idle rendering
    with a single-shot timer.
    '''
    # mouse button map (Qt button -> PyMOL button index)
    _buttonMap = {
        Qt.LeftButton: 0,
        Qt.MidButton: 1,
        Qt.RightButton: 2,
    }

    def __init__(self, parent):
        self.gui = parent
        # Framebuffer-to-widget scale factor (updated for HiDPI screens).
        self.fb_scale = 1.0

        # OpenGL context setup
        if USE_QOPENGLWIDGET:
            f = QtGui.QSurfaceFormat()
        else:
            f = QtOpenGL.QGLFormat()

        from pymol.invocation import options

        # logic equivalent to layer5/main.cpp:launch
        if options.multisample:
            f.setSamples(4)

        if options.force_stereo != -1:
            # See layer1/Setting.h for stereo modes
            if options.stereo_mode in (1, 12) or (
                    options.stereo_mode == 0 and AUTO_DETECT_STEREO):
                f.setStereo(True)

            if options.stereo_mode in (11, 12) and not USE_QOPENGLWIDGET:
                f.setAccum(True)

        if USE_QOPENGLWIDGET:
            super(PyMOLGLWidget, self).__init__(parent=parent)
            self.setFormat(f)
            self.setUpdateBehavior(QtWidgets.QOpenGLWidget.PartialUpdate)
        else:
            super(PyMOLGLWidget, self).__init__(f, parent=parent)

        # pymol instance
        self.pymol = PyMOL()
        self.pymol.start()
        self.cmd = self.pymol.cmd

        # capture python output for feedback
        import pcatch
        pcatch._install()

        # for passive move drag
        self.setMouseTracking(True)

        # for accepting keyboard input (command line, shortcuts)
        self.setFocusPolicy(Qt.ClickFocus)

        # for idle rendering
        self._timer = QtCore.QTimer()
        self._timer.setSingleShot(True)
        self._timer.timeout.connect(self._pymolProcess)

        # drag n drop
        self.setAcceptDrops(True)

        # pinch-zoom
        self.grabGesture(Qt.PinchGesture)

    def sizeHint(self):
        """Preferred initial widget size."""
        # default 640 + internal_gui, 480 + internal_feedback
        return QtCore.QSize(860, 498)

    ##########################
    # Input Events
    ##########################

    def event(self, ev):
        """Route gesture events to gestureEvent(); defer everything else."""
        if ev.type() == Gesture:
            return self.gestureEvent(ev)
        return super(PyMOLGLWidget, self).event(ev)

    def gestureEvent(self, ev):
        """Handle pinch gestures: rotation maps to turn, scale to zoom."""
        gesture = ev.gesture(Qt.PinchGesture)

        if gesture is None:
            return False

        if gesture.state() == Qt.GestureStarted:
            # Remember the camera z at gesture start; scaling is relative
            # to this value.
            self.pinch_start_z = self.cmd.get_view()[11]

        changeFlags = gesture.changeFlags()

        if changeFlags & QtWidgets.QPinchGesture.RotationAngleChanged:
            delta = gesture.lastRotationAngle() - gesture.rotationAngle()
            self.cmd.turn('z', delta)

        if changeFlags & QtWidgets.QPinchGesture.ScaleFactorChanged:
            view = list(self.cmd.get_view())

            # best guess for https://bugreports.qt.io/browse/QTBUG-48138
            totalscalefactor = gesture.totalScaleFactor()
            if totalscalefactor == 1.0:
                totalscalefactor = gesture.scaleFactor()

            z = self.pinch_start_z / totalscalefactor
            delta = z - view[11]
            view[11] = z
            view[15] -= delta
            view[16] -= delta
            self.cmd.set_view(view)

        return True

    def _event_x_y_mod(self, ev):
        """Return (x, y, modifiers) in framebuffer coordinates.

        The y axis is flipped (height - y) because Qt's origin is top-left
        while the GL framebuffer's origin is bottom-left.
        """
        return (
            int(self.fb_scale * ev.x()),
            int(self.fb_scale * (self.height() - ev.y())),
            get_modifiers(ev),
        )

    def mouseMoveEvent(self, ev):
        """Forward mouse drags/moves to PyMOL."""
        self.pymol.drag(*self._event_x_y_mod(ev))

    def mousePressEvent(self, ev, state=0):
        """Forward a button event; state 0 = press, 1 = release."""
        if ev.button() not in self._buttonMap:
            return
        self.pymol.button(self._buttonMap[ev.button()], state,
                          *self._event_x_y_mod(ev))

    def mouseReleaseEvent(self, ev):
        # Same dispatch as a press, but with state=1 (release).
        self.mousePressEvent(ev, 1)

    def wheelEvent(self, ev):
        """Translate a wheel tick into a PyMOL press+release pair."""
        button = get_wheel_button(ev)
        if not button:
            return
        args = self._event_x_y_mod(ev)
        self.pymol.button(button, 0, *args)
        self.pymol.button(button, 1, *args)

    ##########################
    # OpenGL
    ##########################

    def paintGL(self):
        """Render one frame, then re-arm the idle timer."""
        if not USE_QOPENGLWIDGET:
            glViewport(0, 0, int(self.fb_scale * self.width()),
                       int(self.fb_scale * self.height()))
        self.pymol.draw()
        self._timer.start(0)

    def resizeGL(self, w, h):
        """Propagate a resize to PyMOL (in framebuffer pixels)."""
        if USE_QOPENGLWIDGET:
            w = int(w * self.fb_scale)
            h = int(h * self.fb_scale)
        self.pymol.reshape(w, h, True)

    def updateFbScale(self, context):
        '''Update PyMOL's display scale factor from the window or screen context
        @type context: QWindow or QScreen
        '''
        self.fb_scale = context.devicePixelRatio()
        try:
            self.cmd.set('display_scale_factor', int(self.fb_scale))
        except BaseException as e:
            # fails with modal draw (mpng ..., modal=1)
            print(e)

    def initializeGL(self):
        """One-time GL setup: wire up HiDPI scale-factor tracking."""
        # Scale framebuffer for Retina displays
        try:
            window = self.windowHandle()

            # QOpenGLWidget workaround
            if window is None:
                window = self.parent().windowHandle()

            self.updateFbScale(window)
            window.screenChanged.connect(self.updateFbScale)
            window.screen().physicalDotsPerInchChanged.connect(
                lambda dpi: self.updateFbScale(window))

        except AttributeError:
            # Fallback for Qt4
            pass

    def _pymolProcess(self):
        """Idle callback: let PyMOL do deferred work and redraw if needed."""
        idle = self.pymol.idle()
        if idle or self.pymol.getRedisplay():
            self.update()
        self._timer.start(20)

    ##########################
    # drag n drop
    ##########################

    def dragEnterEvent(self, event):
        """Accept drags that carry URLs (local files or remote)."""
        if event.mimeData().hasUrls:
            event.accept()
        else:
            event.ignore()

    def dropEvent(self, event):
        """Open each dropped URL through the GUI's load dialog."""
        if event.mimeData().hasUrls:
            for url in event.mimeData().urls():
                if url.isLocalFile():
                    url = url.toLocalFile()
                else:
                    url = url.toString()
                self.gui.load_dialog(url)
            event.accept()
| 28.85259 | 80 | 0.581331 |
4a74662d75408173070d453b0ec4c576c433c7e7 | 1,132 | py | Python | extreme-learning-machine/elm.py | brenoalef/ICA | 8593a1c53ba536538d99dafb9edfb64413088e24 | [
"MIT"
] | 1 | 2019-02-04T18:05:30.000Z | 2019-02-04T18:05:30.000Z | extreme-learning-machine/elm.py | brenoalef/ICA | 8593a1c53ba536538d99dafb9edfb64413088e24 | [
"MIT"
] | null | null | null | extreme-learning-machine/elm.py | brenoalef/ICA | 8593a1c53ba536538d99dafb9edfb64413088e24 | [
"MIT"
] | null | null | null | import numpy as np
class ELM:
    """Extreme Learning Machine.

    A single-hidden-layer network whose input-to-hidden weights are random
    (never trained) and whose output weights are solved in closed form with
    the Moore-Penrose pseudo-inverse.

    Parameters
    ----------
    hidden_units : int
        Number of neurons in the random hidden layer.
    activation : str
        Hidden-layer activation: "log" (logistic sigmoid), "relu", or
        anything else for the bipolar sigmoid (1 - e^-x)/(1 + e^-x).
    """

    def __init__(self, hidden_units=5, activation="log"):
        self.hidden_units = hidden_units
        self.activation = activation

    def __init_weights(self, n_features):
        # Random input-to-hidden weights; fixed once drawn.
        self.h = np.random.normal(size=(n_features, self.hidden_units))

    def __activation(self, X):
        # BUG FIX: the original tested `self.__activation == "log"`, which
        # compares the bound method (name-mangled _ELM__activation) against a
        # string and is always False, so the logistic branch was unreachable.
        # The configured activation name lives in `self.activation`.
        if self.activation == "log":
            return 1.0/(1.0 + np.exp(-X.astype(float)))
        elif self.activation == "relu":
            return np.maximum(X, 0, X)
        else:
            return (1.0 - np.exp(-X))/(1.0 + np.exp(-X))

    def __forward(self, X):
        """Project X through the random hidden layer and apply activation."""
        H = X.dot(self.h)
        H = self.__activation(H)
        return H

    def fit(self, X, Y):
        """Draw random hidden weights and solve the output weights.

        A bias column of -1 is prepended to both the input and the hidden
        activations before the least-squares solve.
        """
        X = np.hstack((-np.ones((X.shape[0], 1)), X))
        self.__init_weights(X.shape[1])
        H = self.__forward(X)
        H = np.hstack((-np.ones((H.shape[0], 1)), H))
        self.w = np.linalg.pinv(H).dot(Y)
        return self

    def predict(self, X):
        """Return predictions for X using the trained output weights."""
        X = np.hstack((-np.ones((X.shape[0], 1)), X))
        H = self.__forward(X)
        H = np.hstack((-np.ones((H.shape[0], 1)), H))
        return H.dot(self.w)
| 30.594595 | 71 | 0.536219 |
8ca54025914ae7709085916bc20aa40400ad8a5a | 2,347 | py | Python | h2o-py/h2o/model/multinomial.py | voltek62/h2o-3 | d581245120bf0cb6fab2bc7e8273b4f41f461448 | [
"Apache-2.0"
] | null | null | null | h2o-py/h2o/model/multinomial.py | voltek62/h2o-3 | d581245120bf0cb6fab2bc7e8273b4f41f461448 | [
"Apache-2.0"
] | null | null | null | h2o-py/h2o/model/multinomial.py | voltek62/h2o-3 | d581245120bf0cb6fab2bc7e8273b4f41f461448 | [
"Apache-2.0"
] | null | null | null | from builtins import zip
from ..frame import H2OFrame
from ..h2o import H2OConnection
from .model_base import ModelBase
class H2OMultinomialModel(ModelBase):
    """Multinomial classification model."""

    def _make_model(self):
        # Factory hook used by ModelBase to build an empty model of this type.
        return H2OMultinomialModel()

    def confusion_matrix(self, data):
        """
        Returns a confusion matrix based on H2O's default prediction threshold
        for a dataset.

        :param data: An H2OFrame to score against.
        :return: The confusion-matrix table from the model metrics.
        :raises ValueError: If `data` is not an H2OFrame.
        """
        if not isinstance(data, H2OFrame): raise ValueError("data argument must be of type H2OFrame, but got {0}"
                                                            .format(type(data)))
        j = H2OConnection.post_json("Predictions/models/" + self._id + "/frames/" + data.frame_id)
        return j["model_metrics"][0]["cm"]["table"]

    def hit_ratio_table(self, train=False, valid=False, xval=False):
        """
        Retrieve the Hit Ratios

        If all are False (default), then return the training metric value.
        If more than one options is set to True, then return a dictionary of metrics where the keys are "train", "valid",
        and "xval"

        :param train: If train is True, then return the hit ratio table for the training data.
        :param valid: If valid is True, then return the hit ratio table for the validation data.
        :param xval: If xval is True, then return the hit ratio table for the cross validation data.
        :return: The hit ratio table (or a dict of tables) for this model.
        """
        tm = ModelBase._get_metrics(self, train, valid, xval)
        # Idiom fix: iterate items() directly instead of zipping keys()
        # with values().
        m = {k: None if v is None else v.hit_ratio_table() for k, v in tm.items()}
        return list(m.values())[0] if len(m) == 1 else m

    def plot(self, timestep="AUTO", metric="AUTO", **kwargs):
        """
        Plots training set (and validation set if available) scoring history for an H2OMultinomialModel. The timestep and metric
        arguments are restricted to what is available in its scoring history.

        :param timestep: A unit of measurement for the x-axis.
        :param metric: A unit of measurement for the y-axis.
        :return: A scoring history plot.
        """
        if self._model_json["algo"] in ("deeplearning", "drf", "gbm"):
            if metric == "AUTO": metric = "classification_error"
            elif metric not in ("logloss","classification_error","MSE"):
                raise ValueError("metric for H2OMultinomialModel must be one of: AUTO, logloss, classification_error, MSE")
        self._plot(timestep=timestep, metric=metric, **kwargs)
| 43.462963 | 124 | 0.681295 |
23053bf4d92eecee6b4eba0dd9ab1d217dd11922 | 17,205 | py | Python | numpy/lib/_iotools.py | qpython-android/QPypi-numpy | 4e5fa5c2e01bb6250537fe44f426f878a240bcc7 | [
"BSD-3-Clause"
] | 7 | 2016-11-29T18:01:53.000Z | 2018-08-21T09:45:15.000Z | numpy/lib/_iotools.py | efiring/numpy-work | f873f11f3b96ff859debdc91a2c172a6b476f7db | [
"BSD-3-Clause"
] | 1 | 2016-06-28T14:03:59.000Z | 2016-06-28T14:03:59.000Z | numpy/lib/_iotools.py | qpython-libs/QPypi-numpy | 4e5fa5c2e01bb6250537fe44f426f878a240bcc7 | [
"BSD-3-Clause"
] | 3 | 2016-10-19T00:41:31.000Z | 2020-03-01T04:57:36.000Z | """
A collection of functions designed to help I/O with ascii file.
"""
__docformat__ = "restructuredtext en"
import numpy as np
import numpy.core.numeric as nx
from __builtin__ import bool, int, long, float, complex, object, unicode, str
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def _to_filehandle(fname, flag='r', return_opened=False):
    """
    Returns the filehandle corresponding to a string or a file.
    If the string ends in '.gz', the file is automatically unzipped.

    Parameters
    ----------
    fname : string, filehandle
        Name of the file whose filehandle must be returned.
    flag : string, optional
        Flag indicating the status of the file ('r' for read, 'w' for write).
    return_opened : boolean, optional
        Whether to return the opening status of the file.
    """
    if _is_string_like(fname):
        if fname.endswith('.gz'):
            import gzip
            fhd = gzip.open(fname, flag)
        elif fname.endswith('.bz2'):
            import bz2
            fhd = bz2.BZ2File(fname)
        else:
            # BUG FIX: the original used the Python-2-only builtin
            # file(fname, flag); open() is equivalent and also works on
            # Python 3.
            fhd = open(fname, flag)
        opened = True
    elif hasattr(fname, 'seek'):
        # Already a file-like object; pass it through unopened.
        fhd = fname
        opened = False
    else:
        raise ValueError('fname must be a string or file handle')
    if return_opened:
        return fhd, opened
    return fhd
def has_nested_fields(ndtype):
    """
    Return True if any field of the structured dtype `ndtype` is itself a
    structured (nested) dtype, False otherwise.
    """
    # A leaf field has `.names is None`; a nested one has a non-empty tuple.
    return any(ndtype[name].names for name in (ndtype.names or ()))
def flatten_dtype(ndtype):
    """
    Unpack a structured data-type into a flat list of its leaf dtypes,
    recursing into nested structured fields.
    """
    # Unstructured dtype: it is its own (single) leaf.
    if ndtype.names is None:
        return [ndtype]
    flat = []
    for name in ndtype.names:
        (subtype, _) = ndtype.fields[name]
        flat.extend(flatten_dtype(subtype))
    return flat
class LineSplitter:
    """
    Defines a function to split a string at a given delimiter or at given places.

    Parameters
    ----------
    comment : {'#', string}
        Character used to mark the beginning of a comment.
    delimiter : var, optional
        If a string, character used to delimit consecutive fields.
        If an integer or a sequence of integers, width(s) of each field.
    autostrip : boolean, optional
        Whether to strip each individual fields
    """

    def autostrip(self, method):
        "Wrapper to strip each member of the output of `method`."
        return lambda input: [_.strip() for _ in method(input)]
    #
    def __init__(self, delimiter=None, comments='#', autostrip=True):
        self.comments = comments
        # Delimiter is a character
        if (delimiter is None) or _is_string_like(delimiter):
            delimiter = delimiter or None
            _handyman = self._delimited_splitter
        # Delimiter is a list of field widths
        elif hasattr(delimiter, '__iter__'):
            _handyman = self._variablewidth_splitter
            # Convert the cumulative widths into a list of slice objects.
            idx = np.cumsum([0]+list(delimiter))
            delimiter = [slice(i,j) for (i,j) in zip(idx[:-1], idx[1:])]
        # Delimiter is a single integer
        elif int(delimiter):
            (_handyman, delimiter) = (self._fixedwidth_splitter, int(delimiter))
        else:
            # int(delimiter) == 0: fall back to whitespace splitting.
            (_handyman, delimiter) = (self._delimited_splitter, None)
        self.delimiter = delimiter
        if autostrip:
            self._handyman = self.autostrip(_handyman)
        else:
            self._handyman = _handyman
    #
    def _delimited_splitter(self, line):
        """Split `line` on the delimiter, after stripping any comment tail."""
        line = line.split(self.comments)[0].strip()
        if not line:
            return []
        return line.split(self.delimiter)
    #
    def _fixedwidth_splitter(self, line):
        """Split `line` into consecutive chunks of a constant width."""
        line = line.split(self.comments)[0]
        if not line:
            return []
        fixed = self.delimiter
        slices = [slice(i, i+fixed) for i in range(len(line))[::fixed]]
        return [line[s] for s in slices]
    #
    def _variablewidth_splitter(self, line):
        """Split `line` using the list of slices built in __init__."""
        line = line.split(self.comments)[0]
        if not line:
            return []
        slices = self.delimiter
        return [line[s] for s in slices]
    #
    def __call__(self, line):
        """Split `line` using the strategy chosen at construction time."""
        return self._handyman(line)
class NameValidator:
    """
    Validates a list of strings to use as field names.

    The strings are stripped of any non alphanumeric character, and spaces
    are replaced by `_`. If the optional input parameter `case_sensitive`
    is False, the strings are set to upper case.

    During instantiation, the user can define a list of names to exclude, as
    well as a list of invalid characters. Names in the exclusion list
    are appended a '_' character.

    Once an instance has been created, it can be called with a list of names
    and a list of valid names will be created.
    The `__call__` method accepts an optional keyword, `default`, that sets
    the default name in case of ambiguity. By default, `default = 'f'`, so
    that names will default to `f0`, `f1`

    Parameters
    ----------
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names are appended an underscore:
        for example, `file` would become `file_`.
    deletechars : string, optional
        A string combining invalid characters that must be deleted from the names.
    casesensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case_sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    """
    #
    defaultexcludelist = ['return','file','print']
    defaultdeletechars = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
    #
    def __init__(self, excludelist=None, deletechars=None, case_sensitive=None):
        # BUG FIX: copy the caller's list before extending it; the original
        # mutated the `excludelist` argument in place.
        if excludelist is None:
            excludelist = []
        excludelist = list(excludelist)
        excludelist.extend(self.defaultexcludelist)
        self.excludelist = excludelist
        # BUG FIX: copy the class-level default set before adding '"'; the
        # original mutated the shared `defaultdeletechars` attribute.
        if deletechars is None:
            delete = set(self.defaultdeletechars)
        else:
            delete = set(deletechars)
        delete.add('"')
        self.deletechars = delete
        # Select the case-normalization strategy once, up front.
        if (case_sensitive is None) or (case_sensitive is True):
            self.case_converter = lambda x: x
        elif (case_sensitive is False) or ('u' in case_sensitive):
            self.case_converter = lambda x: x.upper()
        elif 'l' in case_sensitive:
            self.case_converter = lambda x: x.lower()
        else:
            self.case_converter = lambda x: x
    #
    def validate(self, names, default='f'):
        """Return a cleaned, deduplicated copy of `names`.

        Invalid characters are deleted, spaces become underscores, empty
        results become `default` + index, excluded names get a trailing
        underscore, and duplicates get a `_N` suffix.  Returns None when
        `names` is None.
        """
        #
        if names is None:
            return
        #
        validatednames = []
        seen = dict()   # name -> number of times already emitted
        #
        deletechars = self.deletechars
        excludelist = self.excludelist
        #
        case_converter = self.case_converter
        #
        for i, item in enumerate(names):
            item = case_converter(item)
            item = item.strip().replace(' ', '_')
            item = ''.join([c for c in item if c not in deletechars])
            if not len(item):
                # Everything was deleted: fall back to e.g. 'f0', 'f1', ...
                item = '%s%d' % (default, i)
            elif item in excludelist:
                item += '_'
            cnt = seen.get(item, 0)
            if cnt > 0:
                validatednames.append(item + '_%d' % cnt)
            else:
                validatednames.append(item)
            seen[item] = cnt+1
        return validatednames
    #
    def __call__(self, names, default='f'):
        return self.validate(names, default)
def str2bool(value):
    """
    Convert a string representing a boolean ('true'/'false', any case)
    to the corresponding bool.

    Raises
    ------
    ValueError
        If the string is not 'True' or 'False' (case independent)
    """
    upper = value.upper()
    if upper == 'TRUE':
        return True
    if upper == 'FALSE':
        return False
    raise ValueError("Invalid boolean")
class StringConverter:
    """
    Factory class for function transforming a string into another object (int,
    float).

    After initialization, an instance can be called to transform a string
    into another object. If the string is recognized as representing a missing
    value, a default value is returned.

    Parameters
    ----------
    dtype_or_func : {None, dtype, function}, optional
        Input data type, used to define a basic function and a default value
        for missing data. For example, when `dtype` is float, the :attr:`func`
        attribute is set to ``float`` and the default value to `np.nan`.
        Alternatively, function used to convert a string to another object.
        In that later case, it is recommended to give an associated default
        value as input.
    default : {None, var}, optional
        Value to return by default, that is, when the string to be converted
        is flagged as missing.
    missing_values : {sequence}, optional
        Sequence of strings indicating a missing value.
    locked : {boolean}, optional
        Whether the StringConverter should be locked to prevent automatic
        upgrade or not.

    Attributes
    ----------
    func : function
        Function used for the conversion
    default : var
        Default value to return when the input corresponds to a missing value.
    type : type
        Type of the output.
    _status : integer
        Integer representing the order of the conversion.
    _mapper : sequence of tuples
        Sequence of tuples (dtype, function, default value) to evaluate in order.
    _locked : boolean
        Whether the StringConverter is locked, thereby preventing automatic any
        upgrade or not.
    """
    #
    # Conversion candidates, tried in order of increasing generality:
    # bool -> int -> float -> complex -> string.
    _mapper = [(nx.bool_, str2bool, False),
               (nx.integer, int, -1),
               (nx.floating, float, nx.nan),
               (complex, complex, nx.nan+0j),
               (nx.string_, str, '???')]
    (_defaulttype, _defaultfunc, _defaultfill) = zip(*_mapper)
    #
    @classmethod
    def _getsubdtype(cls, val):
        """Returns the type of the dtype of the input variable."""
        return np.array(val).dtype.type
    #
    @classmethod
    def upgrade_mapper(cls, func, default=None):
        """
    Upgrade the mapper of a StringConverter by adding a new function and its
    corresponding default.

    The input function (or sequence of functions) and its associated default
    value (if any) is inserted in penultimate position of the mapper.
    The corresponding type is estimated from the dtype of the default value.

    Parameters
    ----------
    func : var
        Function, or sequence of functions

    Examples
    --------
    >>> import dateutil.parser
    >>> import datetime
    >>> dateparser = datetutil.parser.parse
    >>> defaultdate = datetime.date(2000, 1, 1)
    >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
        """
        # Func is a single functions
        if hasattr(func, '__call__'):
            # Insert just before the catch-all string entry.
            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
            return
        elif hasattr(func, '__iter__'):
            if isinstance(func[0], (tuple, list)):
                # func is already a sequence of (dtype, func, default) tuples.
                for _ in func:
                    cls._mapper.insert(-1, _)
                return
            if default is None:
                default = [None] * len(func)
            else:
                # Pad the defaults so each function gets one.
                default = list(default)
                default.append([None] * (len(func)-len(default)))
            for (fct, dft) in zip(func, default):
                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
    #
    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
                 locked=False):
        # Defines a lock for upgrade
        self._locked = bool(locked)
        # No input dtype: minimal initialization
        if dtype_or_func is None:
            self.func = str2bool
            self._status = 0
            self.default = default or False
            ttype = np.bool
        else:
            # Is the input a np.dtype ?
            try:
                self.func = None
                ttype = np.dtype(dtype_or_func).type
            except TypeError:
                # dtype_or_func must be a function, then
                if not hasattr(dtype_or_func, '__call__'):
                    errmsg = "The input argument `dtype` is neither a function"\
                             " or a dtype (got '%s' instead)"
                    raise TypeError(errmsg % type(dtype_or_func))
                # Set the function
                self.func = dtype_or_func
                # If we don't have a default, try to guess it or set it to None
                if default is None:
                    try:
                        default = self.func('0')
                    except ValueError:
                        default = None
                ttype = self._getsubdtype(default)
            # Set the status according to the dtype
            _status = -1
            for (i, (deftype, func, default_def)) in enumerate(self._mapper):
                if np.issubdtype(ttype, deftype):
                    _status = i
                    self.default = default or default_def
                    break
            if _status == -1:
                # We never found a match in the _mapper...
                _status = 0
                self.default = default
            self._status = _status
            # If the input was a dtype, set the function to the last we saw
            if self.func is None:
                self.func = func
            # If the status is 1 (int), change the function to smthg more robust
            if self.func == self._mapper[1][1]:
                self.func = lambda x : int(float(x))
        # Store the list of strings corresponding to missing values.
        if missing_values is None:
            self.missing_values = set([''])
        else:
            self.missing_values = set(list(missing_values) + [''])
        #
        self._callingfunction = self._strict_call
        self.type = ttype
        self._checked = False
    #
    def _loose_call(self, value):
        """Convert `value`, silently falling back to the default on failure."""
        try:
            return self.func(value)
        except ValueError:
            return self.default
    #
    def _strict_call(self, value):
        """Convert `value`; only missing values fall back to the default."""
        try:
            return self.func(value)
        except ValueError:
            if value.strip() in self.missing_values:
                # NOTE(review): resetting _checked only when _status is 0
                # presumably flags that the bool-stage guess was never
                # exercised on real data — confirm against callers.
                if not self._status:
                    self._checked = False
                return self.default
            raise ValueError("Cannot convert string '%s'" % value)
    #
    def __call__(self, value):
        return self._callingfunction(value)
    #
    def upgrade(self, value):
        """
    Tries to find the best converter for `value`, by testing different
    converters in order.
    The order in which the converters are tested is read from the
    :attr:`_status` attribute of the instance.
        """
        self._checked = True
        try:
            self._strict_call(value)
        except ValueError:
            # Raise an exception if we locked the converter...
            if self._locked:
                raise ValueError("Converter is locked and cannot be upgraded")
            _statusmax = len(self._mapper)
            # Complains if we try to upgrade by the maximum
            if self._status == _statusmax:
                raise ValueError("Could not find a valid conversion function")
            elif self._status < _statusmax - 1:
                # Move to the next, more general converter and retry.
                self._status += 1
                (self.type, self.func, self.default) = self._mapper[self._status]
                self.upgrade(value)
    #
    def update(self, func, default=None, missing_values='', locked=False):
        """
    Sets the :attr:`func` and :attr:`default` attributes directly.

    Parameters
    ----------
    func : function
        Conversion function.
    default : {var}, optional
        Default value to return when a missing value is encountered.
    missing_values : {var}, optional
        Sequence of strings representing missing values.
    locked : {False, True}, optional
        Whether the status should be locked to prevent automatic upgrade.
        """
        self.func = func
        self._locked = locked
        # Don't reset the default to None if we can avoid it
        if default is not None:
            self.default = default
        # Add the missing values to the existing set
        if missing_values is not None:
            if _is_string_like(missing_values):
                self.missing_values.add(missing_values)
            elif hasattr(missing_values, '__iter__'):
                for val in missing_values:
                    self.missing_values.add(val)
        else:
            # NOTE(review): this resets missing_values to a list while it is
            # a set everywhere else — looks like an inconsistency; confirm.
            self.missing_values = []
        # Update the type
        try:
            tester = func('0')
        except ValueError:
            tester = None
        self.type = self._getsubdtype(tester)
| 34.827935 | 82 | 0.584481 |
62bd316a2a62d2b0213ae0a387fa70472ff30edb | 5,236 | py | Python | basicmodule/views.py | CocoBir/django-restful-demo | aeb7f8a0bcff5c52b528c7b0c48f87de5f392320 | [
"MIT"
] | null | null | null | basicmodule/views.py | CocoBir/django-restful-demo | aeb7f8a0bcff5c52b528c7b0c48f87de5f392320 | [
"MIT"
] | null | null | null | basicmodule/views.py | CocoBir/django-restful-demo | aeb7f8a0bcff5c52b528c7b0c48f87de5f392320 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
basic module management restful design
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:Created: 2016-8-3
:Copyright: (c) 2016<smileboywtu@gmail.com>
"""
from rest_framework import permissions
from rest_framework import status
from rest_framework import viewsets
from rest_framework.response import Response
from models import BasicModule
from serializers import BasicModuleSerializer
from utils.customer_exceptions import (
DBRelyOnException, ObjectNotExistException,
DBIntegrityException, ParamNotEnoughException,
ParamTypeException,
IntegrityError, ObjectDoesNotExist
)
class BasicModuleViewSet(viewsets.ViewSet):
    """
    Basic Module management \n
    support operation: \n
        - list \n
        - retrieve \n
        - create \n
        - update \n
        - partial_update \n
        - destroy \n
    """
    queryset = BasicModule.objects.all()
    permission_classes = (permissions.IsAuthenticatedOrReadOnly, )

    def list(self, request):
        """
        Get one page of the basic module list.

        :param request: rest framework request; reads optional integer query
            parameters ``index`` (default 0) and ``limit`` (default 8).
        :return: rest framework response with the serialized page.
        :raises ParamTypeException: when ``index`` or ``limit`` is not an int.
        """
        try:
            index = int(request.query_params.get('index', 0))
        except ValueError:
            raise ParamTypeException('index')
        try:
            limit = int(request.query_params.get('limit', 8))
        except ValueError:
            raise ParamTypeException('limit')
        raw = BasicModule.list(index=index, limit=limit)
        serializer = BasicModuleSerializer(raw['datalist'], many=True)
        raw['datalist'] = serializer.data
        return Response(raw)

    def create(self, request):
        """
        Create a new entry.

        :param request: rest framework request carrying the module fields.
        :return: the serialized new entry.
        :raises DBIntegrityException: when the name is already in use.
        """
        serializer = BasicModuleSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            # Reject duplicate names up front so the caller gets a friendly
            # error instead of a raw database integrity failure.
            exists = BasicModule.objects.filter(
                name=serializer.validated_data['name']).exists()
            if exists:
                raise DBIntegrityException(
                    serializer.validated_data['name'])
            instance = BasicModule.objects.create(
                **serializer.validated_data
            )
            instance.save()
            # must create new serializer to reflect the saved instance
            return Response(
                BasicModuleSerializer(instance).data
            )

    def retrieve(self, request, pk=None):
        """
        Get the detail of one entry by its primary key.

        :param request: rest framework request
        :param pk: primary key
        :return: rest framework response with the serialized entry.
        :raises ParamNotEnoughException: when ``pk`` is missing.
        :raises ObjectNotExistException: when no entry matches ``pk``.
        """
        if not pk:
            raise ParamNotEnoughException('id')
        try:
            instance = BasicModule.view(pk)
        except ObjectDoesNotExist:
            raise ObjectNotExistException(pk)
        serializer = BasicModuleSerializer(instance)
        return Response(serializer.data)

    def update(self, request, pk=None):
        """
        Update an existing entry's name and description.

        :param request: rest framework request carrying the new field values.
        :param pk: primary key of the entry to update.
        :return: the serialized updated entry.
        :raises ParamNotEnoughException: when ``pk`` is missing.
        :raises ObjectNotExistException: when no entry matches ``pk``.
        :raises DBIntegrityException: when the new name is already in use.
        """
        if not pk:
            raise ParamNotEnoughException('id')
        try:
            instance = BasicModule.view(pk=pk)
        except ObjectDoesNotExist:
            raise ObjectNotExistException(pk)
        serializer = BasicModuleSerializer(data=request.data)
        if serializer.is_valid(raise_exception=True):
            instance.name = serializer.validated_data['name']
            instance.description = serializer.validated_data.get(
                'description',
                instance.description
            )
            try:
                instance.save()
            except IntegrityError:
                raise DBIntegrityException(instance.name)
            # return saved data
            return Response(
                BasicModuleSerializer(instance).data
            )

    def partial_update(self, request, pk=None):
        """
        PATCH is not supported; always answers 405.

        :param request: rest framework request
        :param pk: primary key (ignored)
        :return: 405 response.
        """
        return Response(
            "currently patch operation not supported.",
            status=status.HTTP_405_METHOD_NOT_ALLOWED
        )

    def destroy(self, request, pk=None):
        """
        Delete an entry, refusing when it is still referenced.

        :param request: rest framework request
        :param pk: primary key of the entry to delete.
        :return: confirmation message on success.
        :raises ParamNotEnoughException: when ``pk`` is missing.
        :raises ObjectNotExistException: when no entry matches ``pk``.
        :raises DBRelyOnException: when versions/environments still reference it.
        """
        if not pk:
            raise ParamNotEnoughException('id')
        try:
            # check if the object is existing
            instance = BasicModule.objects.get(pk=pk)
        except ObjectDoesNotExist:
            raise ObjectNotExistException(pk)
        # Refuse to delete while any version or environment still references
        # this module.  (Removed the commented-out legacy query here.)
        rely = instance.vers.count() or instance.envs.count()
        if rely:
            raise DBRelyOnException(pk)
        instance.delete()
        return Response({
            "code": 0,
            'message': '{0} delete successfully'.format(pk)
        })
| 30.619883 | 88 | 0.584798 |
788d7fe524c3ee02c28d1db2e2c55332fc30c11a | 9,148 | py | Python | test/functional/test_framework/test_node.py | farsider350/AUTX-Core | 6d00d1e027a5a6dffb3b0815a155e4515ced007b | [
"MIT"
] | null | null | null | test/functional/test_framework/test_node.py | farsider350/AUTX-Core | 6d00d1e027a5a6dffb3b0815a155e4515ced007b | [
"MIT"
] | null | null | null | test/functional/test_framework/test_node.py | farsider350/AUTX-Core | 6d00d1e027a5a6dffb3b0815a155e4515ced007b | [
"MIT"
] | 1 | 2021-01-03T02:35:54.000Z | 2021-01-03T02:35:54.000Z | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for autxd node under test"""
import decimal
import errno
import http.client
import json
import logging
import os
import subprocess
import time
from .authproxy import JSONRPCException
from .mininode import NodeConn
from .util import (
assert_equal,
get_rpc_proxy,
rpc_url,
wait_until,
p2p_port,
)
BITCOIND_PROC_WAIT_TIMEOUT = 60
class TestNode():
"""A class for representing a autxd node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
def __init__(self, i, dirname, extra_args, rpchost, timewait, binary, stderr, mocktime, coverage_dir):
self.index = i
self.datadir = os.path.join(dirname, "node" + str(i))
self.rpchost = rpchost
if timewait:
self.rpc_timeout = timewait
else:
# Wait for up to 60 seconds for the RPC server to respond
self.rpc_timeout = 60
if binary is None:
self.binary = os.getenv("BITCOIND", "autxd")
else:
self.binary = binary
self.stderr = stderr
self.coverage_dir = coverage_dir
# Most callers will just need to add extra args to the standard list below. For those callers that need more flexibity, they can just set the args property directly.
self.extra_args = extra_args
self.args = [self.binary, "-datadir=" + self.datadir, "-server", "-keypool=1", "-discover=0", "-rest", "-logtimemicros", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-mocktime=" + str(mocktime), "-uacomment=testnode%d" % i]
self.cli = TestNodeCLI(os.getenv("BITCOINCLI", "autx-cli"), self.datadir)
# Don't try auto backups (they fail a lot when running tests)
self.args.append("-createwalletbackups=0")
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.url = None
self.log = logging.getLogger('TestFramework.node%d' % i)
self.p2ps = []
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection."""
assert self.rpc_connected and self.rpc is not None, "Error: no RPC connection"
return getattr(self.rpc, name)
def start(self, extra_args=None, stderr=None):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
if stderr is None:
stderr = self.stderr
self.process = subprocess.Popen(self.args + extra_args, stderr=stderr)
self.running = True
self.log.debug("autxd started, waiting for RPC to come up")
def wait_for_rpc_connection(self):
"""Sets up an RPC connection to the autxd process. Returns False if unable to connect."""
# Poll at a rate of four times per second
poll_per_s = 4
for _ in range(poll_per_s * self.rpc_timeout):
assert self.process.poll() is None, "autxd exited with status %i during initialization" % self.process.returncode
try:
self.rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir)
self.rpc.getblockcount()
# If the call to getblockcount() succeeds then the RPC connection is up
self.rpc_connected = True
self.url = self.rpc.url
self.log.debug("RPC successfully started")
return
except IOError as e:
if e.errno != errno.ECONNREFUSED: # Port not yet open?
raise # unknown IO error
except JSONRPCException as e: # Initialization phase
# -28 RPC in warmup
# -342 Service unavailable, RPC server started but is shutting down due to error
if e.error['code'] != -28 and e.error['code'] != -342:
raise # unknown JSON RPC exception
except ValueError as e: # cookie file not found and no rpcuser or rpcassword. autxd still starting
if "No RPC credentials" not in str(e):
raise
time.sleep(1.0 / poll_per_s)
raise AssertionError("Unable to connect to autxd")
def get_wallet_rpc(self, wallet_name):
assert self.rpc_connected
assert self.rpc
wallet_path = "wallet/%s" % wallet_name
return self.rpc / wallet_path
def stop_node(self, wait=0):
"""Stop the node."""
if not self.running:
return
self.log.debug("Stopping node")
try:
self.stop(wait=wait)
except http.client.CannotSendRequest:
self.log.exception("Unable to stop node.")
del self.p2ps[:]
def is_node_stopped(self):
"""Checks whether the node has stopped.
Returns True if the node has stopped. False otherwise.
This method is responsible for freeing resources (self.process)."""
if not self.running:
return True
return_code = self.process.poll()
if return_code is None:
return False
# process has stopped. Assert that it didn't return an error code.
assert_equal(return_code, 0)
self.running = False
self.process = None
self.rpc_connected = False
self.rpc = None
self.log.debug("Node stopped")
return True
def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
wait_until(self.is_node_stopped, timeout=timeout)
def node_encrypt_wallet(self, passphrase):
""""Encrypts the wallet.
This causes autxd to shutdown, so this method takes
care of cleaning up resources."""
self.encryptwallet(passphrase)
self.wait_until_stopped()
def add_p2p_connection(self, p2p_conn, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
returns the connection to the caller."""
if 'dstport' not in kwargs:
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
self.p2ps.append(p2p_conn)
kwargs.update({'rpc': self.rpc, 'callback': p2p_conn})
p2p_conn.add_connection(NodeConn(**kwargs))
return p2p_conn
@property
def p2p(self):
"""Return the first p2p connection
Convenience property - most tests only use a single p2p connection to each
node, so this saves having to write node.p2ps[0] many times."""
assert self.p2ps, "No p2p connection"
return self.p2ps[0]
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
# Connection could have already been closed by other end.
if p.connection is not None:
p.connection.disconnect_node()
self.p2ps = []
class TestNodeCLI():
"""Interface to bitcoin-cli for an individual node"""
def __init__(self, binary, datadir):
self.args = []
self.binary = binary
self.datadir = datadir
self.input = None
def __call__(self, *args, input=None):
# TestNodeCLI is callable with bitcoin-cli command-line args
self.args = [str(arg) for arg in args]
self.input = input
return self
def __getattr__(self, command):
def dispatcher(*args, **kwargs):
return self.send_cli(command, *args, **kwargs)
return dispatcher
def send_cli(self, command, *args, **kwargs):
"""Run bitcoin-cli command. Deserializes returned string as python object."""
pos_args = [str(arg) for arg in args]
named_args = [str(key) + "=" + str(value) for (key, value) in kwargs.items()]
assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
p_args = [self.binary, "-datadir=" + self.datadir] + self.args
if named_args:
p_args += ["-named"]
p_args += [command] + pos_args + named_args
process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
cli_stdout, cli_stderr = process.communicate(input=self.input)
returncode = process.poll()
if returncode:
# Ignore cli_stdout, raise with cli_stderr
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
return json.loads(cli_stdout, parse_float=decimal.Decimal)
| 38.762712 | 248 | 0.632707 |
fc5aea6207e9c0c3a885c631e44efa548ae0bc1d | 5,815 | py | Python | facebook_business/adobjects/serverside/tests/event_request_test.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 576 | 2018-05-01T19:09:32.000Z | 2022-03-31T11:45:11.000Z | facebook_business/adobjects/serverside/tests/event_request_test.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 217 | 2018-05-03T07:31:59.000Z | 2022-03-29T14:19:52.000Z | facebook_business/adobjects/serverside/tests/event_request_test.py | MyrikLD/facebook-python-business-sdk | a53c8ba0e8f7d0b41b385c60089f6ba00fa5c814 | [
"CNRI-Python"
] | 323 | 2018-05-01T20:32:26.000Z | 2022-03-29T07:05:12.000Z | # Copyright 2014 Facebook, Inc.
# You are hereby granted a non-exclusive, worldwide, royalty-free license to
# use, copy, modify, and distribute this software in source code or binary
# form for use in connection with the web services and APIs provided by
# Facebook.
# As with any software that integrates with the Facebook platform, your use
# of this software is subject to the Facebook Developer Principles and
# Policies [http://developers.facebook.com/policy/]. This copyright notice
# shall be included in all copies or substantial portions of the software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import json
import time
from unittest import TestCase
from unittest.mock import patch, Mock
from facebook_business import FacebookAdsApi
from facebook_business.adobjects.serverside.event import Event
from facebook_business.adobjects.serverside.event_request import EventRequest
from facebook_business.adobjects.serverside.event_response import EventResponse
from facebook_business.adobjects.serverside.http_method import HttpMethod
from facebook_business.adobjects.serverside.http_service_interface import HttpServiceInterface
from facebook_business.adobjects.serverside.request_options import RequestOptions
from facebook_business.adobjects.serverside.util import Util
from facebook_business.session import FacebookSession
class EventRequestTest(TestCase):
@patch('facebook_business.adobjects.serverside.event_request.AdsPixel')
def test_constructor(self, pixel_mock):
event = Event(event_name='Purchase', event_time=int(time.time()))
expected_event = json.dumps(
{'event_name': event.event_name, 'event_time': event.event_time}
)
pixel_id = 'pixel123'
expected_data = {
'data': [expected_event],
'test_event_code': 'test-code-1',
'namespace_id': '222',
'upload_id': '333',
'upload_tag': 'upload-tag4',
'upload_source': 'upload-source5',
'partner_agent': 'partner-agent-6',
}
event_request = EventRequest(
pixel_id=pixel_id,
events=[event],
test_event_code=expected_data['test_event_code'],
namespace_id=expected_data['namespace_id'],
upload_id=expected_data['upload_id'],
upload_tag=expected_data['upload_tag'],
upload_source=expected_data['upload_source'],
partner_agent=expected_data['partner_agent'],
)
ads_pixel = {
'events_received': 2,
'fbtrace_id': 'traceid1',
'messages': ['1', '2'],
}
expected_event_response = EventResponse(
events_received=2, fbtrace_id='traceid1', messages=['1', '2']
)
pixel_instance_mock = pixel_mock.return_value
pixel_instance_mock.create_event.return_value = ads_pixel
actual_event_response = event_request.execute()
pixel_mock.assert_called_with(pixel_id)
pixel_instance_mock.create_event.assert_called_with(
fields=[], params=expected_data
)
self.assertEqual(actual_event_response, expected_event_response)
def test_http_client(self):
mock_http_client = Mock(HttpServiceInterface)
event = Event(event_name='Purchase', event_time=int(time.time()))
expected_event = json.dumps(
{'event_name': event.event_name, 'event_time': event.event_time}
)
access_token = 'access-token-0'
pixel_id = 'pixel123'
appsecret = 'app-secret-234'
appsecret_proof = Util.appsecret_proof(appsecret, access_token)
expected_params = {
'data': [expected_event],
'test_event_code': 'test-code-1',
'namespace_id': '222',
'upload_id': '333',
'upload_tag': 'upload-tag4',
'upload_source': 'upload-source5',
'access_token': access_token,
'appsecret_proof': appsecret_proof,
}
event_request = EventRequest(
pixel_id=pixel_id,
events=[event],
test_event_code=expected_params['test_event_code'],
namespace_id=expected_params['namespace_id'],
upload_id=expected_params['upload_id'],
upload_tag=expected_params['upload_tag'],
upload_source=expected_params['upload_source'],
http_client=mock_http_client,
access_token=access_token,
appsecret=appsecret
)
expected_event_response = EventResponse(
events_received=2, fbtrace_id='traceid1', messages=['1', '2']
)
mock_http_client.execute.return_value = expected_event_response
expected_headers = FacebookAdsApi.HTTP_DEFAULT_HEADERS
expected_url = '%s/%s/%s/events' % (FacebookSession.GRAPH, FacebookAdsApi.API_VERSION, pixel_id)
expected_request_options = RequestOptions(
ca_bundle_path=Util.ca_bundle_path(),
)
actual_event_response = event_request.execute()
mock_http_client.execute.assert_called_with(
url=expected_url,
method=HttpMethod.POST,
request_options=expected_request_options,
headers=expected_headers,
params=expected_params,
)
self.assertEqual(actual_event_response, expected_event_response)
| 43.721805 | 104 | 0.685125 |
7678b49ecd2d727ffd79eb4c1a5297d3082727f5 | 3,264 | py | Python | tests/test_s3_hashfs.py | danielhfrank/cas-manifest | e78b190d68eac6c22b9c1576ec9807b8b54ebf68 | [
"MIT"
] | 2 | 2021-02-03T02:52:11.000Z | 2021-02-10T18:27:49.000Z | tests/test_s3_hashfs.py | danielhfrank/cas-manifest | e78b190d68eac6c22b9c1576ec9807b8b54ebf68 | [
"MIT"
] | null | null | null | tests/test_s3_hashfs.py | danielhfrank/cas-manifest | e78b190d68eac6c22b9c1576ec9807b8b54ebf68 | [
"MIT"
] | null | null | null | from io import StringIO
import os
from mock import patch
from pathlib import Path
import tempfile
import boto3
from moto import mock_s3
import pytest
from cas_manifest.s3_hashfs import S3HashFS, S3CasInfo, get_extension
BUCKET = 'facet-models-test'
@pytest.fixture(scope='function')
def aws_credentials():
"""Mocked AWS Credentials for moto."""
os.environ['AWS_ACCESS_KEY_ID'] = 'testing'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'testing'
os.environ['AWS_SECURITY_TOKEN'] = 'testing'
os.environ['AWS_SESSION_TOKEN'] = 'testing'
@pytest.fixture(scope='function')
def s3(aws_credentials):
with mock_s3():
yield boto3.client('s3', region_name='us-east-1')
@pytest.fixture
def s3_conn(s3):
s3.create_bucket(Bucket=BUCKET)
yield s3
@pytest.fixture
def fs(s3_conn, tmpdir):
cas_info = S3CasInfo(BUCKET, 'cas')
yield S3HashFS(Path(tmpdir), s3_conn, cas_info)
def test_s3_hashfs(fs, s3_conn):
contents = "DFDFDF"
buf = StringIO(contents)
buf.seek(0)
addr = fs.put(buf)
retrieved = fs.open(addr.id, mode='r').read()
assert(retrieved == contents)
with tempfile.TemporaryDirectory() as tmpdir2:
# Create another fs instance with a different local dir, confirm we can read
# the remote path
fs2 = S3HashFS(Path(tmpdir2), s3_conn, fs.s3_cas_info)
retrieved2 = fs2.open(addr.id, mode='r').read()
assert(retrieved2 == contents)
def test_empty_s3_hashfs(fs):
# First show that `get` will return None on a missing key instead of throwing an error
get_res = fs.get('asdf')
assert(get_res is None)
# Next show that `open` will return an IOError
with pytest.raises(IOError):
fs.open('asdfasd')
@pytest.mark.parametrize("extension", ('.txt', 'txt'))
def test_extensions(fs, extension):
# Behavior should be that when we `put` a file with an extension, subsequent `get`
# requests will yield a HashAddress with the same extension.
# The test parametrization checks for cases where the user does and does not supply a leading .
contents = "DFDFDF"
buf = StringIO(contents)
buf.seek(0)
put_addr = fs.put(buf, extension=extension)
# Remove from local cache to ensure that we send lookup to s3
Path(put_addr.abspath).unlink()
get_addr = fs.get(put_addr.id)
assert(put_addr.abspath == get_addr.abspath)
def test_no_double_upload(fs):
contents = "DFDFDF"
buf = StringIO(contents)
buf.seek(0)
fs.put(buf)
# Now, try putting the same object, and ensure that we don't upload again
with patch.object(fs.s3_conn, 'upload_file') as mock_upload:
buf.seek(0)
fs.put(buf)
mock_upload.assert_not_called()
def test_subdirs(fs, s3_conn):
contents = "DFDFDF"
buf = StringIO(contents)
buf.seek(0)
addr = fs.put(buf)
retrieved = fs.open(addr.id, mode='r').read()
assert(retrieved == contents)
with tempfile.TemporaryDirectory() as tmpdir2:
# Create another fs instance with a different local dir, confirm we can read
# the remote path
fs2 = S3HashFS(Path(tmpdir2) / 'my_subdir', s3_conn, fs.s3_cas_info)
retrieved2 = fs2.open(addr.id, mode='r').read()
assert(retrieved2 == contents)
| 30.222222 | 99 | 0.685049 |
c4ddfb2b1b3c57b4975cac3dc048e1310aa10772 | 2,033 | py | Python | examples/Pipeline/simple_web_service/web_service_java.py | avr248/Serving | bd12b303d3e490278dd94461fa8f70dc24c81ec0 | [
"Apache-2.0"
] | 789 | 2019-04-05T09:20:46.000Z | 2022-03-31T13:43:54.000Z | examples/Pipeline/simple_web_service/web_service_java.py | avr248/Serving | bd12b303d3e490278dd94461fa8f70dc24c81ec0 | [
"Apache-2.0"
] | 1,195 | 2019-04-08T10:05:28.000Z | 2022-03-31T03:43:42.000Z | examples/Pipeline/simple_web_service/web_service_java.py | avr248/Serving | bd12b303d3e490278dd94461fa8f70dc24c81ec0 | [
"Apache-2.0"
] | 229 | 2019-04-05T09:20:57.000Z | 2022-03-30T06:21:22.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle_serving_server.web_service import WebService, Op
import logging
import numpy as np
from numpy import array
import sys
import base64
_LOGGER = logging.getLogger()
np.set_printoptions(threshold=sys.maxsize)
class UciOp(Op):
def init_op(self):
self.separator = ","
def preprocess(self, input_dicts, data_id, log_id):
"""
diff with web_server.py
javaclient input type is INDArray, restful request input is list.
this function simply reshape input to the Specified shape.
"""
(_, input_dict), = input_dicts.items()
_LOGGER.error("UciOp::preprocess >>> log_id:{}, input:{}".format(
log_id, input_dict))
proc_dict = {}
x_value = eval(input_dict["x"])
input_dict["x"] = x_value.reshape(1, 13)
return input_dict, False, None, ""
def postprocess(self, input_dicts, fetch_dict, data_id, log_id):
_LOGGER.info(
"UciOp::postprocess >>> data_id:{}, log_id:{}, fetch_dict:{}".
format(data_id, log_id, fetch_dict))
fetch_dict["price"] = str(fetch_dict["price"][0][0])
return fetch_dict, None, ""
class UciService(WebService):
def get_pipeline_response(self, read_op):
uci_op = UciOp(name="uci", input_ops=[read_op])
return uci_op
uci_service = UciService(name="uci")
uci_service.prepare_pipeline_config("config.yml")
uci_service.run_service()
| 33.327869 | 74 | 0.692573 |
111facb547c8aa30a373b9f1b208b58f437100fa | 1,024 | py | Python | src/Gon/history_starter_cnstock.py | majiajue/Listed-company-news-crawl-and-text-analysis | fd3b23814039cbe8fbb2e25cbadb68238e0d998b | [
"MIT"
] | 635 | 2018-02-25T08:45:06.000Z | 2022-03-30T10:05:23.000Z | src/Gon/history_starter_cnstock.py | NongMaYiSheng/Listed-company-news-crawl-and-text-analysis | fd3b23814039cbe8fbb2e25cbadb68238e0d998b | [
"MIT"
] | 5 | 2018-10-29T16:21:28.000Z | 2022-01-03T12:59:28.000Z | src/Gon/history_starter_cnstock.py | NongMaYiSheng/Listed-company-news-crawl-and-text-analysis | fd3b23814039cbe8fbb2e25cbadb68238e0d998b | [
"MIT"
] | 216 | 2018-02-26T09:27:15.000Z | 2022-03-30T10:05:26.000Z | import __init__
import time
import logging
from Kite import config
from Killua.denull import DeNull
from Killua.deduplication import Deduplication
from Killua.buildstocknewsdb import GenStockNewsDB
from Gon.cnstockspyder import CnStockSpyder
# 1. 爬取历史数据
cnstock_spyder = CnStockSpyder(config.DATABASE_NAME, config.COLLECTION_NAME_CNSTOCK)
for url_to_be_crawled, type_chn in config.WEBSITES_LIST_TO_BE_CRAWLED_CNSTOCK.items():
logging.info("start crawling {} ...".format(url_to_be_crawled))
cnstock_spyder.get_historical_news(url_to_be_crawled, category_chn=type_chn)
logging.info("finished ...")
time.sleep(30)
# 2. 针对历史数据进行去重清洗
Deduplication(config.DATABASE_NAME, config.COLLECTION_NAME_CNSTOCK).run()
# 3. 将历史数据中包含null值的行去掉
DeNull(config.DATABASE_NAME, config.COLLECTION_NAME_CNSTOCK).run()
# 4. 创建新的数据库,针对每一个股票,将所有涉及该股票的新闻都保存在新的数据库,并贴好"利好","利空"和"中性"标签
gen_stock_news_db = GenStockNewsDB()
gen_stock_news_db.get_all_news_about_specific_stock(config.DATABASE_NAME, config.COLLECTION_NAME_CNSTOCK)
| 31.030303 | 105 | 0.825195 |
4795a2d386857866a65818fe49b0c9939b6447fd | 16,858 | py | Python | python/tvm/relay/build_module.py | domin1985/tvm | 7dd6d6e38142cd22567714b57e56c4f8b1937154 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/relay/build_module.py | domin1985/tvm | 7dd6d6e38142cd22567714b57e56c4f8b1937154 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | python/tvm/relay/build_module.py | domin1985/tvm | 7dd6d6e38142cd22567714b57e56c4f8b1937154 | [
"Zlib",
"Unlicense",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"ECL-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Construct the necessary state for the TVM graph executor
from a Relay expression.
"""
import warnings
import numpy as np
from tvm.ir import IRModule
from tvm.ir.transform import PassContext
from tvm.tir import expr as tvm_expr
from .. import nd as _nd, autotvm, register_func
from ..target import Target
from ..contrib import graph_executor as _graph_rt
from . import _build_module
from . import ty as _ty
from . import expr as _expr
from . import function as _function
from .transform import InferType
from .backend import graph_executor_factory as _graph_executor_factory
from .backend import interpreter as _interpreter
from .backend.vm import VMExecutor
def _update_target(target):
target = target if target else Target.current()
if target is None:
raise ValueError("Target is not set in env or passed as argument.")
tgts = {}
if isinstance(target, (str, Target)):
dev_type = tvm_expr.IntImm("int32", _nd.device(str(target)).device_type)
tgts[dev_type] = Target(target)
elif isinstance(target, dict):
for dev, tgt in target.items():
dev_type = tvm_expr.IntImm("int32", _nd.device(dev).device_type)
tgts[dev_type] = Target(tgt)
else:
raise TypeError(
"target is expected to be str or "
+ "tvm.target.Target, but received "
+ "{}".format(type(target))
)
return tgts
def _convert_param_map(params):
inputs = {}
for name, param in params.items():
if isinstance(param, np.ndarray):
param = _nd.array(param)
inputs[name] = _expr.const(param)
return inputs
class BuildModule(object):
"""Build an IR module to run on TVM graph executor. This class is used
to expose the `RelayBuildModule` APIs implemented in C++.
"""
def __init__(self):
self.mod = _build_module._BuildModule()
self._get_graph_json = self.mod["get_graph_json"]
self._get_module = self.mod["get_module"]
self._build = self.mod["build"]
self._optimize = self.mod["optimize"]
self._set_params_func = self.mod["set_params"]
self._get_params_func = self.mod["get_params"]
def build(self, mod, target=None, target_host=None, params=None):
"""
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The IRModule to build.
target : str, :any:`tvm.target.Target`, or dict of str(i.e.
device/context name) to str/tvm.target.Target, optional
For heterogeneous compilation, it is a dictionary indicating context
to target mapping. For homogeneous compilation, it is a build target.
target_host : str or :any:`tvm.target.Target`, optional
Host compilation target, if target is device.
When TVM compiles device specific program such as CUDA,
we also need host(CPU) side code to interact with the driver
to setup the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled,
otherwise a stackvm intepreter is used.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
factory_module : tvm.relay.backend.graph_executor_factory.GraphExecutorFactoryModule
The runtime factory for the TVM graph executor.
"""
target = _update_target(target)
# Setup the params.
if params:
self._set_params(params)
# Build the IR module. If auto_scheduler is not enabled,
# then use the TOPI-defined schedule.
use_auto_scheduler = PassContext.current().config.get(
"relay.backend.use_auto_scheduler", False
)
# Turn off AutoTVM config not found warnings if auto_scheduler is enabled.
old_autotvm_silent = autotvm.GLOBAL_SCOPE.silent
autotvm.GLOBAL_SCOPE.silent = use_auto_scheduler
self._build(mod, target, target_host)
autotvm.GLOBAL_SCOPE.silent = old_autotvm_silent
# Get artifacts
graph_json = self.get_json()
mod = self.get_module()
params = self.get_params()
return graph_json, mod, params
def optimize(self, mod, target=None, params=None):
"""
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The IR module to build.
target : str, :any:`tvm.target.Target`, or dict of str(i.e.
device/context name) to str/tvm.target.Target, optional
For heterogeneous compilation, it is a dictionary indicating context
to target mapping. For homogeneous compilation, it is a build target.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
mod : :py:class:`~tvm.IRModule`
The optimized relay module.
params : dict
The parameters of the final graph.
"""
target = _update_target(target)
# Setup the params.
if params:
self._set_params(params)
mod = self._optimize(mod, target)
# Get artifacts
params = self.get_params()
return mod, params
def _set_params(self, params):
self._set_params_func(_convert_param_map(params))
def get_json(self):
"""Return the json file of the built program."""
return self._get_graph_json()
def get_module(self):
"""Return the built module."""
return self._get_module()
def get_params(self):
"""Return the updated weights."""
params = self._get_params_func()
ret = {}
for key, value in params.items():
ret[key] = value.data
return ret
@register_func("tvm.relay.module_export_library")
def _module_export(module, file_name): # fcompile, addons, kwargs?
return module.export_library(file_name)
@register_func("tvm.relay.build")
def _build_module_no_factory(mod, target=None, target_host=None, params=None, mod_name="default"):
"""A wrapper around build which discards the Python GraphFactoryRuntime.
This wrapper is suitable to be used from other programming languages as
the runtime::Module can be freely passed between language boundaries.
"""
return build(mod, target, target_host, params, mod_name).module
def build(ir_mod, target=None, target_host=None, params=None, mod_name="default"):
# fmt: off
# pylint: disable=line-too-long
"""Helper function that builds a Relay function to run on TVM graph executor.
Parameters
----------
ir_mod : :py:class:`~tvm.IRModule`
The IR module to build. Using relay.Function is deprecated.
target : str, :any:`tvm.target.Target`, or dict of str(i.e. device/context name) to str/tvm.target.Target, optional
For heterogeneous compilation, it is a dictionary indicating context to
target mapping. For homogeneous compilation, it is a build target.
target_host : str or :any:`tvm.target.Target`, optional
Host compilation target, if target is device.
When TVM compiles device specific program such as CUDA,
we also need host(CPU) side code to interact with the driver
setup the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled,
otherwise a stackvm intepreter is used.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
mod_name: Optional[str]
The module name we will build
Returns
-------
graph_json : str
The json string that can be accepted by graph executor.
mod : tvm.Module
The module containing necessary libraries.
params : dict
The parameters of the final graph.
"""
# pylint: enable=line-too-long
# fmt: on
if not isinstance(ir_mod, (IRModule, _function.Function)):
raise ValueError("Type of input parameter mod must be tvm.IRModule")
if isinstance(ir_mod, _function.Function):
if params:
ir_mod = bind_params_by_name(ir_mod, params)
ir_mod = IRModule.from_expr(ir_mod)
warnings.warn(
"Please use input parameter mod (tvm.IRModule) "
"instead of deprecated parameter mod (tvm.relay.function.Function)",
DeprecationWarning,
)
target = _update_target(target)
if isinstance(target_host, (str, Target)):
target_host = Target(target_host)
elif target_host:
raise ValueError("target host must be the type of str, " + "tvm.target.Target, or None")
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(list(target.values()))
else:
tophub_context = autotvm.utils.EmptyContext()
with tophub_context:
bld_mod = BuildModule()
graph_json, runtime_mod, params = bld_mod.build(ir_mod, target, target_host, params)
executor_factory = _graph_executor_factory.GraphExecutorFactoryModule(
ir_mod, target, graph_json, runtime_mod, mod_name, params
)
return executor_factory
def optimize(mod, target=None, params=None):
"""Helper function that optimizes a Relay module.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to build. Using relay.Function is deprecated.
target : str, :any:`tvm.target.Target`, or dict of str(i.e. device/context
name) to str/tvm.target.Target, optional
For heterogeneous compilation, it is a dictionary indicating context to
target mapping. For homogeneous compilation, it is a build target.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
mod : :py:class:`~tvm.IRModule`
The optimized relay module.
params : dict
The parameters of the final graph.
"""
if not isinstance(mod, (IRModule, _function.Function)):
raise ValueError("Type of input parameter mod must be tvm.IRModule")
if isinstance(mod, _function.Function):
if params:
mod = bind_params_by_name(mod, params)
mod = IRModule.from_expr(mod)
warnings.warn(
"Please use input parameter mod (tvm.IRModule) "
"instead of deprecated parameter func (tvm.relay.function.Function)",
DeprecationWarning,
)
target = _update_target(target)
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(list(target.values()))
else:
tophub_context = autotvm.utils.EmptyContext()
with tophub_context:
bld_mod = BuildModule()
mod, params = bld_mod.optimize(mod, target, params)
return mod, params
def bind_params_by_name(func, params):
"""Bind params to function by name.
This could be useful when assembling custom Relay optimization
passes that involve constant folding.
Parameters
----------
func : relay.Function
The function to bind parameters to.
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for constant folding.
Returns
-------
func : relay.Function
The function with parameters bound
"""
inputs = _convert_param_map(params)
return _build_module.BindParamsByName(func, inputs)
class GraphExecutor(_interpreter.Executor):
"""Wrapper around Executor interface.
This executor is used for debug and testing purpoes.
Parameters
----------
mod : :py:class:`~tvm.IRModule`
The module to support the execution.
device : :py:class:`Device`
The runtime device to run the code on.
target : :py:class:`Target`
The target option to build the function.
"""
def __init__(self, mod, device, target):
assert mod is not None
self.mod = mod
self.device = device
self.target = target
def _make_executor(self, expr=None):
if expr:
self.mod["main"] = expr
self.mod = InferType()(self.mod)
ret_type = self.mod["main"].checked_type.ret_type
if _ty.is_dynamic(ret_type):
raise ValueError(
"Graph Executor only supports static graphs, got output type", ret_type
)
mod = build(self.mod, target=self.target)
gmodule = _graph_rt.GraphModule(mod["default"](self.device))
def _unflatten(flat_iter, cur_type):
if isinstance(cur_type, _ty.TensorType):
return next(flat_iter)
if isinstance(cur_type, _ty.TupleType):
fields = []
for field_type in cur_type.fields:
field = _unflatten(flat_iter, field_type)
fields.append(field)
return fields
raise ValueError("Return type", ret_type, "contains unsupported type", cur_type)
def _graph_wrapper(*args, **kwargs):
args = self._convert_args(self.mod["main"], args, kwargs)
# Create map of inputs.
for i, arg in enumerate(args):
gmodule.set_input(i, arg)
# Run the module, and fetch the output.
gmodule.run()
flattened = []
for i in range(gmodule.get_num_outputs()):
flattened.append(gmodule.get_output(i).copyto(_nd.cpu(0)))
unflattened = _unflatten(iter(flattened), ret_type)
return unflattened
return _graph_wrapper
def create_executor(kind="debug", mod=None, device=None, target="llvm"):
"""Factory function to create an executor.
Example
-------
.. code-block:: python
import tvm.relay
import numpy as np
x = tvm.relay.var("x", tvm.relay.TensorType([1], dtype="float32"))
expr = tvm.relay.add(x, tvm.relay.Constant(tvm.nd.array(np.array([1], dtype="float32"))))
tvm.relay.create_executor(
kind="vm", mod=tvm.IRModule.from_expr(tvm.relay.Function([x], expr))
).evaluate()(np.array([2], dtype="float32"))
# returns `array([3.], dtype=float32)`
Parameters
----------
kind : str
The type of executor. Avaliable options are `debug` for the
interpreter, `graph` for the graph executor, and `vm` for the virtual
machine.
mod : :py:class:`~tvm.IRModule`
The Relay module containing collection of functions
device : :py:class:`Device`
The device to execute the code.
target : :py:class:`tvm.Target`
The corresponding context
Returns
-------
executor : :py:class:`~tvm.relay.backend.interpreter.Executor`
"""
if mod is None:
mod = IRModule()
if device is not None:
assert device.device_type == _nd.device(str(target), 0).device_type
else:
device = _nd.device(str(target), 0)
if isinstance(target, str):
target = Target(target)
if kind == "debug":
return _interpreter.Interpreter(mod, device, target)
if kind == "graph":
return GraphExecutor(mod, device, target)
if kind == "vm":
return VMExecutor(mod, device, target)
raise RuntimeError("unknown execution strategy: {0}".format(kind))
| 35.047817 | 119 | 0.649721 |
1b3e8897edba9c9ecf25a0126fd761a575fc76ac | 3,915 | py | Python | beetsplug/syncpl.py | lrnt/beets-syncpl | 9cee89cb47510c48eb26f86cff2a6aa7be6ccb29 | [
"MIT"
] | 6 | 2015-02-02T22:38:59.000Z | 2018-05-04T23:39:19.000Z | beetsplug/syncpl.py | lrnt/beets-syncpl | 9cee89cb47510c48eb26f86cff2a6aa7be6ccb29 | [
"MIT"
] | null | null | null | beetsplug/syncpl.py | lrnt/beets-syncpl | 9cee89cb47510c48eb26f86cff2a6aa7be6ccb29 | [
"MIT"
] | null | null | null | from beets.plugins import BeetsPlugin
from beets import ui, config, library
from os import fsync
from os.path import isdir, isfile, join, relpath
from tempfile import NamedTemporaryFile
from subprocess import Popen, STDOUT, PIPE
from shlex import split as shsplit
def syncpl(lib, opts, args):
config['syncpl'].set_args(opts)
if args:
config['syncpl']['dest'] = args[0]
if not config['syncpl']['dest']:
raise ui.UserError(u'no destination path specified')
if not isdir(config['syncpl']['dest'].get()):
raise ui.UserError(u'invalid destination path')
if not config['syncpl']['playlists'].get() and \
not config['syncpl']['queries'].get():
raise ui.UserError(u'nothing to sync')
if config['syncpl']['playlists'].get():
if not config['syncpl']['playlist_dir'].get():
raise ui.UserError(u'no playlist_dir specified')
if not isdir(config['syncpl']['playlist_dir'].get()):
raise ui.UserError(u'invalid playlist_dir')
items = set()
paths = set()
# Retrieve the playlist items to sync
for playlist in config['syncpl']['playlists'].as_str_seq():
pl_path = join(config['syncpl']['playlist_dir'].get(), playlist)
if not isfile(pl_path):
raise ui.UserError(u'playlist not found: ' + playlist)
with open(pl_path, 'r') as f:
for path in f.readlines():
full_path = join(config['directory'].get(),
path.strip('\n').decode('utf-8'))
items.update(lib.items(query=u'path:"%s"' % full_path))
if config['syncpl']['include_playlist']:
paths.add(playlist.encode('utf-8'))
# Retrieve the query items to sync
for query in config['syncpl']['queries'].as_str_seq():
items.update(lib.items(query=query))
# Retrieve the track and album art paths
for item in items:
paths.add(relpath(item.path, config['directory'].get().encode('utf-8')))
if item.get_album().artpath:
paths.add(relpath(item.get_album().artpath,
config['directory'].get()))
# Write the paths to a reference file for rsync
with NamedTemporaryFile() as tmp:
tmp.write('\n'.join(paths))
tmp.flush()
fsync(tmp.fileno())
args = shsplit('rsync -amv --include="*/" --include-from=%s \
--exclude="*"' % tmp.name)
# Append delete option if specified
if config['syncpl']['delete']:
args.append('--delete-excluded')
# Append source(s) to command args
args.append(join(config['directory'].get(), ''))
if config['syncpl']['playlist_dir']:
args.append(join(config['syncpl']['playlist_dir'].get(), ''))
# Append destinations to command args
args.append(config['syncpl']['dest'].get())
# Run rsync and print progress
cmd = Popen(args, stdout=PIPE, stderr=STDOUT)
for line in iter(cmd.stdout.readline, ""):
ui.print_(line.strip('\n'))
class SyncplPlugin(BeetsPlugin):
def __init__(self):
super(SyncplPlugin, self).__init__()
self.config.add({
u'dest': None,
u'playlist_dir': None,
u'include_playlist': True,
u'delete': False,
u'playlists': [],
u'queries': [],
})
def commands(self):
cmd = ui.Subcommand('syncpl', help='sync music files to a folder')
cmd.parser.add_option(
'-d', '--delete', action='store_true', default=None,
help="delete anything unspecfied in the destination folder")
cmd.parser.add_option(
'-D', '--nodelete', action='store_false', dest='delete',
help="don't delete anything unspecified in the destination folder")
cmd.func = syncpl
return [cmd]
| 34.646018 | 80 | 0.588761 |
8221072b6b2838776330b12779cbd6f0e058f072 | 5,586 | py | Python | fast_to_sql/fast_to_sql.py | jdglaser/fast_to_SQL | 71b38ef8df7e7efeb79793394e94ca3988601140 | [
"MIT"
] | 20 | 2019-09-04T10:15:20.000Z | 2022-02-26T12:04:39.000Z | fast_to_sql/fast_to_sql.py | jdglaser/fast_to_SQL | 71b38ef8df7e7efeb79793394e94ca3988601140 | [
"MIT"
] | 16 | 2019-04-23T07:49:11.000Z | 2021-07-26T15:28:56.000Z | fast_to_sql/fast_to_sql.py | jdglaser/fast_to_SQL | 71b38ef8df7e7efeb79793394e94ca3988601140 | [
"MIT"
] | 6 | 2019-07-30T10:33:53.000Z | 2021-05-13T20:55:40.000Z | """Main script that holds logic for fast_to_sql
"""
from __future__ import absolute_import
import pandas as pd
import numpy as np
import pyodbc
from . import errors
# Global
DTYPE_MAP = {
"int64": "int",
"float64": "float",
"object": "varchar(255)",
"datetime64[ns]": "datetime2",
"bool": "bit"
}
def _check_duplicate_cols(df):
"""Returns duplicate column names (case insensitive)
"""
cols = [c.lower() for c in df.columns]
dups = [x for x in cols if cols.count(x) > 1]
if dups:
raise errors.DuplicateColumns(f"There are duplicate column names. Repeated names are: {dups}. SQL Server dialect requires unique names (case insensitive).")
def _clean_col_name(column):
"""Removes special characters from column names
"""
column = str(column).replace(" ", "_").replace("(","").replace(")","").replace("[","").replace("]","")
column = f"[{column}]"
return column
def _clean_custom(df, custom):
"""Validate and clean custom columns
"""
for k in list(custom):
clean_col = _clean_col_name(k)
if clean_col not in df.columns:
raise errors.CustomColumnException(f"Custom column {k} is not in the dataframe.")
custom[clean_col] = custom.pop(k)
return custom
def _get_data_types(df, custom):
"""Get data types for each column as dictionary
Handles default data type assignment and custom data types
"""
data_types = {}
for c in list(df.columns):
if c in custom:
data_types[c] = custom[c]
continue
dtype = str(df[c].dtype)
if dtype not in DTYPE_MAP:
data_types[c] = "varchar(255)"
else:
data_types[c] = DTYPE_MAP[dtype]
return data_types
def _get_default_schema(cur: pyodbc.Cursor) -> str:
"""Get the default schema of the caller
"""
return str(cur.execute("select SCHEMA_NAME() as scm").fetchall()[0][0])
def _get_schema(cur: pyodbc.Cursor, table_name: str):
"""Get schema and table name - returned as tuple
"""
t_spl = table_name.split(".")
if len(t_spl) > 1:
return t_spl[0], ".".join(t_spl[1:])
else:
return _get_default_schema(cur), table_name
def _clean_table_name(table_name):
"""Cleans the table name
"""
return table_name.replace("'","''")
def _check_exists(cur,schema,table,temp):
"""Check in conn if table exists
"""
if temp:
return cur.execute(
f"IF OBJECT_ID('tempdb..#[{table}]') IS NOT NULL select 1 else select 0"
).fetchall()[0][0]
else:
return cur.execute(
f"IF EXISTS (SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = '{table}' and TABLE_SCHEMA = '{schema}') select 1 else select 0"
).fetchall()[0][0]
def _generate_create_statement(schema, table, cols, temp):
"""Generates a create statement
"""
cols = ",".join([f'\n\t{k} {v}' for k, v in cols.items()])
schema_if_temp = f"[#{table}]" if temp else f"[{schema}].[{table}]"
return f"create table {schema_if_temp}\n({cols}\n)"
def _check_parameter_if_exists(if_exists):
"""Raises an error if parameter 'if_exists' is not correct
"""
if if_exists not in ('append', 'fail', 'replace'):
raise errors.WrongParam(f"Incorrect parameter value {if_exists} for 'if_exists'. Can be 'append', 'fail', or 'replace'")
def fast_to_sql(df, name, conn, if_exists='append', custom=None, temp=False, copy=False):
"""Main fast_to_sql function.
Writes pandas dataframe to sql using pyodbc fast_executemany
"""
if copy:
df = df.copy()
# Assign null custom
if custom is None:
custom = {}
# Handle series
if isinstance(df, pd.Series):
df = df.to_frame()
# Clean table name
name = _clean_table_name(name)
# Clean columns
columns = [_clean_col_name(c) for c in list(df.columns)]
df.columns = columns
# Check for duplicate column names
_check_duplicate_cols(df)
custom = _clean_custom(df, custom)
# Assign data types
data_types = _get_data_types(df, custom)
# Get schema
cur = conn.cursor()
schema, name = _get_schema(cur, name)
if schema == '':
schema = cur.execute("SELECT SCHEMA_NAME()").fetchall()[0][0]
exists = _check_exists(cur, schema, name, temp)
# Handle existing table
create_statement = ''
if exists:
_check_parameter_if_exists(if_exists)
if if_exists == "replace":
cur.execute(f"drop table [{schema}].[{name}]")
create_statement = _generate_create_statement(schema, name, data_types, temp)
cur.execute(create_statement)
elif if_exists == "fail":
fail_msg = f"Table [{schema}].[{name}] already exists." if temp else f"Temp table #[{name}] already exists in this connection"
raise errors.FailError(fail_msg)
else:
create_statement = _generate_create_statement(schema, name, data_types, temp)
cur.execute(create_statement)
# Run insert
if temp:
insert_sql = f"insert into [#{name}] values ({','.join(['?' for v in data_types])})"
else:
insert_sql = f"insert into [{schema}].[{name}] values ({','.join(['?' for v in data_types])})"
insert_cols = df.values.tolist()
insert_cols = [[None if type(cell) == float and np.isnan(cell) else cell for cell in row] for row in insert_cols]
cur.fast_executemany = True
cur.executemany(insert_sql, insert_cols)
cur.close()
return create_statement
| 32.858824 | 164 | 0.629073 |
21cd8f2ac03551e98e87f12d543b27cac8207b09 | 4,547 | py | Python | configs/representation/archive/uvc_lab/uvc2_r18_diff_coord_video_2x8x1_50e_kinetics400_lab.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | configs/representation/archive/uvc_lab/uvc2_r18_diff_coord_video_2x8x1_50e_kinetics400_lab.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | configs/representation/archive/uvc_lab/uvc2_r18_diff_coord_video_2x8x1_50e_kinetics400_lab.py | happywu/mmaction2-CycleContrast | 019734e471dffd1161b7a9c617ba862d2349a96c | [
"Apache-2.0"
] | null | null | null | # model settings
temperature = 0.01
with_norm = True
model = dict(
type='UVCTrackerV2',
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(3, ),
strides=(1, 2, 1, 1),
norm_eval=False,
zero_init_residual=True),
cls_head=dict(
type='UVCHead',
loss_feat=dict(type='CosineSimLoss'),
loss_aff=dict(
type='ConcentrateLoss',
win_len=8,
stride=8,
temperature=temperature,
with_norm=with_norm,
loss_weight=1.),
loss_bbox=dict(type='MSELoss', loss_weight=10.),
in_channels=512,
channels=128,
temperature=temperature,
with_norm=with_norm,
init_std=0.01,
track_type='center'))
# model training and testing settings
train_cfg = dict(
patch_size=96,
img_as_ref=True,
img_as_tar=True,
diff_crop=True,
skip_cycle=True,
center_ratio=0.)
test_cfg = dict(
precede_frames=7,
topk=5,
temperature=temperature,
strides=(1, 2, 1, 1),
out_indices=(3, ),
with_norm=with_norm,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(mean=[50, 0, 0], std=[50, 127, 127], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=2, frame_interval=8, num_clips=1),
dict(type='DecordDecode'),
# dict(type='Resize', scale=(-1, 256)),
# dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(256, 256), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='RGB2LAB'),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='RGB2LAB'),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=48,
workers_per_gpu=4,
val_workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
optimizer = dict(type='Adam', lr=1e-4)
optimizer_config = dict(grad_clip=None)
# learning policy
# lr_config = dict(policy='CosineAnnealing', min_lr=0)
lr_config = dict(policy='Fixed')
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1, metrics='davis', key_indicator='J&F-Mean', rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='uvc',
name='{{fileBasenameNoExtension}}',
resume=True,
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
| 31.358621 | 76 | 0.632945 |
b1b44e964cf1771cdf178273b1ece6e26fa7cb1b | 46,519 | py | Python | flask_test/aget_test_result.py | rahman-mahmudur/PyART | 36591cd10b2b7a560bbcb47a6cf744b72466f92a | [
"Apache-2.0"
] | null | null | null | flask_test/aget_test_result.py | rahman-mahmudur/PyART | 36591cd10b2b7a560bbcb47a6cf744b72466f92a | [
"Apache-2.0"
] | null | null | null | flask_test/aget_test_result.py | rahman-mahmudur/PyART | 36591cd10b2b7a560bbcb47a6cf744b72466f92a | [
"Apache-2.0"
] | null | null | null | import os,time,math,sys,json,re,string,json
import importlib
import get_dataflow
import pandas as pd
import joblib
import json
import requests
import bs4
import lxml
from sklearn.ensemble import RandomForestClassifier
from nltk.tokenize import word_tokenize
# Names of Python standard-library modules.  Presumably used to decide whether
# an imported module ships with the interpreter or needs a pip install --
# TODO confirm against the callers (not visible in this chunk).
# Fixes vs. the original list: 'datatime' corrected to 'datetime', and the
# duplicated 'unittest.mock' entry removed.
stdlib=['string','re','difflib','textwrap','unicodedata','stringprep','readline','rlcompleter',
'struct','codecs','datetime','calendar','collections','collections.abc','heapq','bisect',
'array','weakref','types','copy','pprint','reprlib','enum','numbers','math','cmath',
'decimal','fractions','random','statistics','itertools','functools','operator','pathlib',
'os.path','fileinput','stat','filecmp','tempfile','glob','fnmatch','linecache','shutil',
'pickle','copyreg','shelve','marshal','dbm','sqlite3','zlib','gzip','bz2','lzma','zipfile',
'tarfile','csv','configparser','netrc','xdrlib','plistlib','hashlib','hmac','secrets',
'os','io','time','argparse','getopt','logging','logging.config','logging.handlers',
'getpass','curses','curses.textpad','curses.ascii','curses.panel','platform','errno',
'ctypes','threading','multiprocessing','multiprocessing.shared_memory','concurrent',
'concurrent.futures','subprocess','sched','queue','_thread','_dummy_thread','dummy_threading',
'contextvars','asyncio','socket','ssl','select','selectors','asyncore','asynchat','signal',
'mmap','email','json','mailcap','mailbox','mimetypes','base64','binhex','binascii',
'quopri','uu','html','html.parser','html.entities','xml','webbrowser','xml.etree.ElementTree',
'xml.dom','xml.dom.minidom','xml.dom.pulldom','xml.sax','xml.sax.handler','xml.sax.saxutils',
'xml.sax.xmlreader','xml.parsers.expat','cgi','cgitb','wsgiref','urllib','urllib.request',
'urllib.response','urllib.parse','urllib.error','urllib.robotparser','http','http.client',
'ftplib','poplib','imaplib','nntplib','smtplib','smtpd','telnetlib','uuid','socketserver',
'http.server','http.cookies','http.cookiejar','xmlrpc','xmlrpc.client','xmlrpc.server',
'ipaddress','audioop','aifc','sunau','wave','chunk','colorsys','imghdr','sndhdr','ossaudiodev',
'gettext','locale','turtle','cmd','shlex','tkinter','tkinter.ttk','tkinter.tix','tkinter.scrolledtext',
'typing','pydoc','doctest','unittest','unittest.mock','test','test.support',
'test.support.script_helper','bdb','faulthandler','pdb','timeit','trace','tracemalloc','distutils',
'ensurepip','venv','zipapp','sys','sysconfig','builtins','__main__','warnings','dataclasses',
'contextlib','abc','atexit','traceback','__future__','gc','inspect','site','code','codeop','zipimport',
'pkgutil','modulefinder','runpy','importlib','ast','symtable','symbol','token','keyword',
'tokenize','tabnanny','pyclbr','py_compile','compileall','dis','pickletools','formatter','msilib',
'msvcrt','winreg','winsound','posix','pwd','spwd','grp','crypt','termios','tty','pty','fcntl','pipes',
'resource','nis','optparse','imp']
# Per-rank hit counters for top-k recommendation accuracy (7 buckets); the
# exact k values are defined by the evaluation code that updates them.
topk_array = [0,0,0,0,0,0,0]
# Running total of API-recommendation points evaluated so far.
num_of_apis = 0
class ShowProcess():
    """Console progress-bar helper.

    Renders a fixed-width ``[>>>---]`` bar on stdout (redrawing in place via
    a trailing carriage return); once the step counter reaches ``max_steps``
    the bar is closed and ``infoDone`` is printed.
    """
    # class-level defaults; instances normally shadow i/max_steps/infoDone
    i = 0
    max_steps = 0
    max_arrow = 50
    infoDone = 'done'

    def __init__(self, max_steps, infoDone = 'Done'):
        """Remember the total step count and the completion message."""
        self.max_steps = max_steps
        self.i = 0
        self.infoDone = infoDone

    def show_process(self, i=None):
        """Advance (or jump to step *i*) and redraw the bar in place."""
        self.i = self.i + 1 if i is None else i
        done = int(self.i * self.max_arrow / self.max_steps)
        todo = self.max_arrow - done
        percent = self.i * 100.0 / self.max_steps
        bar = '[{}{}]{:.2f}%\r'.format('>' * done, '-' * todo, percent)
        sys.stdout.write(bar)
        sys.stdout.flush()
        if self.i >= self.max_steps:
            self.close()

    def close(self):
        """Finish the bar: newline, completion message, reset the counter."""
        print('')
        print(self.infoDone)
        self.i = 0
def get_file_path(root_path, file_list, dir_list):
    """Recursively walk *root_path*, collecting sub-directories into
    *dir_list* and Python sources (excluding anything ending in "tmp.py")
    into *file_list* and the module-level ``ret_list`` accumulator.
    Non-Python files are ignored entirely.
    """
    global ret_list
    for entry in os.listdir(root_path):
        full = os.path.join(root_path, entry)
        if os.path.isdir(full):
            # record the directory itself, then descend depth-first
            dir_list.append(full)
            get_file_path(full, file_list, dir_list)
        elif full.endswith('.py') and not full.endswith('tmp.py'):
            ret_list.append(full)
            file_list.append(full)
def GetMiddleStr(content, startStr, endStr):
    """Slice of *content* between the first occurrence of *startStr* and the
    first occurrence of *endStr* (both searched from the string start).

    Raises ValueError, like str.index, when either marker is missing.
    """
    begin = content.index(startStr) + len(startStr)
    stop = content.index(endStr)
    return content[begin:stop]
def get_module_funcs(modulename):
    """Candidate attribute names for a directly-imported module.

    First consults the module-level ``cur_apis`` list (APIs observed in the
    current project); when nothing matches, falls back to really importing
    the module -- pip-installing its (root) package on demand -- and listing
    it with ``dir()``.
    Returns ``{modulename: [names]}``, or ``{}`` when the import fails
    (the error is printed).
    """
    modulename = modulename.strip()
    prefix = modulename + '.'
    # 1) project-local knowledge: final segment of every known API that
    #    mentions this module
    matched = [api.split('.')[-1] for api in cur_apis
               if prefix in api or api.startswith(prefix)]
    if matched:
        return {modulename: list(set(matched))}
    # 2) real import, installing the package first when it is missing
    try:
        module = importlib.import_module(modulename)
    except Exception:
        if '.' in modulename:
            rootmodule = modulename[:modulename.find('.')]
            os.system('pip3 install ' + rootmodule)
        else:
            os.system('pip3 install ' + modulename)
        try:
            module = importlib.import_module(modulename)
        except Exception as err:
            print(err)
            return {}
    return {modulename: dir(module)}
def get_alias_funcs(modulename, alias):
    """Candidate attribute names for a module imported under an alias
    (``import numpy as np``).

    The lookup is identical to get_module_funcs (cur_apis first, then a
    real import with on-demand pip install); this function previously
    duplicated that body verbatim and now delegates instead.  The
    single-entry result is re-keyed by *alias* -- the name the source code
    actually uses.
    Returns ``{alias: [names]}`` or ``{}`` when the module cannot be
    resolved.
    """
    res = get_module_funcs(modulename)
    if not res:
        return {}
    # get_module_funcs keys its single-entry result by the module name;
    # re-key it by the alias.
    return {alias: next(iter(res.values()))}
# NOTE(review): exact duplicate of the GetMiddleStr defined earlier in this
# file; this re-definition harmlessly shadows it with an identical body, but
# one of the two should be removed.
def GetMiddleStr(content,startStr,endStr):
    """Return the substring of *content* between startStr and endStr.

    str.index always returns >= 0 (it raises ValueError on a miss), so the
    guard below can never be false.
    """
    startIndex = content.index(startStr)
    if startIndex>=0:
        startIndex += len(startStr)
    endIndex = content.index(endStr)
    return content[startIndex:endIndex]
def get_alias_item(modulename, itname, aliasname):
    """Candidate attribute names for an item imported under an alias
    (``from os import path as p``).

    Resolution is identical to get_item_methods (cur_apis first, then
    getattr on the imported module, then importing ``modulename.itname``
    as a submodule); this function previously duplicated that body verbatim
    and now delegates instead.  The single-entry result is re-keyed by
    *aliasname*, the name the source code actually uses.
    Returns ``{aliasname: [names]}`` or ``{}`` when nothing resolves.
    """
    res = get_item_methods(modulename, itname)
    if not res:
        return {}
    return {aliasname: next(iter(res.values()))}
def get_item_methods(modulename,itname):
    """Candidate attribute names for *itname* imported from *modulename*
    (i.e. ``from modulename import itname``).

    Resolution order:
      1. module-level ``cur_apis`` entries that mention
         ``modulename.itname`` (result keyed by *modulename*);
      2. import *modulename* (pip-installing the root package on demand via
         ``os.system``) and ``dir()`` its attribute *itname* (keyed by
         *itname*);
      3. import ``modulename.itname`` as a submodule (keyed by *itname*).
    Returns ``{}`` when every strategy fails; the last error is printed.

    NOTE(review): the cur_apis branch keys the result by *modulename* while
    the other branches key it by *itname* -- confirm which key the
    downstream ``all_candidates`` lookups actually expect.
    """
    modulename=modulename.strip()
    flag=0
    ms=[]
    # 1) scan the APIs already observed in the current project
    for curapi in cur_apis:
        items=curapi.split('.')
        if modulename+'.'+itname in curapi or curapi.startswith(modulename+'.'+itname):
            #print('yes!',curapi)
            api=items[-1]
            ms.append(api)
            flag=1
    if flag==1:
        ms=list(set(ms))  # deduplicate; order is not preserved
        return {modulename:ms}
    #print(modulename,itname)
    rootmodule=''
    submodule=''
    # 2) really import the module, installing its package first if necessary
    try:
        module=importlib.import_module(modulename)
    except Exception:
        try:
            if '.' in modulename:
                index=modulename.find('.')
                rootmodule=modulename[:index]
                os.system('pip3 install '+rootmodule)
            else:
                os.system('pip3 install '+modulename)
            module=importlib.import_module(modulename)
        except Exception:
            # 3) the dotted path may denote a submodule, not an attribute
            try:
                submodule=importlib.import_module(modulename+'.'+itname)
                return {itname:dir(submodule)}
            except Exception as err:
                print(err)
                return {}
    try:
        item=getattr(module,itname)
        return {itname:dir(item)}
    except Exception:
        # attribute lookup failed: fall back to treating itname as a submodule
        try:
            submodule=importlib.import_module(modulename+'.'+itname)
            return {itname:dir(submodule)}
        except Exception as err:
            print(err)
            return {}
def deal_with_current_module(modulename, file, names):
    """Resolve a relative import (``from ..pkg import a, b`` appearing in
    *file*) and collect candidate attribute names for each imported item.

    The leading-dot module path is converted into an absolute dotted module
    path by get_real_module -- whose body this function previously inlined
    verbatim -- and every requested name is then looked up with
    get_item_methods.
    Returns the merged dict of the per-item results.
    """
    modulename = modulename.strip()
    rootmodule = get_real_module(modulename, file)
    ret = {}
    for n in names:
        ret.update(get_item_methods(rootmodule, n))
    return ret
def get_item_funcs(rootmodule, module, item):
    """Attribute names for *item* inside *module*.

    Tries, in order: importing *module* and reading the attribute;
    pip-installing *rootmodule* and re-importing; importing
    ``module.item`` as a submodule.
    Returns ``{item: [names]}``, or ``{}`` when every strategy fails
    (the last error is printed).
    """
    mod = None
    try:
        mod = importlib.import_module(module)
    except Exception:
        # package missing: try installing its distribution, then re-import
        os.system('pip3 install ' + rootmodule)
        try:
            mod = importlib.import_module(module)
        except Exception:
            mod = None
    if mod is not None:
        try:
            return {item: dir(getattr(mod, item))}
        except Exception:
            pass
    # either the module never imported or the attribute lookup failed:
    # the item may itself be a submodule.
    try:
        return {item: dir(importlib.import_module(module + '.' + item))}
    except Exception as err:
        print(err)
        return {}
def get_real_module(modulename,file):
    """Convert a relative import path (leading dots, e.g. ``..pkg.mod``)
    into an absolute dotted module path anchored at the project root.

    Each leading dot drops one trailing component of *file*'s path; the
    remaining directory is made relative to the module-level ``root_path``
    and its slashes become dots.
    Assumes '/'-separated (POSIX) paths -- TODO confirm on Windows.
    """
    current_file=file
    # count the leading dots: one dot = current package, each further dot
    # climbs one package level
    layer=0
    for c in modulename:
        if c=='.':
            layer+=1
        else:
            break
    #print(layer)
    ls7=current_file.split('/')
    newdirs=ls7[:(0-layer)]  # drop `layer` trailing path components
    newdir=''
    for d in newdirs:
        newdir+=d+'/'
    realdir=newdir
    #print(realdir)
    # append a sentinel so GetMiddleStr can slice between root_path and '/end'
    newdir=newdir+'end'
    rootdir=GetMiddleStr(newdir,root_path,'/end')
    if modulename=='.':
        rootmodule=re.sub('/','.',rootdir)
    else:
        rootmodule=re.sub('/','.',rootdir)+'.'+modulename[layer:]
    #print("Note!",rootmodule)
    return rootmodule
def get_module_methods(file):
    """Parse the import statements of *file* and build a mapping from each
    locally-visible name (module, alias or imported item) to its list of
    candidate attribute names.

    Four import forms are recognised, dispatched by regex on each stripped
    source line:
      1. ``import a[, b ...]``           -> get_module_funcs per module
      2. ``import a as x[, ...]``        -> get_alias_funcs (keyed by alias)
      3. ``from m import a[, b ...]``    -> get_item_methods per item, or
         deal_with_current_module for relative (leading-dot) imports
      4. ``from m import a as x[, ...]`` -> get_alias_item / get_item_methods
    Lines matching none of the patterns are ignored.
    Returns the merged ``{name: [candidate APIs]}`` dict.
    """
    modulemethods=[]   # NOTE(review): assigned once, never read afterwards
    all_candidates={}
    with open(file) as f:
        lines=f.readlines()
    for line in lines:
        line=line.strip()
        #in most cases, we choose to get all fuctions of the module imported directly using inspect
        #maybe need all classes and all methods of the classes in the module
        # form 1: plain "import a[, b ...]" with no alias
        if re.match('import [a-zA-Z0-9\.\_\,\s]+$',line) and ' as ' not in line:
            #print(1,line)
            modulename=line.split('import')[-1].strip()
            if ',' not in modulename:
                x1=get_module_funcs(modulename)
                all_candidates.update(x1)
            else:
                # comma-separated multi-module import
                ls3=modulename.split(',')
                #global all_candidates
                for j in ls3:
                    itemname=j.strip()
                    x2=get_module_funcs(itemname)
                    all_candidates.update(x2)
        #should choose another example
        # form 2: "import a as x[, b as y ...]"
        elif re.match('import [a-zA-Z0-9\.\_\,]+ as [a-zA-Z0-9\.\_\,\s]+$',line):
            #print(2,line)
            if ',' not in line:
                modulename=GetMiddleStr(line,'import',' as ').strip()
                alias=line.split(' as ')[-1].strip()
                #print(modulename,alias)
                x3=get_alias_funcs(modulename,alias)
                #global all_candidates
                all_candidates.update(x3)
            #many combing methods, checked by ','
            else:
                # mixed list: each entry may or may not carry its own alias
                body=line.split('import')[-1].strip()
                #print("multias:",body)
                mas=body.split(',')
                #print(mas)
                for ma in mas:
                    if ' as ' in ma:
                        ls4=ma.split(' as ')
                        maname=ls4[0].strip()
                        aliasname=ls4[1].strip()
                        #print(maname,aliasname)
                        x4=get_alias_funcs(maname,aliasname)
                        #global all_candidates
                        all_candidates.update(x4)
                    else:
                        maname=ma.strip()
                        #print(maname)
                        x5=get_module_funcs(maname)
                        #global all_candidates
                        all_candidates.update(x5)
        # form 3: "from m import a[, b ...]" without aliases
        elif re.match('from [a-zA-Z0-9\.\_]+ import [a-zA-Z0-9\_\.\*\,\s]+$',line) and 'as' not in line:
            #print(3,line)
            modulename=GetMiddleStr(line,'from','import').strip()
            itemname=line.split('import')[-1].strip()
            names=[]
            if ',' in itemname:
                ns=itemname.split(',')
                for n in ns:
                    names.append(n.strip())
            else:
                names.append(itemname)
            #print(modulename,names)
            # leading dot = relative import: resolve it against this file's
            # location instead of importing by name
            if modulename.startswith('.'):
                #print(modulename)
                #print(file)
                x6=deal_with_current_module(modulename,file,names)
                #global all_candidates
                all_candidates.update(x6)
                continue
            '''
            firmname=modulename.split('.')[0]
            if firmname==curmodule:
                print("current module:",modulename)
                deal_with_current_module(modulename,names)
                continue
            #need other ops get all methods defined in modules
            #try1:copy the current proj to root path
            '''
            for n in names:
                x7=get_item_methods(modulename,n)
                #global all_candidates
                all_candidates.update(x7)
        # form 4: "from m import a as x[, ...]"
        elif re.match('from [a-zA-Z0-9\.\_]+ import [a-zA-Z0-9\_\.\*\,]+ as [a-zA-Z0-9\_\.\*\,\s]+$',line):
            #print(4,line)
            modulename=GetMiddleStr(line,'from','import').strip()
            if modulename.startswith('.'):
                #print(modulename)
                #print(4,file)
                modulename=get_real_module(modulename,file)
                #continue
                #print(modulename)
                #need other ops to change the modulename as absmodule
            itemname=line.split('import')[-1]
            #print(modulename,itemname)
            if ',' not in itemname:
                lsx=itemname.split(' as ')
                if len(lsx)<2:
                    continue
                itname=lsx[0].strip()
                aliasname=lsx[1].strip()
                x8=get_alias_item(modulename,itname,aliasname)
                #global all_candidates
                all_candidates.update(x8)
            else:
                # mixed list: aliased and plain items may appear together
                ls5=itemname.split(',')
                for it in ls5:
                    if ' as ' not in it:
                        itname=it.strip()
                        x9=get_item_methods(modulename,itname)
                        #global all_candidates
                        all_candidates.update(x9)
                    else:
                        itname=it.split(' as ')[0].strip()
                        aliasname=it.split(' as ')[1].strip()
                        x10=get_alias_item(modulename,itname,aliasname)
                        #global all_candidates
                        all_candidates.update(x10)
        #pass
        #else:
        #print('SyntaxError: invalid syntax')
    #print(all_candidates)
    return all_candidates
def get_caller(rec):
    """Receiver expression of an API call: ``"a.b.c(x)"`` -> ``"a.b"``.

    Call arguments are stripped first; everything before the last remaining
    dot is the caller.  (With no dot present, rfind returns -1 and the last
    character is dropped -- a quirk preserved from the original.)
    """
    bare = re.sub(r'\(.*\)', '', rec)
    return bare[:bare.rfind('.')]
def check(newcontext):
    """Best-effort bracket completion for a possibly-truncated code context.

    Only the tail of *newcontext*, starting at the last line that begins
    with ``def``, is inspected.  Simple single-token string literals are
    stripped so brackets inside them are not counted.  If the counts of
    ( ) [ ] { } already balance, the context is returned unchanged;
    otherwise the leftover unmatched openers are closed by appending the
    matching closers (innermost first) to the original context.
    """
    ls=newcontext.split('\n')
    i=0
    # locate the last enclosing function definition; only that suffix matters
    for i in range(len(ls)-1,-1,-1):
        if ls[i].strip().startswith('def'):
            break
    nc=''
    for j in range(i,len(ls)):
        nc+=ls[j]+'\n'
    #nc=newcontext
    #print(nc)
    # drop short quoted literals so bracket characters inside them are
    # ignored (only simple single-token strings are matched)
    nc=re.sub('\'[\\\[\]\(\)\{\}A-Za-z0-9_\,\:]+\'','',nc)
    nc=re.sub('\"[\\\[\]\(\)\{\}A-Za-z0-9_\,\:]+\"','',nc)
    lk=nc.count('(')
    rk=nc.count(')')
    ll=nc.count('[')
    rl=nc.count(']')
    ld=nc.count('{')
    rd=nc.count('}')
    kc=lk-rk  # net open parentheses
    lc=ll-rl  # net open square brackets
    dc=ld-rd  # net open braces
    addc=''
    #print(kc,lc,dc)
    if kc==lc==dc==0:
        return newcontext
    else:
        # ks = the bare sequence of bracket characters from the suffix
        ks=''
        #print(nc)
        for i in range(0,len(nc)):
            c=nc[i]
            if re.match('[\(\)\[\]\{\}]',c):
                ks+=c
        #print(ks)
        # repeatedly erase adjacent matched pairs until only the unmatched
        # opening brackets remain
        while('{}' in ks or '[]' in ks or '()' in ks):
            while '()' in ks:
                ks=re.sub('\[\]','',ks)
                ks=re.sub('\{\}','',ks)
                ks=re.sub('\(\)','',ks)
            while '[]' in ks:
                ks=re.sub('\{\}','',ks)
                ks=re.sub('\(\)','',ks)
                ks=re.sub('\[\]','',ks)
            while '{}' in ks:
                ks=re.sub('\[\]','',ks)
                ks=re.sub('\(\)','',ks)
                ks=re.sub('\{\}','',ks)
            #print(ks)
        # close the leftover openers in reverse (innermost-first) order
        for i in range(len(ks)-1,-1,-1):
            if ks[i]=='(':
                addc+=')'
            elif ks[i]=='[':
                addc+=']'
            else:
                addc+='}'
        #print(newcontext)
        #sys.exit(0)
        #x=re.sub('return ','',newcontext+addc)
        return newcontext+addc
def get_type(finalc,file):
    """Infer the type of the marked expression in *finalc* by running pytype.

    *finalc* is written to ``tmp.py`` in the directory of *file*; pytype's
    output is redirected to ``./log.txt`` and the first line tagged
    ``[reveal-type]`` supplies the type string.  The module-level counters
    ``Nonenum`` / ``Anynum`` / ``OKnum`` (defined elsewhere in this file)
    are bumped according to whether the result is missing, unusable
    (Any/nothing), or usable.
    Returns the type string, or 'None' when pytype revealed nothing.
    """
    lindex=file.rfind('/')
    tmp=file[:lindex]+'/tmp.py'
    with open(tmp,'w+') as f:
        f.write(finalc)
    #with open(tmp2,'w+') as f2:
        #f2.write(finalc)
    try:
        #os.system('pytype '+tmp)
        # NOTE(review): os.system does not raise on a non-zero exit status,
        # so the except branch below is effectively unreachable this way.
        os.system('pytype '+tmp+' > log.txt')
        #os.system('rm '+tmp)
    except Exception:
        sys.exit()
    with open('log.txt') as f:
        lines=f.readlines()
    vtype='None'
    for line in lines:
        if '[reveal-type]' in line:
            # line looks like "path:NN: <type> [reveal-type]"; keep the type
            tp=line.split(':')[1]
            vtype=re.sub('\[reveal\-type\]','',tp)
            #print(vtype)
            break
        #if '[python-compiler-error]' in line:
            #sys.exit()
    global Nonenum,Anynum,OKnum
    if vtype=='None':
        #print(tmp)
        #sys.exit()
        Nonenum+=1
    elif vtype=='Any' or vtype=='nothing':
        Anynum+=1
    else:
        OKnum+=1
    return vtype
def get_bank(line):
    """Return ``(leading-space prefix, its length)`` for *line*.

    Only space characters count as indentation (a tab or any other
    character stops the scan), matching the original scan-until-non-space
    behaviour.  Fixes the old off-by-one on lines consisting entirely of
    spaces, where the final space was silently dropped because the scan
    loop ended without breaking.
    """
    ip = len(line) - len(line.lstrip(' '))
    return (line[:ip], ip)
def check_try(code, trycache):
    """Close every dangling ``try:`` recorded in *trycache*.

    Each *trycache* entry holds the indentation string of an open ``try``
    at index 0; for each one (innermost first, i.e. in reverse order) an
    ``except Exception: pass`` handler at that indentation is appended to
    *code*.  Returns the completed source text.
    """
    pieces = [code]
    for entry in reversed(trycache):
        indent = entry[0]
        pieces.append('\n' + indent + 'except Exception:\n' + indent + ' pass')
    return ''.join(pieces)
def get_curr_apis(ft, file):
    """Resolve candidate APIs for a project-defined type *ft*.

    Converts *file*'s path (relative to the global root_path) into a dotted
    module name, then asks get_item_methods() for the members of *ft* within
    that module.
    """
    relative = re.sub(root_path, '', file)
    dotted = re.sub(r'\/', '.', relative)
    dotted = dotted[:-3]  # strip the trailing '.py'
    return get_item_methods(dotted, ft)
def get_typeshed_apis(ft):
    """Look up member names for type *ft* in the local typeshed.txt index.

    Generic parameters (e.g. 'List[int]') are stripped first.  A line matches
    when it contains '.<ft>.' anywhere or starts with '<ft>.'; the final
    dotted component of each match is collected, de-duplicated in order.
    """
    ft = re.sub(r'\[.*\]', '', ft.strip())
    needle_mid = '.' + ft + '.'
    needle_head = ft + '.'
    found = []
    with open('typeshed.txt') as fh:
        for raw in fh.readlines():
            if needle_mid in raw or raw.startswith(needle_head):
                entry = raw.strip()
                member = entry[entry.rfind('.') + 1:]
                if member not in found:
                    found.append(member)
    return found
#inferred type, caller
def get_candidates(ft,caller,file):
    """Map an inferred type *ft* to a {caller: [candidate API names]} dict.

    Dispatches on the shape of the pytype-inferred type string: builtin
    containers use dir() on the builtin type, 'module' consults the import
    index, Union/Optional recurse, dotted names are resolved as module
    attributes, and bare identifiers are treated as project-local classes.
    Dunder members are filtered out before returning.  Also sets the global
    if_from_current_proj flag (1 unless the branch taken clears it).
    """
    # 'Type[X]' wraps a class object; unwrap to the class name itself.
    if ft.startswith('Type['):
        ft=ft[5:-1]
        print('type:',ft)
    candidates={}
    global if_from_current_proj
    if_from_current_proj=1
    if ft=='module':
        # Prefer the per-file import index; fall back to generic module funcs.
        for k,v in module_apis.items():
            if k==caller:
                candidates={caller:v}
                #print(candidates)
                return candidates
        candidates=get_module_funcs(caller)
    elif ft=='str':
        candidates={caller:dir(str)}
    elif re.match('List\[.*\]',ft):
        candidates={caller:dir(list)}
    elif re.match('Dict\[.*\]',ft):
        # 'iteritems' is added for Python-2-style call sites in the corpus.
        apsx=dir(dict)
        apsx.append('iteritems')
        candidates={caller:apsx}
    elif ft=='set' or re.match('Set\[.*\]',ft):
        candidates={caller:dir(set)}
    elif ft.endswith('[str]'):
        # e.g. 'Pattern[str]' -> recurse on the unparameterised type.
        candidates=get_candidates(ft[:-5],caller,file)
    elif ft=='bool':
        candidates={caller:dir(bool)}
    elif re.match('Union\[.*\]',ft):
        # Union: merge the candidates of every member type (Any/nothing skipped).
        ft=ft+'end'
        contents=GetMiddleStr(ft,'Union[',']end')
        contents=re.sub('\[.*\]','',contents)
        lss=contents.split(',')
        tmp=[]
        for k in lss:
            #print('Note!!')
            k=k.strip()
            #print(k)
            if k=='Any' or k=='nothing':
                continue
            tpdic=get_candidates(k,caller,file)
            for k,v in tpdic.items():
                tmp.extend(v)
        if_from_current_proj=0
        candidates={caller:tmp}
    elif re.match('Optional\[.*\]',ft):
        #ft=ft+'end'
        #contents=GetMiddleStr(ft,'Optional[',']end')
        #contents=re.sub('\[.*\]','',contents)
        #candidates=get_candidates(ft,caller,file)
        # Optional types are intentionally left without candidates.
        candidates={}
        if_from_current_proj=0
    #elif tuple int float since we haven't found these kinds of caller templely ignore.
    #elif re.match('Pattern\[.*\]',ft):
    #candidates={caller:dir(re.Pattern)}
    #elif re.match('Match\[.*\]',ft):
    #candidates={caller:dir(re.Match)}
    elif '.' in ft:
        # Dotted type: resolve <module>.<item> via the import machinery.
        index=ft.rfind('.')
        module=ft[:index]
        item=ft[index+1:]
        rindex=ft.find('.')
        rootmodule=ft[:rindex]
        candidates=get_item_funcs(rootmodule,module,item)
    elif ft=='Any' or ft=='None' or ft=='nothing':
        # Unknown type: fall back to the whole API vocabulary, unfiltered.
        candidates=get_all_apis()
        if_from_current_proj=0
        #print('Note!All types:')
        #print(candidates)
        return candidates
    elif re.match('[a-zA-Z0-9_]+',ft):
        #since in many case, the caller calls funcs defined behind the caller, we copy the original file into python lib to get candidates.
        candidates=get_curr_apis(ft,file)
    #print('Other types: '+ft)
    if len(candidates)==0:
        # Last resort: member names recorded in the typeshed index.
        typeshed_apis=get_typeshed_apis(ft)
        candidates.update({caller:typeshed_apis})
    #else:
    #if_from_current_proj=1
    # Strip dunder members from every candidate list before returning.
    for k,v in candidates.items():
        dag=[]
        #print('yes')
        #print(v,len(v))
        for j in range(0,len(v)):
            #print(j)
            if not v[j].startswith('__'):
                dag.append(v[j])
            #print("yes")
        #print(dag)
        candidates[k]=dag
    #print(candidates)
    return candidates
def get_callee(rec):
    """Split a call expression into (callee_name, callee_with_args).

    'obj.method(a, b)' -> ('method', 'method(a, b)'): the argument list is
    removed to locate the final dot, then the same split offset is applied
    to the original text so the argument list is preserved in the second
    element.
    """
    without_args = re.sub(r'\(.*\)', '', rec)
    dot = without_args.rfind('.')
    return without_args[dot + 1:], rec[dot + 1:]
def get_total(w, naming_context, files):
    """Total occurrence count of token *w* across *files* plus the local context.

    Per-file counts come from the precomputed proj_token_count index (keyed
    '<token>##<file>'); the in-progress code *naming_context* is counted
    directly as a substring.
    """
    total = 0.0
    for fname in files:
        total += proj_token_count.get(w + '##' + fname, 0)
    total += naming_context.count(w)
    return total
def get_conum(w, n, naming_context, files):
    """Count, over *files*, the entries of *w*'s line list that also appear in *n*'s.

    Performance fix: the original list comprehension tested membership
    against a list (O(len(x1) * len(y1)) per file); testing against a set of
    n's line numbers makes it O(len(x1) + len(y1)) while still counting
    duplicate entries in w's list exactly as before.
    """
    ret = 0.0
    for fi in files:
        k1 = w + '##' + fi
        k2 = n + '##' + fi
        if k1 in proj_token_no and k2 in proj_token_no:
            n_lines = set(proj_token_no[k2])
            ret += float(sum(1 for x in proj_token_no[k1] if x in n_lines))
    return ret
def get_conum_of_line(api, naming_line, naming_context, files):
    """Score how strongly *api*'s sub-tokens co-occur with the current line.

    Both the candidate API name and the (callee-stripped) source line are
    normalised by mapping punctuation and digits to spaces, then tokenised.
    The score is co-occurrence count divided by total occurrences of the
    API's tokens, or 0.0 when those tokens never appear at all.
    """
    strip_chars = string.punctuation + string.digits
    table = str.maketrans(strip_chars, ' ' * len(strip_chars))
    line_tokens = word_tokenize(naming_line.translate(table))
    api_tokens = word_tokenize(api.translate(table))
    total = 0.0
    conum = 0.0
    for tok in api_tokens:
        total = total + get_total(tok, naming_context, files)
        for other in line_tokens:
            conum += get_conum(tok, other, naming_context, files)
    if total != 0:
        return float(float(conum) / float(total))
    return 0.0
#proj_tokens
#proj_depends
def get_line_scores(aps, naming_line, naming_context, file):
    """Compute the line co-occurrence feature for every candidate API.

    The statistics corpus is every project file that does not import the
    current module (and is not the file itself), to avoid leaking the answer
    from dependants.  Dunder names, ALL_CAPS constants and bare underscores
    are skipped.
    """
    module_name = re.sub(r'\.py', '', file)
    module_name = module_name[module_name.rfind('/') + 1:]
    corpus = []
    for path, import_lines in proj_depends.items():
        if path == file:
            continue
        references_me = any(module_name in imp for imp in import_lines)
        if not references_me:
            corpus.append(path)
    scores = {}
    for api in aps:
        if api.startswith('__') or re.match('[A-Z0-9_]+$', api) or api.strip() == '_':
            continue
        scores[api] = get_conum_of_line(api, naming_line, naming_context, corpus)
    return scores
def get_total_infile(w, files):
    """Count how many of *files* contain token *w* at least once."""
    hits = 0.0
    for fname in files:
        if (w + '##' + fname) in proj_token_count:
            hits += 1.0
    return hits
def get_conum_infile(w, item, files):
    """Count how many of *files* contain both token *w* and token *item*."""
    hits = 0.0
    for fname in files:
        has_w = (w + '##' + fname) in proj_token_no
        has_item = (item + '##' + fname) in proj_token_no
        if has_w and has_item:
            hits += 1.0
    return hits
def get_conum_of_con(api, naming_context, files):
    """Score *api* against the whole preceding context, weighting later lines.

    Each context line gets a file-level co-occurrence ratio between the
    API's sub-tokens and the line's tokens; line i contributes with weight
    (i + 1), so code nearer the recommendation point counts more.  The
    weighted sum is normalised by len(lines) + 1.
    """
    lines = naming_context.strip().split('\n')
    strip_chars = string.punctuation + string.digits
    table = str.maketrans(strip_chars, ' ' * len(strip_chars))
    weighted = 0.0
    for i, raw in enumerate(lines):
        line_tokens = word_tokenize(raw.translate(table))
        api_tokens = word_tokenize(api.translate(table))
        total = 0.0
        for tok in api_tokens:
            total = total + get_total_infile(tok, files)
        conum = 0.0
        for tok in api_tokens:
            for other in line_tokens:
                conum = conum + get_conum_infile(tok, other, files)
        if total != 0:
            score = float(float(conum) / float(total))
            weighted += float(i + 1) * score
    return float(float(weighted) / float(len(lines) + 1.0))
def get_conum_scores(aps, naming_context, file):
    """Compute the context co-occurrence feature for every candidate API.

    Mirrors get_line_scores(): the corpus is every project file that does
    not import the current module (excluding the file itself); dunder,
    ALL_CAPS and bare-underscore names are skipped.
    """
    module_name = re.sub(r'\.py', '', file)
    module_name = module_name[module_name.rfind('/') + 1:]
    corpus = []
    for path, import_lines in proj_depends.items():
        if path == file:
            continue
        if not any(module_name in imp for imp in import_lines):
            corpus.append(path)
    scores = {}
    for api in aps:
        if api.startswith('__') or re.match('[A-Z0-9_]+$', api) or api.strip() == '_':
            continue
        scores[api] = get_conum_of_con(api, naming_context, corpus)
    return scores
def get_results(arr):
    """Summarise recommendation ranks: Top-k hit rates and MRR.

    Prints the raw ranks and the aggregate metrics, writes a CSV line to
    testdata/<CURRENT_PROJ>_result.txt, and returns
    [top1, top2, top3, top4, top5, top10, mrr] as fractions.

    Improvements over the original: the unrolled if/elif cascade over rank
    values is replaced by one threshold comparison per k (a rank r counts
    toward top-k exactly when r <= k, which is what the cascade computed),
    and an empty *arr* now returns all zeros instead of raising
    ZeroDivisionError.
    """
    print('Ranks :' + str(arr))
    if not arr:  # guard: no recommendations were recorded
        return [0.0] * 7
    n = float(len(arr))
    mrr = sum(1.0 / float(r) for r in arr) / n
    ks = (1, 2, 3, 4, 5, 10, 20)
    tops = [sum(1 for r in arr if r <= k) for k in ks]
    tp1, tp2, tp3, tp4, tp5, tp10, tp20 = [float(t / n) for t in tops]
    print("Top-k:", tops[0], tops[1], tops[2], tops[3], tops[4], tops[5], tops[6], len(arr))
    print("Top-k+mrr:", tp1, tp2, tp3, tp4, tp5, tp10, tp20, mrr)
    s = str(tp1) + ',' + str(tp2) + ',' + str(tp3) + ',' + str(tp4) + ',' + str(tp5) + ',' + str(tp10) + ',' + str(tp20) + ',' + str(mrr) + '\n'
    with open('testdata/' + CURRENT_PROJ + '_result.txt', 'w+') as ft:
        ft.write(s)
    # Note: tp20 is printed/persisted but, as in the original, not returned.
    return [tp1, tp2, tp3, tp4, tp5, tp10, mrr]
def get_time(ts):
    """Print the mean recommendation time and append it to the result file.

    Uses sum()/len() instead of the manual accumulation loop and guards
    against an empty list (previously a ZeroDivisionError); an empty *ts*
    reports 0.0.
    """
    ret = float(sum(ts) / float(len(ts))) if ts else 0.0
    print('Average time: ', ret)
    with open('testdata/' + CURRENT_PROJ + '_result.txt', 'a+') as ft:
        ft.write(str(ret) + '\n')
def get_rec_point(file):
    """Run the full recommendation pipeline over every call site in *file*.

    For each 'receiver.method(...)' expression: build a compilable code
    prefix (closing open brackets and try-blocks), infer the receiver's
    type with pytype, collect candidate APIs, compute the four ranking
    features (dataflow, token similarity, line and context co-occurrence),
    score candidates with the trained random-forest model, and record the
    rank of the true callee in the global pranks/pinranks accumulators.
    Fix applied: the "not recommended" section assigned len(rev_cans) to a
    typo'd name ('lenghk') and re-tested the already-truncated 'lenthk';
    the length is now recomputed explicitly (same effective bound, since
    rev_cans and cans have equal length).
    """
    print('DEAL-WITH:'+file)
    #with open('types/types.txt','a+') as ff:
    #ff.write('FILE:'+file)
    with open(file) as f:
        lines=f.readlines()
    #print(lines)
    precode=''
    trynum=0
    trycache=[]
    kflag=0
    lno=0
    #s=''
    comment_flag=0
    calls=[]
    for line in lines:
        #print(line)
        lno+=1
        # --- skip comment lines and triple-quoted string/docstring bodies ---
        if line.strip().startswith('#'):
            continue
        if re.match('[bru]*\'\'\'$',line.strip()) or re.match('[bru]*\"\"\"$',line.strip()):
            if comment_flag==0:
                comment_flag=1
            else:
                comment_flag=0
            continue
        elif (re.match('[bru]*\'\'\'',line.strip()) or re.match('[bru]*\"\"\"',line.strip())) and (re.match('.*[bru]*\'\'\'$',line.strip()) or re.match('.*[bru]*\"\"\"$',line.strip())):
            continue
        elif re.match('[bru]*\'\'\'',line.strip()) or re.match('[bru]*\"\"\"',line.strip()) or re.match('.*[bru]*\'\'\'$',line.strip()) or re.match('.*[bru]*\"\"\"$',line.strip()):
            if comment_flag==0:
                comment_flag=1
            else:
                comment_flag=0
            continue
        if comment_flag==1:
            continue
        # --- track open try-blocks so generated prefixes can be closed later ---
        if 'try:' in line:
            trynum+=1
            trycache.append(get_bank(line))
        elif trynum>0 and ('except' in line or 'finally:' in line):
            (bank,lenth)=get_bank(line)
            for i in range(len(trycache)-1,-1,-1):
                if trycache[i][1]==lenth:
                    trynum-=1
                    del trycache[i]
        # --- a recommendation point is a 'receiver.method(...)' expression ---
        recobj=re.findall('[a-zA-Z0-9_\.\[\]]+\.[a-zA-Z0-9\_]+\(.*\)',line)
        #print(recobj)
        if len(recobj)==0:
            precode+=line
            continue
        #print(file)
        #print(recobj)
        rec=recobj[0]
        caller=get_caller(rec)
        if caller.startswith('['):
            caller=caller[1:]
        callee,rcallee=get_callee(rec)
        # Private/constant/underscore callees are not recommendation targets.
        if callee.startswith('_') or re.match('[A-Z0-9_]+$',callee) or callee.strip()=='_':
            precode+=line
            continue
        # Evaluate each caller.callee pair at most once per file.
        cp=caller+'.'+callee
        if cp in calls:
            precode+=line
            continue
        else:
            calls.append(cp)
        i=0
        latest_line=line.replace(rcallee,'unknown_api()')
        #print('NOTE!',latest_line)
        # --- build a compilable prefix 'finalc' ending in reveal_type(caller) ---
        tpp=precode.strip()
        if tpp.endswith(','):
            newcontext=tpp[:-1]
            finalc=check(newcontext)
            #print(finalc)
            current_context=finalc+'\n'+latest_line
            prelast=precode.strip().split('\n')[-1]
            for i in range(0,len(prelast)):
                if prelast[i]!=' ':
                    break
            finalc+='\n'+line[:i-4]+'reveal_type('+caller+')'
        elif tpp.endswith('(') or tpp.endswith('{') or tpp.endswith('['):
            newcontext=tpp
            finalc=check(newcontext)
            current_context=finalc+'\n'+latest_line
            #print(finalc)
            prelast=precode.strip().split('\n')[-1]
            for i in range(0,len(prelast)):
                if prelast[i]!=' ':
                    break
            finalc+='\n'+line[:i]+'reveal_type('+caller+')'
        else:
            for i in range(0,len(line)):
                if line[i]!=' ':
                    break
            #print(i)
            #print(line)
            newcontext=tpp
            finalc=check(newcontext)
            finalc+='\n'+line[:i]+'reveal_type('+caller+')'
            current_context=precode+latest_line
        if len(trycache)>0:
            finalc=check_try(finalc,trycache)
        #print(finalc)
        #print('[Process[1] : Preprocessing # Getting reommendation point, simple type inference, possible API candidates and current incomplete code context.]')
        #print(file+'#'+str(lno)+'#'+caller+'#'+callee)
        #if '.' in caller:
        #ft='Any'
        #else:
        ft=get_type(finalc,file)
        ft=ft.strip()
        print(line.strip())
        print(file+'#'+str(lno)+'#'+caller+':'+ft+'#'+callee)
        #print(Nonenum,Anynum,OKnum)
        # --- gather candidate APIs, with heuristics when pytype was unhelpful ---
        aps=[]
        if ft=='None' or ft=='Any':
            if caller=='self':
                for d in all_defs:
                    dname=d.strip().split(' ')[1]
                    aps.append(dname)
            elif caller=='str' or caller=='s' or caller=='string':
                ft='str'
            elif caller=='sys.stderr' or caller=='sys.stdout' or caller=='sys.stdin':
                ft='module'
            elif caller=='log':
                ft='logging.Logger'
                caller=ft
            elif re.match('for .* in .*\..*\(.*\).*\:',line.strip()):
                aps=dir(dict)
                aps.append('iteritems')
            else:
                #tp=caller.split('.')
                #fc=tp[0]
                if '.' in caller:
                    xindex=caller.find('.')
                    fc=caller[:xindex]
                    xattr=caller[xindex+1:]
                else:
                    xattr=caller
                    fc=caller
                #print('check module:',fc)
                #print('check attr:',xattr)
                if fc in stdlib:
                    ft='module'
                    print('stdlib!',fc)
                    #print('module!',caller)
                    try:
                        module1=importlib.import_module(caller)
                        aps=dir(module1)
                    except Exception:
                        try:
                            module2=importlib.import_module(fc)
                            attr=getattr(module2,xattr)
                            aps=dir(attr)
                        except Exception:
                            aps=[]
                else:
                    for curapi in cur_apis:
                        if '.'+caller+'.' in curapi:
                            idx=curapi.find('.'+caller+'.')
                            canapi=curapi[idx+1:]
                            if not '.' in canapi:
                                aps.append(canapi)
                                print('get api form json!')
                                print(canapi)
        if len(aps)==0:
            apis = get_candidates(ft,caller,file)
            for k,v in apis.items():
                aps.extend(v)
        if len(aps)==0:
            precode+=line
            continue
        global pranks,ptimes,pinranks
        # Constructor-style names are ignored entirely.
        if re.match('[A-Z]+[A-Za-z]+',callee) or callee.startswith('_'):
            print('CONSTRUCTOR,IGNORE')
            precode+=line
            continue
        if callee in aps:
            print('API IV')
        else:
            # Out-of-vocabulary callee: record a sentinel rank of 100 and
            # grow the global vocabulary so later files can find it.
            print('API OOV')
            pranks.append(100)
            global all_apis_add,all_apis
            all_apis_add.append(callee)
            tmpx=all_apis['all_apis']
            tmpx.extend(all_apis_add)
            tmpx=list(set(tmpx))
            all_apis['all_apis']=tmpx
            ptimes.append(0.0)
            precode+=line
            continue
        #ss=''
        #for ap in aps:
        #ss=ss+ap+','
        #ss=ss[:-1]+'\n'
        #s=caller+':'+ft+'#'+callee+'\n'
        # --- feature computation ---
        s1=time.time()
        #print('[Process[2] : Constructing dataflow hints.]')
        current_dataflow=get_dataflow.get_current_dataflow2(current_context,caller)
        #print(maxflow)
        if len(current_dataflow)==0:
            precode+=line
            continue
        maxflow=max(current_dataflow,key=len)
        #print(maxflow)
        dataflow_scores=get_dataflow.get_dataflow_scores(aps,maxflow,current_dataflow,ft,callee)
        tosim_scores=get_dataflow.get_tosim_scores(aps,maxflow,current_dataflow,ft,callee)
        try:
            naming_line=re.sub(callee,'',line)
        except Exception as err:
            print(err)
            print(line)
            sys.exit()
            precode+=line
            continue
        naming_context=precode
        line_scores=get_line_scores(aps,naming_line,naming_context,file)
        e1=time.time()
        print(e1-s1)
        label=0
        apis=[]
        # --- emit one feature row per candidate into test.csv ---
        with open('test.csv','w+') as f:
            f.write('f1,f2,f3,f4\n')
        start=time.time()
        if ft=='None' or ft=='Any' or ft=='nothing':
            # No type: the context co-occurrence feature (f4) is fixed at 0.0.
            for api in aps:
                if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
                    continue
                if api==callee:
                    label=1
                else:
                    label=0
                apis.append(api)
                try:
                    s=str(dataflow_scores[api])+','+str(tosim_scores[api])+','+str(line_scores[api])+',0.0\n'
                    with open('test.csv','a+') as f:
                        f.write(s)
                except Exception as err:
                    print(err)
                    sys.exit(0)
        else:
            flag=0
            conum_scores=get_conum_scores(aps,naming_context,file)
            for api in aps:
                if api.startswith('__') or re.match('[A-Z0-9_]+$',api) or api.strip()=='_':
                    continue
                if api==callee:
                    label=1
                else:
                    label=0
                apis.append(api)
                try:
                    s=str(dataflow_scores[api])+','+str(tosim_scores[api])+','+str(line_scores[api])+','+str(conum_scores[api])+'\n'
                    with open('test.csv','a+') as f:
                        f.write(s)
                except Exception as err:
                    print(err)
                    sys.exit(0)
        # --- rank candidates with the trained random forest ---
        test_data=pd.read_csv('test.csv')
        #print(apis)
        #print(len(apis))
        #print(test_data)
        clf=joblib.load('traincsv/'+CURRENT_PROJ+'1.pkl')
        result=clf.predict_proba(test_data)
        candidates={}
        for i in range(0,len(apis)):
            candidates[apis[i]]=result[i][1]
        cans=sorted(candidates.items(), key=lambda x: x[1], reverse=True)
        #print(cans)
        end = time.time()
        ts=end - start
        print(ts)
        print('--------------------------------------------------------------------------------------------------')
        print('Recommended Functions for Caller: ' + caller)
        print('--------------------------------------------------------------------------------------------------')
        lenthk=len(cans)
        exists_rec = []
        if lenthk > 10:
            lenthk = 10
        for i in range(0,lenthk):
            print(str(i+1)+' : ' + caller + '.' + cans[i][0] + '()')
            exists_rec.append(cans[i][0])
        rev_cans = sorted(candidates.items(), key=lambda x: x[1])
        print('--------------------------------------------------------------------------------------------------')
        print('Functions not Reccomended for Caller: ' + caller)
        print('--------------------------------------------------------------------------------------------------')
        # Fix: recompute the bound from rev_cans (the original wrote to the
        # typo'd 'lenghk' and re-tested the already-truncated 'lenthk').
        lenthk=len(rev_cans)
        if lenthk > 10:
            lenthk = 10
        for i in range(0,lenthk):
            if rev_cans[i][0] not in exists_rec:
                print(str(i+1)+' : ' + caller + '.' + rev_cans[i][0] + '()')
        # Temporarily commented out Google search feature for faster testing of accuracy. Uncomment to reimplement.
        """
        print('--------------------------------------------------------------------------------------------------')
        print('Press c to continue or type a number from the reccomended function list to search google for more information about the function: ')
        google_input = input()
        while google_input != 'c':
            if google_input == '1':
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[0][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            elif google_input == '2':
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[1][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            elif google_input == '3':
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[2][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            elif google_input == '4':
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[3][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            elif google_input == '5':
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[4][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            elif google_input == '6':
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[5][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            elif google_input == '7':
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[6][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            elif google_input == '8':
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[7][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            elif google_input == '9':
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[8][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            elif google_input == '10':
                req_result = requests.get('https://www.google.com/search?q=python+' + cans[9][0]).text
                soup = bs4.BeautifulSoup(req_result, 'html.parser')
                headings = soup.find_all('h3')
                links = soup.find_all('a')
                print('Top google search result for function: ')
                print('--------------------------------------------------------------------------------------------------')
                for heading in headings:
                    if 'python' in heading.getText().lower():
                        print(heading.getText())
                        break
                for link in links:
                    link_string = link.get('href').lower()
                    if 'python' in link_string and 'www' in link_string and 'google' not in link_string:
                        print(link.get('href').replace('/url?q=', '').split('&', 1)[0])
                        break
            print('--------------------------------------------------------------------------------------------------')
            print('Type another number to search google or press c to continue')
            google_input = input()
        """
        # --- record the rank of the true callee ---
        rank=21
        for k in range(0,len(cans)):
            if cans[k][0]==callee:
                rank=k+1
        #print('Ranked '+str(rank))
        if rank > 20:
            pranks.append(rank)
            #if atag==1:
            #aranks.append(rank)
            # Record: PRIAN cannot recommend, jumo to next recommendation.
        else:
            # PRIAN successfully recommends.
            pranks.append(rank)
            #if atag==1:
            #aranks.append(rank)
            ptimes.append(ts)
            #alltimes+=ts+'\n'
            pinranks.append(rank)
        precode+=line
    # --- per-file aggregation into the global top-k accumulators ---
    temp_arr = get_results(pinranks)
    topk_array[0] += temp_arr[0]
    topk_array[1] += temp_arr[1]
    topk_array[2] += temp_arr[2]
    topk_array[3] += temp_arr[3]
    topk_array[4] += temp_arr[4]
    topk_array[5] += temp_arr[5]
    topk_array[6] += temp_arr[6]
    global num_of_apis
    if topk_array[5] != 0:
        num_of_apis += 1
    get_results(pranks)
    #get_time(ptimes)
def count_all_apis():
    """Build the global API vocabulary as {'all_apis': [names]}.

    Merges three sources, de-duplicated in insertion order: members recorded
    in module_apis (minus dunders and names starting with an uppercase
    letter or digit), the final dotted component of each entry in
    testJson/<CURRENT_PROJ>.json, and the builtin names in builtin.txt.
    """
    names = []
    for _module, members in module_apis.items():
        for member in members:
            if member.startswith('__'):
                continue
            if re.match('[A-Z0-9]+', member):
                continue
            if member not in names:
                names.append(member)
    #print(names)
    with open('testJson/' + CURRENT_PROJ + '.json') as fh:
        for raw in fh.readlines():
            entry = raw.strip()
            item = entry[entry.rfind('.') + 1:]
            if (not item.startswith('__')) and (item not in names):
                names.append(item)
    with open('builtin.txt') as fh:
        for raw in fh.readlines():
            builtin_name = raw.strip()
            if builtin_name not in names:
                names.append(builtin_name)
    return {'all_apis': names}
def dealwith(curfile):
    """Refresh the per-file API indexes, then run recommendation on *curfile*.

    Rebuilds module_apis from the file's imports, recomputes the global API
    vocabulary merged with every OOV name discovered so far (all_apis_add),
    and finally walks the file's recommendation points.
    """
    global module_apis, all_apis
    module_apis = get_module_methods(curfile)
    all_apis = count_all_apis()
    merged = all_apis['all_apis']
    merged.extend(all_apis_add)
    all_apis['all_apis'] = list(set(merged))
    get_rec_point(curfile)
def get_all_apis():
    # Read-only accessor for the module-level API vocabulary maintained by
    # count_all_apis()/dealwith(); shape is {'all_apis': [names]}.
    return all_apis
def get_proj_tokens(iret_list):
    """Index every project file's tokens for the co-occurrence features.

    Fills three module-level maps keyed by '<token>##<file>':
    proj_token_count (substring occurrence counts per raw line),
    proj_token_no (the 1-indexed line numbers where a token appears), and
    proj_depends (raw import lines per file).  Blank lines still advance
    the line counter so the recorded numbers match the real file.
    """
    global proj_token_count, proj_token_no, proj_depends
    strip_chars = string.punctuation + string.digits
    table = str.maketrans(strip_chars, ' ' * len(strip_chars))
    for path in iret_list:
        with open(path, encoding='ISO-8859-1') as fh:
            file_lines = fh.readlines()
        for lineno, raw in enumerate(file_lines, start=1):
            stripped = raw.strip()
            if stripped == '' or re.sub(' ', '', stripped) == '':
                continue
            if 'import ' in raw:
                proj_depends.setdefault(path, []).append(raw)
            for tk in word_tokenize(stripped.translate(table)):
                key = tk + '##' + path
                # substring count over the raw line, accumulated per token
                proj_token_count[key] = proj_token_count.get(key, 0) + raw.count(tk)
                proj_token_no.setdefault(key, []).append(lineno)
###main entry###
# if __name__=="main":
# __main__(CURRENT_PROJ,filePath)
# --- module-level state shared by the functions above ---
ret_list=[]
proj_token_count={}
proj_token_no={}
proj_depends={}
cur_apis=[]
module_apis={}
all_apis={}
pranks=[]
ptimes=[]
pinranks=[]
all_apis_add=[]
root_path=''
Nonenum=Anynum=OKnum=0
all_defs=[]
all_recs=''
#alltimes=''
# --- configuration: project under test and where its sources live ---
CURRENT_PROJ='flask'
filePath='testdata/'
# truncate the per-recommendation feature file
with open('test.csv','w+') as f:
    f.write('')
# re-initialised here (duplicates the block above) before the actual run
Nonenum=Anynum=OKnum=0
pranks=[]
ptimes=[]
pinranks=[]
all_apis_add=[]
root_path = filePath+CURRENT_PROJ
print('LOAD-PROJ:',root_path)
# NOTE(review): chained assignment makes file_list and dir_list alias the
# SAME list object; get_file_path presumably fills them — confirm intended.
file_list = dir_list = []
ret_list=[]
get_file_path(root_path,file_list,dir_list)
#ret_list=list(set(ret_list))
print(len(ret_list))
# 90/10 split of the project's files into train/test partitions
trainlen=int(len(ret_list)/10*9)
#print(trainlen)
train_list=ret_list[:trainlen]
test_list=ret_list[trainlen:]
print(train_list)
print(test_list)
#sys.exit()
#proj_tokens={}
proj_token_count={}
proj_token_no={}
proj_depends={}
# build the token/line-number/import indexes over the whole project
get_proj_tokens(ret_list)
module_apis={}
id=0
special_flag=0
if_from_current_proj=0
callps=[]
all_apis={}
#======MAIN FUNC ENTRY======
for ifile in test_list:
    dealwith(ifile)
#with open('/home/user/PyART/testdatak/'+CURRENT_PROJ+'_time.txt','w+') as f:
    #f.write(str(ptimes))
# average the per-file top-k sums accumulated by get_rec_point()
for x, y in enumerate(topk_array):
    topk_array[x] = y/num_of_apis
print("Top K Averages for Random Forest: Top 1: " + str(topk_array[0]) + " Top 2: " + str(topk_array[1]) + " Top 3: " + str(topk_array[2]) + " Top 4: " + str(topk_array[3]) + " Top 5: " + str(topk_array[4]) + " Top 10: " + str(topk_array[5]) + " MRR: " + str(topk_array[6]))
4f3d11426954538b02554adeac339fdd3a495c7d | 734 | py | Python | nehushtan/mysql/MySQLAnyTable.py | sinri/nehushtan | 6fda496e16a8d443a86c617173d35f31c392beb6 | [
"MIT"
] | null | null | null | nehushtan/mysql/MySQLAnyTable.py | sinri/nehushtan | 6fda496e16a8d443a86c617173d35f31c392beb6 | [
"MIT"
] | 1 | 2020-11-20T03:10:23.000Z | 2020-11-20T09:30:34.000Z | nehushtan/mysql/MySQLAnyTable.py | sinri/nehushtan | 6fda496e16a8d443a86c617173d35f31c392beb6 | [
"MIT"
] | 1 | 2021-10-13T10:16:58.000Z | 2021-10-13T10:16:58.000Z | # Copyright (c) 2020. Sinri Edogawa
from nehushtan.mysql.MySQLKit import MySQLKit
from nehushtan.mysql.MySQLTableMixin import MySQLTableMixin
class MySQLAnyTable(MySQLTableMixin):
    """Generic table accessor binding a MySQLKit connection to an arbitrary
    (schema, table) pair, so no dedicated subclass per table is required.

    Fix: the source arrived with its indentation stripped (unparseable);
    this restores the structure implied by the statement sequence.
    """

    def __init__(self, mysql_kit: MySQLKit, table_name: str, schema_name: str = ''):
        super().__init__()
        self._mysql_kit = mysql_kit      # shared connection wrapper
        self._table_name = table_name
        self._schema_name = schema_name  # '' -> connection's default schema

    def __del__(self):
        """
        Since 0.4.10
        """
        # Drop the kit reference so the underlying connection can be released.
        self._mysql_kit = None

    def get_mysql_kit(self) -> MySQLKit:
        return self._mysql_kit

    def mapping_table_name(self) -> str:
        return self._table_name

    def mapping_schema_name(self) -> str:
        return self._schema_name
| 25.310345 | 84 | 0.666213 |
d2f8ace96e9e1e61eaac9eb1a3345f58ed2863c2 | 71,813 | py | Python | utils/utils.py | chumingqian/Model_Compression_For_YOLOV4 | 3bc803ff6ebb4000bf1f2cafc61c7711fea7a2ab | [
"Apache-2.0"
] | 13 | 2020-12-14T02:22:47.000Z | 2021-08-07T09:58:09.000Z | utils/utils.py | chumingqian/Model_Compression_For_YOLOV4 | 3bc803ff6ebb4000bf1f2cafc61c7711fea7a2ab | [
"Apache-2.0"
] | 2 | 2021-02-02T17:37:40.000Z | 2021-02-10T01:40:11.000Z | utils/utils.py | chumingqian/Model_Compression_For_YOLOV4 | 3bc803ff6ebb4000bf1f2cafc61c7711fea7a2ab | [
"Apache-2.0"
] | 3 | 2020-12-14T02:22:48.000Z | 2021-08-01T19:03:18.000Z | import glob
import math
import os
import random
import shutil
import subprocess
from pathlib import Path
from sys import platform
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torchvision
from tqdm import tqdm
import torch.nn.functional as F
from . import torch_utils # , google_utils
# Set printoptions
# Wide single-line tensor/array dumps with 5 significant digits for logging.
torch.set_printoptions(linewidth=320, precision=5, profile='long')
np.set_printoptions(linewidth=320, formatter={'float_kind': '{:11.5g}'.format})  # format short g, %precision=5
matplotlib.rc('font', **{'size': 11})
# Prevent OpenCV from multithreading (to use PyTorch DataLoader)
cv2.setNumThreads(0)
def init_seeds(seed=0):
    # Seed Python's and NumPy's RNGs, then delegate the torch-specific
    # seeding (CUDA/cuDNN settings) to the project's torch_utils helper.
    random.seed(seed)
    np.random.seed(seed)
    torch_utils.init_seeds(seed=seed)
def seed_torch(seed=1029):
    """Seed every RNG source so runs are exactly repeatable.

    Covers Python's hash seed and random module, NumPy, and torch on both
    CPU and every CUDA device, and forces deterministic cuDNN kernels.
    """
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade kernel-autotuning speed for bitwise reproducibility.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def load_classes(path):
    """Read class names from *path*, one per line, dropping empty lines
    (including the trailing one produced by a final newline)."""
    with open(path, 'r') as f:
        raw_names = f.read().split('\n')
    return [name for name in raw_names if name]
def labels_to_class_weights(labels, nc=80):
    """Derive per-class sampling weights as normalised inverse frequencies.

    *labels* is a list of (n, 5) arrays whose first column is the class id
    (YOLO label format [class, x, y, w, h]).  Classes absent from the data
    are treated as having one occurrence so the inversion never divides by
    zero.  Returns a torch.Tensor of length *nc* summing to 1, or an empty
    tensor when no labels are loaded.

    Fixes: uses np.int64 instead of the np.int alias (deprecated in NumPy
    1.20, removed in 1.24), and guards an empty *labels* list, which
    previously raised IndexError on labels[0].
    """
    if not labels or labels[0] is None:  # no labels loaded
        return torch.Tensor()
    stacked = np.concatenate(labels, 0)  # shape (total_boxes, 5)
    class_ids = stacked[:, 0].astype(np.int64)
    counts = np.bincount(class_ids, minlength=nc)  # occurrences per class
    counts[counts == 0] = 1  # replace empty bins with 1 to avoid div-by-zero
    weights = 1 / counts  # inverse frequency
    weights /= weights.sum()  # normalise to a probability distribution
    return torch.from_numpy(weights)
def labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):
    """Weight each image by the class weights of the objects it contains.

    For every image's label array (first column = class id), counts per-class
    occurrences and dots them with *class_weights*, yielding one weight per
    image (useful for weighted image sampling).

    Fix: uses np.int64 instead of the np.int alias (deprecated in NumPy
    1.20, removed in 1.24).  The shared np.ones(80) default is kept for
    interface compatibility; it is only read, never mutated.
    """
    n = len(labels)
    class_counts = np.array(
        [np.bincount(labels[i][:, 0].astype(np.int64), minlength=nc) for i in range(n)])
    image_weights = (class_weights.reshape(1, nc) * class_counts).sum(1)
    return image_weights
def coco_class_weights():
    """Normalised inverse-frequency weights for the 80 COCO classes.

    The hard-coded counts are per-class object frequencies from COCO
    train2014; rarer classes receive larger weights and the result sums
    to 1.
    """
    counts = [187437, 4955, 30920, 6033, 3838, 4332, 3160, 7051, 7677, 9167, 1316, 1372, 833, 6757, 7355, 3302, 3776, 4671,
              6769, 5706, 3908, 903, 3686, 3596, 6200, 7920, 8779, 4505, 4272, 1862, 4698, 1962, 4403, 6659, 2402, 2689,
              4012, 4175, 3411, 17048, 5637, 14553, 3923, 5539, 4289, 10084, 7018, 4314, 3099, 4638, 4939, 5543, 2038, 4004,
              5053, 4578, 27292, 4113, 5931, 2905, 11174, 2873, 4036, 3415, 1517, 4122, 1980, 4464, 1190, 2302, 156, 3933,
              1877, 17630, 4337, 4624, 1075, 3468, 135, 1380]
    inv = 1 / torch.Tensor(counts)
    return inv / inv.sum()
def coco80_to_coco91_class():  # converts 80-index (val2014) to 91-index (paper)
    """Map the 80 contiguous COCO class indices to the original 91-id paper scheme.
    https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/
    """
    # The paper numbering 1..90 skips these ids (classes absent from the 2014 set).
    absent = {12, 26, 29, 30, 45, 66, 68, 69, 71, 83}
    return [cid for cid in range(1, 91) if cid not in absent]
def xyxy2xywh(x):
    """Convert nx4 boxes from corner form [x1, y1, x2, y2] (xy1 = top-left,
    xy2 = bottom-right) to center form [x, y, w, h]. Accepts torch or numpy."""
    boxes = np.zeros_like(x) if not isinstance(x, torch.Tensor) else torch.zeros_like(x)
    boxes[:, 2] = x[:, 2] - x[:, 0]          # width
    boxes[:, 3] = x[:, 3] - x[:, 1]          # height
    boxes[:, 0] = (x[:, 0] + x[:, 2]) / 2    # center x
    boxes[:, 1] = (x[:, 1] + x[:, 3]) / 2    # center y
    return boxes
def xywh2xyxy(x):
    """Convert nx4 boxes from center form [x, y, w, h] to corner form
    [x1, y1, x2, y2] (xy1 = top-left, xy2 = bottom-right). Accepts torch or numpy."""
    boxes = np.zeros_like(x) if not isinstance(x, torch.Tensor) else torch.zeros_like(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    boxes[:, 0] = x[:, 0] - half_w  # top-left x
    boxes[:, 1] = x[:, 1] - half_h  # top-left y
    boxes[:, 2] = x[:, 0] + half_w  # bottom-right x
    boxes[:, 3] = x[:, 1] + half_h  # bottom-right y
    return boxes
# def xywh2xyxy(box):
# # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2]
# if isinstance(box, torch.Tensor):
# x, y, w, h = box.t()
# return torch.stack((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).t()
# else: # numpy
# x, y, w, h = box.T
# return np.stack((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).T
#
#
# def xyxy2xywh(box):
# # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h]
# if isinstance(box, torch.Tensor):
# x1, y1, x2, y2 = box.t()
# return torch.stack(((x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1)).t()
# else: # numpy
# x1, y1, x2, y2 = box.T
# return np.stack(((x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1)).T
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    """Rescale xyxy coords in-place from the letterboxed img1_shape back to the
    original img0_shape, then clip them to the image bounds.

    ratio_pad: optional ((gain, _), (pad_w, pad_h)) precomputed by the resize;
    when None, gain/padding are derived from the two shapes.
    """
    if ratio_pad is not None:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]
    else:  # derive from shapes: gain = old / new, padding centers the image
        gain = max(img1_shape) / max(img0_shape)
        pad = ((img1_shape[1] - img0_shape[1] * gain) / 2,
               (img1_shape[0] - img0_shape[0] * gain) / 2)  # (w, h) padding
    coords[:, [0, 2]] -= pad[0]  # undo x padding
    coords[:, [1, 3]] -= pad[1]  # undo y padding
    coords[:, :4] /= gain        # undo resize
    clip_coords(coords, img0_shape)
    return coords
def clip_coords(boxes, img_shape):
    """Clip xyxy boxes in-place to the image bounds; img_shape is (height, width)."""
    height, width = img_shape[0], img_shape[1]
    # columns 0/2 are x coords (bounded by width), 1/3 are y coords (bounded by height)
    for col, bound in ((0, width), (1, height), (2, width), (3, height)):
        boxes[:, col].clamp_(0, bound)
def ap_per_class(tp, conf, pred_cls, target_cls):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rafaelpadilla/Object-Detection-Metrics.
    # Arguments
        tp:    True positives (nparray, nx1 or nx10 — one column per IoU threshold).
        conf:    Objectness value from 0-1 (nparray).
        pred_cls: Predicted object classes (nparray).
        target_cls: True object classes (nparray).
    # Returns
        (p, r, ap, f1, unique_classes): per-class precision, recall and AP
        (one column per IoU threshold), F1 score, and the class ids as int32.
    """
    # Sort by objectness (descending) so cumulative TP/FP counts below are correct
    i = np.argsort(-conf)
    tp, conf, pred_cls = tp[i], conf[i], pred_cls[i]
    # Find unique classes
    unique_classes = np.unique(target_cls)
    # Create Precision-Recall curve and compute AP for each class
    pr_score = 0.1  # score to evaluate P and R https://github.com/ultralytics/yolov3/issues/898
    s = [len(unique_classes), tp.shape[1]]  # number class, number iou thresholds (i.e. 10 for mAP0.5...0.95)
    ap, p, r = np.zeros(s), np.zeros(s), np.zeros(s)
    for ci, c in enumerate(unique_classes):
        i = pred_cls == c  # detections of this class
        n_gt = (target_cls == c).sum()  # Number of ground truth objects
        n_p = i.sum()  # Number of predicted objects
        if n_p == 0 or n_gt == 0:
            continue
        else:
            # Accumulate FPs and TPs
            fpc = (1 - tp[i]).cumsum(0)
            tpc = tp[i].cumsum(0)
            # Recall
            recall = tpc / (n_gt + 1e-16)  # recall curve
            r[ci] = np.interp(-pr_score, -conf[i], recall[:, 0])  # r at pr_score, negative x, xp because xp decreases
            # Precision
            precision = tpc / (tpc + fpc)  # precision curve
            p[ci] = np.interp(-pr_score, -conf[i], precision[:, 0])  # p at pr_score
            # AP from recall-precision curve (one AP per IoU threshold column)
            for j in range(tp.shape[1]):
                ap[ci, j] = compute_ap(recall[:, j], precision[:, j])
            # Plot
            # fig, ax = plt.subplots(1, 1, figsize=(5, 5))
            # ax.plot(recall, precision)
            # ax.set_xlabel('Recall')
            # ax.set_ylabel('Precision')
            # ax.set_xlim(0, 1.01)
            # ax.set_ylim(0, 1.01)
            # fig.tight_layout()
            # fig.savefig('PR_curve.png', dpi=300)
    # Compute F1 score (harmonic mean of precision and recall)
    f1 = 2 * p * r / (p + r + 1e-16)
    return p, r, ap, f1, unique_classes.astype('int32')
def compute_ap(recall, precision):
    """ Compute the average precision, given the recall and precision curves.
    Source: https://github.com/rbgirshick/py-faster-rcnn.
    # Arguments
        recall:    The recall curve (list).
        precision: The precision curve (list).
    # Returns
        The average precision as computed in py-faster-rcnn.
    """
    # Sentinels: close the curve at recall 0 and just past the final recall value.
    mrec = np.concatenate(([0.], recall, [min(recall[-1] + 1E-3, 1.)]))
    mpre = np.concatenate(([0.], precision, [0.]))

    # Precision envelope: make the precision curve monotonically non-increasing.
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))

    # Integrate area under curve
    method = 'interp'  # methods: 'continuous', 'interp'
    if method == 'interp':
        grid = np.linspace(0, 1, 101)  # 101-point interp (COCO)
        return np.trapz(np.interp(grid, mrec, mpre), grid)
    # 'continuous': sum the rectangles under every recall step
    idx = np.where(mrec[1:] != mrec[:-1])[0]
    return np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False):
    """IoU of box1 (shape 4 or 4xn) against box2 (nx4), optionally as
    GIoU / DIoU / CIoU. Boxes are xyxy by default, xywh when x1y1x2y2=False."""
    box2 = box2.t()  # -> 4xn so each coordinate is a row
    if x1y1x2y2:
        b1_x1, b1_y1, b1_x2, b1_y2 = box1
        b2_x1, b2_y1, b2_x2, b2_y2 = box2
    else:  # convert center/size to corners
        half_w1, half_h1 = box1[2] / 2, box1[3] / 2
        b1_x1, b1_x2 = box1[0] - half_w1, box1[0] + half_w1
        b1_y1, b1_y2 = box1[1] - half_h1, box1[1] + half_h1
        half_w2, half_h2 = box2[2] / 2, box2[3] / 2
        b2_x1, b2_x2 = box2[0] - half_w2, box2[0] + half_w2
        b2_y1, b2_y2 = box2[1] - half_h2, box2[1] + half_h2

    # Intersection area (clamped at 0 for disjoint boxes)
    inter_w = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0)
    inter_h = (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
    inter = inter_w * inter_h

    # Union area (epsilon guards against division by zero)
    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1
    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1
    union = (w1 * h1 + 1e-16) + w2 * h2 - inter
    iou = inter / union
    if not (GIoU or DIoU or CIoU):
        return iou

    # Smallest enclosing (convex) box
    cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1)
    ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1)
    if GIoU:  # Generalized IoU https://arxiv.org/pdf/1902.09630.pdf
        c_area = cw * ch + 1e-16
        return iou - (c_area - union) / c_area
    # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1
    c2 = cw ** 2 + ch ** 2 + 1e-16  # convex diagonal squared
    rho2 = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4 + ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
    if DIoU:
        return iou - rho2 / c2
    # CIoU: https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47
    v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
    with torch.no_grad():
        alpha = v / (1 - iou + v)
    return iou - (rho2 / c2 + v * alpha)
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """
    def box_area(box):
        # box = 4xn corner boxes
        return (box[2] - box[0]) * (box[3] - box[1])

    area1 = box_area(box1.t())
    area2 = box_area(box2.t())

    # Pairwise intersection via broadcasting: top-left max, bottom-right min
    top_left = torch.max(box1[:, None, :2], box2[:, :2])      # (N, M, 2)
    bottom_right = torch.min(box1[:, None, 2:], box2[:, 2:])  # (N, M, 2)
    inter = (bottom_right - top_left).clamp(0).prod(2)        # (N, M)
    return inter / (area1[:, None] + area2 - inter)
def wh_iou(wh1, wh2):
    """Pairwise IoU of width-height pairs, as if all boxes shared one corner.
    wh1 is nx2, wh2 is mx2; returns an nxm matrix."""
    lhs = wh1[:, None]  # (N, 1, 2)
    rhs = wh2[None]     # (1, M, 2)
    inter = torch.min(lhs, rhs).prod(2)  # (N, M) overlap area
    union = lhs.prod(2) + rhs.prod(2) - inter
    return inter / union
class FocalLoss(nn.Module):
    """Wrap an elementwise loss_fcn with focal-loss modulation, e.g.
    criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5).

    The wrapped criterion must be nn.BCEWithLogitsLoss so probabilities can be
    recovered from the raw logits.
    """

    def __init__(self, loss_fcn, gamma=1.5, alpha=0.25):
        super(FocalLoss, self).__init__()
        self.loss_fcn = loss_fcn  # must be nn.BCEWithLogitsLoss()
        self.gamma = gamma  # focusing parameter
        self.alpha = alpha  # class-balance factor
        # Remember the caller's reduction, then force 'none' so the focal
        # modulation can be applied per element before we reduce ourselves.
        self.reduction = loss_fcn.reduction
        self.loss_fcn.reduction = 'none'

    def forward(self, pred, true):
        elementwise = self.loss_fcn(pred, true)
        # p_t = torch.exp(-elementwise)
        # elementwise *= self.alpha * (1.000001 - p_t) ** self.gamma  # non-zero power for gradient stability

        # TF implementation https://github.com/tensorflow/addons/blob/v0.7.1/tensorflow_addons/losses/focal_loss.py
        prob = torch.sigmoid(pred)  # probability from logits
        p_t = true * prob + (1 - true) * (1 - prob)
        alpha_factor = true * self.alpha + (1 - true) * (1 - self.alpha)
        modulating_factor = (1.0 - p_t) ** self.gamma
        elementwise = elementwise * (alpha_factor * modulating_factor)

        if self.reduction == 'mean':
            return elementwise.mean()
        if self.reduction == 'sum':
            return elementwise.sum()
        return elementwise  # 'none'
def smooth_BCE(eps=0.1):  # https://github.com/ultralytics/yolov3/issues/238#issuecomment-598028441
    """Return (positive, negative) BCE targets for label smoothing with factor eps."""
    positive = 1.0 - 0.5 * eps
    negative = 0.5 * eps
    return positive, negative
def compute_loss(p, targets, model):  # predictions, targets, model
    """Compute the YOLO training loss: GIoU box loss + objectness + classification.

    Args:
        p: list of per-layer prediction tensors, each (bs, anchors, gy, gx, 5 + nc).
        targets: (n, 6) ground-truth tensor [image, class, x, y, w, h].
        model: provides hyp (hyperparameters), nc (class count) and gr (giou ratio).

    Returns:
        (loss, items) where items = detached [lbox, lobj, lcls, loss].
    """
    ft = torch.cuda.FloatTensor if p[0].is_cuda else torch.Tensor
    lcls, lbox, lobj = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchor_vec = build_targets(p, targets, model)
    h = model.hyp  # hyperparameters
    red = 'mean'  # Loss reduction (sum or mean)

    # Define criteria
    BCEcls = nn.BCEWithLogitsLoss(pos_weight=ft([h['cls_pw']]), reduction=red)
    BCEobj = nn.BCEWithLogitsLoss(pos_weight=ft([h['obj_pw']]), reduction=red)

    # class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3
    cp, cn = smooth_BCE(eps=0.0)

    # focal loss
    g = h['fl_gamma']  # focal loss gamma
    if g > 0:
        BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)

    # Compute losses
    # ngp renamed from 'np' — the original local shadowed the numpy module
    ngp, ng = 0, 0  # number grid points, targets
    for i, pi in enumerate(p):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        tobj = torch.zeros_like(pi[..., 0])  # target obj
        ngp += tobj.numel()

        # Compute losses
        nb = len(b)
        if nb:  # number of targets
            ng += nb
            ps = pi[b, a, gj, gi]  # prediction subset corresponding to targets
            # ps[:, 2:4] = torch.sigmoid(ps[:, 2:4])  # wh power loss (uncomment)

            # GIoU
            pxy = torch.sigmoid(ps[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            pwh = torch.exp(ps[:, 2:4]).clamp(max=1E3) * anchor_vec[i]  # clamp avoids inf on early epochs
            pbox = torch.cat((pxy, pwh), 1)  # predicted box
            giou = bbox_iou(pbox.t(), tbox[i], x1y1x2y2=False, GIoU=True)  # giou computation
            lbox += (1.0 - giou).sum() if red == 'sum' else (1.0 - giou).mean()  # giou loss
            # objectness target blends 1 with the (detached) giou by model.gr
            tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype)  # giou ratio

            if model.nc > 1:  # cls loss (only if multiple classes)
                t = torch.full_like(ps[:, 5:], cn)  # targets
                t[range(nb), tcls[i]] = cp
                lcls += BCEcls(ps[:, 5:], t)  # BCE
                # lcls += CE(ps[:, 5:], tcls[i])  # CE

            # Append targets to text file
            # with open('targets.txt', 'a') as file:
            #     [file.write('%11.5g ' * 4 % tuple(x) + '\n') for x in torch.cat((txy[i], twh[i]), 1)]

        lobj += BCEobj(pi[..., 4], tobj)  # obj loss

    lbox *= h['giou']
    lobj *= h['obj']
    lcls *= h['cls']
    if red == 'sum':
        bs = tobj.shape[0]  # batch size
        lobj *= 3 / (6300 * bs) * 2  # 3 / ngp * 2
        if ng:
            lcls *= 3 / ng / model.nc
            lbox *= 3 / ng

    loss = lbox + lobj + lcls
    return loss, torch.cat((lbox, lobj, lcls, loss)).detach()
def compute_loss_ssd(cls_logits, bbox_pred, gt_labels, gt_boxes):
    """Compute SSD classification loss and smooth l1 localization loss.
    Args:
        cls_logits (batch_size, num_priors, num_classes): class predictions.
        bbox_pred (batch_size, num_priors, 4): predicted locations.
        gt_labels (batch_size, num_priors): real labels of all the priors.
        gt_boxes (batch_size, num_priors, 4): real boxes corresponding all the priors.
    Returns:
        (smooth_l1_loss / num_pos, classification_loss / num_pos), both
        normalized by the number of positive priors.
    """
    num_classes = cls_logits.size(2)
    with torch.no_grad():
        # derived from cross_entropy=sum(log(p))
        # background confidence per prior, used to rank hard negatives
        loss = -F.log_softmax(cls_logits, dim=2)[:, :, 0]
        # NOTE(review): box_utils is a project module; 3 is the negative:positive ratio
        mask = box_utils.hard_negative_mining(loss, gt_labels, 3)
    confidence = cls_logits[mask, :]
    classification_loss = F.cross_entropy(confidence.view(-1, num_classes), gt_labels[mask], reduction='sum')
    # localization loss over positive priors only
    pos_mask = gt_labels > 0
    predicted_locations = bbox_pred[pos_mask, :].view(-1, 4)
    gt_locations = gt_boxes[pos_mask, :].view(-1, 4)
    smooth_l1_loss = F.smooth_l1_loss(predicted_locations, gt_locations, reduction='sum')
    num_pos = gt_locations.size(0)  # number of positive priors
    return smooth_l1_loss / num_pos, classification_loss / num_pos
def compute_lost_KD(output_s, output_t, num_classes, batch_size):
    """Plain knowledge-distillation loss: temperature-softened KL divergence
    between all student and teacher output rows (num_classes + 5 columns each),
    scaled by T^2 / batch_size and the distillation weight."""
    T = 3.0  # distillation temperature
    Lambda_ST = 0.001  # distillation weight
    criterion_st = torch.nn.KLDivLoss(reduction='sum')
    flat_s = torch.cat([layer.view(-1, num_classes + 5) for layer in output_s])
    flat_t = torch.cat([layer.view(-1, num_classes + 5) for layer in output_t])
    loss_st = criterion_st(nn.functional.log_softmax(flat_s / T, dim=1),
                           nn.functional.softmax(flat_t / T, dim=1)) * (T * T) / batch_size
    return loss_st * Lambda_ST
def compute_lost_KD2(model, targets, output_s, output_t):
    """Knowledge-distillation loss with teacher-bounded box regression.

    Returns (weighted loss, reg_ratio) where reg_ratio is the fraction of
    target boxes for which the student was farther from the ground truth than
    the teacher (i.e. boxes that contributed to the regression loss).
    """
    reg_m = 0.0  # margin for the teacher-bounded comparison
    T = 3.0  # distillation temperature
    Lambda_cls, Lambda_box = 0.0001, 0.001  # loss weights
    criterion_st = torch.nn.KLDivLoss(reduction='sum')
    ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor
    lcls, lbox = ft([0]), ft([0])
    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
    reg_ratio, reg_num, reg_nb = 0, 0, 0
    for i, (ps, pt) in enumerate(zip(output_s, output_t)):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        nb = len(b)
        if nb:  # number of targets
            pss = ps[b, a, gj, gi]  # prediction subset corresponding to targets
            pts = pt[b, a, gj, gi]
            psxy = torch.sigmoid(pss[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box
            ptxy = torch.sigmoid(pts[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box
            # Teacher-bounded regression: only penalize student boxes that are
            # farther from the ground truth than the teacher's box (plus margin).
            l2_dis_s = (psbox - tbox[i]).pow(2).sum(1)
            l2_dis_s_m = l2_dis_s + reg_m
            l2_dis_t = (ptbox - tbox[i]).pow(2).sum(1)
            l2_num = l2_dis_s_m > l2_dis_t
            lbox += l2_dis_s[l2_num].sum()
            reg_num += l2_num.sum().item()
            reg_nb += nb
        # Soft-target distillation of obj + class outputs via KL divergence.
        output_s_i = ps[..., 4:].view(-1, model.nc + 1)
        output_t_i = pt[..., 4:].view(-1, model.nc + 1)
        lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),
                             nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)
    if reg_nb:
        reg_ratio = reg_num / reg_nb  # share of boxes where the student lagged the teacher
    return lcls * Lambda_cls + lbox * Lambda_box, reg_ratio
def compute_lost_KD3(model, targets, output_s, output_t):
    """Knowledge-distillation loss: student imitates the teacher's boxes (L2)
    at target cells and its obj+class outputs (temperature-softened KL).
    Unlike compute_lost_KD2, the box loss compares student vs teacher boxes
    directly (not against ground truth) and is not teacher-bounded.
    """
    T = 3.0  # distillation temperature
    Lambda_cls, Lambda_box = 0.0001, 0.001  # loss weights
    criterion_st = torch.nn.KLDivLoss(reduction='sum')
    ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor
    lcls, lbox = ft([0]), ft([0])
    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
    for i, (ps, pt) in enumerate(zip(output_s, output_t)):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        nb = len(b)
        if nb:  # number of targets
            pss = ps[b, a, gj, gi]  # prediction subset corresponding to targets
            pts = pt[b, a, gj, gi]
            psxy = torch.sigmoid(pss[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box
            ptxy = torch.sigmoid(pts[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box
            # L2 imitation of the teacher's boxes at target-assigned cells
            l2_dis = (psbox - ptbox).pow(2).sum(1)
            lbox += l2_dis.sum()
        # Soft-target distillation of obj + class outputs via KL divergence
        output_s_i = ps[..., 4:].view(-1, model.nc + 1)
        output_t_i = pt[..., 4:].view(-1, model.nc + 1)
        lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),
                             nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)
    return lcls * Lambda_cls + lbox * Lambda_box
def at(x):
    """Spatial attention map of a feature tensor: square the activations, mean
    over the channel dim, flatten per sample, and L2-normalize each row."""
    attention = x.pow(2).mean(1)
    return F.normalize(attention.view(x.size(0), -1))
def compute_lost_AT(model, targets, output_s, output_t, feature_s, feature_t, batch_size):
    """Attention-transfer loss: mean squared difference between teacher and
    student attention maps at a fixed set of layers, scaled by beta.
    (model / targets / output_s / output_t / batch_size are unused here; the
    signature mirrors the other compute_lost_* functions.)
    """
    beta = 10000  # global AT loss weight
    at_layer = [7, 16, 37, 58, 78, 85, 92, 99, 106]  # layers whose attention is matched
    if len(feature_t) != len(feature_s):
        print("feature mismatch!")
        exit()
    total = 0
    for layer in at_layer:
        pair_loss = (at(feature_t[layer]) - at(feature_s[layer])).pow(2).mean()
        total = total + pair_loss
    return beta * total
# attention_loss = compute_lost_group_AT(model, targets, pred, output_t, feature_s, feature_t, imgs.size(0))
def compute_lost_group_AT(model, targets, output_s, output_t, feature_s, feature_t, batch_size):
    """Group-weighted attention-transfer loss: like compute_lost_AT, but each
    layer's attention mismatch is scaled by a per-group beta.
    (model / targets / output_s / output_t / batch_size are unused here; the
    signature mirrors the other compute_lost_* functions.)
    """
    # (layer, beta) in the original at_layer order; groups:
    #   beta 1e6 -> layers 7/16/37/58, beta 1e3 -> 78/106, beta 1e4 -> 85/92/99
    layer_betas = [(7, 1000000), (16, 1000000), (37, 1000000), (58, 1000000),
                   (78, 1000), (85, 10000), (92, 10000), (99, 10000), (106, 1000)]
    if len(feature_t) != len(feature_s):
        print("feature mismatch!")
        exit()
    total = 0
    for layer, beta in layer_betas:
        pair_loss = (at(feature_t[layer]) - at(feature_s[layer])).pow(2).mean()
        total = total + beta * pair_loss
    return total
def compute_lost_group_AT_KD(model, targets, output_s, output_t, feature_s, feature_t, batch_size):
    """Combined distillation loss: L2 box imitation + temperature-softened KL
    on obj+class outputs + group-weighted attention transfer on feature maps.
    """
    T = 3.0  # distillation temperature
    # Lambda_cls, Lambda_box, Lambda_feature = 0.001, 0.001, 0.001
    Lambda_cls, Lambda_box, Lambda_feature = 0.01, 0.01, 0.08  # Lambda_feature unused below
    criterion_st = torch.nn.KLDivLoss(reduction='sum')
    criterion_stf = torch.nn.KLDivLoss(reduction='mean')  # unused in this variant
    ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor
    lcls, lbox, lfeature = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
    for i, (ps, pt) in enumerate(zip(output_s, output_t)):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        nb = len(b)
        if nb:  # number of targets
            pss = ps[b, a, gj, gi]  # prediction subset corresponding to targets
            pts = pt[b, a, gj, gi]
            psxy = torch.sigmoid(pss[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box
            ptxy = torch.sigmoid(pts[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box
            # lbox loss: student imitates the teacher's boxes at target cells
            l2_dis = (psbox - ptbox).pow(2).sum(1)
            lbox += l2_dis.sum()
        # cls loss: soft-target KL on obj + class outputs
        output_s_i = ps[..., 4:].view(-1, model.nc + 1)
        output_t_i = pt[..., 4:].view(-1, model.nc + 1)
        lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),
                             nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)
    # attention loss
    all_pair_layer_attention_loss = 0
    # attention layers, grouped with per-group weights beta1..beta3
    at_layer = [7,16,37,58,78,85,92,99,106]
    group1 = [7,16,37,58]
    group2 = [78, 106]
    group3 = [85, 92, 99]
    beta1 = 1000000
    beta2 = 1000
    beta3 = 10000
    # feature layer attention loss
    if len(feature_t) != len(feature_s):
        print("feature mismatch!")
        exit()
    for i in range(len(at_layer)):
        teacher_attention = at(feature_t[at_layer[i]])
        student_attention = at(feature_s[at_layer[i]])
        single_pair_at_loss = (teacher_attention - student_attention).pow(2).mean()
        if (at_layer[i] in group1):
            all_pair_layer_attention_loss = all_pair_layer_attention_loss + beta1 * single_pair_at_loss
        if (at_layer[i] in group2):
            all_pair_layer_attention_loss = all_pair_layer_attention_loss + beta2 * single_pair_at_loss
        if (at_layer[i] in group3):
            all_pair_layer_attention_loss = all_pair_layer_attention_loss + beta3 * single_pair_at_loss
    return lcls * Lambda_cls + lbox * Lambda_box + all_pair_layer_attention_loss
def compute_lost_fine_grained_group_AT_KD(model, targets, output_s, output_t, feature_s, feature_t, batch_size, img_size):
    """Fine-grained, group-weighted attention-transfer + KD loss.

    Combines (1) an L2 box-imitation loss between student and teacher boxes at
    target-assigned cells, (2) a temperature-softened KL divergence on the
    obj+class outputs, and (3) an attention loss over selected feature layers,
    masked to the grid cells that contain targets (fine-grained imitation).

    Args:
        model: student model (provides nc; consumed by build_targets).
        targets: (n, 6) ground-truth tensor [image, class, x, y, w, h].
        output_s / output_t: student / teacher per-layer prediction tensors.
        feature_s / feature_t: student / teacher intermediate feature maps.
        batch_size: images per batch.
        img_size: network input size used to scale grid indices.

    Returns:
        scalar distillation loss.
    """
    T = 3.0  # distillation temperature
    Lambda_cls, Lambda_box = 0.01, 0.01  # loss weights
    criterion_st = torch.nn.KLDivLoss(reduction='sum')
    ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor
    lcls, lbox = ft([0]), ft([0])
    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
    for i, (ps, pt) in enumerate(zip(output_s, output_t)):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        nb = len(b)
        if nb:  # number of targets
            pss = ps[b, a, gj, gi]  # prediction subset corresponding to targets
            pts = pt[b, a, gj, gi]
            psxy = torch.sigmoid(pss[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # student box
            ptxy = torch.sigmoid(pts[:, 0:2])
            ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # teacher box
            # lbox loss: student imitates the teacher's boxes
            l2_dis = (psbox - ptbox).pow(2).sum(1)
            lbox += l2_dis.sum()
        # cls loss: soft-target KL on obj + class outputs
        output_s_i = ps[..., 4:].view(-1, model.nc + 1)
        output_t_i = pt[..., 4:].view(-1, model.nc + 1)
        lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),
                             nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)
    # attention layers, grouped with per-group weights
    at_layer = [7, 16, 37, 58, 78, 85, 92, 99, 106]
    group1 = [7, 16, 37, 58]
    group2 = [78, 106]
    group3 = [85, 92, 99]
    beta1 = 1000000
    beta2 = 1000
    beta3 = 10000
    # attention loss, restricted to target-assigned cells
    all_pair_layer_attention_loss = 0
    merge = v4_indices_merge(indices)  # grid indices scaled back to image pixels
    if len(feature_t) != len(feature_s):
        print("feature mismatch!")
        exit()
    for i in range(len(at_layer)):
        single_feature_t = feature_t[at_layer[i]]
        # BUG FIX: the original read feature_t here too, so the "student"
        # attention was actually the teacher's and the loss carried no gradient
        # w.r.t. the student features.
        single_feature_s = feature_s[at_layer[i]]
        single_feature_t = single_feature_t.pow(2).mean(1)  # channel-aggregated attention map
        single_feature_s = single_feature_s.pow(2).mean(1)
        mask = fine_grained_imitation_feature_mask_v4(single_feature_s, single_feature_t, merge, img_size)
        mask = mask.to(targets.device)
        fine_grained_t = (single_feature_t * mask).view(batch_size, -1)
        fine_grained_s = (single_feature_s * mask).view(batch_size, -1)
        teacher_attention = F.normalize(fine_grained_t)
        student_attention = F.normalize(fine_grained_s)
        single_pair_at_loss = (teacher_attention - student_attention).pow(2).mean()
        if at_layer[i] in group1:
            all_pair_layer_attention_loss = all_pair_layer_attention_loss + beta1 * single_pair_at_loss
        if at_layer[i] in group2:
            all_pair_layer_attention_loss = all_pair_layer_attention_loss + beta2 * single_pair_at_loss
        if at_layer[i] in group3:
            all_pair_layer_attention_loss = all_pair_layer_attention_loss + beta3 * single_pair_at_loss
    return lcls * Lambda_cls + lbox * Lambda_box + all_pair_layer_attention_loss
def compute_lost_KD4(model, targets, output_s, output_t, feature_s, feature_t, batch_size):
    """Distillation loss: L2 box imitation + KL on obj/class outputs + KL on
    channel-aggregated feature maps (all feature layers, unmasked).
    NOTE: mutates feature_s / feature_t in place (replaces each tensor with its
    channel-aggregated map).
    """
    T = 3.0  # distillation temperature
    # Lambda_cls, Lambda_box, Lambda_feature = 0.001, 0.001, 0.001
    Lambda_cls, Lambda_box, Lambda_feature = 0.01, 0.01, 0.08
    criterion_st = torch.nn.KLDivLoss(reduction='sum')
    criterion_stf = torch.nn.KLDivLoss(reduction='mean')
    ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor
    lcls, lbox, lfeature = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
    for i, (ps, pt) in enumerate(zip(output_s, output_t)):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        nb = len(b)
        if nb:  # number of targets
            pss = ps[b, a, gj, gi]  # prediction subset corresponding to targets
            pts = pt[b, a, gj, gi]
            psxy = torch.sigmoid(pss[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box
            ptxy = torch.sigmoid(pts[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box
            # box imitation loss against the teacher
            l2_dis = (psbox - ptbox).pow(2).sum(1)
            lbox += l2_dis.sum()
        # cls loss: soft-target KL on obj + class outputs
        output_s_i = ps[..., 4:].view(-1, model.nc + 1)
        output_t_i = pt[..., 4:].view(-1, model.nc + 1)
        lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),
                             nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)
    # feature loss
    if len(feature_t) != len(feature_s):
        print("feature mismatch!")
        exit()
    for i in range(len(feature_t)):
        feature_t[i] = feature_t[i].pow(2).sum(1)  # aggregate channels -> (B, H, W)
        # feature_t[i] = feature_t[i].abs().sum(1)
        feature_s[i] = feature_s[i].pow(2).sum(1)
        # feature_s[i] = feature_s[i].abs().sum(1)
        # NOTE(review): log_softmax/softmax are called without dim= (deprecated);
        # for this 3-D input PyTorch's legacy rule picks the axis implicitly —
        # confirm the intended axis before making it explicit.
        lfeature += criterion_stf(nn.functional.log_softmax(feature_s[i] / T),
                                  nn.functional.softmax(feature_t[i] / T)) * (T * T) / batch_size
    return lcls * Lambda_cls + lbox * Lambda_box + lfeature * Lambda_feature
def indices_merge(indices):
    """Scale each layer's grid coordinates (gj, gi) back to input-image pixels.
    Layer i is assumed to have stride 2**(5 - i) (coarsest detection layer first).
    Returns a list of [image, anchor, gj*stride, gi*stride] entries."""
    merged = []
    for layer, idx in enumerate(indices):
        entry = list(idx)
        stride = 2 ** (5 - layer)
        entry[2] = entry[2] * stride  # gridy -> pixels
        entry[3] = entry[3] * stride  # gridx -> pixels
        merged.append(entry)
    return merged
def v4_indices_merge(indices):
    """Scale each layer's grid coordinates (gj, gi) back to input-image pixels.
    Layer i is assumed to have stride 2**(3 + i) (finest detection layer first,
    YOLOv4 head ordering). Returns [image, anchor, gj*stride, gi*stride] entries."""
    merged = []
    for layer, idx in enumerate(indices):
        entry = list(idx)
        stride = 2 ** (3 + layer)
        entry[2] = entry[2] * stride  # gridy -> pixels
        entry[3] = entry[3] * stride  # gridx -> pixels
        merged.append(entry)
    return merged
def fine_grained_imitation_feature_mask(feature_s, feature_t, indices, img_size):
    """Build a binary mask over a channel-aggregated feature map marking the
    cells assigned to targets, pooling multi-scale grid indices to this map's
    resolution. Layer j of `indices` is assumed to have stride 2**(5 - j)
    (coarsest first).
    NOTE(review): assumes feature_s/feature_t are (B, S, S) with S = img_size / scale
    and requires CUDA (index tensors are created with .cuda()) — confirm.
    """
    if feature_t.size() != feature_s.size():
        print("feature mismatch!")
        exit()
    # accumulated (batch, gridy, gridx) indices to set in the mask
    B, Gj, Gi = torch.Tensor(0).long().cuda(), torch.Tensor(0).long().cuda(), torch.Tensor(0).long().cuda()
    feature_size = feature_s.size()[1]
    scale = img_size / feature_size  # stride of this feature map
    '''
    print("\n fine_grained_imiation_feature_mask fun start ****\n")
    print('****0000***')
    print(feature_size, scale )
    print(indices.__sizeof__(), len(indices))
    '''
    for j in range(len(indices)):
        # stop at detection layers finer than this feature map
        if 2 ** (5 - j) < scale:
            break
        b, _, gj, gi = indices[j]  # image, gridy, gridx
        '''
        print("***1111****")
        print("the following is indices[j]***\n",indices[j])
        print("\n**** the b,gj, gi****\n ",b, gj, gi)
        '''
        gj, gi = (gj / scale).long(), (gi / scale).long()  # pixels -> this map's cells
        # print("the gj.size()[0]",gj.size()[0])
        for i in range(gj.size()[0]):
            # same stride as this map: one cell per target, nothing to expand
            if 2 ** (5 - j) == scale:
                break
            # expand each coarse cell into the block of finer cells it covers
            b_temp = (torch.ones(int(2 ** (5 - j) / scale - 1)).cuda() * b[i]).long().cuda()
            gj_temp = torch.arange(int(gj[i].item()) + 1, int(gj[i].item() + 2 ** (5 - j) / scale)).cuda()
            gi_temp = torch.arange(int(gi[i].item()) + 1, int(gi[i].item() + 2 ** (5 - j) / scale)).cuda()
            b = torch.cat((b, b_temp))
            gj = torch.cat((gj, gj_temp))
            gi = torch.cat((gi, gi_temp))
        '''
        print("****-----*****")
        print(B.size(),Gj.size(),Gi.size())
        print(b.size(), gj.size(), gi.size())
        print("******5555*****")
        '''
        B = torch.cat((B, b))
        Gj = torch.cat((Gj, gj))
        Gi = torch.cat((Gi, gi))
        '''
        print(" \n the following is concat B", B.size(), B)
        print(" \n the following is concat Gj", Gj.size(),Gj)
        print(" \n the following is concat Gi", Gi.size(), Gi)
        print(" this is %d th times, combine B, Gj, Gi"%i, [B, Gj, Gi],[B, Gj, Gi].__sizeof__())
        '''
    # mark all collected cells
    mask = torch.zeros(feature_s.size())
    mask[B, Gj, Gi] = 1
    # print("\n fine_grained_imiation_feature_mask fun ending ****\n")
    return mask
def fine_grained_imitation_feature_mask_v4(feature_s, feature_t, indices, img_size):
    """Variant of fine_grained_imitation_feature_mask for the YOLOv4 head
    ordering: layer j of `indices` is assumed to have stride 2**(3 + j)
    (finest detection layer first).
    NOTE(review): assumes feature_s/feature_t are (B, S, S) with S = img_size / scale
    and requires CUDA (index tensors are created with .cuda()) — confirm.
    """
    if feature_t.size() != feature_s.size():
        print("feature mismatch!")
        exit()
    # accumulated (batch, gridy, gridx) indices to set in the mask
    B, Gj, Gi = torch.Tensor(0).long().cuda(), torch.Tensor(0).long().cuda(), torch.Tensor(0).long().cuda()
    feature_size = feature_s.size()[1]
    scale = img_size / feature_size  # stride of this feature map
    '''
    print("\n fine_grained_imiation_feature_mask fun start ****\n")
    print('****0000***')
    print(feature_size, scale )
    print(indices.__sizeof__(), len(indices))
    '''
    for j in range(len(indices)):
        # skip detection layers finer than this feature map
        if 2 ** (3 + j) < scale:
            break
        b, _, gj, gi = indices[j]  # image, gridy, gridx
        '''
        print("***1111****")
        print("the following is indices[j]***\n",indices[j])
        print("\n**** the b,gj, gi****\n ",b, gj, gi)
        '''
        gj, gi = (gj / scale).long(), (gi / scale).long()  # pixels -> this map's cells
        # print("the gj.size()[0]",gj.size()[0])
        for i in range(gj.size()[0]):
            # same stride as this map: one cell per target, nothing to expand
            if 2 ** (3 + j) == scale:
                break
            # expand each coarse cell into the block of finer cells it covers
            b_temp = (torch.ones(int(2 ** (3 + j) / scale - 1)).cuda() * b[i]).long().cuda()
            gj_temp = torch.arange(int(gj[i].item()) + 1, int(gj[i].item() + 2 ** (3 + j) / scale)).cuda()
            gi_temp = torch.arange(int(gi[i].item()) + 1, int(gi[i].item() + 2 ** (3 + j) / scale)).cuda()
            b = torch.cat((b, b_temp))
            gj = torch.cat((gj, gj_temp))
            gi = torch.cat((gi, gi_temp))
        '''
        print("****-----*****")
        print(B.size(),Gj.size(),Gi.size())
        print(b.size(), gj.size(), gi.size())
        print("******5555*****")
        '''
        B = torch.cat((B, b))
        Gj = torch.cat((Gj, gj))
        Gi = torch.cat((Gi, gi))
        '''
        print(" \n the following is concat B", B.size(), B)
        print(" \n the following is concat Gj", Gj.size(),Gj)
        print(" \n the following is concat Gi", Gi.size(), Gi)
        print(" this is %d th times, combine B, Gj, Gi"%i, [B, Gj, Gi],[B, Gj, Gi].__sizeof__())
        '''
    # mark all collected cells
    mask = torch.zeros(feature_s.size())
    mask[B, Gj, Gi] = 1
    # print("\n fine_grained_imiation_feature_mask fun ending ****\n")
    return mask
def compute_lost_KD5(model, targets, output_s, output_t, feature_s, feature_t, batch_size, img_size):
    """Distillation loss: L2 box imitation + KL on obj/class outputs + KL on
    fine-grained (target-cell-masked) channel-aggregated feature maps.
    NOTE: mutates feature_s / feature_t in place.
    """
    T = 3.0  # distillation temperature
    Lambda_cls, Lambda_box, Lambda_feature = 0.1, 0.001, 0.1  # loss weights
    criterion_st = torch.nn.KLDivLoss(reduction='mean')
    ft = torch.cuda.FloatTensor if output_s[0].is_cuda else torch.Tensor
    lcls, lbox, lfeature = ft([0]), ft([0]), ft([0])
    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
    for i, (ps, pt) in enumerate(zip(output_s, output_t)):  # layer index, layer predictions
        b, a, gj, gi = indices[i]  # image, anchor, gridy, gridx
        nb = len(b)
        if nb:  # number of targets
            pss = ps[b, a, gj, gi]  # prediction subset corresponding to targets
            pts = pt[b, a, gj, gi]
            psxy = torch.sigmoid(pss[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            psbox = torch.cat((psxy, torch.exp(pss[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box
            ptxy = torch.sigmoid(pts[:, 0:2])  # pxy = pxy * s - (s - 1) / 2, s = 1.5 (scale_xy)
            ptbox = torch.cat((ptxy, torch.exp(pts[:, 2:4]) * anchor_vec[i]), 1).view(-1, 4)  # predicted box
            # box imitation loss against the teacher
            l2_dis = (psbox - ptbox).pow(2).sum(1)
            lbox += l2_dis.sum()
        # cls loss: soft-target KL on obj + class outputs
        output_s_i = ps[..., 4:].view(-1, model.nc + 1)
        output_t_i = pt[..., 4:].view(-1, model.nc + 1)
        lcls += criterion_st(nn.functional.log_softmax(output_s_i / T, dim=1),
                             nn.functional.softmax(output_t_i / T, dim=1)) * (T * T) / ps.size(0)
    # feature loss
    if len(feature_t) != len(feature_s):
        print("feature mismatch!")
        exit()
    # print("\n****** ouput indices :")
    # print(indices)
    #print("\n *******go to the indices_merge function ")
    # merge = indices_merge(indices)
    merge = v4_indices_merge(indices)  # grid indices scaled to image pixels (v4 head ordering)
    #print("\n****after merge:", merge)
    #print("\n ****merge = %s \n ****----- \n the following will be a bug \n "%(merge) )
    for i in range(len(feature_t)):
        # feature_t[i] = feature_t[i].pow(2).sum(1)
        feature_t[i] = feature_t[i].abs().sum(1)  # aggregate channels -> (B, H, W)
        # feature_s[i] = feature_s[i].pow(2).sum(1)
        feature_s[i] = feature_s[i].abs().sum(1)
        # print("\n go to fine_grained")
        # restrict the comparison to grid cells that contain targets
        mask = fine_grained_imitation_feature_mask_v4(feature_s[i], feature_t[i], merge, img_size)
        # mask = fine_grained_imitation_feature_mask(feature_s[i], feature_t[i], indices, img_size)
        mask = mask.to(targets.device)
        feature_t[i] = (feature_t[i] * mask).view(batch_size, -1)
        feature_s[i] = (feature_s[i] * mask).view(batch_size, -1)
        lfeature += criterion_st(nn.functional.log_softmax(feature_s[i] / T, dim=1),
                                 nn.functional.softmax(feature_t[i] / T, dim=1)) * (T * T) / batch_size
    # print(lcls.data)
    # print(lbox.data)
    # print(lfeature.data)
    return lcls * Lambda_cls + lbox * Lambda_box + lfeature * Lambda_feature
def fine_grained_imitation_mask(feature_s, feature_t, indices):
    """Build one binary mask per feature level selecting target-assigned cells.

    Each mask is a zero tensor shaped like the student feature map with 1s at
    the (image, anchor, gridy, gridx) positions listed in ``indices``.
    Exits the process when student/teacher level counts disagree.
    """
    if len(feature_t) != len(feature_s):
        print("feature mismatch!")
        exit()
    masks = []
    for level, feat in enumerate(feature_s):
        level_mask = torch.zeros(feat.size())
        img, anc, gy, gx = indices[level]  # image, anchor, gridy, gridx
        level_mask[img, anc, gy, gx] = 1
        masks.append(level_mask)
    return masks
# FineGrainedmask
def compute_lost_KD6(model, targets, output_s, output_t, batch_size):
    """Distillation loss v6: KL divergence between student and teacher head
    outputs, restricted by a fine-grained mask to target-assigned anchor cells.

    Returns lfeature * 0.001 (a single scalar tensor).
    """
    T = 3.0  # distillation temperature
    Lambda_feature = 0.001
    criterion_st = torch.nn.KLDivLoss(reduction='sum')
    feature_s = list(output_s)
    feature_t = list(output_t)
    tcls, tbox, indices, anchor_vec = build_targets(output_s, targets, model)
    mask = fine_grained_imitation_mask(feature_s, feature_t, indices)
    test = indices_merge(indices)  # NOTE(review): result unused — debug leftover?
    # feature loss: zero out everything except target-assigned cells
    for i in range(len(mask)):
        mask[i] = mask[i].to(targets.device)
        feature_t[i] = feature_t[i] * mask[i]
        feature_s[i] = feature_s[i] * mask[i]
    # flatten all levels to rows of 3 anchors x (nc + 5) channels
    feature_s = torch.cat([i.view(-1, 3 * (model.nc + 5)) for i in feature_s])
    feature_t = torch.cat([i.view(-1, 3 * (model.nc + 5)) for i in feature_t])
    lfeature = criterion_st(nn.functional.log_softmax(feature_s / T, dim=1),
                            nn.functional.softmax(feature_t / T, dim=1)) * (T * T) / batch_size
    return lfeature * Lambda_feature
def build_targets(p, targets, model):
    """Assign ground-truth boxes to anchors/grid cells for each YOLO layer.

    p:       list of per-layer prediction tensors (used only for grid shapes)
    targets: normalized rows [image, class, x, y, w, h]
    Returns (tcls, tbox, indices, av): per-layer class ids, grid-space xywh
    targets, (image, anchor, gridy, gridx) index tuples, and anchor vectors.
    """
    # targets = [image, class, x, y, w, h]
    nt = targets.shape[0]
    tcls, tbox, indices, av = [], [], [], []
    reject, use_all_anchors = True, True
    gain = torch.ones(6, device=targets.device)  # normalized to gridspace gain
    # m = list(model.modules())[-1]
    # for i in range(m.nl):
    #    anchors = m.anchors[i]
    multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
    for i, j in enumerate(model.yolo_layers):
        # get number of grid points and anchor vec for this yolo layer
        anchors = model.module.module_list[j].anchor_vec if multi_gpu else model.module_list[j].anchor_vec
        # iou of targets-anchors
        gain[2:] = torch.tensor(p[i].shape)[[3, 2, 3, 2]]  # xyxy gain
        t, a = targets * gain, []  # scale normalized targets to this layer's grid
        gwh = t[:, 4:6]
        if nt:
            iou = wh_iou(anchors, gwh)  # iou(3,n) = wh_iou(anchors(3,2), gwh(n,2))
            if use_all_anchors:
                na = anchors.shape[0]  # number of anchors
                a = torch.arange(na).view(-1, 1).repeat(1, nt).view(-1)
                t = t.repeat(na, 1)  # replicate every target for every anchor
            else:  # use best anchor only
                iou, a = iou.max(0)  # best iou and anchor
            # reject anchors below iou_thres (OPTIONAL, increases P, lowers R)
            if reject:
                j = iou.view(-1) > model.hyp['iou_t']  # iou threshold hyperparameter
                t, a = t[j], a[j]
        # Indices
        b, c = t[:, :2].long().t()  # target image, class
        gxy = t[:, 2:4]  # grid x, y
        gwh = t[:, 4:6]  # grid w, h
        gi, gj = gxy.long().t()  # grid x, y indices
        indices.append((b, a, gj, gi))
        # Box
        gxy -= gxy.floor()  # xy offset within the assigned cell
        tbox.append(torch.cat((gxy, gwh), 1))  # xywh (grids)
        av.append(anchors[a])  # anchor vec
        # Class
        tcls.append(c)
        if c.shape[0]:  # if any targets
            assert c.max() < model.nc, 'Model accepts %g classes labeled from 0-%g, however you labelled a class %g. ' \
                                       'See https://github.com/ultralytics/yolov3/wiki/Train-Custom-Data' % (
                                           model.nc, model.nc - 1, c.max())
    return tcls, tbox, indices, av
def non_max_suppression(prediction, conf_thres=0.1, iou_thres=0.6, multi_label=True, classes=None, agnostic=False):
    """
    Performs Non-Maximum Suppression on inference results

    Returns detections with shape:
        nx6 (x1, y1, x2, y2, conf, cls)

    prediction: per-image tensors of rows [x, y, w, h, obj_conf, cls_conf...]
    classes:    optional class id whitelist; agnostic: ignore class when suppressing.
    """
    # Box constraints
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    method = 'merge'  # one of 'merge' | 'vision' | 'fast'
    nc = prediction[0].shape[1] - 5  # number of classes
    multi_label &= nc > 1  # multiple labels per box
    output = [None] * len(prediction)
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply conf constraint
        x = x[x[:, 4] > conf_thres]
        # Apply width-height constraint
        x = x[((x[:, 2:4] > min_wh) & (x[:, 2:4] < max_wh)).all(1)]
        # If none remain process next image
        if not x.shape[0]:
            continue
        # Compute conf
        x[..., 5:] *= x[..., 4:5]  # conf = obj_conf * cls_conf
        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])
        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            # one detection row per (box, class) pair above threshold
            i, j = (x[:, 5:] > conf_thres).nonzero().t()
            x = torch.cat((box[i], x[i, j + 5].unsqueeze(1), j.float().unsqueeze(1)), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1)
            x = torch.cat((box, conf.unsqueeze(1), j.float().unsqueeze(1)), 1)
        # Filter by class
        if classes:
            x = x[(j.view(-1, 1) == torch.tensor(classes, device=j.device)).any(1)]
        # Apply finite constraint
        if not torch.isfinite(x).all():
            x = x[torch.isfinite(x).all(1)]
        # If none remain process next image
        n = x.shape[0]  # number of boxes
        if not n:
            continue
        # Sort by confidence
        # if method == 'fast_batch':
        #     x = x[x[:, 4].argsort(descending=True)]
        # Batched NMS: offset boxes by class id so different classes never overlap
        c = x[:, 5] * 0 if agnostic else x[:, 5]  # classes
        boxes, scores = x[:, :4].clone() + c.view(-1, 1) * max_wh, x[:, 4]  # boxes (offset by class), scores
        if method == 'merge':  # Merge NMS (boxes merged using weighted mean)
            i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
            if 1 < n < 3E3:  # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
                try:
                    # weights = (box_iou(boxes, boxes).tril_() > iou_thres) * scores.view(-1, 1)  # box weights
                    # weights /= weights.sum(0)  # normalize
                    # x[:, :4] = torch.mm(weights.T, x[:, :4])
                    weights = (box_iou(boxes[i], boxes) > iou_thres) * scores[None]  # box weights
                    x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
                except:  # possible CUDA error https://github.com/ultralytics/yolov3/issues/1139
                    pass
        elif method == 'vision':
            i = torchvision.ops.boxes.nms(boxes, scores, iou_thres)
        elif method == 'fast':  # FastNMS from https://github.com/dbolya/yolact
            iou = box_iou(boxes, boxes).triu_(diagonal=1)  # upper triangular iou matrix
            i = iou.max(0)[0] < iou_thres
        output[xi] = x[i]
    return output
def get_yolo_layers(model):
    """Return the indices of the 'yolo' entries in model.module_defs
    (e.g. [82, 94, 106] for yolov3)."""
    return [idx for idx, mdef in enumerate(model.module_defs) if mdef['type'] == 'yolo']
def print_model_biases(model):
    """Print mean +/- std of the conv biases feeding each YOLO layer, grouped
    into box-regression (cols 0-3), objectness (col 4) and classification (5+)."""
    # prints the bias neurons preceding each yolo layer
    print('\nModel Bias Summary: %8s%18s%18s%18s' % ('layer', 'regression', 'objectness', 'classification'))
    try:
        multi_gpu = type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
        for l in model.yolo_layers:  # print pretrained biases
            if multi_gpu:
                na = model.module.module_list[l].na  # number of anchors
                b = model.module.module_list[l - 1][0].bias.view(na, -1)  # bias 3x85
            else:
                na = model.module_list[l].na
                b = model.module_list[l - 1][0].bias.view(na, -1)  # bias 3x85
            print(' ' * 20 + '%8g %18s%18s%18s' % (l, '%5.2f+/-%-5.2f' % (b[:, :4].mean(), b[:, :4].std()),
                                                   '%5.2f+/-%-5.2f' % (b[:, 4].mean(), b[:, 4].std()),
                                                   '%5.2f+/-%-5.2f' % (b[:, 5:].mean(), b[:, 5:].std())))
    except:
        # best-effort diagnostic printing; swallow failures rather than abort training
        pass
def strip_optimizer(f='weights/last.pt'):  # from utils.utils import *; strip_optimizer()
    """Drop the optimizer state from checkpoint *f* in place.

    Shrinks the saved *.pt file by roughly 2/3.
    """
    checkpoint = torch.load(f, map_location=torch.device('cpu'))
    checkpoint['optimizer'] = None
    torch.save(checkpoint, f)
def create_backbone(f='weights/last.pt'):  # from utils.utils import *; create_backbone()
    """Create a transfer-learning backbone from checkpoint *f*.

    Removes optimizer state, training results and epoch counter, re-enables
    gradients on all parameters, and writes the result to weights/backbone.pt.
    """
    ckpt = torch.load(f, map_location=torch.device('cpu'))
    ckpt['optimizer'] = None
    ckpt['training_results'] = None
    ckpt['epoch'] = -1
    # re-enable gradients so the backbone can be fine-tuned
    for param in ckpt['model'].values():
        try:
            param.requires_grad = True
        except:
            pass
    torch.save(ckpt, 'weights/backbone.pt')
def coco_class_count(path='../coco/labels/train2014/'):
    """Accumulate and print a histogram of class occurrences over label files.

    Each file under *path* holds rows [class, x, y, w, h]; a running progress
    line (file index, total files) is printed for every file.
    """
    # Histogram of occurrences per class
    nc = 80  # number classes
    x = np.zeros(nc, dtype='int32')
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        x += np.bincount(labels[:, 0].astype('int32'), minlength=nc)
        print(i, len(files))  # progress indicator
def coco_only_people(path='../coco/labels/train2017/'):  # from utils.utils import *; coco_only_people()
    """Print label count and filename for images whose labels are all class 0 (person)."""
    # Find images with only people
    files = sorted(glob.glob('%s/*.*' % path))
    for i, file in enumerate(files):
        labels = np.loadtxt(file, dtype=np.float32).reshape(-1, 5)
        if all(labels[:, 0] == 0):  # class column all zero -> person-only image
            print(labels.shape[0], file)
def select_best_evolve(path='evolve*.txt'):  # from utils.utils import *; select_best_evolve()
    """For each evolve*.txt results file, print the row with the highest fitness."""
    # Find best evolved mutation
    for file in sorted(glob.glob(path)):
        x = np.loadtxt(file, dtype=np.float32, ndmin=2)
        print(file, x[fitness(x).argmax()])  # fitness() scores each results row
def crop_images_random(path='../images/', scale=0.50):  # from utils.utils import *; crop_images_random()
    """Replace every image under *path* with a random square crop.

    Crop side is between 30 px and scale*height. Destructive: the original
    files are overwritten in place.
    """
    # crops images into random squares up to scale fraction
    # WARNING: overwrites images!
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        img = cv2.imread(file)  # BGR
        if img is not None:
            h, w = img.shape[:2]
            # create random mask
            a = 30  # minimum size (pixels)
            mask_h = random.randint(a, int(max(a, h * scale)))  # mask height
            mask_w = mask_h  # mask width
            # box
            xmin = max(0, random.randint(0, w) - mask_w // 2)
            ymin = max(0, random.randint(0, h) - mask_h // 2)
            xmax = min(w, xmin + mask_w)
            ymax = min(h, ymin + mask_h)
            # apply random color mask
            # NOTE(review): despite the comment above, this crops the image and overwrites the file
            cv2.imwrite(file, img[ymin:ymax, xmin:xmax])
def coco_single_class_labels(path='../coco/labels/train2014/', label_class=43):
    """Extract a single-class dataset from COCO labels into ./new/.

    Keeps only rows of class *label_class* (remapped to class 0), copies the
    matching images, and records them in new/images.txt. Any existing ./new/
    folder is deleted first.
    """
    # Makes single-class coco datasets. from utils.utils import *; coco_single_class_labels()
    if os.path.exists('new/'):
        shutil.rmtree('new/')  # delete output folder
    os.makedirs('new/')  # make new output folder
    os.makedirs('new/labels/')
    os.makedirs('new/images/')
    for file in tqdm(sorted(glob.glob('%s/*.*' % path))):
        with open(file, 'r') as f:
            labels = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
        i = labels[:, 0] == label_class  # rows belonging to the requested class
        if any(i):
            img_file = file.replace('labels', 'images').replace('txt', 'jpg')
            labels[:, 0] = 0  # reset class to 0
            with open('new/images.txt', 'a') as f:  # add image to dataset list
                f.write(img_file + '\n')
            with open('new/labels/' + Path(file).name, 'a') as f:  # write label
                for l in labels[i]:
                    f.write('%g %.6f %.6f %.6f %.6f\n' % tuple(l))
            shutil.copyfile(src=img_file, dst='new/images/' + Path(file).name.replace('txt', 'jpg'))  # copy images
def kmean_anchors(path='./data/coco64.txt', n=9, img_size=(320, 1024), thr=0.20, gen=1000):
    """Compute k-means anchors for *.cfg files, then refine them with a
    genetic mutation loop; prints and returns the final anchors.
    """
    # Creates kmeans anchors for use in *.cfg files: from utils.utils import *; _ = kmean_anchors()
    # n: number of anchors
    # img_size: (min, max) image size used for multi-scale training (can be same values)
    # thr: IoU threshold hyperparameter used for training (0.0 - 1.0)
    # gen: generations to evolve anchors using genetic algorithm
    from utils.datasets import LoadImagesAndLabels

    def print_results(k):
        # Print recall statistics and the anchors in cfg format; returns sorted anchors.
        k = k[np.argsort(k.prod(1))]  # sort small to large
        iou = wh_iou(wh, torch.Tensor(k))
        max_iou = iou.max(1)[0]
        bpr, aat = (max_iou > thr).float().mean(), (iou > thr).float().mean() * n  # best possible recall, anch > thr
        print('%.2f iou_thr: %.3f best possible recall, %.2f anchors > thr' % (thr, bpr, aat))
        print('n=%g, img_size=%s, IoU_all=%.3f/%.3f-mean/best, IoU>thr=%.3f-mean: ' %
              (n, img_size, iou.mean(), max_iou.mean(), iou[iou > thr].mean()), end='')
        for i, x in enumerate(k):
            print('%i,%i' % (round(x[0]), round(x[1])), end=', ' if i < len(k) - 1 else '\n')  # use in *.cfg
        return k

    def fitness(k):  # mutation fitness
        # Mean IoU of labels with their best anchor, zeroed below the threshold.
        iou = wh_iou(wh, torch.Tensor(k))  # iou
        max_iou = iou.max(1)[0]
        return (max_iou * (max_iou > thr).float()).mean()  # product

    # Get label wh
    wh = []
    dataset = LoadImagesAndLabels(path, augment=True, rect=True)
    nr = 1 if img_size[0] == img_size[1] else 10  # number augmentation repetitions
    for s, l in zip(dataset.shapes, dataset.labels):
        wh.append(l[:, 3:5] * (s / s.max()))  # image normalized to letterbox normalized wh
    wh = np.concatenate(wh, 0).repeat(nr, axis=0)  # augment 10x
    wh *= np.random.uniform(img_size[0], img_size[1], size=(wh.shape[0], 1))  # normalized to pixels (multi-scale)
    wh = wh[(wh > 2.0).all(1)]  # remove below threshold boxes (< 2 pixels wh)

    # Darknet yolov3.cfg anchors
    use_darknet = False
    if use_darknet and n == 9:
        k = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45], [59, 119], [116, 90], [156, 198], [373, 326]])
    else:
        # Kmeans calculation
        from scipy.cluster.vq import kmeans
        print('Running kmeans for %g anchors on %g points...' % (n, len(wh)))
        s = wh.std(0)  # sigmas for whitening
        k, dist = kmeans(wh / s, n, iter=30)  # points, mean distance
        k *= s  # un-whiten back to pixel units
    wh = torch.Tensor(wh)
    k = print_results(k)

    # # Plot
    # k, d = [None] * 20, [None] * 20
    # for i in tqdm(range(1, 21)):
    #     k[i-1], d[i-1] = kmeans(wh / s, i)  # points, mean distance
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))
    # ax = ax.ravel()
    # ax[0].plot(np.arange(1, 21), np.array(d) ** 2, marker='.')
    # fig, ax = plt.subplots(1, 2, figsize=(14, 7))  # plot wh
    # ax[0].hist(wh[wh[:, 0]<100, 0], 400)
    # ax[1].hist(wh[wh[:, 1]<100, 1], 400)
    # fig.tight_layout()
    # fig.savefig('wh.png', dpi=200)

    # Evolve: random multiplicative mutations, keep only improvements
    npr = np.random
    f, sh, mp, s = fitness(k), k.shape, 0.9, 0.1  # fitness, generations, mutation prob, sigma
    for _ in tqdm(range(gen), desc='Evolving anchors'):
        v = np.ones(sh)
        while (v == 1).all():  # mutate until a change occurs (prevent duplicates)
            v = ((npr.random(sh) < mp) * npr.random() * npr.randn(*sh) * s + 1).clip(0.3, 3.0)  # 98.6, 61.6
        kg = (k.copy() * v).clip(min=2.0)
        fg = fitness(kg)
        if fg > f:
            f, k = fg, kg.copy()
            print_results(k)
    k = print_results(k)
    return k
def print_mutation(hyp, results, bucket=''):
    """Append a hyperparameter-evolution result row to evolve.txt (optionally
    synced to/from a GCS *bucket*), dedupe rows and keep the file sorted by fitness."""
    # Print mutation results to evolve.txt (for use with train.py --evolve)
    a = '%10s' * len(hyp) % tuple(hyp.keys())  # hyperparam keys
    b = '%10.3g' * len(hyp) % tuple(hyp.values())  # hyperparam values
    c = '%10.4g' * len(results) % results  # results (P, R, mAP, F1, test_loss)
    print('\n%s\n%s\nEvolved fitness: %s\n' % (a, b, c))
    if bucket:
        os.system('gsutil cp gs://%s/evolve.txt .' % bucket)  # download evolve.txt
    with open('evolve.txt', 'a') as f:  # append result
        f.write(c + b + '\n')
    x = np.unique(np.loadtxt('evolve.txt', ndmin=2), axis=0)  # load unique rows
    np.savetxt('evolve.txt', x[np.argsort(-fitness(x))], '%10.3g')  # save sort by fitness
    if bucket:
        os.system('gsutil cp evolve.txt gs://%s' % bucket)  # upload evolve.txt
def apply_classifier(x, model, img, im0):
    """Second-stage classification of YOLO detections.

    Crops each detection from the original image(s) *im0*, classifies the crop
    with *model*, and keeps only detections whose YOLO class matches the
    classifier prediction. Returns the filtered detections list *x*.
    """
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()
            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()
            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)
                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)
            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections
    return x
def fitness(x):
    """Per-row fitness: weighted sum of the first four metric columns.

    The weight vector selects mAP only from [P, R, mAP, F1]@0.5
    (or [P, R, mAP@0.5, mAP@0.5:0.95]).
    """
    metric_weights = [0.0, 0.00, 1, 0.00]  # weights for [P, R, mAP, F1]
    return (x[:, :4] * metric_weights).sum(1)
def output_to_target(output, width, height):
    """
    Convert a YOLO model output to target format
    [batch_id, class_id, x, y, w, h, conf]
    with xywh normalized by *width* / *height*.
    """
    if isinstance(output, torch.Tensor):
        output = output.cpu().numpy()

    rows = []
    for img_idx, detections in enumerate(output):
        if detections is None:
            continue
        for det in detections:
            x1, y1, x2, y2 = det[:4]
            box_w = (x2 - x1) / width
            box_h = (y2 - y1) / height
            cx = x1 / width + box_w / 2
            cy = y1 / height + box_h / 2
            rows.append([img_idx, int(det[5]), cx, cy, box_w, box_h, det[4]])
    return np.array(rows)
# Plotting functions ---------------------------------------------------------------------------------------------------
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw one xyxy bounding box (and optional label) on *img* in place."""
    # Plots one bounding box on image img
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1  # line/font thickness
    color = color or [random.randint(0, 255) for _ in range(3)]
    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
    cv2.rectangle(img, c1, c2, color, thickness=tl)
    if label:
        tf = max(tl - 1, 1)  # font thickness
        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3  # label background corner
        cv2.rectangle(img, c1, c2, color, -1)  # filled
        cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)
def plot_wh_methods():  # from utils.utils import *; plot_wh_methods()
    """Plot exp(x) vs powers of 2*sigmoid(x) and save to comparison.png."""
    # Compares the two methods for width-height anchor multiplication
    # https://github.com/ultralytics/yolov3/issues/168
    x = np.arange(-4.0, 4.0, .1)
    ya = np.exp(x)
    yb = torch.sigmoid(torch.from_numpy(x)).numpy() * 2
    fig = plt.figure(figsize=(6, 3), dpi=150)
    plt.plot(x, ya, '.-', label='yolo method')
    plt.plot(x, yb ** 2, '.-', label='^2 power method')
    plt.plot(x, yb ** 2.5, '.-', label='^2.5 power method')
    plt.xlim(left=-4, right=4)
    plt.ylim(bottom=0, top=6)
    plt.xlabel('input')
    plt.ylabel('output')
    plt.legend()
    fig.tight_layout()
    fig.savefig('comparison.png', dpi=200)
def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):
    """Render a mosaic of batch images with their target/prediction boxes.

    targets rows are [batch_id, class, x, y, w, h(, conf)]; a conf column marks
    predictions (only drawn above 0.3). Saves to *fname* if given; returns the mosaic.
    """
    tl = 3  # line thickness
    tf = max(tl - 1, 1)  # font thickness
    if isinstance(images, torch.Tensor):
        images = images.cpu().numpy()
    if isinstance(targets, torch.Tensor):
        targets = targets.cpu().numpy()
    # un-normalise
    if np.max(images[0]) <= 1:
        images *= 255
    bs, _, h, w = images.shape  # batch size, _, height, width
    bs = min(bs, max_subplots)  # limit plot images
    ns = np.ceil(bs ** 0.5)  # number of subplots (square)
    # Check if we should resize
    scale_factor = max_size / max(h, w)
    if scale_factor < 1:
        h = math.ceil(scale_factor * h)
        w = math.ceil(scale_factor * w)
    # Empty array for output
    mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8)
    # Fix class - colour map
    prop_cycle = plt.rcParams['axes.prop_cycle']
    # https://stackoverflow.com/questions/51350872/python-from-color-name-to-rgb
    hex2rgb = lambda h: tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
    color_lut = [hex2rgb(h) for h in prop_cycle.by_key()['color']]
    for i, img in enumerate(images):
        if i == max_subplots:  # if last batch has fewer images than we expect
            break
        block_x = int(w * (i // ns))  # tile origin in the mosaic
        block_y = int(h * (i % ns))
        img = img.transpose(1, 2, 0)  # CHW -> HWC
        if scale_factor < 1:
            img = cv2.resize(img, (w, h))
        mosaic[block_y:block_y + h, block_x:block_x + w, :] = img
        if len(targets) > 0:
            image_targets = targets[targets[:, 0] == i]
            boxes = xywh2xyxy(image_targets[:, 2:6]).T
            classes = image_targets[:, 1].astype('int')
            gt = image_targets.shape[1] == 6  # ground truth if no conf column
            conf = None if gt else image_targets[:, 6]  # check for confidence presence (gt vs pred)
            # scale normalized boxes into this tile's pixel coordinates
            boxes[[0, 2]] *= w
            boxes[[0, 2]] += block_x
            boxes[[1, 3]] *= h
            boxes[[1, 3]] += block_y
            for j, box in enumerate(boxes.T):
                cls = int(classes[j])
                color = color_lut[cls % len(color_lut)]
                cls = names[cls] if names else cls
                if gt or conf[j] > 0.3:  # 0.3 conf thresh
                    label = '%s' % cls if gt else '%s %.1f' % (cls, conf[j])
                    plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)
        # Draw image filename labels
        if paths is not None:
            label = os.path.basename(paths[i])[:40]  # trim to 40 char
            t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
            cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,
                        lineType=cv2.LINE_AA)
        # Image border
        cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)
    if fname is not None:
        cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB))
    return mosaic
def plot_test_txt():  # from utils.utils import *; plot_test()
    """Plot 2D and 1D histograms of box centers from test.txt (xyxy rows)."""
    # Plot test.txt histograms
    x = np.loadtxt('test.txt', dtype=np.float32)
    box = xyxy2xywh(x[:, :4])
    cx, cy = box[:, 0], box[:, 1]  # box center coordinates
    fig, ax = plt.subplots(1, 1, figsize=(6, 6))
    ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0)
    ax.set_aspect('equal')
    fig.tight_layout()
    plt.savefig('hist2d.png', dpi=300)
    fig, ax = plt.subplots(1, 2, figsize=(12, 6))
    ax[0].hist(cx, bins=600)
    ax[1].hist(cy, bins=600)
    fig.tight_layout()
    plt.savefig('hist1d.png', dpi=200)
def plot_targets_txt():  # from utils.utils import *; plot_targets_txt()
    """Plot histograms of x/y/width/height target columns from targets.txt."""
    # Plot targets.txt histograms
    x = np.loadtxt('targets.txt', dtype=np.float32).T
    s = ['x targets', 'y targets', 'width targets', 'height targets']
    fig, ax = plt.subplots(2, 2, figsize=(8, 8))
    ax = ax.ravel()
    for i in range(4):
        ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std()))
        ax[i].legend()
        ax[i].set_title(s[i])
    fig.tight_layout()
    plt.savefig('targets.jpg', dpi=200)
def plot_evolution_results(hyp):  # from utils.utils import *; plot_evolution_results(hyp)
    """Scatter each hyperparameter against fitness from evolve.txt,
    highlighting the single best result; saves evolve.png."""
    # Plot hyperparameter evolution results in evolve.txt
    x = np.loadtxt('evolve.txt', ndmin=2)
    f = fitness(x)
    weights = (f - f.min()) ** 2  # for weighted results
    fig = plt.figure(figsize=(12, 10))
    matplotlib.rc('font', **{'size': 8})
    for i, (k, v) in enumerate(hyp.items()):
        y = x[:, i + 7]  # hyperparameter columns start after the 7 result columns
        # mu = (y * weights).sum() / weights.sum()  # best weighted result
        mu = y[f.argmax()]  # best single result
        plt.subplot(4, 5, i + 1)
        plt.plot(mu, f.max(), 'o', markersize=10)
        plt.plot(y, f, '.')
        plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9})  # limit to 40 characters
        print('%15s: %.3g' % (k, mu))
    fig.tight_layout()
    plt.savefig('evolve.png', dpi=200)
def plot_results_overlay(start=0, stop=0):  # from utils.utils import *; plot_results_overlay()
    """Plot each results*.txt with train and val losses overlaid per metric;
    saves one PNG next to each results file."""
    # Plot training results files 'results*.txt', overlaying train and val losses
    s = ['train', 'train', 'train', 'Precision', 'mAP@0.5', 'val', 'val', 'val', 'Recall', 'F1']  # legends
    t = ['GIoU', 'Objectness', 'Classification', 'P-R', 'mAP-F1']  # titles
    for f in sorted(glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')):
        results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
        n = results.shape[1]  # number of rows
        x = range(start, min(stop, n) if stop else n)
        fig, ax = plt.subplots(1, 5, figsize=(14, 3.5))
        ax = ax.ravel()
        for i in range(5):
            for j in [i, i + 5]:  # overlay train (j=i) and val (j=i+5) series
                y = results[j, x]
                if i in [0, 1, 2]:
                    y[y == 0] = np.nan  # dont show zero loss values
                ax[i].plot(x, y, marker='.', label=s[j])
            ax[i].set_title(t[i])
            ax[i].legend()
            ax[i].set_ylabel(f) if i == 0 else None  # add filename
        fig.tight_layout()
        fig.savefig(f.replace('.txt', '.png'), dpi=200)
def plot_results(start=0, stop=0, bucket='', id=()):  # from utils.utils import *; plot_results()
    """Plot 10 training metrics from all results*.txt files (optionally fetched
    from a GCS *bucket* by run *id*); saves results.png."""
    # Plot training 'results*.txt' as seen in https://github.com/ultralytics/yolov3#training
    fig, ax = plt.subplots(2, 5, figsize=(12, 6))
    ax = ax.ravel()
    s = ['GIoU', 'Objectness', 'Classification', 'Precision', 'Recall',
         'val GIoU', 'val Objectness', 'val Classification', 'mAP@0.5', 'F1']
    if bucket:
        os.system('rm -rf storage.googleapis.com')
        files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]
    else:
        files = glob.glob('results*.txt') + glob.glob('../../Downloads/results*.txt')
    for f in sorted(files):
        try:
            results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T
            n = results.shape[1]  # number of rows
            x = range(start, min(stop, n) if stop else n)
            for i in range(10):
                y = results[i, x]
                if i in [0, 1, 2, 5, 6, 7]:
                    y[y == 0] = np.nan  # dont show zero loss values
                    # y /= y[0]  # normalize
                ax[i].plot(x, y, marker='.', label=Path(f).stem, linewidth=2, markersize=8)
                ax[i].set_title(s[i])
                if i in [5, 6, 7]:  # share train and val loss y axes
                    ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])
        except:
            print('Warning: Plotting error for %s, skipping file' % f)
    fig.tight_layout()
    ax[1].legend()
    fig.savefig('results.png', dpi=200)
| 40.503666 | 121 | 0.567349 |
a9ba9889d2475624b951348975f9f11cf552ca83 | 360 | py | Python | src/messages/client_test.py | kpbhole/CDN | 725f90c710bf6fd82efe015b387447b6465eacd9 | [
"MIT"
] | 3 | 2019-12-27T11:43:58.000Z | 2020-11-21T10:09:44.000Z | src/messages/client_test.py | kpbhole/CDN | 725f90c710bf6fd82efe015b387447b6465eacd9 | [
"MIT"
] | 1 | 2019-04-19T20:17:59.000Z | 2019-04-19T20:17:59.000Z | src/messages/client_test.py | kpbhole/CDN | 725f90c710bf6fd82efe015b387447b6465eacd9 | [
"MIT"
] | 3 | 2019-05-26T19:23:31.000Z | 2021-04-14T10:07:41.000Z | from dns_request_message import *
import socket # Import socket module
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 12345 # Reserve a port for your service.
s.connect((host, port))
msg = DNSRequestMessage(0, "www.google.com", "172.16.2.30", 12312)
msg.send(s) | 30 | 66 | 0.655556 |
cf8584a1e999b237eee7288113115bfa72db2f04 | 24,220 | py | Python | python/tvm/hybrid/parser.py | jheo4/incubator-tvm | c4c61cb766608fb2f0fd8c9facc480a43afed3f5 | [
"Apache-2.0"
] | 3 | 2020-03-12T10:25:51.000Z | 2020-08-05T05:36:23.000Z | python/tvm/hybrid/parser.py | jheo4/incubator-tvm | c4c61cb766608fb2f0fd8c9facc480a43afed3f5 | [
"Apache-2.0"
] | null | null | null | python/tvm/hybrid/parser.py | jheo4/incubator-tvm | c4c61cb766608fb2f0fd8c9facc480a43afed3f5 | [
"Apache-2.0"
] | 1 | 2018-10-19T18:11:41.000Z | 2018-10-19T18:11:41.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Hybrid Script Parser"""
import ast
import operator
import logging
import sys
import types
import numbers
from enum import Enum
from tvm.ir import Array, Range
import tvm.tir
import tvm.te._ffi_api
from tvm.tir import expr as _expr
from tvm.tir import stmt as _stmt
from tvm.tir import ir_pass as _ir_pass
from tvm.te.tensor import Tensor, Operation
from tvm.tir import all as _all
from tvm.tir import any as _any
from .util import _internal_assert
from . import calls
from . import util
from .preprocessor import determine_variable_usage
from .. import api as _api
def concat_list_to_block(lst):
    """Concatenate a list of Python IR nodes into one HalideIR Block.

    An empty list becomes a no-op; a single node is returned unchanged.
    """
    if not lst:
        return util.make_nop()
    if len(lst) == 1:
        return lst[0]
    return _stmt.SeqStmt(lst)
def visit_list_to_block(visit, lst):
    """Visit each statement (skipping docstrings), drop resulting no-ops,
    and merge the rest into a single HalideIR Block."""
    visited = [visit(stmt) for stmt in lst if not util.is_docstring(stmt)]
    kept = [stmt for stmt in visited if not _ir_pass.Equal(stmt, util.make_nop())]
    if not kept:
        return util.make_nop()
    return concat_list_to_block(kept)
class Symbol(Enum):
    """Enumerates types in the symbol table"""
    Callable = 0       # python function callable from the script
    Input = 1          # function argument (placeholder/var)
    OutputBuffer = 2   # buffer written as an output
    GlobalBuffer = 3   # buffer with global scope
    LocalBuffer = 4    # buffer with local scope
    SharedBuffer = 5   # buffer with shared scope
    ConstVar = 6       # compile-time constant variable
    BufferVar = 7      # variable backed by a (global) buffer
    LoopVar = 8        # loop iteration variable
    ConstLoopVar = 9   # loop variable over a constant python range
    ThreadBind = 10    # variable bound to a GPU thread axis
def _floordiv(x, y):
    """Floor division, dispatching to TVM's floordiv when either operand is a TIR expression."""
    if any(isinstance(operand, _expr.ExprOp) for operand in (x, y)):
        return tvm.tir.floordiv(x, y)
    return operator.floordiv(x, y)
def _floormod(x, y):
    """Floor modulo, dispatching to TVM's floormod when either operand is a TIR expression."""
    if any(isinstance(operand, _expr.ExprOp) for operand in (x, y)):
        return tvm.tir.floormod(x, y)
    return operator.mod(x, y)
class HybridParser(ast.NodeVisitor):
    """Python AST visitor pass which finally lowers it to HalideIR"""

    # Maps Python AST binary / comparison / boolean operator node types to the
    # callables used to build the corresponding HalideIR expression.
    _binop_maker = {
        ast.Add : operator.add,
        ast.Sub : operator.sub,
        ast.Mult : operator.mul,
        ast.Div : operator.div if sys.version_info[0] == 2 else operator.truediv,
        ast.FloorDiv: _floordiv,
        ast.Mod : _floormod,
        ast.BitOr : operator.or_,
        ast.BitAnd : operator.and_,
        ast.BitXor : operator.xor,
        ast.Gt : operator.gt,
        ast.GtE : operator.ge,
        ast.Lt : operator.lt,
        ast.LtE : operator.le,
        ast.Eq : operator.eq,
        ast.NotEq : operator.ne,
        ast.And : _all,
        ast.Or : _any,
    }

    # Maps Python AST unary operator node types to their callables.
    _unaryop_maker = {
        ast.USub : operator.neg,
        ast.Invert : operator.invert,
        ast.Not : operator.not_
    }
    def __init__(self, args, usage, symbols, closure_vars, func_name=None):
        """
        Parameters
        ----------
        args: A list of tvm.placeholder or tvm.var
            Provided by the user, the argument list of the function to be lowered.

        usage: A dict of variables used in last in this function
            Provided by last lower pass, which collects this information

        symbols : list of str
            The symbol list of the global context of the function.

        closure_vars: dict
            A dict of external name reference captured by this function.

        Returns
        -------
        func_name: str
            The name of the function to be lowered; if not provided,
            the compiler will use the name in the AST
        """
        self.args = list(args)
        self.usage = usage.copy()

        self.symbols = {}  # Symbol table
        for k, v in symbols.items():
            # only plain python functions are callable from the script
            if isinstance(v, types.FunctionType):
                self.add_symbol(k, Symbol.Callable, v)

        self.closure_vars = closure_vars

        self.binds = {}  # Thread binds
        self.device = 0  # Is it generating device

        self.func_name = func_name  # The name of the function to be lowered
        self.outputs = []  # Output tensors' name
        self.side_effect = set()  # Tensors with side effects
        self.parsed_body = None  # The parsed HalideIR body
        self.returned = False  # If this function has a valid return
    def add_symbol(self, key, ty, val): #pylint: disable=invalid-name
        """Add value to the symbol table context

        Asserts on name conflicts. For thread binds, identical thread axes are
        deduplicated so every use of a given axis shares one IterVar with a
        uniform extent.
        """
        if key in self.symbols.keys():
            old = str(self.symbols[key])
            new = str((ty, val))
            _internal_assert(False,
                             "Name conflict in symbol table! [%s] %s -> %s" % (key, old, new))

        self.symbols[key] = ty, val

        if ty == Symbol.ThreadBind:
            if val.var.name not in self.binds.keys():
                self.binds[val.var.name] = val
                return
            # axis already bound: reuse the existing IterVar after checking extents match
            val_ = self.binds[val.var.name]
            _internal_assert(_ir_pass.Equal(val_.dom.extent, val.dom.extent),
                             "Thread extents should be uniform!")
            self.symbols[key] = ty, val_
def wrap_up_realize(self, node, body):
    """Wrap up all the variables which will no longer be used.

    Emits Realize/AttrStmt pairs around *body* for every buffer symbol
    whose scope ends at AST node *node*, then drops those symbols from
    the table.  Inputs and output buffers are never realized here.
    """
    to_pop = []
    for key, val in self.usage.items():
        _, level, _ = val
        if key not in self.symbols:
            # don't realize the symbols that are never visited
            continue
        if level != node:
            continue
        # NOTE(review): redundant — membership was already checked above.
        _internal_assert(key in self.symbols.keys(), "Unknown symbol %s!" % key)
        ty, entry = self.symbols[key] #pylint: disable=invalid-name
        if ty in [Symbol.Input, Symbol.OutputBuffer]:
            continue
        if 'Buffer' in ty.name:
            _buf = entry
            # Symbol kind names end in "Buffer"; strip that suffix to get
            # the storage scope (e.g. SharedBuffer -> 'shared').
            _scope = 'global' if ty is Symbol.BufferVar else ty.name[:-6].lower()
            to_pop.append(key)
        else:
            continue
        if _scope == 'global':
            # Global realizes must enclose the thread binds.
            body = self.wrap_up_binds(body)
        _domain = [Range.make_by_min_extent(0, i) for i in _buf.shape]
        _dtype = _buf.dtype
        _true = _api.convert(True)
        body = tvm.tir.Realize(_buf.op, 0, _dtype, _domain, _true, body)
        body = tvm.tir.AttrStmt(_buf.op, 'realize_scope', _api.convert(_scope), body)
    for elem in to_pop:
        self.symbols.pop(elem)
    return body
def wrap_up_binds(self, body):
    """Wrap *body* in one 'thread_extent' AttrStmt per pending thread
    bind, innermost-first, then clear the pending-bind table."""
    for bound in self.binds.values():
        body = tvm.tir.AttrStmt(bound, 'thread_extent', bound.dom.extent, body)
    self.binds = {}
    return body
#pylint: disable=invalid-name, missing-docstring
def visit_Module(self, node):
    # The hybrid frontend feeds exactly one function per module; lower it.
    _internal_assert(len(node.body) == 1, \
                     "Only one-function source code will be fed to this parser!")
    return self.visit(node.body[0])
def visit_FunctionDef(self, node):
    """Lower the function definition: bind formals to the user-supplied
    args, lower the body, then realize buffers and thread binds."""
    _internal_assert(len(node.args.args) == len(self.args), \
                     "The number of arguments passed to the \
                      function should be the same as it is defined!")
    if self.func_name is None:
        self.func_name = node.name
    for idx, arg in enumerate(node.args.args):
        _attr = 'id' if sys.version_info[0] < 3 else 'arg' # To make py2 and 3 compatible
        self.add_symbol(getattr(arg, _attr), Symbol.Input, self.args[idx])
    res = visit_list_to_block(self.visit, node.body)
    res = self.wrap_up_realize(node, res)
    return self.wrap_up_binds(res)
def visit_Expr(self, node):
    # A bare expression statement lowers to whatever its value lowers to.
    return self.visit(node.value)
def visit_Name(self, node):
    """Resolve an identifier to its lowered value.

    Resolution order: Python-2 boolean literals, captured closure
    variables, then the symbol table; the result form depends on the
    symbol's kind (loop var, thread bind, constant, or buffer access).
    """
    name = node.id
    if sys.version_info[0] == 2 and name in ['True', 'False']:
        # Python 2 parses True/False as plain Names, not constants.
        return _api.convert(ast.literal_eval(name))
    if name in self.closure_vars:
        return _api.convert(self.closure_vars[name])
    # BUGFIX: check membership *before* indexing; the original indexed
    # self.symbols first, so an unknown name raised a raw KeyError and
    # this diagnostic never fired.
    _internal_assert(name in self.symbols, "Unknown symbol %s!" % name)
    ty, entry = self.symbols[name]
    if ty in [Symbol.LoopVar, Symbol.Input, Symbol.ConstLoopVar]:
        return entry
    if ty is Symbol.ThreadBind:
        return entry.var
    if ty is Symbol.ConstVar:
        # Constants may only be read; a store target resolves to None.
        return entry if isinstance(node.ctx, ast.Load) else None
    if ty is Symbol.BufferVar:
        # Scalar variables are modeled as 1-element buffers: loads become
        # Halide calls at index 0, stores return (buffer, [0]).
        if isinstance(node.ctx, ast.Load):
            return tvm.tir.Call(entry.dtype, entry.name, [_api.const(0, 'int32')], \
                                _expr.Call.Halide, entry.op, entry.value_index)
        return entry, [_api.const(0, 'int32')]
    # Do I need any assertion here?
    return entry
def visit_Num(self, node):
    """Lower a numeric literal to a typed IR constant.

    bool must be tested before Integral: in Python, ``bool`` is a
    subclass of ``int`` (hence of ``numbers.Integral``), so the original
    Integral-first ordering silently typed True/False as int32 and made
    the bool branch unreachable.
    """
    if isinstance(node.n, bool):
        dtype = "bool"
    elif isinstance(node.n, numbers.Integral):
        dtype = "int32"
    elif isinstance(node.n, float):
        dtype = "float32"
    else:
        _internal_assert(False,
                         "The data type should be one of (int, float, bool)")
    return _api.const(node.n, dtype)
def visit_NameConstant(self, node):
    # Python 3 parses True/False/None as NameConstant; convert directly.
    return _api.convert(node.value)
def visit_AugAssign(self, node):
    """Lower ``target op= value`` as read-modify-write on a buffer."""
    buf = self.visit(node.target)
    rhs = self.visit(node.value)
    if isinstance(buf, tuple):
        # Store context resolves to (buffer, index args).
        _internal_assert(len(buf) == 2, "LHS is supposed to be (buf, args)!")
        buf, args = buf
    else:
        # Scalar variable: treated as a 1-element buffer at index 0.
        args = [_api.const(0, 'int32')]
    _internal_assert(isinstance(buf, Tensor), "LHS is supposed to be Tensor!")
    # Read the current value, combine with rhs, and write it back.
    read = tvm.tir.Call(buf.dtype, buf.name, args, _expr.Call.Halide, buf.op, buf.value_index)
    value = HybridParser._binop_maker[type(node.op)](read, rhs)
    return tvm.tir.Provide(buf.op, 0, value, args)
def visit_Assign(self, node):
    """Lower an assignment statement.

    Handles, in order: detupling the outputs of an Operation rhs,
    first-time declarations (buffer allocations and constants), writes
    to scalar variables, and writes to array elements.
    """
    rhs = self.visit(node.value)
    if isinstance(rhs, Operation):
        # rhs is a nested hybrid op: bind each of its outputs to a name
        # and splice its body in with the outputs remapped.
        rmap = {}
        _internal_assert(len(node.targets) == rhs.num_outputs, \
                         "Unable to detuple the outs to targets")
        for i in range(rhs.num_outputs):
            _internal_assert(isinstance(node.targets[i], ast.Name),
                             "You should bind a pure name to the tensors")
            self.add_symbol(node.targets[i].id, Symbol.GlobalBuffer, rhs.output(i))
            rmap[rhs.outputs[i].op] = rhs.output(i)
        return util.replace_io(rhs.body, rmap)
    _internal_assert(len(node.targets) == 1, "So far only one-valued assignment is supported!")
    lhs = node.targets[0]
    if isinstance(rhs, _expr.PrimExpr):
        rhs = _ir_pass.Simplify(rhs)
    if isinstance(lhs, ast.Name):
        #TODO: support defined intermediate buffer later
        lhs_ = lhs
        lhs = lhs.id
        if lhs in self.symbols.keys():
            ty, _ = self.symbols[lhs]
            _internal_assert(ty != Symbol.LoopVar, \
                             "Loop variable cannot be overwritten!")
        decl, _, rw = self.usage[lhs]
        if decl == lhs_:
            # This AST node is the variable's declaration site.
            _internal_assert(lhs not in self.symbols.keys(),
                             "This value should not be defined before this point!")
            if isinstance(rhs, tuple):
                # rhs is an allocation intrinsic: (shape, dtype, scope).
                shape, dtype, scope = rhs
                ph = _api.placeholder(shape, dtype=dtype, name=lhs)
                self.add_symbol(lhs, getattr(Symbol, scope.title() + "Buffer"), ph)
                if scope == 'output':
                    self.outputs.append(lhs)
                return util.make_nop()
            if isinstance(rhs, util.halide_imm_types) and ast.Store not in rw:
                # Never rewritten: fold it as a compile-time constant.
                self.add_symbol(lhs, Symbol.ConstVar, rhs)
            else:
                _internal_assert(self.device == 0,
                                 "Single variable not supported in devices' side!\n" + \
                                 "If you are using GPU, please allocate a 'local' spad " + \
                                 "outside the bind body")
                # Mutable scalar: model it as a 1-element global buffer.
                ph = _api.placeholder((1, ), dtype=rhs.dtype, name=lhs)
                self.add_symbol(lhs, Symbol.BufferVar, ph)
        lhs = self.visit(lhs_)
        if lhs is not None:
            buf, args = lhs
            return tvm.tir.Provide(buf.op, 0, rhs, args)
        # ConstVar target in store context resolves to None: no-op store.
        return util.make_nop()
    # Array-element assignment: lhs visits to (tensor, index args).
    lhs, args = self.visit(lhs)
    _internal_assert(isinstance(lhs, Tensor), \
                     "An array access's LHS is expected to be a expr.Call!")
    res = tvm.tir.Provide(lhs.op, lhs.value_index, rhs, args)
    return res
def visit_Index(self, node):
    """Flatten an Index node into the lowered index expression list."""
    idx = node.value
    # A tuple index lowers (via visit_Tuple) to a tuple of expressions;
    # a single index is wrapped into a one-element list.
    return self.visit(idx) if isinstance(idx, ast.Tuple) else [self.visit(idx)]
def visit_Attribute(self, node):
    # Attribute access (e.g. ``a.shape``) is looked up on the lowered value.
    buf = self.visit(node.value)
    return getattr(buf, node.attr)
def visit_Subscript(self, node):
    """Lower a subscript: constant indexing into an Array, or a buffer
    element access (Halide call on load, (tensor, args) on store)."""
    args = self.visit(node.slice)
    arr = self.visit(node.value)
    if isinstance(arr, Array):
        # Indexing an IR Array requires every index to be a constant.
        for i in args:
            if isinstance(i, numbers.Integral):
                arr = arr[i]
            else:
                _internal_assert(isinstance(i, (_expr.IntImm,)), \
                                 "All indices are supposed to be constants")
                arr = arr[i.value]
        return arr
    if isinstance(node.ctx, ast.Load):
        return tvm.tir.Call(arr.dtype, arr.name, args,
                            _expr.Call.Halide, arr.op, arr.value_index)
    # Store context: caller (visit_Assign/visit_AugAssign) emits the Provide.
    return arr, args
def visit_With(self, node):
    """Lower ``with ctx() as name:`` by recording the context annotation
    for *name* and lowering the body as a plain block."""
    if sys.version_info[0] < 3:
        context = node.context_expr
        option = node.optional_vars
    else:
        _internal_assert(len(node.items) == 1, "Only one with element is supported so far!")
        context = node.items[0].context_expr
        option = node.items[0].optional_vars
    _internal_assert(isinstance(context, ast.Call), "The object must be a Python func call!")
    _internal_assert(isinstance(option, ast.Name), "The object after 'as' must be an id!")
    # NOTE(review): self.annotation is not initialized in the __init__
    # shown in this file — presumably set elsewhere; verify before use.
    self.annotation[option.id] = context.func.id
    return visit_list_to_block(self.visit, node.body)
def visit_If(self, node):
    """Lower an if-statement, constant-folding away the branch when the
    condition simplifies to a compile-time integer."""
    cond = _ir_pass.CanonicalSimplify(self.visit(node.test))
    if isinstance(cond, _expr.IntImm):
        # Proven condition: emit only the taken branch (or a no-op when
        # a false condition has no else-clause).
        taken = node.body if cond.value else node.orelse
        if taken:
            return visit_list_to_block(self.visit, taken)
        return util.make_nop()
    then_blk = visit_list_to_block(self.visit, node.body)
    else_blk = visit_list_to_block(self.visit, node.orelse) if node.orelse else None
    return tvm.tir.IfThenElse(cond, then_blk, else_blk)
def visit_IfExp(self, node):
    # ``a if cond else b`` lowers to a Select expression.
    cond = self.visit(node.test)
    if_body = self.visit(node.body)
    else_body = self.visit(node.orelse)
    return tvm.tir.Select(cond, if_body, else_body)
def visit_Compare(self, node):
    """Lower a (possibly chained) comparison; a < b < c becomes the
    conjunction of the pairwise comparisons."""
    _internal_assert(len(node.ops) == len(node.comparators),
                     "#compare ops != #comparators")
    operands = [self.visit(node.left)]
    operands.extend(self.visit(comp) for comp in node.comparators)
    pieces = [HybridParser._binop_maker[type(op)](left, right)
              for op, left, right in zip(node.ops, operands, operands[1:])]
    return _all(*pieces)
def visit_BoolOp(self, node):
    """Lower not/and/or; ``not`` is the only legal single-operand op."""
    if len(node.values) == 1:
        _internal_assert(isinstance(node.op, ast.Not), \
                         "Unary is supposed to be not!")
        return operator.not_(self.visit(node.values[0]))
    _internal_assert(isinstance(node.op, (ast.And, ast.Or)), \
                     "Binary is supposed to be and/or!")
    operands = [self.visit(v) for v in node.values]
    return HybridParser._binop_maker[type(node.op)](*operands)
def visit_UnaryOp(self, node):
    # Dispatch -x / ~x / not x through the class-level operator table.
    operand = self.visit(node.operand)
    return HybridParser._unaryop_maker[type(node.op)](operand)
def visit_BinOp(self, node):
    # Dispatch binary arithmetic/bitwise ops through the operator table.
    lhs = self.visit(node.left)
    rhs = self.visit(node.right)
    return HybridParser._binop_maker[type(node.op)](lhs, rhs)
def visit_Call(self, node):
    """Lower a function call: either a hybrid intrinsic from ``calls``
    or a Callable symbol captured from the enclosing context."""
    # Yet, no function pointer supported
    _internal_assert(isinstance(node.func, ast.Name), \
                     "Only id-function function call is supported so far!")
    func_id = node.func.id
    args = [self.visit(i) for i in node.args]
    # Intrinsics'
    if hasattr(calls, func_id):
        return getattr(calls, func_id)(func_id, args)
    # Contexts'
    _internal_assert(func_id in self.symbols.keys(), \
                     "The function called (%s) is not in the context either!" % func_id)
    ty, entry = self.symbols[func_id]
    _internal_assert(ty is Symbol.Callable, \
                     "Are you sure what you call is a function?!")
    outs = entry(*args)
    op = outs.op if isinstance(outs, Tensor) else outs[0].op
    return op
def visit_For(self, node):
    """Lower a for-loop.

    Three shapes, selected by what ``node.iter`` lowers to:
    a tuple for_type means compile-time unrolling (the body is cloned
    per constant iteration); iter_var None means an ordinary IR For
    node; otherwise it is a thread-bind loop (GPU axis binding).
    """
    iter_var, low, ext, for_type = self.visit(node.iter)
    _internal_assert(isinstance(node.target, ast.Name), \
                     "The loop iterator should be a variable!")
    _name = node.target.id
    if isinstance(for_type, tuple):
        # Compile-time unrolled ("const_range"-style) loop.
        low = _ir_pass.CanonicalSimplify(low)
        ext = _ir_pass.CanonicalSimplify(ext)
        _internal_assert(isinstance(low, _expr.ConstExpr) and
                         isinstance(ext, _expr.ConstExpr), \
                         "Const range should start from a const " + \
                         "and iterate const times")
        low, ext = low.value, ext.value
        # Arbitrary large-unroll threshold; warn but do not refuse.
        if ext > 114514:
            logging.log(logging.CRITICAL, \
                        '[Warning] Are you sure to unroll a large loop in Python?')
        bodies = []
        for i in range(low, low + ext):
            # Bind the iterator to the constant, lower one copy of the
            # body, then unbind for the next iteration.
            self.add_symbol(_name, Symbol.ConstLoopVar, i)
            body = visit_list_to_block(self.visit, node.body)
            body = self.wrap_up_realize(node, body)
            bodies.append(body)
            self.symbols.pop(_name)
        return concat_list_to_block(bodies)
    if iter_var is None:
        # Plain loop: make a fresh loop variable; non-zero lower bounds
        # are folded into the symbol as an offset so the For starts at 0.
        _internal_assert(for_type is not None, "The loop iterating function parse error!")
        offset = iter_var = _api.var(_name)
        if not _ir_pass.Equal(low, _api.const(0, 'int32')):
            offset = iter_var + low
        self.add_symbol(_name, Symbol.LoopVar, offset)
        _body = visit_list_to_block(self.visit, node.body)
    else:
        # Thread-bind loop: track device nesting while lowering the body.
        _internal_assert(for_type is None, "The loop bind function parse error!")
        self.add_symbol(_name, Symbol.ThreadBind, iter_var)
        self.device += 1
        _body = visit_list_to_block(self.visit, node.body)
        self.device -= 1
    _body = self.wrap_up_realize(node, _body)
    if for_type is None:
        res = _body
    else:
        _internal_assert(not isinstance(for_type, tuple), \
                         "Micro expansion should be handled before!")
        res = tvm.tir.For(iter_var, _api.const(0, 'int32'), ext, for_type, 0, _body)
    self.symbols.pop(_name)
    return res
def visit_Return(self, node):
    """Record the returned tensor name(s) as the function outputs.

    The return must be a bare name or a tuple of distinct names, and
    must not appear inside a loop body.
    """
    _internal_assert(all(ty != Symbol.LoopVar for ty, _ in self.symbols.values()), \
                     "Return should not be in a loop body!")
    ids = []
    if isinstance(node.value, ast.Name):
        ids = [node.value.id]
    else:
        _internal_assert(isinstance(node.value, ast.Tuple), \
                         "You should return either a single tensor or a tuple")
        _internal_assert(all(isinstance(i, ast.Name) for i in node.value.elts), \
                         "What do you return?")
        ids = [i.id for i in node.value.elts]
    _internal_assert(len(set(ids)) == len(ids), "Duplicated tensors in the return tuples")
    if len(ids) < len(self.outputs):
        logging.log(logging.CRITICAL, '[Warning] Not all the output buffers returned!')
    # Replace the declared-output list with the tensors actually returned.
    self.outputs = [self.symbols[i][1] for i in ids]
    self.returned = True
    return util.make_nop()
def visit_Tuple(self, node):
    # Lower each element; used for multi-indexing and tuple returns.
    return tuple(self.visit(i) for i in node.elts)
def visit_Str(self, node):
    # String literals pass through unchanged (e.g. dtype/scope arguments).
    return node.s
def visit_Assert(self, node):
    # ``assert test, msg`` becomes an AssertStmt guarding a no-op body.
    # NOTE(review): assumes the assert carries a message — a bare
    # ``assert test`` would hand None to self.visit; confirm callers.
    test = self.visit(node.test)
    mesg = _api.convert(self.visit(node.msg))
    return tvm.tir.AssertStmt(test, mesg, util.make_nop())
def parse_python(src, args, symbols, closure_vars):
    """The helper function of calling the AST visitor

    Parameters
    ----------
    src : ast.node or str
        If an ast.node, then directly lower it.
        If a str, then parse it to ast and lower it.

    args : list of Tensors or Vars
        The argument lists to the function.
        It is NOT encouraged to write a function without arguments.
        It is NOT encouraged to write a function with side effect.

    symbols : list of str
        The symbol list of the global context of the function.

    closure_vars: dict
        A dict of external name reference captured by this function.

    Returns
    -------
    parser : HybridParser
        The parser instance; its ``parsed_body`` attribute holds the
        lowered Halide IR.
    """
    root = ast.parse(src) if isinstance(src, str) else src
    # BUGFIX: the original called _internal_assert(root, ast.AST), which
    # passed the ast.AST *class* as the failure message and only checked
    # that root was truthy — the intended type check never happened.
    _internal_assert(isinstance(root, ast.AST),
                     "Input of parse_python must be a str or an ast.AST node!")
    var_usage = determine_variable_usage(root, args, symbols, closure_vars)
    parser = HybridParser(args, var_usage, symbols, closure_vars)
    parser.parsed_body = parser.visit(root)
    _internal_assert(parser.returned, 'No valid return found in the function body!')
    return parser
def source_to_op(src, args, symbols, closure_vars):
    """Another level of wrapper

    Parameters
    ----------
    src : ast.node or str
        If an ast.node, then directly lower it.
        If a str, then parse it to ast and lower it.

    args : list of Tensors or Vars
        The argument lists to the function.
        It is NOT encouraged to write a function without arguments.
        It is NOT encouraged to write a function with side effect.

    symbols : list of str
        The symbol list of the global context of the function.

    closure_vars: dict
        A dict of external name reference captured by this function.

    Returns
    -------
    res : list of output tensors
        The result of output tensors of the formed OpNode.
    """
    parser = parse_python(src, args, symbols, closure_vars)

    input_tensors = []
    # Collect Tensor arguments (recursing into Arrays) as the op inputs;
    # plain Vars are not inputs of the HybridOp node.
    def get_input_tensors(arg):
        if isinstance(arg, Tensor):
            input_tensors.append(arg)
        elif isinstance(arg, Array):
            for i in arg:
                get_input_tensors(i)
    for i in args:
        get_input_tensors(i)
    op = tvm.te._ffi_api.HybridOp(parser.func_name, "HybridOp", None, input_tensors,
                                  parser.outputs, parser.parsed_body)
    res = [op.output(i) for i in range(len(parser.outputs))]
    # Unwrap a single output for convenience, matching te.compute.
    return res[0] if len(res) == 1 else res
| 36.641452 | 99 | 0.585797 |
faee909e8bc6f41d79d0d83d66a213ccbd748943 | 25,565 | py | Python | pypy/rpython/lltypesystem/test/test_rffi.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | 1 | 2020-01-21T11:10:51.000Z | 2020-01-21T11:10:51.000Z | pypy/rpython/lltypesystem/test/test_rffi.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null | pypy/rpython/lltypesystem/test/test_rffi.py | benoitc/pypy | a3e1b12d1d01dc29056b7badc051ffc034297658 | [
"MIT"
] | null | null | null |
import py
import sys
from pypy.rpython.lltypesystem.rffi import *
from pypy.rpython.lltypesystem.rffi import _keeper_for_type # crap
from pypy.rlib.rposix import get_errno, set_errno
from pypy.translator.c.test.test_genc import compile as compile_c
from pypy.rpython.lltypesystem.lltype import Signed, Ptr, Char, malloc
from pypy.rpython.lltypesystem.rstr import STR
from pypy.rpython.lltypesystem import lltype
from pypy.tool.udir import udir
from pypy.rpython.test.test_llinterp import interpret
from pypy.rpython.test.tool import BaseRtypingTest, LLRtypeMixin, OORtypeMixin
from pypy.annotation.annrpython import RPythonAnnotator
from pypy.rpython.rtyper import RPythonTyper
from pypy.translator.backendopt.all import backend_optimizations
from pypy.translator.translator import graphof
from pypy.conftest import option
from pypy.objspace.flow.model import summary
from pypy.translator.tool.cbuild import ExternalCompilationInfo
from pypy.rlib.rarithmetic import r_singlefloat
class BaseTestRffi:
def test_basic(self):
c_source = py.code.Source("""
int someexternalfunction(int x)
{
return (x + 3);
}
""")
eci = ExternalCompilationInfo(separate_module_sources=[c_source])
z = llexternal('someexternalfunction', [Signed], Signed,
compilation_info=eci)
def f():
return z(8)
xf = self.compile(f, [])
assert xf() == 8+3
def test_hashdefine(self):
h_source = """
#define X(i) (i+3)
"""
h_file = udir.join("stuff.h")
h_file.write(h_source)
eci = ExternalCompilationInfo(includes=['stuff.h'],
include_dirs=[udir])
z = llexternal('X', [Signed], Signed, compilation_info=eci)
def f():
return z(8)
xf = self.compile(f, [])
assert xf() == 8+3
def test_string(self):
eci = ExternalCompilationInfo(includes=['string.h'])
z = llexternal('strlen', [CCHARP], Signed, compilation_info=eci)
def f():
s = str2charp("xxx")
res = z(s)
free_charp(s)
return res
xf = self.compile(f, [], backendopt=False)
assert xf() == 3
def test_unicode(self):
eci = ExternalCompilationInfo(includes=['string.h'])
z = llexternal('wcslen', [CWCHARP], Signed, compilation_info=eci)
def f():
s = unicode2wcharp(u"xxx\xe9")
res = z(s)
free_wcharp(s)
return res
xf = self.compile(f, [], backendopt=False)
assert xf() == 4
def test_string_reverse(self):
c_source = py.code.Source("""
#include <string.h>
#include <src/allocator.h>
#include <src/mem.h>
char *f(char* arg)
{
char *ret;
/* lltype.free uses OP_RAW_FREE, we must allocate
* with the matching function
*/
OP_RAW_MALLOC(strlen(arg) + 1, ret, char*)
strcpy(ret, arg);
return ret;
}
""")
eci = ExternalCompilationInfo(separate_module_sources=[c_source],
post_include_bits=['char *f(char*);'])
z = llexternal('f', [CCHARP], CCHARP, compilation_info=eci)
def f():
s = str2charp("xxx")
l_res = z(s)
res = charp2str(l_res)
lltype.free(l_res, flavor='raw')
free_charp(s)
return len(res)
xf = self.compile(f, [], backendopt=False)
assert xf(expected_extra_mallocs=-1) == 3
def test_stringstar(self):
c_source = """
#include <string.h>
int f(char *args[]) {
char **p = args;
int l = 0;
while (*p) {
l += strlen(*p);
p++;
}
return (l);
}
"""
eci = ExternalCompilationInfo(separate_module_sources=[c_source])
z = llexternal('f', [CCHARPP], Signed, compilation_info=eci)
def f():
l = ["xxx", "x", "xxxx"]
ss = liststr2charpp(l)
result = z(ss)
free_charpp(ss)
return result
xf = self.compile(f, [], backendopt=False)
assert xf() == 8
def test_struct(self):
h_source = """
#ifndef _MY_SOURCE_H
#define _MY_SOURCE_H
struct xx {
int one;
char two;
int three;
};
#endif
"""
h_file = udir.join("structxx.h")
h_file.write(h_source)
c_source = """
#include <structxx.h>
int f(struct xx* z)
{
return (z->one + z->three);
}
"""
TP = CStructPtr('xx', ('one', INT), ('two', Char), ('three', INT))
eci = ExternalCompilationInfo(
includes=['structxx.h'],
include_dirs=[udir],
separate_module_sources=[c_source]
)
z = llexternal('f', [TP], INT, compilation_info=eci)
def f():
struct = lltype.malloc(TP.TO, flavor='raw')
struct.c_one = cast(INT, 3)
struct.c_two = '\x33'
struct.c_three = cast(INT, 5)
result = z(struct)
lltype.free(struct, flavor='raw')
return cast(SIGNED, result)
fn = self.compile(f, [], backendopt=False)
assert fn() == 8
def test_externvar(self):
import os
if os.name == 'nt':
# Windows CRT badly aborts when an invalid fd is used.
bad_fd = 0
else:
bad_fd = 12312312
def f():
set_errno(12)
return get_errno()
def g():
try:
os.write(bad_fd, "xxx")
except OSError:
pass
return get_errno()
fn = self.compile(f, [])
assert fn() == 12
gn = self.compile(g, [])
import errno
assert gn() == errno.EBADF
def test_extra_include_dirs(self):
udir.ensure("incl", dir=True)
udir.join("incl", "incl.h").write("#define C 3")
c_source = py.code.Source("""
#include <incl.h>
int fun ()
{
return (C);
}
""")
eci = ExternalCompilationInfo(
includes=['incl.h'],
include_dirs=[str(udir.join('incl'))],
separate_module_sources=[c_source]
)
z = llexternal('fun', [], Signed, compilation_info=eci)
def f():
return z()
res = self.compile(f, [])
assert res() == 3
def test_compile_cast(self):
def f(n):
return cast(SIZE_T, n)
f1 = self.compile(f, [int])
res = f1(-1)
assert res == r_size_t(-1)
def test_opaque_type(self):
h_source = py.code.Source("""
#ifndef _OPAQUE_H
#define _OPAQUE_H
struct stuff {
char data[38];
};
#endif /* _OPAQUE_H */
""")
c_source = py.code.Source("""
#include "opaque.h"
char get(struct stuff* x)
{
x->data[13] = 'a';
return x->data[13];
}
""")
# if it doesn't segfault, than we probably malloced it :-)
h_file = udir.join("opaque.h")
h_file.write(h_source)
from pypy.rpython.tool import rffi_platform
eci = ExternalCompilationInfo(
includes=['opaque.h'],
include_dirs=[str(udir)],
separate_module_sources=[c_source]
)
STUFFP = COpaquePtr('struct stuff', compilation_info=eci)
ll_get = llexternal('get', [STUFFP], CHAR, compilation_info=eci)
def f():
ll_stuff = lltype.malloc(STUFFP.TO, flavor='raw')
result = ll_get(ll_stuff)
lltype.free(ll_stuff, flavor='raw')
return result
f1 = self.compile(f, [])
assert f1() == 'a'
def test_opaque_typedef(self):
code = """
#include <stddef.h>
struct stuff;
typedef struct stuff *stuff_ptr;
static int get(stuff_ptr ptr) { return (ptr != NULL); }
"""
eci = ExternalCompilationInfo(
post_include_bits = [code]
)
STUFFP = COpaquePtr(typedef='stuff_ptr', compilation_info=eci)
ll_get = llexternal('get', [STUFFP], lltype.Signed,
compilation_info=eci)
def f():
return ll_get(lltype.nullptr(STUFFP.TO))
f1 = self.compile(f, [])
assert f1() == 0
def return_char(self, signed):
ctype_pref = ["un", ""][signed]
rffi_type = [UCHAR, SIGNEDCHAR][signed]
h_source = py.code.Source("""
%ssigned char returnchar(void)
{
return 42;
}
""" % (ctype_pref, ))
h_file = udir.join("opaque2%s.h" % (ctype_pref, ))
h_file.write(h_source)
from pypy.rpython.tool import rffi_platform
eci = ExternalCompilationInfo(
includes=[h_file.basename],
include_dirs=[str(udir)]
)
ll_returnchar = llexternal('returnchar', [], rffi_type, compilation_info=eci)
def f():
result = ll_returnchar()
return result
f1 = self.compile(f, [])
assert f1() == chr(42)
def test_generate_return_char_tests(self):
yield self.return_char, False
yield self.return_char, True
def test_prebuilt_constant(self):
py.test.skip("Think how to do it sane")
h_source = py.code.Source("""
int x = 3;
char** z = NULL;
#endif
""")
h_include = udir.join('constants.h')
h_include.write(h_source)
eci = ExternalCompilationInfo(includes=['stdio.h',
str(h_include.basename)],
include_dirs=[str(udir)])
get_x, set_x = CExternVariable(lltype.Signed, 'x', eci)
get_z, set_z = CExternVariable(CCHARPP, 'z', eci)
def f():
one = get_x()
set_x(13)
return one + get_x()
def g():
l = liststr2charpp(["a", "b", "c"])
try:
set_z(l)
return charp2str(get_z()[2])
finally:
free_charpp(l)
fn = self.compile(f, [])
assert fn() == 16
gn = self.compile(g, [])
assert gn() == "c"
def eating_callback(self):
h_source = py.code.Source("""
#ifndef _CALLBACK_H
#define _CALLBACK_H
extern Signed eating_callback(Signed arg, Signed(*call)(Signed));
#endif /* _CALLBACK_H */
""")
h_include = udir.join('callback.h')
h_include.write(h_source)
c_source = py.code.Source("""
Signed eating_callback(Signed arg, Signed(*call)(Signed))
{
Signed res = call(arg);
if (res == -1)
return -1;
return res;
}
""")
eci = ExternalCompilationInfo(includes=['callback.h'],
include_dirs=[str(udir)],
separate_module_sources=[c_source],
export_symbols=['eating_callback'])
args = [SIGNED, CCallback([SIGNED], SIGNED)]
eating_callback = llexternal('eating_callback', args, SIGNED,
compilation_info=eci)
return eating_callback
def test_c_callback(self):
eating_callback = self.eating_callback()
def g(i):
return i + 3
def f():
return eating_callback(3, g)
fn = self.compile(f, [])
assert fn() == 6
assert eating_callback._ptr._obj._callbacks.callbacks == {g: True}
def test_double_callback(self):
eating_callback = self.eating_callback()
def one(i):
return i
def two(i):
return i + 2
def f(i):
if i > 3:
return eating_callback(i, one)
else:
return eating_callback(i, two)
fn = self.compile(f, [int])
assert fn(4) == 4
assert fn(1) == 3
assert eating_callback._ptr._obj._callbacks.callbacks == {one: True,
two: True}
def test_exception_callback(self):
eating_callback = self.eating_callback()
def raising(i):
if i > 3:
raise ValueError
else:
return 3
raising._errorcode_ = -1
def f(i):
return eating_callback(i, raising)
fn = self.compile(f, [int])
assert fn(13) == -1
def test_callback_already_llptr(self):
eating_callback = self.eating_callback()
def g(i):
return i + 3
G = lltype.Ptr(lltype.FuncType([lltype.Signed], lltype.Signed))
def f():
return eating_callback(3, llhelper(G, g))
fn = self.compile(f, [])
assert fn() == 6
def test_pass_opaque_pointer_via_callback(self):
eating_callback = self.eating_callback()
TP = lltype.Ptr(lltype.GcStruct('X', ('x', lltype.Signed)))
struct = lltype.malloc(TP.TO) # gc structure
struct.x = 8
def g(i):
return get_keepalive_object(i, TP).x
pos = register_keepalive(struct)
assert _keeper_for_type(TP).stuff_to_keepalive[pos] is struct
del struct
res = eating_callback(pos, g)
unregister_keepalive(pos, TP)
assert res == 8
def test_nonmoving(self):
d = 'non-moving data stuff'
def f():
raw_buf, gc_buf = alloc_buffer(len(d))
try:
for i in range(len(d)):
raw_buf[i] = d[i]
return str_from_buffer(raw_buf, gc_buf, len(d), len(d)-1)
finally:
keep_buffer_alive_until_here(raw_buf, gc_buf)
assert f() == d[:-1]
fn = self.compile(f, [], gcpolicy='ref')
assert fn() == d[:-1]
def test_nonmoving_unicode(self):
d = u'non-moving data'
def f():
raw_buf, gc_buf = alloc_unicodebuffer(len(d))
try:
for i in range(len(d)):
raw_buf[i] = d[i]
return unicode_from_buffer(raw_buf, gc_buf, len(d), len(d)-1)
finally:
keep_unicodebuffer_alive_until_here(raw_buf, gc_buf)
assert f() == d[:-1]
fn = self.compile(f, [], gcpolicy='ref')
assert fn() == d[:-1]
def test_nonmovingbuffer(self):
d = 'some cool data that should not move'
def f():
buf = get_nonmovingbuffer(d)
try:
counter = 0
for i in range(len(d)):
if buf[i] == d[i]:
counter += 1
return counter
finally:
free_nonmovingbuffer(d, buf)
assert f() == len(d)
fn = self.compile(f, [], gcpolicy='ref')
assert fn() == len(d)
def test_nonmovingbuffer_semispace(self):
d = 'cool data'
def f():
counter = 0
for n in range(32):
buf = get_nonmovingbuffer(d)
try:
for i in range(len(d)):
if buf[i] == d[i]:
counter += 1
finally:
free_nonmovingbuffer(d, buf)
return counter
fn = self.compile(f, [], gcpolicy='semispace')
# The semispace gc uses raw_malloc for its internal data structs
# but hopefully less than 30 times. So we should get < 30 leaks
# unless the get_nonmovingbuffer()/free_nonmovingbuffer() pair
# leaks at each iteration. This is what the following line checks.
res = fn(expected_extra_mallocs=range(30))
assert res == 32 * len(d)
class TestRffiInternals:
def test_struct_create(self):
X = CStruct('xx', ('one', INT))
def f():
p = make(X, c_one=cast(INT, 3))
res = p.c_one
lltype.free(p, flavor='raw')
return cast(SIGNED, res)
assert f() == 3
assert interpret(f, []) == 3
def test_structcopy(self):
X2 = lltype.Struct('X2', ('x', SIGNED))
X1 = lltype.Struct('X1', ('a', SIGNED), ('x2', X2), ('p', lltype.Ptr(X2)))
def f():
p2 = make(X2, x=123)
p1 = make(X1, a=5, p=p2)
p1.x2.x = 456
p1bis = make(X1)
p2bis = make(X2)
structcopy(p1bis, p1)
assert p1bis.a == 5
assert p1bis.x2.x == 456
assert p1bis.p == p2
structcopy(p2bis, p2)
res = p2bis.x
lltype.free(p2bis, flavor='raw')
lltype.free(p1bis, flavor='raw')
lltype.free(p2, flavor='raw')
lltype.free(p1, flavor='raw')
return res
assert f() == 123
res = interpret(f, [])
assert res == 123
def test_make_annotation(self):
X = CStruct('xx', ('one', INT))
def f():
p = make(X)
try:
q = make(X)
lltype.free(q, flavor='raw')
finally:
lltype.free(p, flavor='raw')
return 3
assert interpret(f, []) == 3
def test_implicit_cast(self):
z = llexternal('z', [USHORT, ULONG, USHORT, DOUBLE], USHORT,
sandboxsafe=True) # to allow the wrapper to be inlined
def f(x, y, xx, yy):
return z(x, y, xx, yy)
a = RPythonAnnotator()
r = a.build_types(f, [int, int, int, int])
rtyper = RPythonTyper(a)
rtyper.specialize()
a.translator.rtyper = rtyper
backend_optimizations(a.translator)
if option.view:
a.translator.view()
graph = graphof(a.translator, f)
s = summary(graph)
# there should be not too many operations here by now
expected = {'force_cast': 3, 'cast_int_to_float': 1, 'direct_call': 1}
for k, v in expected.items():
assert s[k] == v
def test_stringpolicy1(self):
eci = ExternalCompilationInfo(includes=['string.h'])
strlen = llexternal('strlen', [CCHARP], SIZE_T, compilation_info=eci)
def f():
return cast(SIGNED, strlen("Xxx"))
assert interpret(f, [], backendopt=True) == 3
def test_stringpolicy3(self):
eci = ExternalCompilationInfo(includes=['string.h'])
strlen = llexternal('strlen', [CCHARP], INT, compilation_info=eci)
def f():
ll_str = str2charp("Xxx")
res = strlen(ll_str)
lltype.free(ll_str, flavor='raw')
return res
assert interpret(f, [], backendopt=True) == 3
def test_stringpolicy_mixed(self):
eci = ExternalCompilationInfo(includes=['string.h'])
strlen = llexternal('strlen', [CCHARP], SIZE_T,
compilation_info=eci)
def f():
res1 = strlen("abcd")
ll_str = str2charp("Xxx")
res2 = strlen(ll_str)
lltype.free(ll_str, flavor='raw')
return cast(SIGNED, res1*10 + res2)
assert interpret(f, [], backendopt=True) == 43
def test_around_extcall(self):
if sys.platform == "win32":
py.test.skip('No pipes on windows')
import os
from pypy.annotation import model as annmodel
from pypy.rlib.objectmodel import invoke_around_extcall
from pypy.rpython.extfuncregistry import register_external
read_fd, write_fd = os.pipe()
try:
# we need an external function that is not going to get wrapped around
# before()/after() calls, in order to call it from before()/after()...
def mywrite(s):
os.write(write_fd, s)
def llimpl(s):
s = ''.join(s.chars)
os.write(write_fd, s)
register_external(mywrite, [str], annmodel.s_None, 'll_mywrite',
llfakeimpl=llimpl, sandboxsafe=True)
def before():
mywrite("B")
def after():
mywrite("A")
def f():
os.write(write_fd, "-")
invoke_around_extcall(before, after)
os.write(write_fd, "E")
interpret(f, [])
data = os.read(read_fd, 99)
assert data == "-BEA"
finally:
os.close(write_fd)
os.close(read_fd)
def test_external_callable(self):
""" Try to call some llexternal function with llinterp
"""
z = llexternal('z', [Signed], Signed, _callable=lambda x:x+1)
def f():
return z(2)
res = interpret(f, [])
assert res == 3
def test_size_t_sign(self):
assert r_size_t(-1) > 0
def test_cast(self):
res = cast(SIZE_T, -1)
assert type(res) is r_size_t
assert res == r_size_t(-1)
#
res = cast(lltype.Signed, 42.5)
assert res == 42
res = cast(lltype.SingleFloat, 12.3)
assert res == r_singlefloat(12.3)
res = cast(lltype.SingleFloat, res)
assert res == r_singlefloat(12.3)
res = cast(lltype.Float, r_singlefloat(12.))
assert res == 12.
def test_rffi_sizeof(self):
try:
import ctypes
except ImportError:
py.test.skip("Cannot test without ctypes")
cache = {
lltype.Signed: ctypes.c_long,
lltype.Unsigned: ctypes.c_ulong,
lltype.UniChar: ctypes.c_wchar,
lltype.Char: ctypes.c_ubyte,
DOUBLE: ctypes.c_double,
FLOAT: ctypes.c_float,
SIGNEDCHAR: ctypes.c_byte,
UCHAR: ctypes.c_ubyte,
SHORT: ctypes.c_short,
USHORT: ctypes.c_ushort,
INT: ctypes.c_int,
UINT: ctypes.c_uint,
LONG: ctypes.c_long,
ULONG: ctypes.c_ulong,
LONGLONG: ctypes.c_longlong,
ULONGLONG: ctypes.c_ulonglong,
SIZE_T: ctypes.c_size_t,
}
for ll, ctp in cache.items():
assert sizeof(ll) == ctypes.sizeof(ctp)
assert sizeof(lltype.Typedef(ll, 'test')) == sizeof(ll)
assert not size_and_sign(lltype.Signed)[1]
assert size_and_sign(lltype.Char) == (1, True)
assert size_and_sign(lltype.UniChar)[1]
assert size_and_sign(UINT)[1]
assert not size_and_sign(INT)[1]
def test_rffi_offsetof(self):
import struct
from pypy.rpython.tool import rffi_platform
S = rffi_platform.getstruct("struct S",
"""
struct S {
short a;
int b, c;
}; """,
[("a", INT),
("b", INT),
("c", INT)])
assert sizeof(S) == struct.calcsize("hii")
assert offsetof(S, "c_a") == 0
assert offsetof(S, "c_b") == struct.calcsize("hi") - struct.calcsize("i")
assert offsetof(S, "c_c") == struct.calcsize("hii") - struct.calcsize("i")
# Unsized ('nolength') raw char array type shared by the ptradd tests below.
ARRAY_OF_CHAR = lltype.Array(CHAR, hints={'nolength': True})
def test_ptradd():
    # ptradd must yield a pointer of the same array type, aliasing the
    # original storage shifted by two elements (no copy).
    data = "hello, world!"
    a = lltype.malloc(ARRAY_OF_CHAR, len(data), flavor='raw')
    for i in xrange(len(data)):
        a[i] = data[i]
    a2 = ptradd(a, 2)
    assert lltype.typeOf(a2) == lltype.typeOf(a) == lltype.Ptr(ARRAY_OF_CHAR)
    for i in xrange(len(data) - 2):
        assert a2[i] == a[i + 2]
    lltype.free(a, flavor='raw')
def test_ptradd_interpret():
    # Same check as test_ptradd, but run through the ll interpreter.
    interpret(test_ptradd, [])
def test_voidptr():
    # VOIDP should render as an array-of-void pointer.
    assert repr(VOIDP) == "<* Array of void >"
class TestCRffi(BaseTestRffi):
    """Run the shared rffi tests through the C backend (compile_c)."""

    def compile(self, func, args, **kwds):
        return compile_c(func, args, **kwds)

    def test_generate_return_char_tests(self):
        py.test.skip("GenC does not handle char return values correctly")
def test_enforced_args():
    """Mix-level annotating a second graph after rtyping must not crash."""
    from pypy.annotation.model import s_None
    from pypy.rpython.annlowlevel import MixLevelHelperAnnotator
    from pypy.translator.interactive import Translation
    def f1():
        str2charp("hello")
    def f2():
        str2charp("world")
    t = Translation(f1, [])
    t.rtype()
    # Annotate f2 (which reuses str2charp) on top of the rtyped translation.
    mixann = MixLevelHelperAnnotator(t.context.rtyper)
    mixann.getgraph(f2, [], s_None)
    mixann.finish()
def test_force_cast_unichar():
    """Casting -1 to UniChar wraps; the result depends on unicode width."""
    x = cast(lltype.UniChar, -1)
    assert isinstance(x, unicode)
    if sys.maxunicode == 65535:
        # Narrow unicode build: UniChar is 16 bits wide.
        assert cast(LONG, x) == 65535
    else:
        # Wide build: go through INT to recover the signed -1.
        assert cast(LONG, cast(INT, x)) == -1
| 31.406634 | 85 | 0.515549 |
61d265a45d2223525939a758eb1d9f77efe62fd6 | 3,599 | py | Python | project/boh/reports.py | j2yemi/dso-demo1 | c489d20494b0cbeb1a4c5d5605216c06c625f39a | [
"Apache-2.0"
] | 109 | 2016-05-23T15:00:27.000Z | 2022-02-16T17:15:53.000Z | project/boh/reports.py | j2yemi/dso-demo1 | c489d20494b0cbeb1a4c5d5605216c06c625f39a | [
"Apache-2.0"
] | 116 | 2021-03-29T16:45:28.000Z | 2022-03-31T22:11:07.000Z | project/boh/reports.py | j2yemi/dso-demo1 | c489d20494b0cbeb1a4c5d5605216c06c625f39a | [
"Apache-2.0"
] | 45 | 2016-05-31T20:16:24.000Z | 2022-03-08T14:05:55.000Z | from django.conf import settings
from django.http import HttpResponse
from django.template import loader, Context
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from . import models
class Report(object):
    """Base class used for generating downloadable reports.

    Subclasses implement generate(); this class carries the common
    metadata and turns the generated payload into an HTTP response.
    """

    def __init__(self, report_type, file_name, file_format, requester):
        self.report_type = report_type
        self.file_name = file_name
        self.file_format = file_format
        self.requester = requester
        # Unknown formats raise KeyError, same as the original contract.
        self.content_type = {
            'csv': 'text/csv',
            'html': 'text/html'
        }[file_format]

    def __str__(self):
        return '%s: %s.%s' % (self.report_type, self.file_name, self.file_format)

    def generate(self):
        # Subclasses must produce the report body.
        raise NotImplementedError(_('Subclasses must override generate()'))

    def response(self):
        """Wrap the generated report in an HttpResponse (attachment in prod)."""
        resp = HttpResponse(content_type=self.content_type)
        if not settings.DEBUG:
            resp['Content-Disposition'] = 'attachment; filename="%s.%s"' % (self.file_name, self.file_format)
        resp.write(self.generate())
        return resp
class EngagementCoverageReport(Report):
    """Report describing engagement coverage across organizations."""

    def __init__(self, file_name, file_format, organizations, requester):
        super(EngagementCoverageReport, self).__init__(_('Engagement Coverage Report'), file_name, file_format, requester)
        self.organizations = organizations

    def generate(self):
        """Render the report body, defaulting to every organization."""
        if not self.organizations:
            # No explicit selection: report on all organizations.
            self.organizations = models.Organization.objects.all()
        if self.file_format != 'html':
            # Non-HTML output is not implemented yet (placeholder payload).
            return 'test, test'
        tmpl = loader.get_template('boh/reports/engagement_coverage.html')
        ctx = Context({
            'current_datetime': timezone.now(),
            'requester': self.requester,
            'organizations': self.organizations,
        })
        return tmpl.render(ctx)
class ThreadFixSummaryReport(Report):
    """Report summarizing ThreadFix data per organization."""

    def __init__(self, file_name, file_format, organizations, requester):
        super(ThreadFixSummaryReport, self).__init__(_('ThreadFix Summary Report'), file_name, file_format, requester)
        self.organizations = organizations

    def generate(self):
        """Render the report body, defaulting to every organization."""
        if not self.organizations:
            # No explicit selection: report on all organizations.
            self.organizations = models.Organization.objects.all()
        if self.file_format != 'html':
            # Non-HTML output is not implemented yet (placeholder payload).
            return 'test, test'
        tmpl = loader.get_template('boh/reports/threadfix_summary.html')
        ctx = Context({
            'current_datetime': timezone.now(),
            'requester': self.requester,
            'organizations': self.organizations,
        })
        return tmpl.render(ctx)
class AppSummaryReport(Report):
    """Report summarizing a set of applications."""

    def __init__(self, file_name, file_format, applications, requester):
        super(AppSummaryReport, self).__init__(_('Application Summary Report'), file_name, file_format, requester)
        self.applications = applications

    def generate(self):
        """Render the report body, defaulting to every application."""
        if not self.applications:
            # No explicit selection: report on all applications.
            self.applications = models.Application.objects.all()
        if self.file_format != 'html':
            # Non-HTML output is not implemented yet (placeholder payload).
            return 'test, test'
        tmpl = loader.get_template('boh/reports/app_summary.html')
        ctx = Context({
            'current_datetime': timezone.now(),
            'requester': self.requester,
            'applications': self.applications,
        })
        return tmpl.render(ctx)
| 34.605769 | 122 | 0.641011 |
22aee79ebccba1ed26e13c3be55f8233d26c978b | 1,201 | py | Python | server/src/posts/migrations/0013_auto_20180829_1605.py | guptachetan1997/SnapShare | c6883ca21d6f86aad025829a49a5d845893cdd9b | [
"MIT"
] | 13 | 2018-03-23T12:38:59.000Z | 2021-09-28T07:27:05.000Z | server/src/posts/migrations/0013_auto_20180829_1605.py | guptachetan1997/SnapShare | c6883ca21d6f86aad025829a49a5d845893cdd9b | [
"MIT"
] | 17 | 2020-01-28T22:21:54.000Z | 2022-01-13T00:44:01.000Z | server/src/posts/migrations/0013_auto_20180829_1605.py | guptachetan1997/SnapShare | c6883ca21d6f86aad025829a49a5d845893cdd9b | [
"MIT"
] | 6 | 2018-08-29T17:01:31.000Z | 2021-08-13T01:40:21.000Z | # Generated by Django 2.0.1 on 2018-08-29 10:35
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add Tag and PostTagBridge models and link bridge rows to tags.

    Auto-generated migration; the operations should not be edited by hand.
    """

    dependencies = [
        ('posts', '0012_auto_20180328_0915'),
    ]

    operations = [
        # Bridge table joining posts to tags (hand-rolled many-to-many).
        migrations.CreateModel(
            name='PostTagBridge',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now=True)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_post', to='posts.Post')),
            ],
        ),
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=100)),
            ],
        ),
        # The tag FK is added separately because Tag is created after the bridge.
        migrations.AddField(
            model_name='posttagbridge',
            name='tag',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tag_name', to='posts.Tag'),
        ),
    ]
| 34.314286 | 134 | 0.592007 |
a02c28492b15da6fa3034ff59497b5b25f355cfe | 12,060 | py | Python | elobot.py | samm81/slack-elobot | 01cf66f24d1ab1573888d9ba5275bfb655cbbff8 | [
"MIT"
] | null | null | null | elobot.py | samm81/slack-elobot | 01cf66f24d1ab1573888d9ba5275bfb655cbbff8 | [
"MIT"
] | 10 | 2018-06-27T17:11:23.000Z | 2018-09-06T18:44:28.000Z | elobot.py | samm81/slack-elobot | 01cf66f24d1ab1573888d9ba5275bfb655cbbff8 | [
"MIT"
] | 2 | 2018-06-29T07:47:46.000Z | 2018-08-15T02:43:55.000Z | import time
import json
import re
from slackclient import SlackClient
from tabulate import tabulate
from peewee import *
from datetime import datetime
from dateutil import tz
from itertools import takewhile
from collections import defaultdict
from models import db, Player, Match
# Matches a Slack mention like "<@U12345>" and captures the user id.
# FIX: was '[A-z0-9]' -- the A-z range accidentally also matched the ASCII
# characters between 'Z' and 'a' ('[', ']', '^', '_', '`', '\').
HANDLE_RE = '<@([A-Za-z0-9]*)>'

# We allow for an optional backdoor that allows any user to run any command
# Good for debugging
BACKDOOR_ENABLED = False
BACKDOOR_REGEX = re.compile(f'As {HANDLE_RE}:? (.*)', re.IGNORECASE)

# Verbs accepted in a match-report message ("I beat <@X> 5-3", ...).
# The terms must stay flush-left inside the triple-quoted string so that
# split('\n') yields clean words for the regex alternation below.
BEAT_TERMS = (''
'''crushed
rekt
beat
whooped
destroyed
smashed
demolished
decapitated
smothered
creamed''').split('\n')

# Raw strings so '\d' is a regex digit class, not a (deprecated as of
# Python 3.12) invalid string escape sequence.
WINNER_REGEX = re.compile(r'I (?:{}) {} (\d+) ?- ?(\d+)'.format("|".join(BEAT_TERMS), HANDLE_RE), re.IGNORECASE)
CONFIRM_REGEX = re.compile(r'Confirm (\d+)', re.IGNORECASE)
CONFIRM_ALL_REGEX = re.compile('Confirm all', re.IGNORECASE)
DELETE_REGEX = re.compile(r'Delete (\d+)', re.IGNORECASE)
LEADERBOARD_REGEX = re.compile('Print leaderboard', re.IGNORECASE)
UNCONFIRMED_REGEX = re.compile('Print unconfirmed', re.IGNORECASE)

# Match timestamps are stored in UTC and displayed in Pacific time.
from_zone = tz.gettz('UTC')
to_zone = tz.gettz('America/Los_Angeles')
class SlackClient(SlackClient):
    """Thin wrapper over slackclient's SlackClient adding lookup helpers."""

    def is_bot(self, user_id):
        """Return True if the given Slack user id belongs to a bot."""
        return self.api_call('users.info', user=user_id)['user']['is_bot']

    def get_name(self, user_id):
        """Return the user's normalized display name."""
        return self.api_call('users.info', user=user_id)['user']['profile']['display_name_normalized']

    def get_channel_id(self, channel_name):
        """Resolve a channel name to its id, paging through channels.list.

        NOTE(review): exits the whole process via quit() when the channel
        is not found -- acceptable for this CLI bot, surprising elsewhere.
        """
        next_cursor = None
        while True:
            channels = self.api_call('channels.list', limit=1000, cursor=next_cursor)
            for channel in channels['channels']:
                if channel['name'] == channel_name:
                    return channel['id']
            # Follow the pagination cursor until Slack returns an empty one.
            next_cursor = channels['response_metadata']['next_cursor']
            if not next_cursor:
                break
        print('Unable to find channel: ' + channel_name)
        quit()
class EloBot(object):
    """Slack bot that maintains an ELO ladder from match-report messages."""

    # Ratings keyed by Slack handle; the defaultdict creates a fresh Player
    # on first access. NOTE(review): class-level attribute, shared by all
    # EloBot instances -- fine here since only one bot is ever created.
    players = defaultdict(Player)

    def __init__(self, slack_client, channel_id, name, min_streak_len):
        self.name = name
        self.slack_client = slack_client
        self.min_streak_len = min_streak_len
        self.channel_id = channel_id
        self.last_ping = 0
        self.init_players()
        self.ensure_connected()
        self.run()  # blocks forever processing channel messages

    def rank_game(self, winner, loser):
        """Update both players' rating/W-L in place; return the elo deltas."""
        # From https://metinmediamath.wordpress.com/2013/11/27/how-to-calculate-the-elo-rating-including-example/
        winner_transformed_rating = 10 ** (winner.rating / 400.0)
        loser_transformed_rating = 10 ** (loser.rating / 400.0)
        winner_expected_score = winner_transformed_rating / (winner_transformed_rating + loser_transformed_rating)
        loser_expected_score = loser_transformed_rating / (winner_transformed_rating + loser_transformed_rating)
        winner_new_elo = round(winner.rating + winner.k_factor * (1 - winner_expected_score))
        loser_new_elo = round(loser.rating + loser.k_factor * (0 - loser_expected_score))
        winner_elo_delta = winner_new_elo - winner.rating
        loser_elo_delta = loser_new_elo - loser.rating
        winner.wins += 1
        loser.losses += 1
        winner.rating = winner_new_elo
        loser.rating = loser_new_elo
        return winner_elo_delta, loser_elo_delta

    def apply_match(self, match):
        """
        Apply a match to the ranking system, changing winner and loser's elos.
        Return (winner elo delta, loser elo delta)
        """
        if not match.pending:
            raise ValueError("Match must be pending to apply.")
        with db.transaction():
            winner = self.players[match.winner_handle]
            loser = self.players[match.loser_handle]
            winner_elo_delta, loser_elo_delta = self.rank_game(winner, loser)
            match.pending = False
            match.save()
        return (winner_elo_delta, loser_elo_delta)

    def init_players(self):
        """Replay all confirmed matches to rebuild the in-memory ratings."""
        matches = list(Match.select().order_by(Match.played.asc()))
        for match in matches:
            print('Recapping match: {}'.format(match.__dict__))
            if not match.pending:
                winner = self.players[match.winner_handle]
                loser = self.players[match.loser_handle]
                self.rank_game(winner, loser)
        print('Player initialization complete.')

    def ensure_connected(self):
        """Reconnect to Slack RTM with exponential back-off if disconnected."""
        sleeptime = 0.1
        while not self.slack_client.server.connected:
            print('Was disconnected, attemping to reconnect...')
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit and made the bot unkillable here.
            try:
                self.slack_client.rtm_connect(auto_reconnect=True, with_team_state=False)
            except Exception:  # TODO: narrow to the slackclient/socket errors
                pass
            time.sleep(sleeptime)
            sleeptime = min(30, sleeptime * 2)  # Exponential back off with a max wait of 30s

    def heartbeat(self):
        """Send a heartbeat if necessary"""
        now = int(time.time())
        if now > self.last_ping + 3:
            self.slack_client.server.ping()
            self.last_ping = now

    def talk(self, message):
        """Send a message to the Slack channel"""
        self.slack_client.api_call('chat.postMessage', channel=self.channel_id, text=message, username=self.name)

    def talk_to(self, user_id, message):
        """Mention user_id and send the message (first letter lowered)."""
        message = message[0].lower() + message[1:]
        self.talk(f'<@{user_id}>, {message}')

    def run(self):
        """Main loop: poll the RTM stream and dispatch channel messages."""
        while True:
            time.sleep(0.1)
            self.heartbeat()
            self.ensure_connected()
            messages = self.slack_client.rtm_read()
            for message in messages:
                if 'user' in message and message.get('type', False) == 'message' and message.get('channel', False) == self.channel_id and message.get('text', False):
                    self.handle_message(message)

    def handle_message(self, message):
        """Route a channel message to the matching command handler."""
        print(f'Message received:\n{message}')
        text = message['text']
        user_handle = message['user']
        if BACKDOOR_ENABLED and BACKDOOR_REGEX.match(text):
            # Debug backdoor: re-dispatch the message as another user.
            new_user_handle, new_text = re.search(BACKDOOR_REGEX, text).groups()
            return self.handle_message({
                'user': new_user_handle,
                'text': new_text
            })
        if WINNER_REGEX.match(text):
            loser_handle, winner_score, loser_score = re.search(WINNER_REGEX, text).groups()
            self.winner(user_handle, loser_handle, int(winner_score), int(loser_score))
        elif CONFIRM_REGEX.match(text):
            match_id, = re.search(CONFIRM_REGEX, text).groups()
            self.confirm(user_handle, match_id)
        elif CONFIRM_ALL_REGEX.match(text):
            self.confirm_all(user_handle)
        elif DELETE_REGEX.match(text):
            match_id, = re.search(DELETE_REGEX, text).groups()
            self.delete(user_handle, match_id)
        elif LEADERBOARD_REGEX.match(text):
            self.print_leaderboard()
        elif UNCONFIRMED_REGEX.match(text):
            self.print_unconfirmed()

    def get_match(self, match_id):
        """Get a match or say an error and return None"""
        try:
            match = Match.select(Match).where(Match.id == match_id).get()
        except DoesNotExist:
            self.talk(f'No match #{match_id}!')
        else:
            return match

    def get_pending(self, match_id):
        """Get a pending match or say an error and return None"""
        match = self.get_match(match_id)
        if not match:
            return None
        if not match.pending:
            self.talk(f'Match #{match_id} is not pending!')
            return None
        return match

    def winner(self, winner_handle, loser_handle, winner_score, loser_score):
        """Record a reported (still unconfirmed) match result."""
        if winner_handle == loser_handle:
            self.talk(f'Winner and loser must be different people!')
        else:
            try:
                match = Match.create(winner_handle=winner_handle, winner_score=winner_score, loser_handle=loser_handle, loser_score=loser_score)
            except OverflowError:
                self.talk('Score(s) too large!')
            else:
                self.talk_to(loser_handle, f'Type "Confirm {match.id}" to confirm the above match, or ignore it if it\'s incorrect.')

    def confirm_all(self, user_handle):
        """Confirm every match the user lost, then report net elo changes."""
        matches = (Match.select(Match)
                   .where(Match.loser_handle == user_handle, Match.pending == True)
                   .order_by(Match.played.asc()))
        total_elo_deltas = defaultdict(lambda: 0)
        for match in matches:
            winner_elo_delta, loser_elo_delta = self.apply_match(match)
            total_elo_deltas[match.winner_handle] += winner_elo_delta
            total_elo_deltas[match.loser_handle] += loser_elo_delta
        self.talk(f'Confirmed {len(matches)} matches!')
        for user_handle, elo_delta in total_elo_deltas.items():
            self.talk_to(user_handle, 'Your new ELO is {} ({}{}).'.format(
                self.players[user_handle].rating,
                '+' if elo_delta >= 0 else '',
                elo_delta,
            ))

    def confirm(self, user_handle, match_id):
        """Confirm a single match; only the reported loser may do so."""
        match = self.get_pending(match_id)
        if not match:
            return
        if match.loser_handle != user_handle:
            self.talk_to(user_handle, f'You are not allowed to confirm match #{match_id}!')
            return
        winner_elo_delta, loser_elo_delta = self.apply_match(match)
        self.talk_to(match.winner_handle, f'Your new ELO is {self.players[match.winner_handle].rating} (+{winner_elo_delta}).')
        self.talk_to(match.loser_handle , f'Your new ELO is {self.players[match.loser_handle ].rating} ({loser_elo_delta }).')

    def delete(self, user_handle, match_id):
        """Delete a pending match; only the reporting winner may do so."""
        match = self.get_pending(match_id)
        if not match:
            return
        if match.winner_handle != user_handle:
            self.talk_to(user_handle, f'You are not allowed to delete match #{match_id}!')
            # BUG FIX: previously fell through and deleted the match anyway,
            # letting any user delete someone else's pending match.
            return
        match.delete_instance()
        self.talk(f'Deleted match #{match_id}.')

    def print_leaderboard(self):
        """Post a table of players sorted by rating, highest first."""
        table = []
        for slack_handle, player in sorted(self.players.items(), key=lambda p: p[1].rating, reverse=True):
            win_streak = self.get_win_streak(slack_handle)
            streak_text = '(won {} in a row)'.format(win_streak) if win_streak >= self.min_streak_len else ''
            table.append([self.slack_client.get_name(slack_handle), player.rating, player.wins, player.losses, streak_text])
        self.talk('```' + tabulate(table, headers=['Name', 'ELO', 'Wins', 'Losses', 'Streak']) + '```')

    def print_unconfirmed(self):
        """Post the 25 most recent pending matches (times shown in Pacific)."""
        table = []
        for match in Match.select().where(Match.pending == True).order_by(Match.played.desc()).limit(25):
            match_played_utc = match.played.replace(tzinfo=from_zone)
            match_played_pst = match_played_utc.astimezone(to_zone)
            table.append([
                match.id,
                self.slack_client.get_name(match.loser_handle),
                self.slack_client.get_name(match.winner_handle),
                '{} - {}'.format(match.winner_score, match.loser_score),
                match_played_pst.strftime('%m/%d/%y %I:%M %p')
            ])
        self.talk('```' + tabulate(table, headers=['Match', 'Needs to Confirm', 'Opponent', 'Score', 'Date']) + '```')

    def get_win_streak(self, player_slack_id):
        """Return the player's current streak of consecutive wins."""
        # (removed a dead `win_streak = 0` local that was never used)
        matches = Match.select().where(Match.pending == False, (player_slack_id == Match.winner_handle) | (player_slack_id == Match.loser_handle)).order_by(Match.played.desc())
        return len(list(takewhile(lambda m: m.winner_handle == player_slack_id, matches)))
if __name__ == '__main__':
    # Bootstrap: read config, connect to Slack and the database, then run
    # the bot (the EloBot constructor blocks in its message loop).
    with open('config.json') as config_data:
        config = json.load(config_data)
    slack_client = SlackClient(config['slack_token'])
    db.connect()
    Match.create_table()
    EloBot(
        slack_client,
        slack_client.get_channel_id(config['channel']),
        config['bot_name'],
        config['min_streak_length'],
    )
| 38.653846 | 176 | 0.631758 |
59196f85536f459b595dd295045bfe331a29502b | 372,196 | py | Python | tests/test_constraints.py | jbarberia/PFNET.py | a2f327d84401acc63fd4ce91c99086770ce72a6b | [
"BSD-2-Clause"
] | 3 | 2018-03-21T11:54:38.000Z | 2020-12-29T16:46:14.000Z | tests/test_constraints.py | jbarberia/PFNET.py | a2f327d84401acc63fd4ce91c99086770ce72a6b | [
"BSD-2-Clause"
] | 23 | 2018-03-29T00:42:06.000Z | 2021-01-05T19:15:05.000Z | tests/test_constraints.py | ttinoco/PFNET.py | 3a6845d132ddba6053fc84b1099597d99c0ac5e2 | [
"BSD-2-Clause"
] | 5 | 2018-10-01T19:05:11.000Z | 2020-05-27T06:19:11.000Z | #***************************************************#
# This file is part of PFNET. #
# #
# Copyright (c) 2015, Tomas Tinoco De Rubira. #
# #
# PFNET is released under the BSD 2-clause license. #
#***************************************************#
import os
import unittest
import pfnet as pf
import numpy as np
from . import test_cases
from numpy.linalg import norm
from scipy.sparse import coo_matrix,triu,tril,eye
# Parameters passed to the pf.tests.utils derivative checkers below.
NUM_TRIALS = 25  # number of random trials per check
EPS = 5.0 # % -- allowed relative error for finite-difference comparisons
TOL = 1e-4  # absolute tolerance for small values
class TestConstraints(unittest.TestCase):
def setUp(self):
    """Common fixture: two time periods and a fixed random seed."""
    # Network
    self.T = 2
    # Random
    np.random.seed(0)
def test_constr_FACTS_EQ(self):
    """Test the nonlinear 'FACTS equations' constraint (f, J, A, b, Hessians)."""
    # Constants
    h = 1e-8  # finite-difference step for the derivative checkers
    # Multiperiods
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        # Vars
        net.set_flags('bus',
                      'variable',
                      'any',
                      ['voltage magnitude', 'voltage angle'])
        net.set_flags('facts',
                      'variable',
                      'any',
                      'all')
        self.assertEqual(net.num_vars, (2*net.num_buses+9*net.num_facts)*self.T)
        # Perturbed starting point so derivatives are checked off the solution.
        x0 = net.get_var_values()+1e-4*np.random.randn(net.num_vars)
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        # Constraint
        constr = pf.Constraint('FACTS equations',net)
        self.assertEqual(constr.name,'FACTS equations')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # Before: everything must be empty prior to analyze().
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,0)
        self.assertEqual(constr.A_row,0)
        self.assertEqual(constr.num_extra_vars,0)
        num_statcom = len([f for f in net.facts if f.is_STATCOM()])
        num_SSSC = len([f for f in net.facts if f.is_SSSC()])
        num_UPFC = len([f for f in net.facts if f.is_UPFC()])
        num_seriesenabled = len([f for f in net.facts if f.is_in_normal_series_mode()])
        num_seriesdisabled = len([f for f in net.facts if f.is_series_link_disabled()])
        # Verify analyze: predicted structure counts per device category.
        Jnnz = 28*num_seriesenabled;
        rowsJ = 4*num_seriesenabled
        rowsA = 2*net.num_facts
        Annz = 7*net.num_facts
        for facts in net.facts:
            if not facts.is_regulator():
                rowsA = rowsA+1
                Annz = Annz+1
            if facts.P_max_dc == 0 or facts.is_series_link_disabled():
                rowsA = rowsA+1
                Annz = Annz+1
            if facts.is_series_link_disabled():
                rowsA = rowsA+5
                Annz = Annz+5
        constr.analyze()
        self.assertEqual(constr.J_nnz, Jnnz*self.T)
        self.assertEqual(constr.A_nnz, Annz*self.T)
        self.assertEqual(constr.J_row, rowsJ*self.T)
        self.assertEqual(constr.A_row, rowsA*self.T)
        y_init = constr.init_extra_vars
        self.assertEqual(y_init.size,constr.num_extra_vars)
        self.assertTrue(np.all(y_init == 0.))
        y0 = np.random.rand(constr.num_extra_vars)
        constr.eval(x0,y0)
        # eval() refreshes J counters but leaves the linear part alone.
        self.assertEqual(constr.J_nnz,Jnnz*self.T)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,rowsJ*self.T)
        self.assertEqual(constr.A_row,0)
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # After
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(rowsJ*self.T,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(rowsA*self.T,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(rowsJ*self.T,net.num_vars+constr.num_extra_vars))
        self.assertEqual(J.nnz,Jnnz*self.T)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(rowsA*self.T,net.num_vars+constr.num_extra_vars))
        self.assertEqual(A.nnz,Annz*self.T)
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        self.assertTrue(not np.any(np.isinf(f)))
        self.assertTrue(not np.any(np.isnan(f)))
        self.assertTrue(not np.any(np.isinf(J.data)))
        self.assertTrue(not np.any(np.isnan(J.data)))
        self.assertTrue(not np.any(np.isinf(A.data)))
        self.assertTrue(not np.any(np.isnan(A.data)))
        # Ax=b check: inspect the linear rows device by device, period by period.
        for k in range(self.T):
            for facts in net.facts:
                self.assertTrue(facts.has_flags('variable',
                                                ['active power',
                                                 'reactive power',
                                                 'series voltage magnitude',
                                                 'series voltage angle']))
                index_Pk = np.where(A.col == facts.index_P_k[k])[0]
                index_Pm = np.where(A.col == facts.index_P_m[k])[0]
                index_Pdc = np.where(A.col == facts.index_P_dc[k])[0]
                index_Qk = np.where(A.col == facts.index_Q_k[k])[0]
                index_Qm = np.where(A.col == facts.index_Q_m[k])[0]
                index_Qsh = np.where(A.col == facts.index_Q_sh[k])[0]
                index_Qs = np.where(A.col == facts.index_Q_s[k])[0]
                index_vmags = np.where(A.col == facts.index_v_mag_s[k])[0]
                index_vangs = np.where(A.col == facts.index_v_ang_s[k])[0]
                self.assertEqual(index_Pk.size,1)
                self.assertEqual(index_Qk.size,1)
                self.assertEqual(A.data[index_Pk],1.)
                self.assertEqual(A.data[index_Qk],1.)
                self.assertEqual(b[A.row[index_Pk]],0.)
                self.assertEqual(b[A.row[index_Qk]],0.)
                if not facts.is_regulator():
                    self.assertEqual(index_Qsh.size,2)
                    self.assertEqual(A.data[index_Qsh[0]],1.)
                    self.assertEqual(A.data[index_Qsh[1]],-1.)
                    self.assertEqual(b[A.row[index_Qsh[0]]],0.)
                    self.assertEqual(b[A.row[index_Qsh[1]]],0.)
                if facts.P_max_dc ==0 or facts.is_series_link_disabled():
                    self.assertEqual(index_Pdc.size,2)
                    self.assertEqual(A.data[index_Pdc[0]],-1.)
                    self.assertEqual(A.data[index_Pdc[1]],1.)
                    self.assertEqual(b[A.row[index_Pdc[0]]],0.)
                    self.assertEqual(b[A.row[index_Pdc[1]]],0.)
                else:
                    self.assertEqual(index_Pdc.size,1)
                    self.assertEqual(A.data[index_Pdc],-1.)
                    self.assertEqual(b[A.row[index_Pdc]],0.)
                if facts.is_series_link_disabled():
                    self.assertEqual(index_Pm.size,2)
                    for index in index_Pm:
                        self.assertEqual(A.data[index],1.)
                        self.assertEqual(b[A.row[index]],0.)
                    self.assertEqual(index_Qm.size,2)
                    for index in index_Qm:
                        self.assertEqual(A.data[index],1.)
                        self.assertEqual(b[A.row[index]],0.)
                    self.assertEqual(index_Qs.size,2)
                    self.assertEqual(index_vmags.size,1)
                    self.assertEqual(index_vangs.size,1)
                    self.assertEqual(A.data[index_Qs[0]],-1.)
                    self.assertEqual(A.data[index_Qs[1]],1.)
                    self.assertEqual(A.data[index_vmags],1.)
                    self.assertEqual(A.data[index_vangs],1.)
                    self.assertEqual(b[A.row[index_Qs[0]]],0.)
                    self.assertEqual(b[A.row[index_Qs[1]]],0.)
                    self.assertEqual(b[A.row[index_vmags]],0.)
                    self.assertEqual(b[A.row[index_vangs]],0.)
        # f check: recompute the nonlinear residuals by hand in J-row order.
        flags = {}
        for t in range(self.T):
            for bus in net.buses:
                flags[(t,bus.index)] = False
        J_row = 0
        for t in range(self.T):
            for branch in net.branches:
                for bus in [branch.bus_k, branch.bus_m]:
                    if not flags[(t, bus.index)]:
                        facts_onthisbus = [facts for facts in net.facts if ((facts.bus_k == bus) and (facts.is_in_normal_series_mode()))]
                        for facts in facts_onthisbus:
                            busk = facts.bus_k
                            busm = facts.bus_m
                            vmag_k = x0[busk.index_v_mag[t]]
                            vang_k = x0[busk.index_v_ang[t]]
                            vmag_m = x0[busm.index_v_mag[t]]
                            vang_m = x0[busm.index_v_ang[t]]
                            vmag_s = x0[facts.index_v_mag_s[t]]
                            vang_s = x0[facts.index_v_ang_s[t]]
                            P_m = x0[facts.index_P_m[t]]
                            P_dc = x0[facts.index_P_dc[t]]
                            Q_m = x0[facts.index_Q_m[t]]
                            Q_s = x0[facts.index_Q_s[t]]
                            # Series-voltage KVL (real/imag) and power balance
                            # (real/imag) residuals, four rows per device.
                            f1 = -vmag_k*np.cos(vang_k)+vmag_m*np.cos(vang_m)-vmag_s*np.cos(vang_s)
                            f2 = -vmag_k*np.sin(vang_k)+vmag_m*np.sin(vang_m)-vmag_s*np.sin(vang_s)
                            f3 = vmag_s*P_m*np.cos(vang_s)-vmag_s*Q_m*np.sin(vang_s)-vmag_m*P_dc*np.cos(vang_m)+vmag_m*Q_s*np.sin(vang_m)
                            f4 = vmag_s*P_m*np.sin(vang_s)+vmag_s*Q_m*np.cos(vang_s)-vmag_m*P_dc*np.sin(vang_m)-vmag_m*Q_s*np.cos(vang_m)
                            self.assertAlmostEqual(f1,f[J_row])
                            self.assertAlmostEqual(f2,f[J_row+1])
                            self.assertAlmostEqual(f3,f[J_row+2])
                            self.assertAlmostEqual(f4,f[J_row+3])
                            J_row += 4
                        flags[(t,bus.index)] = True
        # Jacobian check
        pf.tests.utils.check_constraint_Jacobian(self,
                                                 constr,
                                                 x0,
                                                 np.zeros(0),
                                                 NUM_TRIALS,
                                                 TOL,
                                                 EPS,
                                                 h)
        # Single Hessian check
        pf.tests.utils.check_constraint_single_Hessian(self,
                                                       constr,
                                                       x0,
                                                       np.zeros(0),
                                                       NUM_TRIALS,
                                                       TOL,
                                                       EPS,
                                                       h)
        # Combined Hessian check
        pf.tests.utils.check_constraint_combined_Hessian(self,
                                                         constr,
                                                         x0,
                                                         np.zeros(0),
                                                         NUM_TRIALS,
                                                         TOL,
                                                         EPS,
                                                         h)
        # With outages: an all-out-of-service network yields empty structures.
        for facts in net.facts:
            facts.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_FACTS_PSET_SWITCH(self):
    """Test the linear 'switching FACTS active power control' constraint."""
    # Multiperiods
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        # Vars
        net.set_flags('facts',
                      'variable',
                      'any',
                      'active power')
        self.assertEqual(net.num_vars, 3*net.num_facts*self.T)
        # Constraint
        constr = pf.Constraint('switching FACTS active power control',net)
        self.assertEqual(constr.name,'switching FACTS active power control')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # Before: everything must be empty prior to analyze().
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,0)
        self.assertEqual(constr.A_row,0)
        self.assertEqual(constr.num_extra_vars,0)
        x0 = net.get_var_values()
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        # Verify analyze: one row per series-enabled FACTS with P_max_dc > 0.
        constr.analyze()
        num = len([f for f in net.facts if f.is_in_normal_series_mode() and f.P_max_dc > 0.])
        Annz = num*self.T
        Arow = Annz
        self.assertEqual(constr.A_nnz,Annz)
        self.assertEqual(constr.A_row,Arow)
        # Verify evaluation
        constr.eval(x0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.A_row,0)
        f = constr.f
        J = constr.J
        b = constr.b
        A = constr.A
        # After
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        self.assertTrue(not np.any(np.isinf(f)))
        self.assertTrue(not np.any(np.isnan(f)))
        self.assertTrue(not np.any(np.isinf(J.data)))
        self.assertTrue(not np.any(np.isnan(J.data)))
        self.assertTrue(not np.any(np.isinf(A.data)))
        self.assertTrue(not np.any(np.isnan(A.data)))
        # Ax = b Check (note: loop var `f` shadows constr.f above)
        for t in range(self.T):
            for f in net.facts:
                self.assertTrue(f.has_flags('variable', 'active power'))
                if f.is_in_normal_series_mode() and f.P_max_dc > 0.:
                    # Each row pins P_m to its setpoint: 1 * P_m = P_set.
                    indexP = np.where(A.col == f.index_P_m[t])[0]
                    self.assertEqual(indexP.size,1)
                    self.assertEqual(A.data[indexP],1)
                    self.assertEqual(b[A.row[indexP]],f.P_set[t])
        # With outages: an all-out-of-service network yields empty structures.
        for facts in net.facts:
            facts.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_FACTS_QSET_SWITCH(self):
    """Test the linear 'switching FACTS reactive power control' constraint."""
    # Multiperiods
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        # NOTE(review): skipping cases without VSC converters looks copied
        # from a VSC test -- confirm this guard is intentional here.
        num_vsc = net.num_vsc_converters
        if num_vsc == 0:
            continue
        # Vars
        net.set_flags('facts',
                      'variable',
                      'any',
                      'reactive power')
        self.assertEqual(net.num_vars, 4*net.num_facts*self.T)
        # Constraint
        constr = pf.Constraint('switching FACTS reactive power control',net)
        self.assertEqual(constr.name,'switching FACTS reactive power control')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # Before: everything must be empty prior to analyze().
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,0)
        self.assertEqual(constr.A_row,0)
        self.assertEqual(constr.num_extra_vars,0)
        x0 = net.get_var_values()
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        # Verify analyze: one row per series-enabled FACTS device.
        constr.analyze()
        num = len([f for f in net.facts if f.is_in_normal_series_mode()])
        Annz = num*self.T
        Arow = Annz
        self.assertEqual(constr.A_nnz,Annz)
        self.assertEqual(constr.A_row,Arow)
        # Verify evaluation
        constr.eval(x0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.A_row,0)
        f = constr.f
        J = constr.J
        b = constr.b
        A = constr.A
        # After
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        self.assertTrue(not np.any(np.isinf(f)))
        self.assertTrue(not np.any(np.isnan(f)))
        self.assertTrue(not np.any(np.isinf(J.data)))
        self.assertTrue(not np.any(np.isnan(J.data)))
        self.assertTrue(not np.any(np.isinf(A.data)))
        self.assertTrue(not np.any(np.isnan(A.data)))
        # Ax = b Check (note: loop var `f` shadows constr.f above)
        for t in range(self.T):
            for f in net.facts:
                self.assertTrue(f.has_flags('variable', 'reactive power'))
                if f.is_in_normal_series_mode():
                    # Each row pins Q_m to its setpoint: 1 * Q_m = Q_set.
                    indexP = np.where(A.col == f.index_Q_m[t])[0]
                    self.assertEqual(indexP.size,1)
                    self.assertEqual(A.data[indexP],1)
                    self.assertEqual(b[A.row[indexP]],f.Q_set[t])
        # With outages: an all-out-of-service network yields empty structures.
        for facts in net.facts:
            facts.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_REG_PF(self):
    """Test the nonlinear 'power factor regulation' constraint for VSC converters.

    For each test case network: makes VSC active/reactive powers variables,
    assigns random small-magnitude target power factors to converters in
    f-AC mode, then checks constraint structure counts before/after
    analyze/eval, the linear part Ax = b, the nonlinear complementarity
    residuals f, numerical Jacobian/Hessian agreement, and that all
    structures are empty once every converter is out of service.
    """
    # Constants
    h = 1e-8  # finite-difference step for the derivative checks below
    # Multiperiod
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        # Vars: P and Q of every VSC converter, for every period
        net.set_flags('vsc converter',
                      'variable',
                      'any',
                      ['active power', 'reactive power'])
        self.assertEqual(net.num_vars, 2*net.get_num_vsc_converters()*self.T)
        x0 = net.get_var_values()
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        # PF: random sign, magnitude capped at 0.2 so factor below is large
        for vsc in net.vsc_converters:
            if vsc.is_in_f_ac_mode():
                vsc.target_power_factor = np.sign(np.random.randn())*np.minimum(np.random.rand(), 0.2)
        # Constraint
        constr = pf.Constraint('power factor regulation',net)
        self.assertEqual(constr.name,'power factor regulation')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # Before analyze: all structures empty
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,0)
        self.assertEqual(constr.A_row,0)
        self.assertEqual(constr.num_extra_vars,0)
        # Manual count of expected Jacobian nonzeros (4 per f-AC-mode VSC)
        Jnnz = 0
        for i in range(net.num_buses):
            bus = net.get_bus(i)
            for vsc in bus.vsc_converters:
                if vsc.is_in_f_ac_mode():
                    Jnnz += 4
        Annz = 4*net.get_num_vsc_converters_in_f_ac_mode()
        rowsJ = 2*net.get_num_vsc_converters_in_f_ac_mode()
        rowsA = net.get_num_vsc_converters_in_f_ac_mode()
        constr.analyze()
        self.assertEqual(constr.J_nnz, Jnnz*self.T)
        self.assertEqual(constr.A_nnz, Annz*self.T)
        self.assertEqual(constr.J_row, rowsJ*self.T)
        self.assertEqual(constr.A_row, rowsA*self.T)
        # Constraint adds two extra (slack-like) vars per nonlinear row pair
        self.assertEqual(constr.num_extra_vars, rowsJ*self.T)
        y_init = constr.init_extra_vars
        self.assertEqual(y_init.size,constr.num_extra_vars)
        self.assertTrue(np.all(y_init == 0.))
        y0 = np.random.rand(constr.num_extra_vars)
        constr.eval(x0,y0)
        # After eval the linear counters reset; nonlinear ones are rebuilt
        self.assertEqual(constr.J_nnz,Jnnz*self.T)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,rowsJ*self.T)
        self.assertEqual(constr.A_row,0)
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # After: shapes include the extra variables, values all finite
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(rowsJ*self.T,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(rowsA*self.T,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(rowsJ*self.T,net.num_vars+constr.num_extra_vars))
        self.assertEqual(J.nnz,Jnnz*self.T)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(rowsA*self.T,net.num_vars+constr.num_extra_vars))
        self.assertEqual(A.nnz,Annz*self.T)
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        self.assertTrue(not np.any(np.isinf(f)))
        self.assertTrue(not np.any(np.isnan(f)))
        self.assertTrue(not np.any(np.isinf(J.data)))
        self.assertTrue(not np.any(np.isnan(J.data)))
        self.assertTrue(not np.any(np.isinf(A.data)))
        self.assertTrue(not np.any(np.isnan(A.data)))
        # Ax=b check: each extra-var pair (columns num_vars+2k, num_vars+2k+1)
        # appears exactly once, on the same row, and that row has 4 entries
        for k in range(J.shape[0]//2):
            index1 = np.where(A.col == net.num_vars+2*k)[0]
            index2 = np.where(A.col == net.num_vars+2*k+1)[0]
            self.assertEqual(index1.size,1)
            self.assertEqual(index2.size,1)
            self.assertEqual(A.row[index1[0]],A.row[index2[0]])
            index3 = np.where(A.row == A.row[index1[0]])[0]
            self.assertEqual(index3.size,4)
        # Per-converter coefficient check: Q has coeff 1, P has -/+factor
        # (sign follows the target power factor), extra vars -/+factor
        for vsc in net.vsc_converters:
            if vsc.is_in_f_ac_mode():
                gamma = vsc.target_power_factor
                factor = np.sqrt((1-gamma**2.)/(gamma**2.))
                for t in range(self.T):
                    iQ = vsc.index_Q[t]
                    iP = vsc.index_P[t]
                    k = np.where(A.col == iQ)[0]
                    self.assertEqual(k.size, 1)
                    k = np.where(A.row == A.row[k])[0]
                    self.assertEqual(k.size, 4)
                    for kk in k:
                        if A.col[kk] == iQ:
                            self.assertEqual(A.data[kk], 1.)
                        elif A.col[kk] == iP:
                            if vsc.target_power_factor >= 0:
                                self.assertAlmostEqual(A.data[kk], -factor)
                            else:
                                self.assertAlmostEqual(A.data[kk], factor)
                        else:
                            if (A.col[kk]-net.num_vars) % 2 == 0:
                                self.assertAlmostEqual(A.data[kk], -factor) # y
                            else:
                                self.assertAlmostEqual(A.data[kk], factor) # z
        # f check: smoothed Fischer-Burmeister-style complementarity residuals
        # between (Q - Qmin, y) and (Qmax - Q, z)
        eps = 1e-8
        J_row = 0
        for t in range(self.T):
            for bus in net.buses:
                for vsc in bus.vsc_converters:
                    if vsc.is_in_f_ac_mode():
                        self.assertTrue(vsc.has_flags('variable', ['active power', 'reactive power']))
                        y = y0[J_row]
                        z = y0[J_row+1]
                        Q = vsc.Q[t]
                        Qmax = vsc.Q_max
                        Qmin = vsc.Q_min
                        CompY = (Q-Qmin)+y-np.sqrt((Q-Qmin)**2.+y**2.+2*eps)
                        CompZ = (Qmax-Q)+z-np.sqrt((Qmax-Q)**2.+z**2.+2*eps)
                        self.assertAlmostEqual(CompY,f[J_row])
                        self.assertAlmostEqual(CompZ,f[J_row+1])
                        J_row += 2
        # Jacobian check (finite differences)
        pf.tests.utils.check_constraint_Jacobian(self,
                                                 constr,
                                                 x0,
                                                 y0,
                                                 NUM_TRIALS,
                                                 TOL,
                                                 EPS,
                                                 h)
        # Single Hessian check
        pf.tests.utils.check_constraint_single_Hessian(self,
                                                       constr,
                                                       x0,
                                                       y0,
                                                       NUM_TRIALS,
                                                       TOL,
                                                       EPS,
                                                       h)
        # Combined Hessian check
        pf.tests.utils.check_constraint_combined_Hessian(self,
                                                         constr,
                                                         x0,
                                                         y0,
                                                         NUM_TRIALS,
                                                         TOL,
                                                         EPS,
                                                         h)
        # With outages: everything collapses to empty
        for conv in net.vsc_converters:
            conv.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        # NOTE(review): trailing 0 here is passed as assertTrue's msg
        # argument and has no effect — likely a typo.
        self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_REG_PF_SWITCH(self):
    """Test the linear 'switching power factor regulation' constraint.

    For each test case: makes VSC active/reactive powers variables, sets
    random target power factors for f-AC-mode converters, verifies the
    constraint is purely linear (f and J empty), checks the A matrix
    entries row by row (Q coefficient 1, P coefficient -/+factor depending
    on the power-factor sign, b = 0), and verifies all structures are
    empty once every converter is out of service.
    """
    # Multiperiod
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        self.assertEqual(net.num_vars,0)
        # Vars
        net.set_flags('vsc converter',
                      'variable',
                      'any',
                      ['active power', 'reactive power'])
        self.assertEqual(net.num_vars, 2*net.get_num_vsc_converters()*self.T)
        x0 = net.get_var_values()
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        # PF: random sign, magnitude capped at 0.2
        for vsc in net.vsc_converters:
            if vsc.is_in_f_ac_mode():
                vsc.target_power_factor = np.sign(np.random.randn())*np.minimum(np.random.rand(), 0.2)
        # Constraint
        constr = pf.Constraint('switching power factor regulation',net)
        self.assertEqual(constr.name,'switching power factor regulation')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # Before analyze: all structures empty
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        # Manual count: one row and two A entries per eligible converter
        nnz = 0
        num_constr = 0
        for vsc in net.vsc_converters:
            if vsc.is_in_f_ac_mode() and vsc.has_flags('variable', ['active power', 'reactive power']):
                num_constr += 1
                nnz += 2
        constr.analyze()
        self.assertEqual(constr.A.shape[0],num_constr*self.T)
        self.assertEqual(nnz*self.T,constr.A_nnz)
        constr.eval(x0)
        self.assertEqual(0,constr.A_nnz)
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # After: purely linear constraint (f, J stay empty)
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(num_constr*self.T,))
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(num_constr*self.T,net.num_vars))
        self.assertEqual(A.nnz,nnz*self.T)
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,net.num_vars))
        self.assertEqual(J.nnz,0)
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        # Detailed check of COO triplets, walked in constraint-build order
        Ai = A.row
        Aj = A.col
        Ad = A.data
        self.assertEqual(Ai.size,nnz*self.T)
        self.assertEqual(Aj.size,nnz*self.T)
        self.assertEqual(Ad.size,nnz*self.T)
        nnz = 0
        row = 0
        for t in range(self.T):
            for bus in net.buses:
                for vsc in bus.vsc_converters:
                    if vsc.is_in_f_ac_mode():
                        gamma = vsc.target_power_factor
                        factor = np.sqrt(1-gamma**2.)/np.abs(gamma)
                        self.assertEqual(b[row], 0.)
                        # First entry: P column with -/+factor per gamma sign
                        self.assertEqual(Ai[nnz], row)
                        self.assertEqual(Aj[nnz], vsc.index_P[t])
                        if gamma >= 0.:
                            self.assertAlmostEqual(Ad[nnz], -factor)
                        else:
                            self.assertAlmostEqual(Ad[nnz], factor)
                        nnz += 1
                        # Second entry: Q column with coefficient 1
                        self.assertEqual(Ai[nnz], row)
                        self.assertEqual(Aj[nnz], vsc.index_Q[t])
                        self.assertEqual(Ad[nnz], 1.)
                        nnz += 1
                        row += 1
        self.assertEqual(row,A.shape[0])
        self.assertEqual(nnz,A.nnz)
        # With outages: everything collapses to empty
        for conv in net.vsc_converters:
            conv.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        # NOTE(review): trailing 0 is a useless assertTrue msg argument.
        self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_HVDCPF(self):
    """Test the linear 'HVDC power balance' constraint (DC current balance).

    For each test case: makes DC bus voltages and VSC/CSC converter DC
    powers variables, checks structure before/after analyze, verifies
    the current mismatch A*x0 - b against a manually accumulated per-bus
    mismatch (branch currents out, converter currents in), repeats the
    check with no variables (constant evaluation), and finally verifies
    behavior under component and DC-bus outages.
    """
    # Multiperiods
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case, self.T)
        self.assertEqual(net.num_periods, self.T)
        # Vars: DC bus voltages
        net.set_flags('dc bus',
                      'variable',
                      'any',
                      'voltage')
        # Vars: converter DC quantities
        net.set_flags('vsc converter',
                      'variable',
                      'any',
                      'dc power')
        net.set_flags('csc converter',
                      'variable',
                      'any',
                      'dc power')
        self.assertEqual(net.num_vars, (net.num_dc_buses +
                                        2*net.num_vsc_converters +
                                        2*net.num_csc_converters)*self.T)
        # Constraint
        constr = pf.Constraint('HVDC power balance',net)
        self.assertEqual(constr.name,'HVDC power balance')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        G = constr.G
        l = constr.l
        u = constr.u
        # Before analyze: all structures empty
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,0)
        self.assertEqual(constr.A_row,0)
        self.assertEqual(constr.H_combined.nnz, 0)
        self.assertTupleEqual(constr.H_combined.shape, (0,0))
        self.assertTrue(type(G) is coo_matrix)
        self.assertTupleEqual(G.shape,(0,0))
        self.assertEqual(G.nnz,0)
        self.assertEqual(constr.num_extra_vars,0)
        # Perturbed starting point so mismatches are nonzero
        x0 = net.get_var_values()+1e-1*np.random.randn(net.num_vars)
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        constr.analyze()
        # Every DC bus must own exactly one current-balance row
        flags = np.zeros(net.num_dc_buses*self.T, dtype=int)
        for t in range(self.T):
            for bus in net.dc_buses:
                flags[bus.di_index[t]] = 1
        self.assertEqual(np.sum(flags), flags.size)
        A = constr.A
        b = constr.b
        # Linear-only constraint: J, G, l, u, f stay empty
        self.assertTupleEqual(constr.J.shape, (0, net.num_vars))
        self.assertTupleEqual(constr.G.shape, (0, net.num_vars))
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.f.size, 0)
        self.assertTupleEqual(constr.A.shape, (net.num_dc_buses*self.T, net.num_vars))
        # One entry per converter, four per DC branch (2 buses x 2 voltages)
        self.assertEqual(constr.A.nnz, (net.num_vsc_converters +
                                        net.num_csc_converters +
                                        4*net.num_dc_branches)*self.T)
        self.assertEqual(constr.b.size, net.num_dc_buses*self.T)
        # Manual per-bus current mismatch for comparison with A*x0 - b
        i_mis_manual = np.zeros(net.num_dc_buses*self.T)
        i_mis = A*x0-b
        for t in range(self.T):
            for bus in net.dc_buses:
                for branch in bus.branches:
                    self.assertTrue(branch.bus_k.has_flags('variable', 'voltage'))
                    self.assertTrue(branch.bus_m.has_flags('variable', 'voltage'))
                    # Branch current k->m from Ohm's law on the voltage vars
                    ikm = (x0[branch.bus_k.index_v[t]]-x0[branch.bus_m.index_v[t]])/branch.r
                    self.assertEqual(ikm, branch.get_i_km(x0)[t])
                    if bus.is_equal(branch.bus_k):
                        i_out = ikm
                    else:
                        i_out = -ikm
                    i_mis_manual[bus.di_index[t]] -= i_out
            for conv in net.vsc_converters:
                self.assertTrue(conv.has_flags('variable', 'dc power'))
                i_in = x0[conv.index_i_dc[t]]
                i_mis_manual[conv.dc_bus.di_index[t]] += i_in
            for conv in net.csc_converters:
                self.assertTrue(conv.has_flags('variable', 'dc power'))
                i_in = x0[conv.index_i_dc[t]]
                i_mis_manual[conv.dc_bus.di_index[t]] += i_in
        if not i_mis.size:
            self.assertTrue(np.all(i_mis_manual == i_mis))
        else:
            self.assertLessEqual(np.max(np.abs(i_mis_manual-i_mis)), 1e-10)
        # Push x0 back into the network and sanity-check the values landed
        net.set_var_values(x0)
        for t in range(self.T):
            for bus in net.dc_buses:
                self.assertNotEqual(bus.v[t], 0.)
                self.assertNotEqual(bus.v[t], 1.)
            for conv in net.vsc_converters:
                self.assertNotEqual(conv.P_dc[t], 0.)
                self.assertNotEqual(conv.i_dc[t], 0.)
        # Test with no variables: A is empty, b carries the constants
        net.clear_flags()
        self.assertEqual(net.num_vars, 0)
        constr.analyze()
        A = constr.A
        b = constr.b
        self.assertTupleEqual(constr.J.shape, (0, net.num_vars))
        self.assertTupleEqual(constr.G.shape, (0, net.num_vars))
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.f.size, 0)
        self.assertTupleEqual(constr.A.shape, (net.num_dc_buses*self.T, 0))
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.b.size, net.num_dc_buses*self.T)
        x0 = net.get_var_values()
        self.assertEqual(x0.size, 0)
        # Same mismatch check, now using the stored (non-variable) values
        i_mis_manual = np.zeros(net.num_dc_buses*self.T)
        i_mis = A*x0-b
        for t in range(self.T):
            for bus in net.dc_buses:
                for branch in bus.branches:
                    self.assertFalse(branch.bus_k.has_flags('variable', 'voltage'))
                    self.assertFalse(branch.bus_m.has_flags('variable', 'voltage'))
                    ikm = branch.i_km[t]
                    if bus.is_equal(branch.bus_k):
                        i_out = ikm
                    else:
                        i_out = -ikm
                    i_mis_manual[bus.di_index[t]] -= i_out
            for conv in net.vsc_converters:
                self.assertFalse(conv.has_flags('variable', 'dc power'))
                i_in = conv.i_dc[t]
                i_mis_manual[conv.dc_bus.di_index[t]] += i_in
            for conv in net.csc_converters:
                self.assertFalse(conv.has_flags('variable', 'dc power'))
                i_in = conv.i_dc[t]
                i_mis_manual[conv.dc_bus.di_index[t]] += i_in
        if not i_mis.size:
            self.assertTrue(np.all(i_mis_manual == i_mis))
        else:
            self.assertLessEqual(np.max(np.abs(i_mis_manual-i_mis)), 1e-10)
        # With branch/converter outages: rows remain (one per DC bus) but empty
        for br in net.dc_branches:
            br.in_service = False
        for vsc in net.vsc_converters:
            vsc.in_service = False
        for csc in net.csc_converters:
            csc.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], net.num_dc_buses*self.T)
        self.assertEqual(constr.b.size, net.num_dc_buses*self.T)
        self.assertTrue(np.all(constr.b == 0))
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        # NOTE(review): trailing 0 is a useless assertTrue msg argument.
        self.assertTrue(np.all(constr.H_nnz == 0), 0)
        net.make_all_in_service()
        # With DC bus outages: even the rows disappear
        for bus in net.dc_buses:
            bus.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertTrue(np.all(constr.b == 0))
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_VSC_EQ(self):
    """Test the 'VSC converter equations' constraint (loss model + P_dc = i_dc*v).

    For each test case: makes VSC dc/active powers and DC bus voltages
    variables, checks index uniqueness, verifies analyze/eval counters,
    checks the linear loss equation Ax = b (P + P_dc -/+ B*i_dc = -A_loss)
    and the nonlinear residual f = P_dc - i_dc*v, validates derivatives
    numerically, and verifies emptiness under converter outages.
    """
    # Constants
    h = 1e-10  # finite-difference step for derivative checks
    # Multiperiods
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        # Vars
        net.set_flags('vsc converter',
                      'variable',
                      'any',
                      ['dc power', 'active power'])
        net.set_flags('dc bus',
                      'variable',
                      'any',
                      'voltage')
        # Check if dc bus indexes are set to unique values
        busindicesv = [bus.index_v for bus in net.dc_buses]
        self.assertEqual(len(np.unique(busindicesv)), net.num_dc_buses*self.T)
        # Check if vsc variable indexes are set to unique values
        vscindicesPac = [vsc.index_P for vsc in net.vsc_converters]
        self.assertEqual(len(np.unique(vscindicesPac)), net.num_vsc_converters*self.T)
        vscindicesPdc = [vsc.index_P_dc for vsc in net.vsc_converters]
        self.assertEqual(len(np.unique(vscindicesPdc)), net.num_vsc_converters*self.T)
        vscindicesidc = [vsc.index_i_dc for vsc in net.vsc_converters]
        self.assertEqual(len(np.unique(vscindicesidc)), net.num_vsc_converters*self.T)
        self.assertEqual(net.num_vars, (3*net.num_vsc_converters+net.num_dc_buses)*self.T)
        # Constraint
        constr = pf.Constraint('VSC converter equations',net)
        self.assertEqual(constr.name,'VSC converter equations')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # Before analyze: all structures empty
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,0)
        self.assertEqual(constr.A_row,0)
        self.assertEqual(constr.num_extra_vars,0)
        # Small perturbation so nonlinear terms are exercised
        x0 = net.get_var_values()+1e-4*np.random.randn(net.num_vars)
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        # Verify analyze: 3 entries and 1 row per converter per period,
        # for both the linear and nonlinear parts
        constr.analyze()
        Annz = 3*net.num_vsc_converters*self.T
        Jnnz =3*net.num_vsc_converters*self.T
        Arow = net.num_vsc_converters*self.T
        Jrow = net.num_vsc_converters*self.T
        self.assertEqual(constr.A_nnz,Annz)
        self.assertEqual(constr.A_row,Arow)
        self.assertEqual(constr.J_nnz,Jnnz)
        self.assertEqual(constr.J_row,Jrow)
        # Verify evaluation: linear counters reset, nonlinear rebuilt
        constr.eval(x0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.A_row,0)
        self.assertEqual(constr.J_nnz,Jnnz)
        self.assertEqual(constr.J_row,Jrow)
        f = constr.f
        J = constr.J
        b = constr.b
        A = constr.A
        # After: shapes consistent, values finite
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        self.assertTupleEqual(b.shape,(Arow,))
        self.assertTrue(not np.any(np.isinf(f)))
        self.assertTrue(not np.any(np.isnan(f)))
        self.assertTupleEqual(f.shape,(Jrow,))
        self.assertTrue(not np.any(np.isinf(J.data)))
        self.assertTrue(not np.any(np.isnan(J.data)))
        self.assertTupleEqual(J.shape,(Jrow,net.num_vars))
        self.assertTrue(not np.any(np.isinf(A.data)))
        self.assertTrue(not np.any(np.isnan(A.data)))
        self.assertTupleEqual(A.shape,(Arow,net.num_vars))
        # Ax = b check: 1-norm of A equals 2 per row (P and P_dc coeffs)
        # plus the sum of |loss_coeff_B| per period
        coeffB = [vsc.loss_coeff_B for vsc in net.vsc_converters]
        sumcoefB = np.sum(np.abs(coeffB))
        self.assertAlmostEqual(norm(A.data,1),2*Arow+sumcoefB*self.T) # Almost, because of float type, to avoid precision errors
        for k in range(self.T):
            for vsc in net.vsc_converters:
                self.assertTrue(vsc.has_flags('variable',['dc power','active power']))
                indexP = np.where(A.col == vsc.index_P[k])[0]
                indexPdc = np.where(A.col == vsc.index_P_dc[k])[0]
                indexidc = np.where(A.col == vsc.index_i_dc[k])[0]
                self.assertEqual(indexP.size,1)
                self.assertEqual(indexPdc.size,1)
                self.assertEqual(indexidc.size,1)
                self.assertEqual(A.data[indexP],1.)
                self.assertEqual(A.data[indexPdc],1.)
                # Sign of the B loss term follows the sign of P_dc_set
                if vsc.P_dc_set[k] <= 0:
                    self.assertEqual(A.data[indexidc],-vsc.loss_coeff_B)
                else:
                    self.assertEqual(A.data[indexidc],vsc.loss_coeff_B)
                self.assertEqual(b[A.row[indexP]],-1.*vsc.loss_coeff_A)
        # f check: residual P_dc - i_dc*v per converter, in build order
        J_row = 0
        for t in range(self.T):
            for bus in net.dc_buses:
                vsc_onthisbus = [vsc for vsc in net.vsc_converters if vsc.dc_bus == bus]
                for vsc in vsc_onthisbus:
                    indexPdc = np.where(J.col == vsc.index_P_dc[t])[0]
                    indexidc = np.where(J.col == vsc.index_i_dc[t])[0]
                    indexv = np.where(J.col == bus.index_v[t])[0]
                    dP = x0[J.col[indexPdc]] - x0[J.col[indexidc]]*x0[J.col[indexv]]
                    self.assertAlmostEqual(f[J_row],dP)
                    J_row += 1
        # Jacobian check (finite differences)
        pf.tests.utils.check_constraint_Jacobian(self,
                                                 constr,
                                                 x0,
                                                 np.zeros(0),
                                                 NUM_TRIALS,
                                                 TOL,
                                                 EPS,
                                                 h)
        # Single Hessian check
        pf.tests.utils.check_constraint_single_Hessian(self,
                                                       constr,
                                                       x0,
                                                       np.zeros(0),
                                                       NUM_TRIALS,
                                                       TOL,
                                                       EPS,
                                                       h)
        # Combined Hessian check
        pf.tests.utils.check_constraint_combined_Hessian(self,
                                                         constr,
                                                         x0,
                                                         np.zeros(0),
                                                         NUM_TRIALS,
                                                         TOL,
                                                         EPS,
                                                         h)
        # With outages: everything collapses to empty
        for conv in net.vsc_converters:
            conv.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertTrue(np.all(constr.b == 0))
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        # NOTE(review): trailing 0 is a useless assertTrue msg argument.
        self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_VSC_DC_PSET(self):
    """Test the linear 'VSC DC power control' constraint.

    For each test case network: makes VSC active powers variables,
    verifies all constraint structures are empty before analyze, checks
    the analyze nonzero/row counters (one per converter in P-DC mode per
    period), and verifies that Ax = b encodes -P = P_dc_set for every
    DC-power-controlled converter. Finally verifies everything is empty
    once all converters are out of service.
    """
    # Multiperiods
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        # Vars: VSC active power per period
        net.set_flags('vsc converter',
                      'variable',
                      'any',
                      'active power')
        self.assertEqual(net.num_vars, net.num_vsc_converters*self.T)
        # Constraint
        constr = pf.Constraint('VSC DC power control',net)
        self.assertEqual(constr.name,'VSC DC power control')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # Before analyze: all structures empty
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,0)
        self.assertEqual(constr.A_row,0)
        self.assertEqual(constr.num_extra_vars,0)
        x0 = net.get_var_values()
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        # Verify analyze
        constr.analyze()
        # Check that vsc P indexes are set to unique values
        vscindices = [vsc.index_P for vsc in net.vsc_converters]
        self.assertEqual(len(np.unique(vscindices)), net.num_vsc_converters*self.T)
        dcmodevsc = [vsc for vsc in net.vsc_converters if vsc.is_in_P_dc_mode()]
        Annz = len(dcmodevsc)*self.T  # one A entry per DC-mode converter per period
        Arow = Annz
        self.assertEqual(constr.A_nnz,Annz)
        self.assertEqual(constr.A_row,Arow)
        # Verify evaluation (linear constraint: counters reset to 0)
        constr.eval(x0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.A_row,0)
        f = constr.f
        J = constr.J
        b = constr.b
        A = constr.A
        # After: finite values only
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        self.assertTrue(not np.any(np.isinf(f)))
        self.assertTrue(not np.any(np.isnan(f)))
        self.assertTrue(not np.any(np.isinf(J.data)))
        self.assertTrue(not np.any(np.isnan(J.data)))
        self.assertTrue(not np.any(np.isinf(A.data)))
        self.assertTrue(not np.any(np.isnan(A.data)))
        # Ax = b check: coefficient -1 on P, right-hand side P_dc_set
        for k in range(self.T):
            for vsc in dcmodevsc:
                self.assertTrue(vsc.has_flags('variable', ['active power']))
                indexP = np.where(A.col == vsc.index_P[k])[0]
                self.assertEqual(indexP.size,1)
                self.assertEqual(A.data[indexP],-1)
                self.assertEqual(b[A.row[indexP]],vsc.P_dc_set[k])
        # With outages: everything collapses to empty
        for conv in net.vsc_converters:
            conv.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertTrue(np.all(constr.b == 0))
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        # Fix: the original passed a stray 0 as assertTrue's msg argument
        # (useless failure message); drop it.
        self.assertTrue(np.all(constr.H_nnz == 0))
def test_constr_VSC_DC_VSET(self):
    """Test the linear 'VSC DC voltage control' constraint.

    For each test case network: makes DC bus voltages variables, verifies
    all constraint structures are empty before analyze, checks the
    analyze counters (one row/entry per converter in v-DC mode per
    period), and verifies A (all coefficients 1 on the controlled bus
    voltage columns) and b (the voltage set points). Finally verifies
    everything is empty once all converters are out of service.
    """
    # Multiperiods
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        # Vars: DC bus voltages per period
        net.set_flags('dc bus',
                      'variable',
                      'any',
                      'voltage')
        self.assertEqual(net.num_vars, net.num_dc_buses*self.T)
        # Constraint
        constr = pf.Constraint('VSC DC voltage control',net)
        self.assertEqual(constr.name,'VSC DC voltage control')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # Before analyze: all structures empty
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,0)
        self.assertEqual(constr.A_row,0)
        self.assertEqual(constr.num_extra_vars,0)
        x0 = net.get_var_values()
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        # Verify analyze
        constr.analyze()
        dcmodevsc = [vsc for vsc in net.vsc_converters if vsc.is_in_v_dc_mode()]
        Annz = len(dcmodevsc)*self.T  # one entry per v-DC-mode converter per period
        Arow = Annz
        self.assertEqual(constr.A_nnz,Annz)
        self.assertEqual(constr.A_row,Arow)
        # Verify evaluation (linear constraint: counters reset to 0)
        constr.eval(x0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.A_row,0)
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # After: finite values only
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        self.assertTrue(not np.any(np.isinf(f)))
        self.assertTrue(not np.any(np.isnan(f)))
        self.assertTrue(not np.any(np.isinf(J.data)))
        self.assertTrue(not np.any(np.isnan(J.data)))
        self.assertTrue(not np.any(np.isinf(A.data)))
        self.assertTrue(not np.any(np.isnan(A.data)))
        # Verify A matrix: identity-like, period-blocked by converter order
        self.assertTrue(np.all(A.data == 1))
        for t in range(0,self.T):
            indices = [vsc.dc_bus.index_v[t] for vsc in dcmodevsc]
            self.assertTrue(np.all(A.col[t*len(dcmodevsc):(t*len(dcmodevsc)+len(dcmodevsc))] == indices))
        # Verify b vector: the DC voltage set points, same ordering
        for t in range(0,self.T):
            setpoints = [vsc.v_dc_set[t] for vsc in dcmodevsc]
            self.assertTrue(np.all(b[t*len(dcmodevsc):(t*len(dcmodevsc)+len(dcmodevsc))] == setpoints))
        # With outages: everything collapses to empty
        for conv in net.vsc_converters:
            conv.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertTrue(np.all(constr.b == 0))
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        # Fix: the original passed a stray 0 as assertTrue's msg argument
        # (useless failure message); drop it.
        self.assertTrue(np.all(constr.H_nnz == 0))
def test_constr_LOAD_VDEP(self):
    """Test the nonlinear 'load voltage dependence' constraint (ZIP load model).

    For each test case: makes bus voltage magnitudes and load P/Q
    variables, assigns random ZIP coefficients (constant current ci/cj,
    constant impedance cg/cb, constant power cp/cq), checks constraint
    structure and Hessian sparsity from analyze, verifies the residuals
    f against manually computed ZIP mismatches, validates derivatives
    numerically, and verifies emptiness under load outages.
    """
    # Constants
    h = 1e-10  # finite-difference step for derivative checks
    # Multiperiods
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        # Vars
        net.set_flags('bus',
                      'variable',
                      'any',
                      'voltage magnitude')
        net.set_flags('load',
                      'variable',
                      'any',
                      ['active power', 'reactive power'])
        self.assertEqual(net.num_vars, (2*net.num_loads+net.num_buses)*self.T)
        # Small perturbation so residuals are nonzero
        x0 = net.get_var_values()+1e-5*np.random.randn(net.num_vars)
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        # Loads comps: random ZIP coefficients; constant-power terms track P/Q
        for load in net.loads:
            load.comp_ci = np.random.randn(self.T)
            load.comp_cj = np.random.randn(self.T)
            load.comp_cg = np.random.randn()
            load.comp_cb = np.random.randn()
            load.comp_cp = load.P
            load.comp_cq = load.Q
        # Constraint
        constr = pf.Constraint('load voltage dependence',net)
        self.assertEqual(constr.name,'load voltage dependence')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # Before analyze: all structures empty
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,0)
        self.assertEqual(constr.A_row,0)
        self.assertEqual(constr.num_extra_vars,0)
        # Two rows (P and Q) and four Jacobian entries per load per period
        Jnnz = 4*net.num_loads*self.T
        rowsJ = 2*net.num_loads*self.T
        constr.analyze()
        self.assertEqual(constr.J_nnz,Jnnz)
        self.assertEqual(constr.J_row,rowsJ)
        self.assertEqual(constr.num_extra_vars,0)
        # Each row's single Hessian has exactly one nonzero (the v_mag^2 term)
        self.assertLessEqual(constr.J_row, constr.H_nnz.size)
        self.assertLessEqual(2*net.num_loads*net.num_periods, constr.H_nnz.size)
        self.assertTrue(np.all(constr.H_nnz[:2*net.num_loads*net.num_periods] == 1))
        for i in range(rowsJ):
            H = constr.get_H_single(i)
            self.assertEqual(H.shape[0], net.num_vars)
            self.assertEqual(H.shape[1], net.num_vars)
            self.assertEqual(H.nnz, 1)
        H = constr.H_combined
        self.assertEqual(H.shape[0], net.num_vars)
        self.assertEqual(H.shape[1], net.num_vars)
        self.assertEqual(H.nnz, rowsJ)
        constr.eval(x0)
        self.assertEqual(constr.J_nnz,Jnnz)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.J_row,rowsJ)
        self.assertEqual(constr.A_row,0)
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        # After: shapes consistent, values finite
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(rowsJ,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(rowsJ,net.num_vars))
        self.assertEqual(J.nnz,Jnnz)
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        self.assertTrue(not np.any(np.isinf(f)))
        self.assertTrue(not np.any(np.isnan(f)))
        self.assertTrue(not np.any(np.isinf(J.data)))
        self.assertTrue(not np.any(np.isnan(J.data)))
        self.assertTrue(not np.any(np.isinf(A.data)))
        self.assertTrue(not np.any(np.isnan(A.data)))
        # f check: ZIP mismatch P - cp - ci*v - cg*v^2 and Q - cq - cj*v + cb*v^2
        J_row = 0
        for t in range(self.T):
            for bus in net.buses:
                for load in bus.loads:
                    Sp = (x0[load.index_P[t]] -
                          load.comp_cp[t] -
                          load.comp_ci[t]*x0[bus.index_v_mag[t]] -
                          load.comp_cg*(x0[bus.index_v_mag[t]])**2.)
                    Sq = (x0[load.index_Q[t]] -
                          load.comp_cq[t] -
                          load.comp_cj[t]*x0[bus.index_v_mag[t]] +
                          load.comp_cb*(x0[bus.index_v_mag[t]])**2.)
                    self.assertAlmostEqual(Sp,f[J_row])
                    self.assertAlmostEqual(Sq,f[J_row+1])
                    J_row += 2
        # Jacobian check (finite differences)
        pf.tests.utils.check_constraint_Jacobian(self,
                                                 constr,
                                                 x0,
                                                 np.zeros(0),
                                                 NUM_TRIALS,
                                                 TOL,
                                                 EPS,
                                                 h)
        # Single Hessian check
        pf.tests.utils.check_constraint_single_Hessian(self,
                                                       constr,
                                                       x0,
                                                       np.zeros(0),
                                                       NUM_TRIALS,
                                                       TOL,
                                                       EPS,
                                                       h)
        # Combined Hessian check
        pf.tests.utils.check_constraint_combined_Hessian(self,
                                                         constr,
                                                         x0,
                                                         np.zeros(0),
                                                         NUM_TRIALS,
                                                         TOL,
                                                         EPS,
                                                         h)
        # With outages: everything collapses to empty
        for load in net.loads:
            load.in_service = False
        constr.analyze()
        constr.eval(x0)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertTrue(np.all(constr.b == 0))
        self.assertEqual(constr.J.nnz, 0)
        self.assertEqual(constr.J.shape[0], 0)
        self.assertEqual(constr.f.size, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertEqual(constr.G.shape[0], 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.H_combined.nnz, 0)
        # NOTE(review): trailing 0 is a useless assertTrue msg argument.
        self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_CFUNC(self):
    """Test the 'constrained function' constraint wrapper.

    Wraps the 'generation cost' function phi(x) as a constraint of the
    form phi(x) - rhs (- y) = 0 for the operators '=', '>=' and '<=',
    where y is an extra (slack) variable bounded through G, l, u in the
    inequality cases. Checks the constraint structure (sizes, sparsity,
    bounds), the evaluated values, the derivatives via finite-difference
    utilities, and the structure when all generators are on outage.
    """
    h = 1e-9  # finite-difference step for the derivative checks
    # Multiperiod
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        net.set_flags('bus',
                      'variable',
                      'any',
                      'voltage magnitude')
        net.set_flags('generator',
                      'variable',
                      'any',
                      'active power')
        self.assertEqual(net.num_vars, (net.num_buses+net.num_generators)*net.num_periods)
        # perturbed point so derivatives are checked away from the flat start
        x = net.get_var_values() + 1e-2*np.random.rand(net.num_vars)
        func = pf.Function('generation cost', 1., net)
        constr = pf.Constraint('constrained function', net)
        rhs = 100.
        constr.set_parameter("rhs", rhs)
        constr.set_parameter("func", func)
        # Equality: phi(x) - rhs = 0, no extra (slack) variables
        constr.set_parameter("op", "=")
        constr.analyze()
        self.assertEqual(constr.num_extra_vars, 0)
        self.assertEqual(constr.G.nnz, 0)
        self.assertTupleEqual(constr.G.shape, (0, net.num_vars))
        self.assertEqual(constr.l.size, 0)
        self.assertEqual(constr.u.size, 0)
        self.assertEqual(constr.l_extra_vars.size, 0)
        self.assertEqual(constr.u_extra_vars.size, 0)
        self.assertEqual(constr.init_extra_vars.size, 0)
        self.assertEqual(constr.f.size, 1)
        self.assertEqual(constr.J.nnz, net.num_vars)
        self.assertTupleEqual(constr.J.shape, (1, net.num_vars))
        H = constr.get_H_single(0)
        self.assertEqual(H.nnz, func.Hphi.nnz)
        self.assertTupleEqual(H.shape, (net.num_vars, net.num_vars))
        self.assertEqual(func.phi, 0.)  # function value is zero before eval
        constr.eval(x)
        net.update_properties(x)
        self.assertNotEqual(func.phi, 0.)
        self.assertLess(np.abs(func.phi-np.sum(net.gen_P_cost)), 1e-12*(np.abs(func.phi)+1.))
        self.assertEqual(constr.f[0], func.phi - rhs - 0.)  # '- 0.' mirrors '- y[0]' below
        pf.tests.utils.check_constraint_Jacobian(self, constr, x, np.zeros(0), NUM_TRIALS, TOL, EPS, h, quiet=True)
        pf.tests.utils.check_constraint_single_Hessian(self, constr, x, np.zeros(0), NUM_TRIALS, TOL, EPS, h, quiet=True)
        pf.tests.utils.check_constraint_combined_Hessian(self, constr, x, np.zeros(0), NUM_TRIALS, TOL, EPS, h, quiet=True)
        # Inequality >= : phi(x) - rhs = y with bounds 0 <= y <= 1e8
        constr.set_parameter("op", ">=")
        constr.analyze()
        self.assertEqual(constr.num_extra_vars, 1)
        self.assertEqual(constr.G.nnz, 1)
        self.assertEqual(constr.G.row[0], 0)
        self.assertEqual(constr.G.col[0], net.num_vars)  # slack sits after the net vars
        self.assertEqual(constr.G.data[0], 1.)
        self.assertTupleEqual(constr.G.shape, (1, net.num_vars+1))
        self.assertEqual(constr.l.size, 1)
        self.assertEqual(constr.u.size, 1)
        self.assertEqual(constr.l_extra_vars.size, 1)
        self.assertEqual(constr.u_extra_vars.size, 1)
        self.assertEqual(constr.init_extra_vars.size, 1)
        self.assertEqual(constr.l[0],0)
        self.assertEqual(constr.l_extra_vars[0],0)
        self.assertEqual(constr.u[0],1e8)
        self.assertEqual(constr.u_extra_vars[0],1e8)
        self.assertEqual(constr.f.size, 1)
        self.assertEqual(constr.J.nnz, net.num_vars+1)
        self.assertTupleEqual(constr.J.shape, (1, net.num_vars+1))
        H = constr.get_H_single(0)
        self.assertEqual(H.nnz, func.Hphi.nnz)
        self.assertTupleEqual(H.shape, (net.num_vars+1, net.num_vars+1))
        self.assertEqual(func.phi, 0.)  # back to zero before this eval
        y = np.random.randn(1)
        constr.eval(x,y)
        net.update_properties(x)
        self.assertNotEqual(func.phi, 0.)
        self.assertLess(np.abs(func.phi-np.sum(net.gen_P_cost)), 1e-12*(np.abs(func.phi)+1.))
        self.assertEqual(constr.f[0], func.phi - rhs - y[0])
        pf.tests.utils.check_constraint_Jacobian(self, constr, x, y, NUM_TRIALS, TOL, EPS, h, quiet=True)
        pf.tests.utils.check_constraint_single_Hessian(self, constr, x, y, NUM_TRIALS, TOL, EPS, h, quiet=True)
        pf.tests.utils.check_constraint_combined_Hessian(self, constr, x, y, NUM_TRIALS, TOL, EPS, h, quiet=True)
        self.assertEqual(constr.G*np.hstack((x,y)),y[0])  # G picks out the slack
        # Inequality <= : phi(x) - rhs = y with bounds -1e8 <= y <= 0
        constr.set_parameter("op", "<=")
        constr.analyze()
        self.assertEqual(constr.num_extra_vars, 1)
        self.assertEqual(constr.G.nnz, 1)
        self.assertEqual(constr.G.row[0], 0)
        self.assertEqual(constr.G.col[0], net.num_vars)
        self.assertEqual(constr.G.data[0], 1.)
        self.assertTupleEqual(constr.G.shape, (1, net.num_vars+1))
        self.assertEqual(constr.l.size, 1)
        self.assertEqual(constr.u.size, 1)
        self.assertEqual(constr.l_extra_vars.size, 1)
        self.assertEqual(constr.u_extra_vars.size, 1)
        self.assertEqual(constr.init_extra_vars.size, 1)
        self.assertEqual(constr.l[0],-1e8)
        self.assertEqual(constr.l_extra_vars[0],-1e8)
        self.assertEqual(constr.u[0],0)
        self.assertEqual(constr.u_extra_vars[0],0)
        self.assertEqual(constr.f.size, 1)
        self.assertEqual(constr.J.nnz, net.num_vars+1)
        self.assertTupleEqual(constr.J.shape, (1, net.num_vars+1))
        H = constr.get_H_single(0)
        self.assertEqual(H.nnz, func.Hphi.nnz)
        self.assertTupleEqual(H.shape, (net.num_vars+1, net.num_vars+1))
        self.assertEqual(func.phi, 0.)
        y = np.random.randn(1)
        constr.eval(x,y)
        net.update_properties(x)
        self.assertNotEqual(func.phi, 0.)
        self.assertLess(np.abs(func.phi-np.sum(net.gen_P_cost)), 1e-12*(np.abs(func.phi)+1.))
        self.assertEqual(constr.f[0], func.phi - rhs - y[0])
        pf.tests.utils.check_constraint_Jacobian(self, constr, x, y, NUM_TRIALS, TOL, EPS, h, quiet=True)
        pf.tests.utils.check_constraint_single_Hessian(self, constr, x, y, NUM_TRIALS, TOL, EPS, h, quiet=True)
        pf.tests.utils.check_constraint_combined_Hessian(self, constr, x, y, NUM_TRIALS, TOL, EPS, h, quiet=True)
        self.assertEqual(constr.G*np.hstack((x,y)),y[0])
        # With outages: constraint structure survives all generators out
        for gen in net.generators:
            gen.in_service = False
        constr.analyze()
        constr.eval(x)
        self.assertEqual(constr.A.nnz, 0)
        self.assertEqual(constr.A.shape[0], 0)
        self.assertEqual(constr.b.size, 0)
        self.assertTrue(np.all(constr.b == 0))
        self.assertEqual(constr.J.nnz, net.num_vars+1)
        self.assertEqual(constr.J.shape[0], 1)
        self.assertEqual(constr.f.size, 1)
        self.assertEqual(constr.G.nnz, 1)
        self.assertEqual(constr.G.shape[0], 1)
        self.assertEqual(constr.u.size, 1)
        self.assertEqual(constr.l.size, 1)
        self.assertEqual(constr.H_combined.nnz, 0)
        # fix: a stray '0' was previously passed as assertTrue's msg argument
        self.assertTrue(np.all(constr.H_nnz == 0))
def test_constr_FIX(self):
    """Test the 'variable fixing' constraint (A*x = b).

    For each test network, flags a broad set of quantities as 'variable'
    and 'fixed', builds the 'variable fixing' constraint, and checks that
    A has exactly one unit entry per fixed variable and that b holds the
    current value of each fixed variable. Covers both the single-period
    and the multi-period case, and verifies that putting every component
    out of service does not change which variables are fixed.
    """
    # Single period
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case)
        self.assertEqual(net.num_periods,1)
        # add vargens
        net.add_var_generators_from_parameters(net.get_load_buses(),80.,50.,30.,5,0.05)
        # give each vargen distinct, index-derived P/Q so b can be
        # checked against known values below
        for vargen in net.var_generators:
            vargen.P = vargen.index*1.5
            vargen.Q = vargen.index*2.5
        self.assertGreater(net.num_var_generators,0)
        self.assertEqual(net.num_vars,0)
        self.assertEqual(net.num_fixed,0)
        # Vars
        net.set_flags('bus',
                      'variable',
                      'any',
                      ['voltage magnitude','voltage angle'])
        net.set_flags('generator',
                      'variable',
                      'slack',
                      'active power')
        net.set_flags('generator',
                      'variable',
                      'regulator',
                      'reactive power')
        net.set_flags('branch',
                      'variable',
                      'tap changer',
                      'tap ratio')
        net.set_flags('branch',
                      'variable',
                      'phase shifter',
                      'phase shift')
        net.set_flags('shunt',
                      'variable',
                      'switching - v',
                      'susceptance')
        net.set_flags('variable generator',
                      'variable',
                      'any',
                      ['active power','reactive power'])
        net.set_flags('battery',
                      'variable',
                      'any',
                      ['charging power','energy level'])
        net.set_flags('load',
                      'variable',
                      'any',
                      'active power')
        self.assertGreater(net.num_vars,0)
        self.assertEqual(net.num_fixed,0)
        # one variable per flagged quantity per component
        self.assertEqual(net.num_vars,
                         2*net.num_buses +
                         net.get_num_slack_gens() +
                         net.get_num_reg_gens() +
                         net.get_num_tap_changers() +
                         net.get_num_phase_shifters() +
                         net.get_num_switched_v_shunts() +
                         net.num_var_generators*2+
                         3*net.num_batteries+
                         net.num_loads)
        # Fixed
        net.set_flags('bus',
                      'fixed',
                      'slack',
                      ['voltage magnitude','voltage angle'])
        net.set_flags('bus',
                      'fixed',
                      'regulated by generator',
                      'voltage magnitude')
        net.set_flags('generator',
                      'fixed',
                      'regulator',
                      'reactive power')
        net.set_flags('branch',
                      'fixed',
                      'tap changer',
                      'tap ratio')
        net.set_flags('branch',
                      'fixed',
                      'phase shifter',
                      'phase shift')
        net.set_flags('shunt',
                      'fixed',
                      'switching - v',
                      'susceptance')
        net.set_flags('variable generator',
                      'fixed',
                      'any',
                      ['active power','reactive power'])
        net.set_flags('battery',
                      'fixed',
                      'any',
                      ['charging power','energy level'])
        net.set_flags('load',
                      'fixed',
                      'any',
                      'active power')
        # outages have no effect
        for gen in net.generators:
            gen.in_service = False
        for branch in net.branches:
            branch.in_service = False
        for bus in net.buses:
            bus.in_service = False
        for load in net.loads:
            load.in_service = False
        for bus in net.dc_buses:
            bus.in_service = False
        for branch in net.dc_branches:
            branch.in_service = False
        for conv in net.csc_converters:
            conv.in_service = False
        for conv in net.vsc_converters:
            conv.in_service = False
        for facts in net.facts:
            facts.in_service = False
        for bat in net.batteries:
            bat.in_service = False
        for gen in net.var_generators:
            gen.in_service = False
        for shunt in net.shunts:
            shunt.in_service = False
        self.assertGreater(net.num_fixed,0)
        # slack buses got both v_mag and v_ang fixed; other gen-regulated
        # buses only v_mag, hence the slack-count subtraction
        self.assertEqual(net.num_fixed,
                         2*(net.get_num_slack_buses()) +
                         (net.get_num_buses_reg_by_gen()-net.get_num_slack_buses()) +
                         net.get_num_reg_gens() +
                         net.get_num_tap_changers() +
                         net.get_num_phase_shifters() +
                         net.get_num_switched_v_shunts() +
                         net.num_var_generators*2+
                         3*net.num_batteries+
                         net.num_loads)
        x0 = net.get_var_values()
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        constr = pf.Constraint('variable fixing',net)
        self.assertEqual(constr.name,'variable fixing')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        G = constr.G
        l = constr.l
        u = constr.u
        # Before analyze(): all matrices/vectors are empty placeholders
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertTrue(type(G) is coo_matrix)
        self.assertTupleEqual(G.shape,(0,0))
        self.assertEqual(G.nnz,0)
        self.assertTrue(type(l) is np.ndarray)
        self.assertTupleEqual(l.shape,(0,))
        self.assertTrue(type(u) is np.ndarray)
        self.assertTupleEqual(u.shape,(0,))
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.G_nnz,0)
        A_nnz = net.num_fixed  # expected: one A entry per fixed variable
        constr.analyze()
        self.assertEqual(A_nnz,constr.A_nnz)
        constr.eval(x0)
        self.assertEqual(0,constr.A_nnz)  # counter resets after eval
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        G = constr.G
        l = constr.l
        u = constr.u
        # After analyze(): A/b sized by num_fixed, J/G remain empty
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(net.num_fixed,))
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(net.num_fixed,net.num_vars))
        self.assertEqual(A.nnz,net.num_fixed)
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,net.num_vars))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(G) is coo_matrix)
        self.assertTupleEqual(G.shape,(0,net.num_vars))
        self.assertEqual(G.nnz,0)
        self.assertTrue(type(l) is np.ndarray)
        self.assertTupleEqual(l.shape,(0,))
        self.assertTrue(type(u) is np.ndarray)
        self.assertTupleEqual(u.shape,(0,))
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        # Vargen: each P/Q column has one A entry and b equals the value
        # assigned above (index*1.5 / index*2.5)
        for vargen in net.var_generators:
            ar = np.where(A.col == vargen.index_P)[0]
            self.assertEqual(ar.size,1)
            self.assertEqual(A.col[ar[0]],vargen.index_P)
            self.assertEqual(b[A.row[ar[0]]],vargen.P)
            self.assertEqual(b[A.row[ar[0]]],vargen.index*1.5)
        for vargen in net.var_generators:
            ar = np.where(A.col == vargen.index_Q)[0]
            self.assertEqual(ar.size,1)
            self.assertEqual(A.col[ar[0]],vargen.index_Q)
            self.assertEqual(b[A.row[ar[0]]],vargen.Q)
            self.assertEqual(b[A.row[ar[0]]],vargen.index*2.5)
        # Batteries: charging power fixed to max(P,0), discharging to
        # max(-P,0), energy to E
        for bat in net.batteries:
            ar = np.where(A.col == bat.index_Pc)[0]
            self.assertEqual(ar.size,1)
            self.assertEqual(A.col[ar[0]],bat.index_Pc)
            self.assertEqual(b[A.row[ar[0]]],max([bat.P,0]))
        for bat in net.batteries:
            ar = np.where(A.col == bat.index_Pd)[0]
            self.assertEqual(ar.size,1)
            self.assertEqual(A.col[ar[0]],bat.index_Pd)
            self.assertEqual(b[A.row[ar[0]]],max([-bat.P,0]))
        for bat in net.batteries:
            ar = np.where(A.col == bat.index_E)[0]
            self.assertEqual(ar.size,1)
            self.assertEqual(A.col[ar[0]],bat.index_E)
            self.assertEqual(b[A.row[ar[0]]],bat.E)
        # Load
        for load in net.loads:
            self.assertTrue(load.has_flags('variable','active power'))
            self.assertTrue(load.has_flags('fixed','active power'))
            ar = np.where(A.col == load.index_P)[0]
            self.assertEqual(ar.size,1)
            self.assertEqual(A.col[ar[0]],load.index_P)
            self.assertEqual(b[A.row[ar[0]]],load.P)
        # Projections: no extra vars, so the var projection is identity
        # and the extra-var projection is empty
        P1 = constr.get_var_projection()
        P2 = constr.get_extra_var_projection()
        self.assertTrue(isinstance(P1,coo_matrix))
        self.assertTrue(isinstance(P2,coo_matrix))
        self.assertEqual(P1.shape[0],net.num_vars)
        self.assertEqual(P2.shape[0],0)
        self.assertEqual(P1.shape[1],net.num_vars)
        self.assertEqual(P2.shape[1],net.num_vars)
        self.assertEqual(P1.nnz,net.num_vars)
        self.assertEqual(P2.nnz,0)
        self.assertLess(np.linalg.norm(x0-P1*x0),1e-12)
    # Multiperiods
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case,self.T)
        self.assertEqual(net.num_periods,self.T)
        # add vargens
        net.add_var_generators_from_parameters(net.get_load_buses(),80.,50.,30.,5,0.05)
        # random per-period P/Q profiles for each vargen
        for vargen in net.var_generators:
            vargen.P = np.random.rand(self.T)*10
            vargen.Q = np.random.rand(self.T)*10
            self.assertEqual(vargen.num_periods,self.T)
        self.assertGreater(net.num_var_generators,0)
        self.assertEqual(net.num_vars,0)
        self.assertEqual(net.num_fixed,0)
        # Vars (flagged 'variable' and 'fixed' in one call this time)
        net.set_flags('bus',
                      ['variable','fixed'],
                      'any',
                      ['voltage magnitude','voltage angle'])
        net.set_flags('generator',
                      ['variable','fixed'],
                      'slack',
                      'active power')
        net.set_flags('generator',
                      ['variable','fixed'],
                      'regulator',
                      'reactive power')
        net.set_flags('branch',
                      ['variable','fixed'],
                      'tap changer',
                      'tap ratio')
        net.set_flags('branch',
                      ['variable','fixed'],
                      'phase shifter',
                      'phase shift')
        net.set_flags('shunt',
                      ['variable','fixed'],
                      'switching - v',
                      'susceptance')
        net.set_flags('variable generator',
                      ['variable','fixed'],
                      'any',
                      ['active power','reactive power'])
        net.set_flags('battery',
                      ['variable','fixed'],
                      'any',
                      ['charging power','energy level'])
        net.set_flags('load',
                      ['variable','fixed'],
                      'any',
                      'active power')
        # outages have no effect
        for gen in net.generators:
            gen.in_service = False
        for branch in net.branches:
            branch.in_service = False
        for bus in net.buses:
            bus.in_service = False
        for load in net.loads:
            load.in_service = False
        for bus in net.dc_buses:
            bus.in_service = False
        for branch in net.dc_branches:
            branch.in_service = False
        for conv in net.csc_converters:
            conv.in_service = False
        for conv in net.vsc_converters:
            conv.in_service = False
        for facts in net.facts:
            facts.in_service = False
        for bat in net.batteries:
            bat.in_service = False
        for gen in net.var_generators:
            gen.in_service = False
        for shunt in net.shunts:
            shunt.in_service = False
        self.assertGreater(net.num_vars,0)
        self.assertEqual(net.num_fixed,net.num_vars)
        # per-period variable count scaled by the number of periods
        self.assertEqual(net.num_vars,
                         (2*net.num_buses +
                          net.get_num_slack_gens() +
                          net.get_num_reg_gens() +
                          net.get_num_tap_changers() +
                          net.get_num_phase_shifters() +
                          net.get_num_switched_v_shunts() +
                          net.num_var_generators*2+
                          3*net.num_batteries+
                          net.num_loads)*self.T)
        x0 = net.get_var_values()
        self.assertTrue(type(x0) is np.ndarray)
        self.assertTupleEqual(x0.shape,(net.num_vars,))
        constr = pf.Constraint('variable fixing',net)
        self.assertEqual(constr.name,'variable fixing')
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        G = constr.G
        l = constr.l
        u = constr.u
        # Before analyze(): all matrices/vectors are empty placeholders
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(0,))
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,0))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(0,0))
        self.assertEqual(A.nnz,0)
        self.assertTrue(type(G) is coo_matrix)
        self.assertTupleEqual(G.shape,(0,0))
        self.assertEqual(G.nnz,0)
        self.assertTrue(type(l) is np.ndarray)
        self.assertTupleEqual(l.shape,(0,))
        self.assertTrue(type(u) is np.ndarray)
        self.assertTupleEqual(u.shape,(0,))
        self.assertEqual(constr.J_nnz,0)
        self.assertEqual(constr.A_nnz,0)
        self.assertEqual(constr.G_nnz,0)
        A_nnz = net.num_fixed  # expected: one A entry per fixed variable
        constr.analyze()
        self.assertEqual(A_nnz,constr.A_nnz)
        constr.eval(x0)
        self.assertEqual(0,constr.A_nnz)  # counter resets after eval
        f = constr.f
        J = constr.J
        A = constr.A
        b = constr.b
        G = constr.G
        l = constr.l
        u = constr.u
        # After analyze(): A/b sized by num_fixed, J/G remain empty
        self.assertTrue(type(b) is np.ndarray)
        self.assertTupleEqual(b.shape,(net.num_fixed,))
        self.assertTrue(type(f) is np.ndarray)
        self.assertTupleEqual(f.shape,(0,))
        self.assertTrue(type(A) is coo_matrix)
        self.assertTupleEqual(A.shape,(net.num_fixed,net.num_vars))
        self.assertEqual(A.nnz,net.num_fixed)
        self.assertTrue(type(J) is coo_matrix)
        self.assertTupleEqual(J.shape,(0,net.num_vars))
        self.assertEqual(J.nnz,0)
        self.assertTrue(type(G) is coo_matrix)
        self.assertTupleEqual(G.shape,(0,net.num_vars))
        self.assertEqual(G.nnz,0)
        self.assertTrue(type(l) is np.ndarray)
        self.assertTupleEqual(l.shape,(0,))
        self.assertTrue(type(u) is np.ndarray)
        self.assertTupleEqual(u.shape,(0,))
        self.assertTrue(not np.any(np.isinf(b)))
        self.assertTrue(not np.any(np.isnan(b)))
        # Time loop: per period, every fixed quantity has exactly one A
        # entry in its column and b carries the current component value
        for t in range(self.T):
            # bus
            for bus in net.buses:
                ar = np.where(A.col == bus.index_v_mag[t])[0]
                self.assertEqual(ar.size,1)
                self.assertEqual(A.col[ar[0]],bus.index_v_mag[t])
                self.assertEqual(b[A.row[ar[0]]],bus.v_mag[t])
                ar = np.where(A.col == bus.index_v_ang[t])[0]
                self.assertEqual(ar.size,1)
                self.assertEqual(A.col[ar[0]],bus.index_v_ang[t])
                self.assertEqual(b[A.row[ar[0]]],bus.v_ang[t])
            # Gens
            for gen in net.generators:
                if gen.is_slack():
                    ar = np.where(A.col == gen.index_P[t])[0]
                    self.assertEqual(ar.size,1)
                    self.assertEqual(A.col[ar[0]],gen.index_P[t])
                    self.assertEqual(b[A.row[ar[0]]],gen.P[t])
                if gen.is_regulator():
                    ar = np.where(A.col == gen.index_Q[t])[0]
                    self.assertEqual(ar.size,1)
                    self.assertEqual(A.col[ar[0]],gen.index_Q[t])
                    self.assertEqual(A.data[ar[0]],1.)
                    self.assertEqual(b[A.row[ar[0]]],gen.Q[t])
            # Shunts
            for shunt in net.shunts:
                if shunt.is_switched_v():
                    ar = np.where(A.col == shunt.index_b[t])[0]
                    self.assertEqual(ar.size,1)
                    self.assertEqual(A.col[ar[0]],shunt.index_b[t])
                    self.assertEqual(b[A.row[ar[0]]],shunt.b[t])
            # Branch
            for branch in net.branches:
                if branch.is_tap_changer():
                    ar = np.where(A.col == branch.index_ratio[t])[0]
                    self.assertEqual(ar.size,1)
                    self.assertEqual(A.col[ar[0]],branch.index_ratio[t])
                    self.assertEqual(b[A.row[ar[0]]],branch.ratio[t])
                if branch.is_phase_shifter():
                    ar = np.where(A.col == branch.index_phase[t])[0]
                    self.assertEqual(ar.size,1)
                    self.assertEqual(A.col[ar[0]],branch.index_phase[t])
                    self.assertEqual(b[A.row[ar[0]]],branch.phase[t])
            # Vargen
            for vargen in net.var_generators:
                ar = np.where(A.col == vargen.index_P[t])[0]
                self.assertEqual(ar.size,1)
                self.assertEqual(A.col[ar[0]],vargen.index_P[t])
                self.assertEqual(b[A.row[ar[0]]],vargen.P[t])
                ar = np.where(A.col == vargen.index_Q[t])[0]
                self.assertEqual(ar.size,1)
                self.assertEqual(A.col[ar[0]],vargen.index_Q[t])
                self.assertEqual(b[A.row[ar[0]]],vargen.Q[t])
            # Batteries
            for bat in net.batteries:
                ar = np.where(A.col == bat.index_Pc[t])[0]
                self.assertEqual(ar.size,1)
                self.assertEqual(A.col[ar[0]],bat.index_Pc[t])
                self.assertEqual(b[A.row[ar[0]]],max([bat.P[t],0]))
                ar = np.where(A.col == bat.index_Pd[t])[0]
                self.assertEqual(ar.size,1)
                self.assertEqual(A.col[ar[0]],bat.index_Pd[t])
                self.assertEqual(b[A.row[ar[0]]],max([-bat.P[t],0]))
                ar = np.where(A.col == bat.index_E[t])[0]
                self.assertEqual(ar.size,1)
                self.assertEqual(A.col[ar[0]],bat.index_E[t])
                self.assertEqual(b[A.row[ar[0]]],bat.E[t])
            # Load
            for load in net.loads:
                self.assertTrue(load.has_flags('variable','active power'))
                self.assertTrue(load.has_flags('fixed','active power'))
                ar = np.where(A.col == load.index_P[t])[0]
                self.assertEqual(ar.size,1)
                self.assertEqual(A.col[ar[0]],load.index_P[t])
                self.assertEqual(b[A.row[ar[0]]],load.P[t])
def test_constr_FIX_with_outages(self):
    """Check the 'variable fixing' constraint when components are on outage.

    Verifies that variables of an out-of-service generator and branch are
    still fixed to their current values, and that an isolated bus
    (created by taking its single branch out of service) also gets its
    voltage variables fixed.
    """
    # Multiperiod networks
    for case in test_cases.CASES:
        net = pf.Parser(case).parse(case, self.T)
        self.assertEqual(net.num_periods, self.T)
        net.make_all_in_service()
        gen = net.get_generator(0)
        branch = net.get_branch(0)
        gen.in_service = False
        branch.in_service = False
        self.assertFalse(gen.is_in_service())
        self.assertFalse(branch.is_in_service())
        # random per-period values so b can be checked against them
        gen.P = np.random.rand(self.T)
        gen.Q = np.random.rand(self.T)
        branch.ratio = np.random.randn(self.T)
        branch.phase = np.random.randn(self.T)
        net.set_flags('generator',
                      ['variable','fixed'],
                      'any',
                      ['active power', 'reactive power'])
        net.set_flags('branch',
                      ['variable','fixed'],
                      'any',
                      ['tap ratio', 'phase shift'])
        # explicitly flag the out-of-service components as well
        net.set_flags_of_component(gen, ['variable', 'fixed'], ['active power', 'reactive power'])
        net.set_flags_of_component(branch, ['variable', 'fixed'], ['tap ratio', 'phase shift'])
        self.assertEqual(net.num_vars,
                         self.T*(2*net.num_generators + 2*net.num_branches))
        self.assertEqual(net.num_vars, net.num_fixed)
        constr = pf.Constraint('variable fixing', net)
        constr.analyze()
        A = constr.A
        b = constr.b
        for t in range(self.T):
            # each outaged-component variable: one unit A entry, b = value
            for col, value in ((gen.index_P[t], gen.P[t]),
                               (gen.index_Q[t], gen.Q[t]),
                               (branch.index_ratio[t], branch.ratio[t]),
                               (branch.index_phase[t], branch.phase[t])):
                matches = np.where(A.col == col)[0]
                self.assertEqual(matches.size, 1)
                pos = matches[0]
                self.assertEqual(A.data[pos], 1.)
                self.assertEqual(b[A.row[pos]], value)
        # Disconnect: isolate the first degree-one bus by outaging its
        # only branch, then check its voltage variables get fixed
        net.make_all_in_service()
        net.clear_flags()
        self.assertEqual(net.num_vars, 0)
        for bus in net.buses:
            if bus.degree != 1:
                continue
            self.assertEqual(len(bus.branches), 1)
            bus.branches[0].in_service = False
            self.assertFalse(bus.branches[0].is_in_service())
            net.set_flags_of_component(bus,
                                       ['variable', 'fixed'],
                                       ['voltage magnitude', 'voltage angle'])
            self.assertEqual(net.num_vars, 2*self.T)
            self.assertEqual(net.num_vars, net.num_fixed)
            self.assertTrue(bus.has_flags('variable', ['voltage magnitude',
                                                       'voltage angle']))
            self.assertTrue(bus.has_flags('fixed', ['voltage magnitude',
                                                    'voltage angle']))
            constr = pf.Constraint('variable fixing', net)
            constr.analyze()
            A = constr.A
            b = constr.b
            self.assertEqual(A.shape[0], 2*self.T)
            for t in range(self.T):
                for col, value in ((bus.index_v_mag[t], bus.v_mag[t]),
                                   (bus.index_v_ang[t], bus.v_ang[t])):
                    matches = np.where(A.col == col)[0]
                    self.assertEqual(matches.size, 1)
                    pos = matches[0]
                    self.assertEqual(A.data[pos], 1.)
                    self.assertEqual(b[A.row[pos]], value)
            break  # only the first degree-one bus is exercised
def test_constr_BOUND(self):
# Single period
for case in test_cases.CASES:
net = pf.Parser(case).parse(case)
self.assertEqual(net.num_periods,1)
# add vargens
net.add_var_generators_from_parameters(net.get_load_buses(),80.,50.,30.,5,0.05)
for vargen in net.var_generators:
vargen.P = vargen.index*1.5
vargen.Q = vargen.index*2.5
vargen.P_ava = vargen.index*3.
vargen.P_max = 100.
vargen.P_min = 0.
vargen.Q_max = 50.
vargen.Q_min = -50.
self.assertGreater(net.num_var_generators,0)
self.assertEqual(net.num_bounded,0)
self.assertEqual(net.num_vars,0)
self.assertEqual(net.num_fixed,0)
# loads
for load in net.loads:
load.P_min = -2.4*(load.index+1)
load.P_max = 3.3*(load.index+1)
load.Q_min = 1.2*(load.index+2.)
load.Q_max = 5.8*(load.index+3.)
# Vars
net.set_flags('bus',
'variable',
'regulated by generator',
['voltage magnitude','voltage angle'])
net.set_flags('generator',
'variable',
'regulator',
['active power','reactive power'])
net.set_flags('load',
'variable',
'adjustable active power',
['active power','reactive power'])
net.set_flags('branch',
'variable',
'tap changer',
'tap ratio')
net.set_flags('branch',
'variable',
'phase shifter',
'phase shift')
net.set_flags('shunt',
'variable',
'switching - v',
'susceptance')
net.set_flags('variable generator',
'variable',
'any',
['active power','reactive power'])
net.set_flags('battery',
'variable',
'any',
['charging power','energy level'])
net.set_flags('vsc converter',
'variable',
'any',
['dc power', 'active power', 'reactive power'])
net.set_flags('facts',
'variable',
'any',
['series voltage magnitude','series voltage angle',
'active power', 'reactive power'])
net.set_flags('dc bus',
'variable',
'any',
'voltage')
net.set_flags('csc converter',
'variable',
'any',
'all')
num_vars_saved = net.num_vars
self.assertGreater(net.num_vars,0)
self.assertEqual(net.num_fixed,0)
self.assertEqual(net.num_vars,
(net.get_num_buses_reg_by_gen()*2 +
net.get_num_reg_gens()*2 +
2*net.get_num_P_adjust_loads() +
net.get_num_tap_changers() +
net.get_num_phase_shifters()*1 +
net.get_num_switched_v_shunts() +
net.num_var_generators*2+
3*net.num_batteries+
4*net.num_vsc_converters+
9*net.num_facts +
net.num_dc_buses +
6*net.num_csc_converters))
x0 = net.get_var_values()
self.assertTrue(type(x0) is np.ndarray)
self.assertTupleEqual(x0.shape,(net.num_vars,))
constr = pf.Constraint('variable bounds',net)
self.assertEqual(constr.name,'variable bounds')
f = constr.f
J = constr.J
A = constr.A
b = constr.b
G = constr.G
l = constr.l
u = constr.u
# Before
self.assertTrue(type(f) is np.ndarray)
self.assertTupleEqual(f.shape,(0,))
self.assertTrue(type(b) is np.ndarray)
self.assertTupleEqual(b.shape,(0,))
self.assertTrue(type(J) is coo_matrix)
self.assertTupleEqual(J.shape,(0,0))
self.assertEqual(J.nnz,0)
self.assertTrue(type(A) is coo_matrix)
self.assertTupleEqual(A.shape,(0,0))
self.assertEqual(A.nnz,0)
self.assertTrue(type(G) is coo_matrix)
self.assertTupleEqual(G.shape,(0,0))
self.assertEqual(G.nnz,0)
self.assertTrue(type(l) is np.ndarray)
self.assertTupleEqual(l.shape,(0,))
self.assertTrue(type(u) is np.ndarray)
self.assertTupleEqual(u.shape,(0,))
self.assertEqual(constr.J_nnz,0)
self.assertEqual(constr.A_nnz,0)
self.assertEqual(constr.G_nnz,0)
constr.analyze()
self.assertEqual(constr.J_nnz,0)
self.assertEqual(constr.A_nnz,0)
self.assertEqual(constr.G_nnz,0)
constr.eval(x0)
self.assertEqual(constr.J_nnz,0)
self.assertEqual(constr.A_nnz,0)
self.assertEqual(constr.G_nnz,0)
f = constr.f
J = constr.J
A = constr.A
b = constr.b
G = constr.G
l = constr.l
u = constr.u
# After
self.assertTrue(type(b) is np.ndarray)
self.assertTupleEqual(b.shape,(0,))
self.assertTrue(type(f) is np.ndarray)
self.assertTupleEqual(f.shape,(0,))
self.assertTrue(type(A) is coo_matrix)
self.assertTupleEqual(A.shape,(0,net.num_vars))
self.assertEqual(A.nnz,0)
self.assertTrue(type(J) is coo_matrix)
self.assertTupleEqual(J.shape,(0,net.num_vars))
self.assertEqual(J.nnz,0)
self.assertTrue(type(G) is coo_matrix)
self.assertTupleEqual(G.shape,(net.num_vars,net.num_vars))
self.assertEqual(G.nnz,net.num_vars)
self.assertTrue(np.all(G.row == np.array(range(net.num_vars))))
self.assertTrue(np.all(G.col == np.array(range(net.num_vars))))
self.assertTrue(np.all(G.data == np.ones(net.num_vars)))
self.assertTrue(type(l) is np.ndarray)
self.assertTupleEqual(l.shape,(net.num_vars,))
self.assertTrue(type(u) is np.ndarray)
self.assertTupleEqual(u.shape,(net.num_vars,))
E = G-eye(net.num_vars)
self.assertGreater(G.nnz,0)
self.assertGreater(norm(G.data,np.inf),0.5)
self.assertEqual(E.nnz,0)
self.assertTrue(not np.any(np.isinf(l)))
self.assertTrue(not np.any(np.isnan(l)))
self.assertTrue(not np.any(np.isinf(u)))
self.assertTrue(not np.any(np.isnan(u)))
self.assertTrue(not np.any(np.isinf(b)))
self.assertTrue(not np.any(np.isnan(b)))
# Bounds
for bus in net.buses:
if bus.is_regulated_by_gen():
self.assertTrue(bus.has_flags('variable',
['voltage magnitude',
'voltage angle']))
self.assertEqual(u[bus.index_v_mag],pf.BUS_INF_V_MAG)
self.assertEqual(u[bus.index_v_ang],pf.BUS_INF_V_ANG)
self.assertEqual(l[bus.index_v_mag],0.)
self.assertEqual(l[bus.index_v_ang],-pf.BUS_INF_V_ANG)
else:
self.assertFalse(bus.has_flags('variable',
['voltage magnitude',
'voltage angle']))
for branch in net.branches:
if branch.is_tap_changer():
self.assertTrue(branch.has_flags('variable','tap ratio'))
self.assertEqual(u[branch.index_ratio],pf.BRANCH_INF_RATIO)
self.assertEqual(l[branch.index_ratio],0.)
else:
self.assertFalse(branch.has_flags('variable','tap ratio'))
if branch.is_phase_shifter():
self.assertTrue(branch.has_flags('variable','phase shift'))
self.assertLess(np.abs(u[branch.index_phase]-np.pi*2.),1e-10)
self.assertLess(np.abs(l[branch.index_phase]+np.pi*2.),1e-10)
else:
self.assertFalse(branch.has_flags('variable','phase shift'))
for gen in net.generators:
if gen.is_regulator():
self.assertTrue(gen.has_flags('variable',['active power','reactive power']))
self.assertEqual(u[gen.index_P],pf.GEN_INF_P)
self.assertEqual(u[gen.index_Q],pf.GEN_INF_Q)
self.assertEqual(l[gen.index_P],-pf.GEN_INF_P)
self.assertEqual(l[gen.index_Q],-pf.GEN_INF_Q)
else:
self.assertFalse(gen.has_flags('variable',
['active power','reactive power']))
for load in net.loads:
self.assertTrue(load.has_flags('variable','active power'))
self.assertTrue(load.has_flags('variable','reactive power'))
self.assertTrue(load.has_flags('variable',['active power','reactive power']))
self.assertEqual(u[load.index_P],pf.LOAD_INF_P)
self.assertEqual(l[load.index_P],-pf.LOAD_INF_P)
self.assertEqual(u[load.index_Q],pf.LOAD_INF_Q)
self.assertEqual(l[load.index_Q],-pf.LOAD_INF_Q)
for vargen in net.var_generators:
self.assertTrue(vargen.has_flags('variable',
['active power','reactive power']))
self.assertEqual(u[vargen.index_P],pf.VARGEN_INF_P)
self.assertEqual(u[vargen.index_Q],pf.VARGEN_INF_Q)
self.assertEqual(l[vargen.index_P],-pf.VARGEN_INF_P)
self.assertEqual(l[vargen.index_Q],-pf.VARGEN_INF_Q)
for shunt in net.shunts:
if shunt.is_switched_v():
self.assertTrue(shunt.has_flags('variable','susceptance'))
self.assertEqual(u[shunt.index_b],pf.SHUNT_INF_SUSC)
self.assertEqual(l[shunt.index_b],-pf.SHUNT_INF_SUSC)
else:
self.assertFalse(shunt.has_flags('variable','susceptance'))
for bat in net.batteries:
self.assertTrue(bat.has_flags('variable','charging power'))
self.assertTrue(bat.has_flags('variable','energy level'))
self.assertEqual(u[bat.index_Pc],pf.BAT_INF_P)
self.assertEqual(l[bat.index_Pc],0.)
self.assertEqual(u[bat.index_Pd],pf.BAT_INF_P)
self.assertEqual(l[bat.index_Pd],0.)
self.assertEqual(u[bat.index_E],pf.BAT_INF_E)
self.assertEqual(l[bat.index_E],0.)
for vsc_conv in net.vsc_converters:
self.assertTrue(vsc_conv.has_flags('variable','active power'))
self.assertTrue(vsc_conv.has_flags('variable','reactive power'))
self.assertTrue(vsc_conv.has_flags('variable','dc power'))
self.assertEqual(u[vsc_conv.index_P],pf.CONVVSC_INF_P)
self.assertEqual(l[vsc_conv.index_P],-pf.CONVVSC_INF_P)
self.assertEqual(u[vsc_conv.index_Q],pf.CONVVSC_INF_Q)
self.assertEqual(l[vsc_conv.index_Q],-pf.CONVVSC_INF_Q)
self.assertEqual(u[vsc_conv.index_P_dc],pf.CONVVSC_INF_PDC)
self.assertEqual(l[vsc_conv.index_P_dc],-pf.CONVVSC_INF_PDC)
self.assertEqual(u[vsc_conv.index_i_dc],pf.CONVVSC_INF_PDC)
self.assertEqual(l[vsc_conv.index_i_dc],-pf.CONVVSC_INF_PDC)
for facts in net.facts:
self.assertTrue(facts.has_flags('variable','series voltage magnitude'))
self.assertTrue(facts.has_flags('variable','series voltage angle'))
self.assertTrue(facts.has_flags('variable','active power'))
self.assertTrue(facts.has_flags('variable','reactive power'))
self.assertEqual(u[facts.index_v_mag_s],pf.FACTS_INF_VMAG_S)
self.assertEqual(l[facts.index_v_mag_s],0.)
self.assertEqual(u[facts.index_v_ang_s],pf.FACTS_INF_VANG_S)
self.assertEqual(l[facts.index_v_ang_s],-pf.FACTS_INF_VANG_S)
self.assertEqual(u[facts.index_P_k],pf.FACTS_INF_P)
self.assertEqual(l[facts.index_P_k],-pf.FACTS_INF_P)
self.assertEqual(u[facts.index_P_m],pf.FACTS_INF_P)
self.assertEqual(l[facts.index_P_m],-pf.FACTS_INF_P)
self.assertEqual(u[facts.index_P_dc],pf.FACTS_INF_P)
self.assertEqual(l[facts.index_P_dc],-pf.FACTS_INF_P)
self.assertEqual(u[facts.index_Q_k],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_k],-pf.FACTS_INF_Q)
self.assertEqual(u[facts.index_Q_m],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_m],-pf.FACTS_INF_Q)
self.assertEqual(u[facts.index_Q_s],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_s],-pf.FACTS_INF_Q)
self.assertEqual(u[facts.index_Q_sh],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_sh],-pf.FACTS_INF_Q)
for bus in net.dc_buses:
self.assertEqual(u[bus.index_v], pf.BUSDC_INF_V)
self.assertEqual(l[bus.index_v], -pf.BUSDC_INF_V)
for csc in net.csc_converters:
self.assertEqual(u[csc.index_P], pf.CONVCSC_INF_P)
self.assertEqual(l[csc.index_P], -pf.CONVCSC_INF_P)
self.assertEqual(u[csc.index_Q], pf.CONVCSC_INF_Q)
self.assertEqual(l[csc.index_Q], -pf.CONVCSC_INF_Q)
self.assertEqual(u[csc.index_P_dc], pf.CONVCSC_INF_PDC)
self.assertEqual(l[csc.index_P_dc], -pf.CONVCSC_INF_PDC)
self.assertEqual(u[csc.index_i_dc], pf.CONVCSC_INF_PDC)
self.assertEqual(l[csc.index_i_dc], -pf.CONVCSC_INF_PDC)
self.assertEqual(u[csc.index_angle], pf.CONVCSC_INF_ANGLE)
self.assertEqual(l[csc.index_angle], -pf.CONVCSC_INF_ANGLE)
self.assertEqual(u[csc.index_ratio], pf.CONVCSC_INF_RATIO)
self.assertEqual(l[csc.index_ratio], -pf.CONVCSC_INF_RATIO)
# Add bounded flags
net.set_flags('bus',
'bounded',
'regulated by generator',
['voltage magnitude','voltage angle'])
net.set_flags('generator',
'bounded',
'regulator',
['active power','reactive power'])
net.set_flags('load',
'bounded',
'adjustable active power',
['active power','reactive power'])
net.set_flags('branch',
'bounded',
'tap changer',
'tap ratio')
net.set_flags('branch',
'bounded',
'phase shifter',
'phase shift')
net.set_flags('shunt',
'bounded',
'switching - v',
'susceptance')
net.set_flags('variable generator',
'bounded',
'any',
['active power','reactive power'])
net.set_flags('battery',
'bounded',
'any',
['charging power','energy level'])
net.set_flags('vsc converter',
'bounded',
'any',
['dc power', 'active power','reactive power'])
net.set_flags('facts',
'bounded',
'any',
['series voltage magnitude','series voltage angle',
'active power','reactive power'])
net.set_flags('dc bus',
'bounded',
'any',
'voltage')
net.set_flags('csc converter',
'bounded',
'any',
'all')
# outages have no effect
for gen in net.generators:
gen.in_service = False
for branch in net.branches:
branch.in_service = False
for bus in net.buses:
bus.in_service = False
for load in net.loads:
load.in_service = False
for bus in net.dc_buses:
bus.in_service = False
for branch in net.dc_branches:
branch.in_service = False
for conv in net.csc_converters:
conv.in_service = False
for conv in net.vsc_converters:
conv.in_service = False
for facts in net.facts:
facts.in_service = False
for bat in net.batteries:
bat.in_service = False
for gen in net.var_generators:
gen.in_service = False
for shunt in net.shunts:
shunt.in_service = False
self.assertEqual(net.num_vars,num_vars_saved)
self.assertEqual(net.num_fixed,0)
self.assertEqual(net.num_bounded,net.num_vars)
x0 = net.get_var_values()
self.assertTrue(type(x0) is np.ndarray)
self.assertTupleEqual(x0.shape,(net.num_vars,))
constr = pf.Constraint('variable bounds',net)
self.assertEqual(constr.name,'variable bounds')
constr.analyze()
G = constr.G
l = constr.l
u = constr.u
self.assertTrue(type(G) is coo_matrix)
self.assertTupleEqual(G.shape,(net.num_vars,net.num_vars))
self.assertEqual(G.nnz,net.num_vars)
self.assertTrue(np.all(G.row == np.array(range(net.num_vars))))
self.assertTrue(np.all(G.col == np.array(range(net.num_vars))))
self.assertTrue(np.all(G.data == np.ones(net.num_vars)))
self.assertTrue(type(l) is np.ndarray)
self.assertTupleEqual(l.shape,(net.num_vars,))
self.assertTrue(type(u) is np.ndarray)
self.assertTupleEqual(u.shape,(net.num_vars,))
E = G-eye(net.num_vars)
self.assertGreater(G.nnz,0)
self.assertGreater(norm(G.data,np.inf),0.5)
self.assertEqual(E.nnz,0)
# Bounds
for bus in net.buses:
if bus.is_regulated_by_gen():
self.assertTrue(bus.has_flags('bounded',
['voltage magnitude',
'voltage angle']))
self.assertTrue(bus.has_flags('variable',
['voltage magnitude',
'voltage angle']))
self.assertEqual(u[bus.index_v_mag],bus.v_max)
self.assertEqual(u[bus.index_v_ang],pf.BUS_INF_V_ANG)
self.assertEqual(l[bus.index_v_mag],bus.v_min)
self.assertEqual(l[bus.index_v_ang],-pf.BUS_INF_V_ANG)
else:
self.assertFalse(bus.has_flags('bounded',
['voltage magnitude',
'voltage angle']))
for branch in net.branches:
if branch.is_tap_changer():
self.assertTrue(branch.has_flags('bounded','tap ratio'))
self.assertEqual(u[branch.index_ratio],branch.ratio_max)
self.assertEqual(l[branch.index_ratio],branch.ratio_min)
else:
self.assertFalse(branch.has_flags('bounded','tap ratio'))
if branch.is_phase_shifter():
self.assertTrue(branch.has_flags('bounded','phase shift'))
self.assertEqual(u[branch.index_phase],branch.phase_max)
self.assertEqual(l[branch.index_phase],branch.phase_min)
else:
self.assertFalse(branch.has_flags('bounded','phase shift'))
for gen in net.generators:
if gen.is_regulator():
self.assertTrue(gen.has_flags('bounded',['active power','reactive power']))
self.assertEqual(u[gen.index_P],gen.P_max)
self.assertEqual(u[gen.index_Q],gen.Q_max)
self.assertEqual(l[gen.index_P],gen.P_min)
self.assertEqual(l[gen.index_Q],gen.Q_min)
else:
self.assertFalse(gen.has_flags('bounded',['active power','reactive power']))
for load in net.loads:
self.assertTrue(load.has_flags('bounded','active power'))
self.assertTrue(load.has_flags('bounded','reactive power'))
self.assertTrue(load.has_flags('bounded',['active power','reactive power']))
self.assertEqual(u[load.index_P],load.P_max)
self.assertEqual(l[load.index_P],load.P_min)
self.assertEqual(u[load.index_Q],load.Q_max)
self.assertEqual(l[load.index_Q],load.Q_min)
for vargen in net.var_generators:
self.assertTrue(vargen.has_flags('bounded',['active power','reactive power']))
self.assertEqual(u[vargen.index_P],vargen.P_ava)
self.assertEqual(u[vargen.index_Q],vargen.Q_max)
self.assertEqual(l[vargen.index_P],vargen.P_min)
self.assertEqual(l[vargen.index_Q],vargen.Q_min)
for shunt in net.shunts:
if shunt.is_switched_v():
self.assertTrue(shunt.has_flags('bounded','susceptance'))
self.assertEqual(u[shunt.index_b],shunt.b_max)
self.assertEqual(l[shunt.index_b],shunt.b_min)
else:
self.assertFalse(shunt.has_flags('bounded','susceptance'))
for bat in net.batteries:
self.assertTrue(bat.has_flags('bounded','charging power'))
self.assertTrue(bat.has_flags('bounded','energy level'))
self.assertEqual(u[bat.index_Pc],bat.P_max)
self.assertEqual(l[bat.index_Pc],0.)
self.assertEqual(u[bat.index_Pd],-bat.P_min)
self.assertEqual(l[bat.index_Pd],0.)
self.assertEqual(u[bat.index_E],bat.E_max)
self.assertEqual(l[bat.index_E],0.)
for vsc_conv in net.vsc_converters:
self.assertTrue(vsc_conv.has_flags('bounded','active power'))
self.assertTrue(vsc_conv.has_flags('bounded','reactive power'))
self.assertTrue(vsc_conv.has_flags('bounded','dc power'))
self.assertEqual(u[vsc_conv.index_P],vsc_conv.P_max)
self.assertEqual(l[vsc_conv.index_P],vsc_conv.P_min)
self.assertEqual(u[vsc_conv.index_Q],vsc_conv.Q_max)
self.assertEqual(l[vsc_conv.index_Q],vsc_conv.Q_min)
self.assertEqual(u[vsc_conv.index_P_dc],pf.CONVVSC_INF_PDC)
self.assertEqual(l[vsc_conv.index_P_dc],-pf.CONVVSC_INF_PDC)
self.assertEqual(u[vsc_conv.index_i_dc],pf.CONVVSC_INF_PDC)
self.assertEqual(l[vsc_conv.index_i_dc],-pf.CONVVSC_INF_PDC)
for facts in net.facts:
self.assertTrue(facts.has_flags('bounded','series voltage magnitude'))
self.assertTrue(facts.has_flags('bounded','series voltage angle'))
self.assertTrue(facts.has_flags('bounded','active power'))
self.assertTrue(facts.has_flags('bounded','reactive power'))
self.assertEqual(u[facts.index_v_mag_s],facts.v_max_s)
self.assertEqual(l[facts.index_v_mag_s],0.)
self.assertEqual(u[facts.index_v_ang_s],pf.FACTS_INF_VANG_S)
self.assertEqual(l[facts.index_v_ang_s],-pf.FACTS_INF_VANG_S)
self.assertEqual(u[facts.index_P_k],pf.FACTS_INF_P)
self.assertEqual(l[facts.index_P_k],-pf.FACTS_INF_P)
self.assertEqual(u[facts.index_P_m],pf.FACTS_INF_P)
self.assertEqual(l[facts.index_P_m],-pf.FACTS_INF_P)
self.assertEqual(u[facts.index_P_dc],facts.P_max_dc)
self.assertEqual(l[facts.index_P_dc],-facts.P_max_dc)
self.assertEqual(u[facts.index_Q_k],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_k],-pf.FACTS_INF_Q)
self.assertEqual(u[facts.index_Q_m],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_m],-pf.FACTS_INF_Q)
self.assertEqual(u[facts.index_Q_s],facts.Q_max_s)
self.assertEqual(l[facts.index_Q_s],facts.Q_min_s)
self.assertEqual(u[facts.index_Q_sh],facts.Q_max_sh)
self.assertEqual(l[facts.index_Q_sh],facts.Q_min_sh)
for bus in net.dc_buses:
self.assertEqual(u[bus.index_v], pf.BUSDC_INF_V)
self.assertEqual(l[bus.index_v], -pf.BUSDC_INF_V)
for csc in net.csc_converters:
self.assertEqual(u[csc.index_P], pf.CONVCSC_INF_P)
self.assertEqual(l[csc.index_P], -pf.CONVCSC_INF_P)
self.assertEqual(u[csc.index_Q], pf.CONVCSC_INF_Q)
self.assertEqual(l[csc.index_Q], -pf.CONVCSC_INF_Q)
self.assertEqual(u[csc.index_P_dc], pf.CONVCSC_INF_PDC)
self.assertEqual(l[csc.index_P_dc], -pf.CONVCSC_INF_PDC)
self.assertEqual(u[csc.index_i_dc], pf.CONVCSC_INF_PDC)
self.assertEqual(l[csc.index_i_dc], -pf.CONVCSC_INF_PDC)
self.assertEqual(u[csc.index_angle], pf.CONVCSC_INF_ANGLE)
self.assertEqual(l[csc.index_angle], -pf.CONVCSC_INF_ANGLE)
self.assertEqual(u[csc.index_ratio], pf.CONVCSC_INF_RATIO)
self.assertEqual(l[csc.index_ratio], -pf.CONVCSC_INF_RATIO)
# Sensitivities
net.clear_sensitivities()
for branch in net.branches:
self.assertEqual(branch.sens_ratio_u_bound, 0.)
self.assertEqual(branch.sens_ratio_l_bound, 0.)
self.assertEqual(branch.sens_phase_u_bound, 0.)
self.assertEqual(branch.sens_phase_l_bound, 0.)
for bus in net.buses:
self.assertEqual(bus.sens_P_balance,0.)
self.assertEqual(bus.sens_Q_balance,0.)
self.assertEqual(bus.sens_v_mag_u_bound,0.)
self.assertEqual(bus.sens_v_mag_l_bound,0.)
self.assertEqual(bus.sens_v_ang_u_bound,0.)
self.assertEqual(bus.sens_v_ang_l_bound,0.)
for gen in net.generators:
self.assertEqual(gen.sens_P_u_bound,0.)
self.assertEqual(gen.sens_P_l_bound,0.)
self.assertEqual(gen.sens_Q_u_bound,0.)
self.assertEqual(gen.sens_Q_l_bound,0.)
for load in net.loads:
self.assertEqual(load.sens_P_u_bound,0.)
self.assertEqual(load.sens_P_l_bound,0.)
for shunt in net.shunts:
self.assertEqual(shunt.sens_b_u_bound, 0.)
self.assertEqual(shunt.sens_b_l_bound, 0.)
mu = np.random.randn(net.num_vars)
pi = np.random.randn(net.num_vars)
constr.store_sensitivities(None,None,mu,pi)
# Branch sens
for branch in net.branches:
if branch.is_tap_changer():
self.assertEqual(branch.sens_ratio_u_bound, mu[branch.index_ratio])
self.assertEqual(branch.sens_ratio_l_bound, pi[branch.index_ratio])
else:
self.assertEqual(branch.sens_ratio_u_bound, 0.)
self.assertEqual(branch.sens_ratio_l_bound, 0.)
if branch.is_phase_shifter():
self.assertEqual(branch.sens_phase_u_bound, mu[branch.index_phase])
self.assertEqual(branch.sens_phase_l_bound, pi[branch.index_phase])
else:
self.assertEqual(branch.sens_phase_u_bound, 0.)
self.assertEqual(branch.sens_phase_l_bound, 0.)
# Bus sens
for bus in net.buses:
self.assertEqual(bus.sens_P_balance,0.)
self.assertEqual(bus.sens_Q_balance,0.)
if bus.is_regulated_by_gen():
self.assertTrue(bus.has_flags('variable','voltage angle'))
self.assertNotEqual(bus.sens_v_ang_u_bound,0.)
self.assertNotEqual(bus.sens_v_ang_l_bound,0.)
self.assertEqual(bus.sens_v_mag_u_bound,mu[bus.index_v_mag])
self.assertEqual(bus.sens_v_mag_l_bound,pi[bus.index_v_mag])
self.assertEqual(bus.sens_v_ang_u_bound,mu[bus.index_v_ang])
self.assertEqual(bus.sens_v_ang_l_bound,pi[bus.index_v_ang])
else:
self.assertEqual(bus.sens_v_mag_u_bound,0.)
self.assertEqual(bus.sens_v_mag_l_bound,0.)
self.assertEqual(bus.sens_v_ang_u_bound,0.)
self.assertEqual(bus.sens_v_ang_l_bound,0.)
# Gen sens
for gen in net.generators:
if gen.is_regulator():
self.assertTrue(gen.has_flags('variable','active power'))
self.assertNotEqual(gen.sens_P_u_bound,0.)
self.assertNotEqual(gen.sens_P_l_bound,0.)
self.assertEqual(gen.sens_P_u_bound, mu[gen.index_P])
self.assertEqual(gen.sens_P_l_bound, pi[gen.index_P])
self.assertEqual(gen.sens_Q_u_bound, mu[gen.index_Q])
self.assertEqual(gen.sens_Q_l_bound, pi[gen.index_Q])
else:
self.assertEqual(gen.sens_P_u_bound, 0.)
self.assertEqual(gen.sens_P_l_bound, 0.)
self.assertEqual(gen.sens_Q_u_bound, 0.)
self.assertEqual(gen.sens_Q_l_bound, 0.)
# Load sens
for load in net.loads:
self.assertTrue(load.has_flags('variable','active power'))
self.assertNotEqual(load.sens_P_u_bound,0.)
self.assertNotEqual(load.sens_P_l_bound,0.)
self.assertEqual(load.sens_P_u_bound,mu[load.index_P])
self.assertEqual(load.sens_P_l_bound,pi[load.index_P])
# Shunts
for shunt in net.shunts:
if shunt.is_switched_v():
self.assertEqual(shunt.sens_b_u_bound,mu[shunt.index_b])
self.assertEqual(shunt.sens_b_l_bound,pi[shunt.index_b])
else:
self.assertEqual(shunt.sens_b_u_bound, 0.)
self.assertEqual(shunt.sens_b_l_bound, 0.)
# Multi period
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
self.assertEqual(net.num_periods,self.T)
# add vargens
net.add_var_generators_from_parameters(net.get_load_buses(),80.,50.,30.,5,0.05)
for vargen in net.var_generators:
vargen.P = np.random.rand(self.T)
vargen.Q = np.random.rand(self.T)
vargen.P_ava = vargen.P*3.4
vargen.P_max = 100.
vargen.P_min = 0.
vargen.Q_max = 50.
vargen.Q_min = -50.
self.assertEqual(vargen.num_periods,self.T)
for t in range(self.T):
self.assertEqual(vargen.P_ava[t],vargen.P[t]*3.4)
self.assertGreater(net.num_var_generators,0)
self.assertEqual(net.num_bounded,0)
self.assertEqual(net.num_vars,0)
self.assertEqual(net.num_fixed,0)
# add batteries
gen_buses = net.get_generator_buses()
net.add_batteries_from_parameters(gen_buses,20.,40.,0.8,0.9)
# loads
for load in net.loads:
load.P_min = -2.4*(load.index+1)*np.array(range(net.num_periods))
load.P_max = 3.3*(load.index+1)*np.array(range(net.num_periods))
load.Q = 3.5*load.index*np.array(range(net.num_periods))
load.Q_min = 1.2*(load.index+1)*np.array(range(net.num_periods))
load.Q_max = 7.5*(load.index+1)*np.array(range(net.num_periods))
# Vars
net.set_flags('bus',
'variable',
'any',
['voltage magnitude','voltage angle'])
net.set_flags('generator',
'variable',
'any',
['active power','reactive power'])
net.set_flags('load',
'variable',
'any',
['active power','reactive power'])
net.set_flags('branch',
'variable',
'tap changer',
['tap ratio'])
net.set_flags('branch',
'variable',
'phase shifter',
'phase shift')
net.set_flags('shunt',
'variable',
'switching - v',
['susceptance'])
net.set_flags('variable generator',
'variable',
'any',
['active power','reactive power'])
net.set_flags('battery',
'variable',
'any',
['charging power','energy level'])
net.set_flags('vsc converter',
'variable',
'any',
['dc power', 'active power', 'reactive power'])
net.set_flags('facts',
'variable',
'any',
['series voltage magnitude','series voltage angle',
'active power', 'reactive power'])
net.set_flags('dc bus',
'variable',
'any',
'voltage')
net.set_flags('csc converter',
'variable',
'any',
'all')
self.assertGreater(net.num_vars,0)
self.assertEqual(net.num_fixed,0)
self.assertEqual(net.num_vars,
(net.num_buses*2 +
net.num_generators*2 +
2*net.num_loads +
net.get_num_tap_changers() +
net.get_num_phase_shifters() +
net.get_num_switched_v_shunts() +
net.num_var_generators*2 +
3*net.num_batteries +
4*net.num_vsc_converters +
9*net.num_facts +
net.num_dc_buses +
6*net.num_csc_converters)*self.T)
x0 = net.get_var_values()
constr = pf.Constraint('variable bounds',net)
self.assertEqual(constr.name,'variable bounds')
constr.analyze()
constr.eval(x0)
G = constr.G
l = constr.l
u = constr.u
self.assertTrue(type(G) is coo_matrix)
self.assertTupleEqual(G.shape,(net.num_vars,net.num_vars))
self.assertEqual(G.nnz,net.num_vars)
self.assertTrue(np.all(G.row == np.array(range(net.num_vars))))
self.assertTrue(np.all(G.col == np.array(range(net.num_vars))))
self.assertTrue(np.all(G.data == np.ones(net.num_vars)))
self.assertTrue(type(l) is np.ndarray)
self.assertTupleEqual(l.shape,(net.num_vars,))
self.assertTrue(type(u) is np.ndarray)
self.assertTupleEqual(u.shape,(net.num_vars,))
for t in range(self.T):
for bus in net.buses:
self.assertEqual(u[bus.index_v_mag[t]],pf.BUS_INF_V_MAG)
self.assertEqual(u[bus.index_v_ang[t]],pf.BUS_INF_V_ANG)
self.assertEqual(l[bus.index_v_mag[t]],0)
self.assertEqual(l[bus.index_v_ang[t]],-pf.BUS_INF_V_ANG)
for gen in net.generators:
self.assertEqual(u[gen.index_P[t]],pf.GEN_INF_P)
self.assertEqual(u[gen.index_Q[t]],pf.GEN_INF_Q)
self.assertEqual(l[gen.index_P[t]],-pf.GEN_INF_P)
self.assertEqual(l[gen.index_Q[t]],-pf.GEN_INF_Q)
for branch in net.branches:
if branch.is_tap_changer():
self.assertEqual(u[branch.index_ratio[t]],pf.BRANCH_INF_RATIO)
self.assertEqual(l[branch.index_ratio[t]],0.)
if branch.is_phase_shifter():
self.assertLess(np.abs(u[branch.index_phase[t]]-np.pi*2.),1e-10)
self.assertLess(np.abs(l[branch.index_phase[t]]+np.pi*2.),1e-10)
for vargen in net.var_generators:
self.assertEqual(u[vargen.index_P[t]],pf.VARGEN_INF_P)
self.assertEqual(u[vargen.index_Q[t]],pf.VARGEN_INF_Q)
self.assertEqual(l[vargen.index_P[t]],-pf.VARGEN_INF_P)
self.assertEqual(l[vargen.index_Q[t]],-pf.VARGEN_INF_Q)
for load in net.loads:
self.assertEqual(u[load.index_P[t]],pf.LOAD_INF_P)
self.assertEqual(l[load.index_P[t]],-pf.LOAD_INF_P)
self.assertEqual(u[load.index_Q[t]],pf.LOAD_INF_Q)
self.assertEqual(l[load.index_Q[t]],-pf.LOAD_INF_Q)
for shunt in net.shunts:
if shunt.is_switched_v():
self.assertEqual(u[shunt.index_b[t]],pf.SHUNT_INF_SUSC)
self.assertEqual(l[shunt.index_b[t]],-pf.SHUNT_INF_SUSC)
for vsc_conv in net.vsc_converters:
self.assertTrue(vsc_conv.has_flags('variable','active power'))
self.assertTrue(vsc_conv.has_flags('variable','reactive power'))
self.assertTrue(vsc_conv.has_flags('variable','dc power'))
self.assertEqual(u[vsc_conv.index_P[t]],pf.CONVVSC_INF_P)
self.assertEqual(l[vsc_conv.index_P[t]],-pf.CONVVSC_INF_P)
self.assertEqual(u[vsc_conv.index_Q[t]],pf.CONVVSC_INF_Q)
self.assertEqual(l[vsc_conv.index_Q[t]],-pf.CONVVSC_INF_Q)
self.assertEqual(u[vsc_conv.index_P_dc[t]],pf.CONVVSC_INF_PDC)
self.assertEqual(l[vsc_conv.index_P_dc[t]],-pf.CONVVSC_INF_PDC)
self.assertEqual(u[vsc_conv.index_i_dc[t]],pf.CONVVSC_INF_PDC)
self.assertEqual(l[vsc_conv.index_i_dc[t]],-pf.CONVVSC_INF_PDC)
for facts in net.facts:
self.assertTrue(facts.has_flags('variable','series voltage magnitude'))
self.assertTrue(facts.has_flags('variable','series voltage angle'))
self.assertTrue(facts.has_flags('variable','active power'))
self.assertTrue(facts.has_flags('variable','reactive power'))
self.assertEqual(u[facts.index_v_mag_s[t]],pf.FACTS_INF_VMAG_S)
self.assertEqual(l[facts.index_v_mag_s[t]],0.)
self.assertEqual(u[facts.index_v_ang_s[t]],pf.FACTS_INF_VANG_S)
self.assertEqual(l[facts.index_v_ang_s[t]],-pf.FACTS_INF_VANG_S)
self.assertEqual(u[facts.index_P_k[t]],pf.FACTS_INF_P)
self.assertEqual(l[facts.index_P_k[t]],-pf.FACTS_INF_P)
self.assertEqual(u[facts.index_P_m[t]],pf.FACTS_INF_P)
self.assertEqual(l[facts.index_P_m[t]],-pf.FACTS_INF_P)
self.assertEqual(u[facts.index_P_dc[t]],pf.FACTS_INF_P)
self.assertEqual(l[facts.index_P_dc[t]],-pf.FACTS_INF_P)
self.assertEqual(u[facts.index_Q_k[t]],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_k[t]],-pf.FACTS_INF_Q)
self.assertEqual(u[facts.index_Q_m[t]],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_m[t]],-pf.FACTS_INF_Q)
self.assertEqual(u[facts.index_Q_s[t]],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_s[t]],-pf.FACTS_INF_Q)
self.assertEqual(u[facts.index_Q_sh[t]],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_sh[t]],-pf.FACTS_INF_Q)
for bus in net.dc_buses:
self.assertEqual(u[bus.index_v[t]], pf.BUSDC_INF_V)
self.assertEqual(l[bus.index_v[t]], -pf.BUSDC_INF_V)
for csc in net.csc_converters:
self.assertEqual(u[csc.index_P[t]], pf.CONVCSC_INF_P)
self.assertEqual(l[csc.index_P[t]], -pf.CONVCSC_INF_P)
self.assertEqual(u[csc.index_Q[t]], pf.CONVCSC_INF_Q)
self.assertEqual(l[csc.index_Q[t]], -pf.CONVCSC_INF_Q)
self.assertEqual(u[csc.index_P_dc[t]], pf.CONVCSC_INF_PDC)
self.assertEqual(l[csc.index_P_dc[t]], -pf.CONVCSC_INF_PDC)
self.assertEqual(u[csc.index_i_dc[t]], pf.CONVCSC_INF_PDC)
self.assertEqual(l[csc.index_i_dc[t]], -pf.CONVCSC_INF_PDC)
self.assertEqual(u[csc.index_angle[t]], pf.CONVCSC_INF_ANGLE)
self.assertEqual(l[csc.index_angle[t]], -pf.CONVCSC_INF_ANGLE)
self.assertEqual(u[csc.index_ratio[t]], pf.CONVCSC_INF_RATIO)
self.assertEqual(l[csc.index_ratio[t]], -pf.CONVCSC_INF_RATIO)
# Row info
for t in range(self.T):
for bus in net.buses:
i = bus.index_v_mag[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:bus:%d:voltage magnitude:%d' %(bus.index,t))
i = bus.index_v_ang[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:bus:%d:voltage angle:%d' %(bus.index,t))
for gen in net.generators:
i = gen.index_P[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:generator:%d:active power:%d' %(gen.index,t))
i = gen.index_Q[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:generator:%d:reactive power:%d' %(gen.index,t))
for load in net.loads:
i = load.index_P[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:load:%d:active power:%d' %(load.index,t))
i = load.index_Q[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:load:%d:reactive power:%d' %(load.index,t))
for vargen in net.var_generators:
i = vargen.index_P[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:variable generator:%d:active power:%d' %(vargen.index,t))
i = vargen.index_Q[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:variable generator:%d:reactive power:%d' %(vargen.index,t))
for branch in net.branches:
if branch.is_tap_changer():
i = branch.index_ratio[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:branch:%d:tap ratio:%d' %(branch.index,t))
if branch.is_phase_shifter():
i = branch.index_phase[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:branch:%d:phase shift:%d' %(branch.index,t))
for shunt in net.shunts:
if shunt.is_switched_v():
i = shunt.index_b[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:shunt:%d:susceptance:%d' %(shunt.index,t))
for bat in net.batteries:
i = bat.index_Pc[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:battery:%d:charging power:%d' %(bat.index,t))
i = bat.index_Pd[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:battery:%d:discharging power:%d' %(bat.index,t))
i = bat.index_E[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:battery:%d:energy level:%d' %(bat.index,t))
for vsc_conv in net.vsc_converters:
i = vsc_conv.index_P[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:vsc converter:%d:active power:%d' %(vsc_conv.index,t))
i = vsc_conv.index_Q[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:vsc converter:%d:reactive power:%d' %(vsc_conv.index,t))
i = vsc_conv.index_P_dc[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:vsc converter:%d:dc power:%d' %(vsc_conv.index,t))
i = vsc_conv.index_i_dc[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:vsc converter:%d:dc current:%d' %(vsc_conv.index,t))
for facts in net.facts:
i = facts.index_v_mag_s[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:facts:%d:series voltage magnitude:%d' %(facts.index,t))
i = facts.index_v_ang_s[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:facts:%d:series voltage angle:%d' %(facts.index,t))
i = facts.index_P_k[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:facts:%d:active power k:%d' %(facts.index,t))
i = facts.index_P_m[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:facts:%d:active power m:%d' %(facts.index,t))
i = facts.index_P_dc[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:facts:%d:dc power:%d' %(facts.index,t))
i = facts.index_Q_k[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:facts:%d:reactive power k:%d' %(facts.index,t))
i = facts.index_Q_m[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:facts:%d:reactive power m:%d' %(facts.index,t))
i = facts.index_Q_s[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:facts:%d:series reactive power:%d' %(facts.index,t))
i = facts.index_Q_sh[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:facts:%d:shunt reactive power:%d' %(facts.index,t))
for bus in net.dc_buses:
i = bus.index_v[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:dc bus:%d:voltage:%d' %(bus.index,t))
for csc in net.csc_converters:
i = csc.index_P[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:csc converter:%d:active power:%d' %(csc.index,t))
i = csc.index_Q[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:csc converter:%d:reactive power:%d' %(csc.index,t))
i = csc.index_P_dc[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:csc converter:%d:dc power:%d' %(csc.index,t))
i = csc.index_i_dc[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:csc converter:%d:dc current:%d' %(csc.index,t))
i = csc.index_angle[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:csc converter:%d:angle:%d' %(csc.index,t))
i = csc.index_ratio[t]
s = constr.get_G_row_info_string(i)
self.assertEqual(constr.get_A_row_info_string(i),"")
self.assertEqual(constr.get_J_row_info_string(i),"")
self.assertEqual(s,'variable bounds:csc converter:%d:tap ratio:%d' %(csc.index,t))
# Bounded
net.set_flags('bus',
'bounded',
'any',
['voltage magnitude','voltage angle'])
net.set_flags('generator',
'bounded',
'any',
['active power','reactive power'])
net.set_flags('load',
'bounded',
'any',
['active power','reactive power'])
net.set_flags('branch',
'bounded',
'tap changer',
['tap ratio'])
net.set_flags('branch',
'bounded',
'phase shifter',
'phase shift')
net.set_flags('shunt',
'bounded',
'switching - v',
['susceptance'])
net.set_flags('variable generator',
'bounded',
'any',
['active power','reactive power'])
net.set_flags('battery',
'bounded',
'any',
['charging power','energy level'])
net.set_flags('vsc converter',
'bounded',
'any',
['dc power', 'active power','reactive power'])
net.set_flags('facts',
'bounded',
'any',
['series voltage magnitude','series voltage angle',
'active power','reactive power'])
net.set_flags('dc bus',
'bounded',
'any',
'voltage')
net.set_flags('csc converter',
'bounded',
'any',
'all')
# outages have no effect
for gen in net.generators:
gen.in_service = False
for branch in net.branches:
branch.in_service = False
for bus in net.buses:
bus.in_service = False
for load in net.loads:
load.in_service = False
for bus in net.dc_buses:
bus.in_service = False
for branch in net.dc_branches:
branch.in_service = False
for conv in net.csc_converters:
conv.in_service = False
for conv in net.vsc_converters:
conv.in_service = False
for facts in net.facts:
facts.in_service = False
for bat in net.batteries:
bat.in_service = False
for gen in net.var_generators:
gen.in_service = False
for shunt in net.shunts:
shunt.in_service = False
self.assertGreater(net.num_vars,0)
self.assertEqual(net.num_bounded,net.num_vars)
x0 = net.get_var_values()
constr = pf.Constraint('variable bounds',net)
self.assertEqual(constr.name,'variable bounds')
constr.analyze()
constr.eval(x0)
G = constr.G
l = constr.l
u = constr.u
for t in range(self.T):
for bus in net.buses:
self.assertEqual(u[bus.index_v_mag[t]],bus.v_max)
self.assertEqual(u[bus.index_v_ang[t]],pf.BUS_INF_V_ANG)
self.assertEqual(l[bus.index_v_mag[t]],bus.v_min)
self.assertEqual(l[bus.index_v_ang[t]],-pf.BUS_INF_V_ANG)
for gen in net.generators:
self.assertEqual(u[gen.index_P[t]],gen.P_max)
self.assertEqual(u[gen.index_Q[t]],gen.Q_max)
self.assertEqual(l[gen.index_P[t]],gen.P_min)
self.assertEqual(l[gen.index_Q[t]],gen.Q_min)
for branch in net.branches:
if branch.is_tap_changer():
self.assertEqual(u[branch.index_ratio[t]],branch.ratio_max)
self.assertEqual(l[branch.index_ratio[t]],branch.ratio_min)
if branch.is_phase_shifter():
self.assertEqual(u[branch.index_phase[t]],branch.phase_max)
self.assertEqual(l[branch.index_phase[t]],branch.phase_min)
for vargen in net.var_generators:
self.assertEqual(u[vargen.index_P[t]],vargen.P_ava[t])
self.assertEqual(u[vargen.index_Q[t]],vargen.Q_max)
self.assertEqual(l[vargen.index_P[t]],vargen.P_min)
self.assertEqual(l[vargen.index_Q[t]],vargen.Q_min)
for load in net.loads:
self.assertEqual(u[load.index_P[t]],load.P_max[t])
self.assertEqual(l[load.index_P[t]],load.P_min[t])
self.assertEqual(u[load.index_P[t]],3.3*(load.index+1)*t)
self.assertEqual(l[load.index_P[t]],-2.4*(load.index+1)*t)
self.assertEqual(u[load.index_Q[t]],7.5*(load.index+1)*t)
self.assertEqual(l[load.index_Q[t]],1.2*(load.index+1)*t)
for shunt in net.shunts:
if shunt.is_switched_v():
self.assertEqual(u[shunt.index_b[t]],shunt.b_max)
self.assertEqual(l[shunt.index_b[t]],shunt.b_min)
for vsc_conv in net.vsc_converters:
self.assertTrue(vsc_conv.has_flags('bounded','active power'))
self.assertTrue(vsc_conv.has_flags('bounded','reactive power'))
self.assertTrue(vsc_conv.has_flags('bounded','dc power'))
self.assertEqual(u[vsc_conv.index_P[t]],vsc_conv.P_max)
self.assertEqual(l[vsc_conv.index_P[t]],vsc_conv.P_min)
self.assertEqual(u[vsc_conv.index_Q[t]],vsc_conv.Q_max)
self.assertEqual(l[vsc_conv.index_Q[t]],vsc_conv.Q_min)
self.assertEqual(u[vsc_conv.index_P_dc[t]],pf.CONVVSC_INF_PDC)
self.assertEqual(l[vsc_conv.index_P_dc[t]],-pf.CONVVSC_INF_PDC)
self.assertEqual(u[vsc_conv.index_i_dc[t]],pf.CONVVSC_INF_PDC)
self.assertEqual(l[vsc_conv.index_i_dc[t]],-pf.CONVVSC_INF_PDC)
for facts in net.facts:
self.assertTrue(facts.has_flags('bounded','series voltage magnitude'))
self.assertTrue(facts.has_flags('bounded','series voltage angle'))
self.assertTrue(facts.has_flags('bounded','active power'))
self.assertTrue(facts.has_flags('bounded','reactive power'))
self.assertEqual(u[facts.index_v_mag_s[t]],facts.v_max_s)
self.assertEqual(l[facts.index_v_mag_s[t]],0.)
self.assertEqual(u[facts.index_v_ang_s[t]],pf.FACTS_INF_VANG_S)
self.assertEqual(l[facts.index_v_ang_s[t]],-pf.FACTS_INF_VANG_S)
self.assertEqual(u[facts.index_P_k[t]],pf.FACTS_INF_P)
self.assertEqual(l[facts.index_P_k[t]],-pf.FACTS_INF_P)
self.assertEqual(u[facts.index_P_m[t]],pf.FACTS_INF_P)
self.assertEqual(l[facts.index_P_m[t]],-pf.FACTS_INF_P)
self.assertEqual(u[facts.index_P_dc[t]],facts.P_max_dc)
self.assertEqual(l[facts.index_P_dc[t]],-facts.P_max_dc)
self.assertEqual(u[facts.index_Q_k[t]],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_k[t]],-pf.FACTS_INF_Q)
self.assertEqual(u[facts.index_Q_m[t]],pf.FACTS_INF_Q)
self.assertEqual(l[facts.index_Q_m[t]],-pf.FACTS_INF_Q)
self.assertEqual(u[facts.index_Q_s[t]],facts.Q_max_s)
self.assertEqual(l[facts.index_Q_s[t]],facts.Q_min_s)
self.assertEqual(u[facts.index_Q_sh[t]],facts.Q_max_sh)
self.assertEqual(l[facts.index_Q_sh[t]],facts.Q_min_sh)
for bus in net.dc_buses:
self.assertEqual(u[bus.index_v[t]], pf.BUSDC_INF_V)
self.assertEqual(l[bus.index_v[t]], -pf.BUSDC_INF_V)
for csc in net.csc_converters:
self.assertEqual(u[csc.index_P[t]], pf.CONVCSC_INF_P)
self.assertEqual(l[csc.index_P[t]], -pf.CONVCSC_INF_P)
self.assertEqual(u[csc.index_Q[t]], pf.CONVCSC_INF_Q)
self.assertEqual(l[csc.index_Q[t]], -pf.CONVCSC_INF_Q)
self.assertEqual(u[csc.index_P_dc[t]], pf.CONVCSC_INF_PDC)
self.assertEqual(l[csc.index_P_dc[t]], -pf.CONVCSC_INF_PDC)
self.assertEqual(u[csc.index_i_dc[t]], pf.CONVCSC_INF_PDC)
self.assertEqual(l[csc.index_i_dc[t]], -pf.CONVCSC_INF_PDC)
self.assertEqual(u[csc.index_angle[t]], pf.CONVCSC_INF_ANGLE)
self.assertEqual(l[csc.index_angle[t]], -pf.CONVCSC_INF_ANGLE)
self.assertEqual(u[csc.index_ratio[t]], pf.CONVCSC_INF_RATIO)
self.assertEqual(l[csc.index_ratio[t]], -pf.CONVCSC_INF_RATIO)
# Sensitivities
mu = np.random.randn(net.num_vars)
pi = np.random.randn(net.num_vars)
net.clear_sensitivities()
constr.store_sensitivities(None,None,mu,pi)
for t in range(self.T):
# Branch sens
for branch in net.branches:
if branch.is_tap_changer():
self.assertEqual(branch.sens_ratio_u_bound[t], mu[branch.index_ratio[t]])
self.assertEqual(branch.sens_ratio_l_bound[t], pi[branch.index_ratio[t]])
else:
self.assertEqual(branch.sens_ratio_u_bound[t], 0.)
self.assertEqual(branch.sens_ratio_l_bound[t], 0.)
if branch.is_phase_shifter():
self.assertEqual(branch.sens_phase_u_bound[t], mu[branch.index_phase[t]])
self.assertEqual(branch.sens_phase_l_bound[t], pi[branch.index_phase[t]])
else:
self.assertEqual(branch.sens_phase_u_bound[t], 0.)
self.assertEqual(branch.sens_phase_l_bound[t], 0.)
# Bus sens
for bus in net.buses:
self.assertEqual(bus.sens_P_balance[t],0.)
self.assertEqual(bus.sens_Q_balance[t],0.)
self.assertEqual(bus.sens_v_mag_u_bound[t], mu[bus.index_v_mag[t]])
self.assertEqual(bus.sens_v_mag_l_bound[t], pi[bus.index_v_mag[t]])
self.assertEqual(bus.sens_v_ang_u_bound[t], mu[bus.index_v_ang[t]])
self.assertEqual(bus.sens_v_ang_l_bound[t], pi[bus.index_v_ang[t]])
# Gen sens
for gen in net.generators:
self.assertEqual(gen.sens_P_u_bound[t], mu[gen.index_P[t]])
self.assertEqual(gen.sens_P_l_bound[t], pi[gen.index_P[t]])
self.assertEqual(gen.sens_Q_u_bound[t], mu[gen.index_Q[t]])
self.assertEqual(gen.sens_Q_l_bound[t], pi[gen.index_Q[t]])
# Load sens
for load in net.loads:
self.assertEqual(load.sens_P_u_bound[t], mu[load.index_P[t]])
self.assertEqual(load.sens_P_l_bound[t], pi[load.index_P[t]])
# Shunts
for shunt in net.shunts:
if shunt.is_switched_v():
self.assertEqual(shunt.sens_b_u_bound[t], mu[shunt.index_b[t]])
self.assertEqual(shunt.sens_b_l_bound[t], pi[shunt.index_b[t]])
    def test_constr_BOUND_with_outages(self):
        """Check the 'variable bounds' constraint when components are on outage.

        First verifies that an out-of-service generator and branch, once
        explicitly flagged per component, still contribute rows to G with
        their own bounds (identified via known gaps of 3, 4, 5, 2 between
        min and max). Then verifies that a degree-1 bus whose only branch
        is out of service gets finite +/-100 voltage-angle bounds.
        """
        # Multiperiod
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            net.make_all_in_service()
            # Take one generator and one branch out of service
            gen = net.get_generator(0)
            branch = net.get_branch(0)
            gen.in_service = False
            branch.in_service = False
            self.assertFalse(gen.is_in_service())
            self.assertFalse(branch.is_in_service())
            # Randomize lower bounds; upper bounds are set with known gaps
            # (3., 4., 5., 2.) so the rows can be identified below.
            gen.P_min = np.random.rand()
            gen.Q_min = np.random.rand()
            branch.ratio_min = np.random.randn()
            branch.phase_min = np.random.randn()
            gen.P_max = gen.P_min + 3.
            gen.Q_max = gen.Q_min + 4.
            branch.ratio_max = branch.ratio_min + 5.
            branch.phase_max = branch.phase_min + 2.
            net.set_flags('generator',
                          ['variable','bounded'],
                          'any',
                          ['active power', 'reactive power'])
            net.set_flags('branch',
                          ['variable','bounded'],
                          'any',
                          ['tap ratio', 'phase shift'])
            # Out-of-service components are flagged explicitly per component
            net.set_flags_of_component(gen, ['variable', 'bounded'], ['active power', 'reactive power'])
            net.set_flags_of_component(branch, ['variable', 'bounded'], ['tap ratio', 'phase shift'])
            self.assertEqual(net.num_vars,
                             self.T*(2*net.num_generators + 2*net.num_branches))
            self.assertEqual(net.num_vars, net.num_bounded)
            constr = pf.Constraint('variable bounds', net)
            constr.analyze()
            l = constr.l
            u = constr.u
            G = constr.G
            # Locate each variable's single G entry by column and check bounds
            for t in range(self.T):
                # gen P
                k = np.where(G.col == gen.index_P[t])[0]
                self.assertEqual(k.size, 1)
                k = k[0]
                i = G.row[k]
                self.assertEqual(G.data[k], 1.)
                self.assertEqual(l[i], gen.P_min)
                self.assertEqual(u[i], gen.P_max)
                self.assertEqual(u[i], l[i] + 3.)
                # gen Q
                k = np.where(G.col == gen.index_Q[t])[0]
                self.assertEqual(k.size, 1)
                k = k[0]
                i = G.row[k]
                self.assertEqual(G.data[k], 1.)
                self.assertEqual(l[i], gen.Q_min)
                self.assertEqual(u[i], gen.Q_max)
                self.assertEqual(u[i], l[i] + 4.)
                # branch ratio
                k = np.where(G.col == branch.index_ratio[t])[0]
                self.assertEqual(k.size, 1)
                k = k[0]
                i = G.row[k]
                self.assertEqual(G.data[k], 1.)
                self.assertEqual(l[i], branch.ratio_min)
                self.assertEqual(u[i], branch.ratio_max)
                self.assertEqual(u[i], l[i] + 5.)
                # branch phase
                k = np.where(G.col == branch.index_phase[t])[0]
                self.assertEqual(k.size, 1)
                k = k[0]
                i = G.row[k]
                self.assertEqual(G.data[k], 1.)
                self.assertEqual(l[i], branch.phase_min)
                self.assertEqual(u[i], branch.phase_max)
                self.assertEqual(u[i], l[i] + 2.)
            # Disconnect
            net.make_all_in_service()
            net.clear_flags()
            self.assertEqual(net.num_vars, 0)
            # Isolate the first degree-1 bus by removing its only branch
            for bus in net.buses:
                if bus.degree == 1:
                    self.assertEqual(len(bus.branches), 1)
                    bus.branches[0].in_service = False
                    self.assertFalse(bus.branches[0].is_in_service())
                    net.set_flags_of_component(bus,
                                               ['variable', 'bounded'],
                                               ['voltage magnitude', 'voltage angle'])
                    self.assertEqual(net.num_vars, 2*self.T)
                    self.assertEqual(net.num_vars, net.num_bounded)
                    self.assertTrue(bus.has_flags('variable', ['voltage magnitude',
                                                               'voltage angle']))
                    self.assertTrue(bus.has_flags('bounded', ['voltage magnitude',
                                                              'voltage angle']))
                    constr = pf.Constraint('variable bounds', net)
                    constr.analyze()
                    G = constr.G
                    l = constr.l
                    u = constr.u
                    self.assertEqual(G.shape[0], 2*self.T)
                    for t in range(self.T):
                        k = np.where(G.col == bus.index_v_mag[t])[0]
                        self.assertEqual(k.size, 1)
                        k = k[0]
                        self.assertEqual(l[G.row[k]], bus.v_min)
                        self.assertEqual(u[G.row[k]], bus.v_max)
                        k = np.where(G.col == bus.index_v_ang[t])[0]
                        self.assertEqual(k.size, 1)
                        k = k[0]
                        # Angle bounds collapse to +/-100 for the isolated bus
                        self.assertEqual(l[G.row[k]], -100.)
                        self.assertEqual(u[G.row[k]], 100.)
                    break
    def test_constr_PAR_GEN_P(self):
        """Check the 'generator active power participation' constraint.

        For each slack bus with generators (g1, g2, ..., gn), the constraint
        should contain one row per pair (g1, gk), k > 1, of the form
        P_g1 - P_gk = 0, for every time period. Also verifies that the
        constraint becomes empty when all generators are out of service.
        """
        # Multiperiod
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            self.assertEqual(net.num_vars,0)
            # Vars
            net.set_flags('generator',
                          'variable',
                          'slack',
                          ['active power','reactive power'])
            net.set_flags('generator',
                          'variable',
                          'regulator',
                          'reactive power')
            self.assertGreater(net.num_vars,0)
            self.assertEqual(net.num_vars,(net.get_num_slack_gens()+net.get_num_reg_gens())*self.T)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('generator active power participation',net)
            self.assertEqual(constr.name,'generator active power participation')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # Before analyze/eval: everything should be empty
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            # Manual count: one row (2 nonzeros) per extra generator on a slack bus
            nnz = 0
            num_constr = 0
            for i in range(net.num_buses):
                bus = net.get_bus(i)
                if bus.is_slack():
                    num_constr += len(bus.generators)-1 # P participation
                    nnz += 2*(len(bus.generators)-1)
            constr.analyze()
            self.assertEqual(nnz*self.T,constr.A_nnz)
            constr.eval(x0)
            # eval resets the A_nnz counter (A is linear, filled by analyze)
            self.assertEqual(0,constr.A_nnz)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # After
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(num_constr*self.T,))
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(num_constr*self.T,net.num_vars))
            self.assertEqual(A.nnz,nnz*self.T)
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,net.num_vars))
            self.assertEqual(J.nnz,0)
            self.assertTrue(not np.any(np.isinf(b)))
            self.assertTrue(not np.any(np.isnan(b)))
            # Detailed check: rows are ordered t-major, bus-major, pair-major,
            # each row being P_g1 - P_gk = 0
            Ai = A.row
            Aj = A.col
            Ad = A.data
            self.assertEqual(Ai.size,nnz*self.T)
            self.assertEqual(Aj.size,nnz*self.T)
            self.assertEqual(Ad.size,nnz*self.T)
            i = 0
            row = 0
            for t in range(self.T):
                for bus in net.buses:
                    if bus.is_slack():
                        gens = bus.generators
                        self.assertGreater(len(gens),0)
                        g1 = gens[0]
                        for g2 in gens[1:]:
                            self.assertEqual(b[row],0.)
                            self.assertEqual(Ai[i],row)
                            self.assertEqual(Aj[i],g1.index_P[t])
                            self.assertEqual(Ad[i],1.)
                            i += 1
                            self.assertEqual(Ai[i],row)
                            self.assertEqual(Aj[i],g2.index_P[t])
                            self.assertEqual(Ad[i],-1.)
                            i += 1
                            row += 1
            self.assertEqual(i,nnz*self.T)
            # Last check: equal P across slack generators satisfies A x = b
            x = np.zeros(net.num_vars)
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    if bus.is_slack():
                        self.assertGreater(len(bus.generators),0)
                        for g in bus.generators:
                            self.assertTrue(g.has_flags('variable','active power'))
                            x[g.index_P[t]] = 10.
            self.assertGreater(norm(x),0)
            self.assertTrue(norm(A*x-b) < 1e-10)
            # With outages: all generators out => constraint vanishes entirely
            for gen in net.generators:
                gen.in_service = False
            constr.analyze()
            constr.eval(x0)
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_PAR_GEN_P_with_outages(self):
# Multiperiod
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
self.assertEqual(net.num_periods,self.T)
net.make_all_in_service()
net.clear_flags()
for bus in net.buses:
if bus.is_slack():
for branch in net.branches:
branch.in_service = False
for gen in net.generators:
gen.in_service = False
net.set_flags('generator',
'variable',
'any',
'active power')
self.assertEqual(net.num_vars, self.T*net.get_num_generators(True))
constr = pf.Constraint('generator active power participation', net)
constr.analyze()
A = constr.A
b = constr.b
self.assertEqual(A.shape[0], 0)
self.assertEqual(b.shape[0], 0)
net.make_all_in_service()
net.set_flags('generator',
'variable',
'any',
'active power')
self.assertEqual(net.num_vars, self.T*net.num_generators)
constr.analyze()
A = constr.A
b = constr.b
check = False
for bus in net.buses:
if bus.is_slack() and len(bus.generators) > 1:
check = True
if check:
self.assertGreater(A.shape[0], 0)
self.assertGreater(b.shape[0], 0)
    def test_constr_PVPQ_SWITCHING(self):
        """Check the 'PVPQ switching' constraint structure.

        For each gen-regulated bus the constraint should contain one
        voltage-set-point row (v_mag = v_set, with zero placeholder entries
        for the reg-gen Q variables) plus one Q-sharing row per consecutive
        pair of regulating generators. Also checks degenerate cases with
        only v or only Q variables, and with all generators on outage.
        """
        # Multiperiod
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            self.assertEqual(net.num_vars,0)
            # Vars
            net.set_flags('bus',
                          'variable',
                          'regulated by generator',
                          'voltage magnitude')
            net.set_flags('generator',
                          'variable',
                          'slack',
                          ['active power','reactive power'])
            net.set_flags('generator',
                          'variable',
                          'regulator',
                          'reactive power')
            self.assertGreater(net.num_vars,0)
            self.assertEqual(net.num_vars,
                             (net.get_num_buses_reg_by_gen()+net.get_num_slack_gens()+net.get_num_reg_gens())*self.T)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Make it interesting: random Q participation factors
            for gen in net.generators:
                gen.Q_par = np.random.rand()
            # Constraint
            constr = pf.Constraint('PVPQ switching',net)
            self.assertEqual(constr.name,'PVPQ switching')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # Before analyze/eval: everything should be empty
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            # Manual count: per reg bus with n reg gens, n rows and n*(n+1) nonzeros
            nnz = 0
            num_constr = 0
            for i in range(net.num_buses):
                bus = net.get_bus(i)
                if bus.is_regulated_by_gen():
                    num_constr += len(bus.reg_generators)
                    nnz += len(bus.reg_generators)*(len(bus.reg_generators)+1)
            constr.analyze()
            self.assertEqual(nnz*self.T,constr.A_nnz)
            constr.eval(x0)
            self.assertEqual(0,constr.A_nnz)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # After
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(num_constr*self.T,))
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(num_constr*self.T,net.num_vars))
            self.assertEqual(A.nnz,nnz*self.T)
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,net.num_vars))
            self.assertEqual(J.nnz,0)
            self.assertTrue(not np.any(np.isinf(b)))
            self.assertTrue(not np.any(np.isnan(b)))
            # Detailed check of row/column/data layout
            Ai = A.row
            Aj = A.col
            Ad = A.data
            self.assertEqual(Ai.size,nnz*self.T)
            self.assertEqual(Aj.size,nnz*self.T)
            self.assertEqual(Ad.size,nnz*self.T)
            nnz = 0
            row = 0
            for t in range(self.T):
                for bus in net.buses:
                    if bus.is_regulated_by_gen():
                        # Set-point row: v_mag coefficient 1, zeros for gen Qs
                        self.assertEqual(b[row], bus.v_set[t])
                        self.assertEqual(Ai[nnz], row)
                        self.assertEqual(Aj[nnz], bus.index_v_mag[t])
                        self.assertEqual(Ad[nnz], 1.)
                        nnz += 1
                        for gen in bus.reg_generators:
                            self.assertEqual(Ai[nnz], row)
                            self.assertEqual(Aj[nnz], gen.index_Q[t])
                            self.assertEqual(Ad[nnz], 0.)
                            nnz += 1
                        row += 1
                        # Q-sharing rows: Qpar2*Q1 - Qpar1*Q2 = 0 for each
                        # consecutive pair, with a 1e-4 floor on Q_par
                        for i in range(len(bus.reg_generators)-1):
                            gen1 = bus.reg_generators[i]
                            gen2 = bus.reg_generators[i+1]
                            self.assertEqual(b[row], 0.)
                            self.assertEqual(Ai[nnz], row)
                            self.assertEqual(Aj[nnz], bus.index_v_mag[t])
                            self.assertEqual(Ad[nnz], 0.)
                            nnz += 1
                            for gen3 in bus.reg_generators:
                                self.assertEqual(Ai[nnz], row)
                                self.assertEqual(Aj[nnz], gen3.index_Q[t])
                                if gen3.index == gen1.index:
                                    self.assertEqual(Ad[nnz], np.maximum(gen2.Q_par,1e-4))
                                elif gen3.index == gen2.index:
                                    self.assertEqual(Ad[nnz], -np.maximum(gen1.Q_par,1e-4))
                                else:
                                    self.assertEqual(Ad[nnz], 0.)
                                nnz += 1
                            row += 1
            self.assertEqual(nnz,A.nnz)
            # Now with no Q vars: constraint should be empty
            net.clear_flags()
            # Vars
            net.set_flags('bus',
                          'variable',
                          'any',
                          'voltage magnitude')
            self.assertEqual(net.num_vars, net.get_num_buses()*self.T)
            # Analyze
            constr.analyze()
            A = constr.A
            b = constr.b
            self.assertEqual(A.shape[0], 0)
            self.assertEqual(A.nnz, 0)
            self.assertEqual(b.size, 0)
            # Now with no v vars: only the Q-sharing rows remain
            net.clear_flags()
            # Vars
            net.set_flags('generator',
                          'variable',
                          'any',
                          'reactive power')
            self.assertEqual(net.num_vars, net.get_num_generators()*self.T)
            # Analyze
            constr.analyze()
            A = constr.A
            b = constr.b
            nnz = 0
            m = 0
            for bus in net.buses:
                if bus.is_regulated_by_gen():
                    n = len(bus.reg_generators)
                    m += n-1
                    nnz += n*(n-1)
            self.assertEqual(A.shape[0], m*self.T)
            self.assertEqual(A.nnz, nnz*self.T)
            # With outages: all generators out => constraint vanishes
            net.clear_flags()
            net.set_flags('bus',
                          'variable',
                          'regulated by generator',
                          'voltage magnitude')
            net.set_flags('generator',
                          'variable',
                          'slack',
                          ['active power','reactive power'])
            net.set_flags('generator',
                          'variable',
                          'regulator',
                          'reactive power')
            self.assertGreater(net.num_vars, 0)
            for gen in net.generators:
                gen.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_PVPQ_SWITCHING_with_outages(self):
# Multiperiod
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
self.assertEqual(net.num_periods,self.T)
self.assertEqual(net.num_vars,0)
# Vars
net.set_flags('bus',
'variable',
'regulated by generator',
'voltage magnitude')
net.set_flags('generator',
'variable',
'slack',
['active power','reactive power'])
net.set_flags('generator',
'variable',
'regulator',
'reactive power')
self.assertGreater(net.num_vars,0)
self.assertEqual(net.num_vars,
(net.get_num_buses_reg_by_gen()+net.get_num_slack_gens()+net.get_num_reg_gens())*self.T)
constr = pf.Constraint('PVPQ switching', net)
constr.analyze()
A0 = constr.A.copy()
b0 = constr.b.copy()
self.assertEqual(net.get_num_branches_out_of_service(), 0)
self.assertEqual(net.get_num_generators_out_of_service(), 0)
for bus in net.buses:
if bus.is_regulated_by_gen():
for branch in net.branches:
branch.in_service = False
self.assertNotEqual(net.get_num_branches_out_of_service(), 0)
self.assertEqual(net.get_num_generators_out_of_service(), 0)
constr = pf.Constraint('PVPQ switching', net)
constr.analyze()
A1 = constr.A.copy()
b1 = constr.b.copy()
self.assertEqual((A1-A0).tocoo().nnz, 0)
self.assertLess(norm(b1-b0), 1e-8)
for bus in net.buses:
if bus.is_regulated_by_gen():
for gen in bus.reg_generators:
gen.in_service = False
self.assertFalse(bus.is_regulated_by_gen(only_in_service=True))
self.assertNotEqual(net.get_num_generators_out_of_service(), 0)
constr = pf.Constraint('PVPQ switching', net)
constr.analyze()
A2 = constr.A.copy()
b2 = constr.b.copy()
self.assertEqual(A2.shape[0], 0)
self.assertEqual(b2.size, 0)
    def test_constr_ACPF(self):
        """Check the 'AC power balance' constraint.

        Verifies bus P/Q mismatches against a hand-computed sum over all
        connected components (branches, gens, vargens, loads, batteries,
        shunts, VSC/CSC converters, FACTS), cross-checks against network
        properties, checks Jacobian/Hessian structure and time invariance
        for static injections, and finally checks behavior under outages.
        """
        # Constants
        h = 1e-10  # finite-difference step for derivative checks below
        # Multiperiods
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T).get_copy(merge_buses=True)
            self.assertEqual(net.num_periods,self.T)
            # Add vargens
            load_buses = net.get_load_buses()
            net.add_var_generators_from_parameters(load_buses,80.,50.,30.,5,0.05)
            self.assertGreater(net.num_var_generators,0)
            self.assertEqual(net.num_var_generators,len(load_buses))
            for vargen in net.var_generators:
                vargen.P = np.random.rand(net.num_periods)
                vargen.Q = np.random.randn(net.num_periods)
            # Add batteries
            gen_buses = net.get_generator_buses()
            net.add_batteries_from_parameters(gen_buses,20.,40.,0.8,0.9)
            self.assertGreater(net.num_batteries,0)
            self.assertEqual(net.num_batteries,len(gen_buses))
            for bat in net.batteries:
                bat.P = np.random.randn(net.num_periods)
            # No vars
            self.assertEqual(net.num_vars,0)
            # Constraint
            constr = pf.Constraint('AC power balance',net)
            self.assertEqual(constr.name,'AC power balance')
            x0 = net.get_var_values()
            self.assertEqual(x0.size,0)
            constr.analyze()
            constr.eval(x0)
            f = constr.f
            self.assertEqual(f.size,2*net.num_buses*net.num_periods)
            # Check mismatches (no vars): injections positive, withdrawals negative
            for t in range(net.num_periods):
                for bus in net.buses:
                    P_mis = 0.
                    Q_mis = 0.
                    for branch in bus.branches_k:
                        P_mis -= branch.get_P_km()[t]
                        Q_mis -= branch.get_Q_km()[t]
                    for branch in bus.branches_m:
                        P_mis -= branch.get_P_mk()[t]
                        Q_mis -= branch.get_Q_mk()[t]
                    for gen in bus.generators:
                        P_mis += gen.P[t]
                        Q_mis += gen.Q[t]
                    for vargen in bus.var_generators:
                        P_mis += vargen.P[t]
                        Q_mis += vargen.Q[t]
                    for load in bus.loads:
                        P_mis -= load.P[t]
                        Q_mis -= load.Q[t]
                    for bat in bus.batteries:
                        P_mis -= bat.P[t]
                    for shunt in bus.shunts:
                        P_mis -= shunt.g*(bus.v_mag[t]**2.)
                        Q_mis -= -shunt.b[t]*(bus.v_mag[t]**2.)
                    for conv in bus.csc_converters:
                        P_mis += conv.P[t]
                        Q_mis += conv.Q[t]
                    for conv in bus.vsc_converters:
                        P_mis += conv.P[t]
                        Q_mis += conv.Q[t]
                    for facts in bus.facts_k:
                        P_mis += facts.P_k[t]
                        Q_mis += facts.Q_k[t]
                    for facts in bus.facts_m:
                        P_mis += facts.P_m[t]
                        Q_mis += facts.Q_m[t]
                    self.assertAlmostEqual(P_mis,f[bus.dP_index[t]])
                    self.assertAlmostEqual(Q_mis,f[bus.dQ_index[t]])
            # Cross check mismatches with net properties (no vars)
            net.update_properties()
            dP_list = dict([(t,list()) for t in range(self.T)])
            dQ_list = dict([(t,list()) for t in range(self.T)])
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    dP = f[bus.dP_index[t]]
                    dQ = f[bus.dQ_index[t]]
                    dP_list[t].append(dP)
                    dQ_list[t].append(dQ)
                    self.assertAlmostEqual(dP,bus.P_mismatch[t])
                    self.assertAlmostEqual(dQ,bus.Q_mismatch[t])
                self.assertAlmostEqual(net.bus_P_mis[t],np.max(np.abs(dP_list[t]))*net.base_power)
                self.assertAlmostEqual(net.bus_Q_mis[t],np.max(np.abs(dQ_list[t]))*net.base_power)
            # Vars
            net.set_flags('bus',
                          'variable',
                          'any',
                          ['voltage magnitude','voltage angle'])
            net.set_flags('generator',
                          'variable',
                          'any',
                          ['active power','reactive power'])
            net.set_flags('load',
                          'variable',
                          'any',
                          ['active power','reactive power'])
            net.set_flags('branch',
                          'variable',
                          'tap changer',
                          'tap ratio')
            net.set_flags('branch',
                          'variable',
                          'phase shifter',
                          'phase shift')
            net.set_flags('shunt',
                          'variable',
                          'switching - v',
                          'susceptance')
            net.set_flags('variable generator',
                          'variable',
                          'any',
                          ['active power','reactive power'])
            net.set_flags('battery',
                          'variable',
                          'any',
                          ['charging power','energy level'])
            net.set_flags('vsc converter',
                          'variable',
                          'any',
                          ['active power', 'reactive power', 'dc power'])
            net.set_flags('facts',
                          'variable',
                          'any',
                          ['active power', 'reactive power'])
            self.assertEqual(net.num_vars,
                             (2*net.num_buses +
                              2*net.num_generators +
                              2*net.num_loads +
                              net.get_num_tap_changers() +
                              net.get_num_phase_shifters() +
                              net.get_num_switched_v_shunts() +
                              3*net.num_batteries +
                              net.num_var_generators*2 +
                              net.num_vsc_converters*4 +
                              net.num_facts*7)*self.T)
            # Check facts
            for facts in net.facts:
                self.assertTrue(facts.has_flags('variable', 'active power'))
                self.assertTrue(facts.has_flags('variable', 'reactive power'))
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('AC power balance',net)
            self.assertEqual(constr.name,'AC power balance')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            G = constr.G
            # Before analyze/eval: everything should be empty
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,0))
            self.assertEqual(G.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            # Manual Jacobian nnz count (STATCOMs contribute 2, other FACTS 4)
            num_statcom = len([f for f in net.facts if f.is_STATCOM()])
            num_Jnnz = (net.num_buses*4 +
                        net.num_branches*8 +
                        net.get_num_tap_changers()*4 +
                        net.get_num_phase_shifters()*4 +
                        net.get_num_switched_v_shunts() +
                        net.num_generators*2 +
                        net.num_loads*2 +
                        net.num_batteries*2 +
                        net.num_var_generators*2 +
                        net.num_vsc_converters*2 +
                        (net.num_facts-num_statcom)*4+num_statcom*2)*self.T
            constr.analyze()
            self.assertEqual(num_Jnnz,constr.J_nnz)
            constr.eval(x0)
            self.assertEqual(num_Jnnz,constr.J_nnz)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            G = constr.G
            constr.combine_H(np.ones(f.size),False)
            Hcomb = constr.H_combined
            # After
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(2*net.num_buses*self.T,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(2*net.num_buses*self.T,net.num_vars))
            self.assertEqual(J.nnz,num_Jnnz)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,net.num_vars))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,net.num_vars))
            self.assertEqual(G.nnz,0)
            self.assertTupleEqual(Hcomb.shape,(net.num_vars,net.num_vars))
            self.assertEqual(Hcomb.nnz,2*(net.get_num_buses()*3 +
                                          net.get_num_branches()*12 +
                                          net.get_num_tap_changers()*9 +
                                          net.get_num_phase_shifters()*10 +
                                          net.get_num_switched_v_shunts())*self.T)
            self.assertTrue(not np.any(np.isinf(f)))
            self.assertTrue(not np.any(np.isnan(f)))
            # Check mismatches at a perturbed point, reading values from x1
            # when the component is a variable and from the object otherwise
            x1 = x0+np.random.randn(net.num_vars)
            constr.eval(x1)
            for t in range(net.num_periods):
                for bus in net.buses:
                    P_mis = 0.
                    Q_mis = 0.
                    for branch in bus.branches_k:
                        P_mis -= branch.get_P_km(x1)[t]
                        Q_mis -= branch.get_Q_km(x1)[t]
                    for branch in bus.branches_m:
                        P_mis -= branch.get_P_mk(x1)[t]
                        Q_mis -= branch.get_Q_mk(x1)[t]
                    for gen in bus.generators:
                        P_mis += x1[gen.index_P[t]]
                        Q_mis += x1[gen.index_Q[t]]
                    for vargen in bus.var_generators:
                        P_mis += x1[vargen.index_P[t]]
                        Q_mis += x1[vargen.index_Q[t]]
                    for load in bus.loads:
                        P_mis -= x1[load.index_P[t]]
                        Q_mis -= x1[load.index_Q[t]]
                    for bat in bus.batteries:
                        P_mis -= x1[bat.index_Pc[t]]-x1[bat.index_Pd[t]]
                    for shunt in bus.shunts:
                        if shunt.has_flags('variable','susceptance'):
                            b = x1[shunt.index_b[t]]
                        else:
                            b = shunt.b[t]
                        if bus.has_flags('variable','voltage magnitude'):
                            v = x1[bus.index_v_mag[t]]
                        else:
                            v = bus.v_mag[t]
                        P_mis -= shunt.g*v*v
                        Q_mis -= -b*v*v
                    for conv in bus.vsc_converters:
                        if conv.has_flags('variable', 'active power'):
                            P_mis += x1[conv.index_P[t]]
                        else:
                            P_mis += conv.P[t]
                        if conv.has_flags('variable', 'reactive power'):
                            Q_mis += x1[conv.index_Q[t]]
                        else:
                            Q_mis += conv.Q[t]
                    for conv in bus.csc_converters:
                        P_mis += conv.P[t]
                        Q_mis += conv.Q[t]
                    for facts in bus.facts_k:
                        self.assertTrue(facts.has_flags('variable', 'active power'))
                        P_mis += x1[facts.index_P_k[t]]
                        self.assertTrue(facts.has_flags('variable', 'reactive power'))
                        Q_mis += x1[facts.index_Q_k[t]]
                    for facts in bus.facts_m:
                        self.assertTrue(facts.has_flags('variable', 'active power'))
                        P_mis += x1[facts.index_P_m[t]]
                        self.assertTrue(facts.has_flags('variable', 'reactive power'))
                        Q_mis += x1[facts.index_Q_m[t]]
                    self.assertAlmostEqual(P_mis,f[bus.dP_index[t]])
                    self.assertAlmostEqual(Q_mis,f[bus.dQ_index[t]])
            # Cross check mismatches with net properties
            constr.eval(x1)
            net.update_properties(x1)
            dP_list = dict([(t,list()) for t in range(self.T)])
            dQ_list = dict([(t,list()) for t in range(self.T)])
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    dP = f[bus.dP_index[t]]
                    dQ = f[bus.dQ_index[t]]
                    dP_list[t].append(dP)
                    dQ_list[t].append(dQ)
                    self.assertAlmostEqual(dP,bus.P_mismatch[t])
                    self.assertAlmostEqual(dQ,bus.Q_mismatch[t])
                self.assertAlmostEqual(net.bus_P_mis[t],np.max(np.abs(dP_list[t]))*net.base_power)
                self.assertAlmostEqual(net.bus_Q_mis[t],np.max(np.abs(dQ_list[t]))*net.base_power)
            # Check mismatches across time: with static injections, f, J*x
            # and the per-bus Hessians must be identical for every period
            for vargen in net.var_generators:
                vargen.P = np.ones(net.num_periods)*0.2 # static
                vargen.Q = np.ones(net.num_periods)*0.1 # static
            for bat in net.batteries:
                bat.P = np.ones(net.num_periods)*0.1 # static
            x0 = net.get_var_values()
            constr.eval(x0)
            f = constr.f
            J = constr.J
            P_list = []
            for t in range(self.T):
                P_list.append(net.get_var_projection('all','any','all',t_start=t,t_end=t))
            fp_list = [f[[bus.dP_index[t] for bus in net.buses]] for t in range(self.T)] # t*net.num_buses:(t+1)*net.num_buses
            fq_list = [f[[bus.dQ_index[t] for bus in net.buses]] for t in range(self.T)] # (t+self.T)*net.num_buses:(t+1+self.T)*net.num_buses
            for t in range(self.T-1):
                self.assertLess(norm(fp_list[t]-fp_list[t+1]),1e-12*norm(fp_list[t]))
                self.assertLess(norm(fq_list[t]-fq_list[t+1]),1e-12*norm(fq_list[t]))
            Jx = J*x0
            Jxp_list = [Jx[[bus.dP_index[t] for bus in net.buses]] for t in range(self.T)] # t*net.num_buses:(t+1)*net.num_buses
            Jxq_list = [Jx[[bus.dQ_index[t] for bus in net.buses]] for t in range(self.T)] # (t+self.T)*net.num_buses:(t+1+self.T)*net.num_buses
            for t in range(self.T-1):
                self.assertLess(norm(Jxp_list[t]-Jxp_list[t+1]),1e-12*norm(Jxp_list[t]))
                self.assertLess(norm(Jxq_list[t]-Jxq_list[t+1]),1e-12*norm(Jxq_list[t]))
            # Compare single Hessians of random buses across periods
            for i in range(10):
                Hp_list = []
                Hq_list = []
                j = np.random.randint(0,net.num_buses)
                for t in range(self.T):
                    Hp_list.append(coo_matrix(P_list[t]*constr.get_H_single(net.get_bus(j).dP_index[t])*P_list[t].T))
                    Hq_list.append(coo_matrix(P_list[t]*constr.get_H_single(net.get_bus(j).dQ_index[t])*P_list[t].T))
                for t in range(self.T-1):
                    self.assertTrue(np.all(Hp_list[t].row == Hp_list[t+1].row))
                    self.assertTrue(np.all(Hp_list[t].col == Hp_list[t+1].col))
                    self.assertLessEqual(norm(Hp_list[t].data-Hp_list[t+1].data),1e-12*norm(Hp_list[t].data))
                    self.assertTrue(np.all(Hq_list[t].row == Hq_list[t+1].row))
                    self.assertTrue(np.all(Hq_list[t].col == Hq_list[t+1].col))
                    self.assertLessEqual(norm(Hq_list[t].data-Hq_list[t+1].data),1e-12*norm(Hq_list[t].data))
            # Jacobian check
            pf.tests.utils.check_constraint_Jacobian(self,
                                                     constr,
                                                     x0,
                                                     np.zeros(0),
                                                     NUM_TRIALS,
                                                     TOL,
                                                     EPS,
                                                     h)
            # Single Hessian check
            pf.tests.utils.check_constraint_single_Hessian(self,
                                                           constr,
                                                           x0,
                                                           np.zeros(0),
                                                           NUM_TRIALS,
                                                           TOL,
                                                           EPS,
                                                           h)
            # Combined Hessian check
            pf.tests.utils.check_constraint_combined_Hessian(self,
                                                             constr,
                                                             x0,
                                                             np.zeros(0),
                                                             NUM_TRIALS,
                                                             TOL,
                                                             EPS,
                                                             h)
            # Sensitivities: store per-bus values derived from the indices
            # so they can be verified exactly after the round trip
            net.clear_sensitivities()
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    self.assertEqual(bus.sens_P_balance[t],0.)
                    self.assertEqual(bus.sens_Q_balance[t],0.)
            sens = np.zeros(2*net.num_buses*self.T)
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    sens[bus.dP_index[t]] = 3.5*bus.dP_index[t]+0.33+t*2*net.num_buses
                    sens[bus.dQ_index[t]] = 3.4*bus.dQ_index[t]+0.32+t*2*net.num_buses
            constr.store_sensitivities(None,sens,None,None)
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    self.assertEqual(bus.sens_P_balance[t],3.5*bus.dP_index[t]+0.33+t*2*net.num_buses)
                    self.assertEqual(bus.sens_Q_balance[t],3.4*bus.dQ_index[t]+0.32+t*2*net.num_buses)
            # With outages of everything except buses: the constraint keeps
            # its bus structure but all values/derivatives are zero
            for gen in net.generators:
                gen.in_service = False
            for branch in net.branches:
                branch.in_service = False
            for load in net.loads:
                load.in_service = False
            for bus in net.dc_buses:
                bus.in_service = False
            for branch in net.dc_branches:
                branch.in_service = False
            for conv in net.csc_converters:
                conv.in_service = False
            for conv in net.vsc_converters:
                conv.in_service = False
            for facts in net.facts:
                facts.in_service = False
            for bat in net.batteries:
                bat.in_service = False
            for gen in net.var_generators:
                gen.in_service = False
            for shunt in net.shunts:
                shunt.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, net.num_buses*4*self.T)
            self.assertEqual(constr.J.shape[0], 2*net.num_buses*self.T)
            self.assertEqual(constr.f.size, 2*net.num_buses*self.T)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 2*net.num_buses*3*self.T)
            self.assertFalse(np.all(constr.H_nnz == 0), 0)
            # With bus outages: the constraint vanishes entirely
            net.make_all_in_service()
            for bus in net.buses:
                bus.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
    def test_constr_ACPF_with_outages(self):
        """AC power balance constraint with generator/branch outages.

        Outages generators and branches incident to the first 10 buses and
        checks that the power mismatch vector f changes exactly by the
        removed injections/flows, then re-validates Jacobian and Hessians
        of the post-outage constraint against finite differences.
        """
        # Constants
        h = 1e-10  # finite-difference step for derivative checks
        # Multiperiods
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            # Vars: make essentially everything a variable so the Jacobian
            # is exercised over all quantity types.
            net.set_flags('bus',
                          'variable',
                          'any',
                          ['voltage magnitude','voltage angle'])
            net.set_flags('generator',
                          'variable',
                          'any',
                          ['active power','reactive power'])
            net.set_flags('load',
                          'variable',
                          'any',
                          ['active power','reactive power'])
            net.set_flags('branch',
                          'variable',
                          'tap changer',
                          'tap ratio')
            net.set_flags('branch',
                          'variable',
                          'phase shifter',
                          'phase shift')
            net.set_flags('shunt',
                          'variable',
                          'switching - v',
                          'susceptance')
            net.set_flags('variable generator',
                          'variable',
                          'any',
                          ['active power','reactive power'])
            net.set_flags('battery',
                          'variable',
                          'any',
                          ['charging power','energy level'])
            self.assertEqual(net.num_vars,
                             (2*net.num_buses +
                              2*net.num_generators +
                              2*net.num_loads +
                              net.get_num_tap_changers() +
                              net.get_num_phase_shifters() +
                              net.get_num_switched_v_shunts() +
                              3*net.num_batteries +
                              net.num_var_generators*2)*self.T)
            x0 = net.get_var_values()
            # Baseline constraint evaluated before any outages.
            constr0 = pf.Constraint('AC power balance', net)
            constr0.analyze()
            constr0.eval(x0)
            # Outage all gens and all incident branches of the first 10 buses;
            # 'side' collects the opposite-end buses of the outaged branches.
            buses = net.buses[:10]
            side = []
            for bus in buses:
                for gen in bus.generators:
                    gen.in_service = False
                for br in bus.branches_k:
                    self.assertTrue(bus.is_equal(br.bus_k))
                    br.in_service = False
                    side.append(br.bus_m)
                for br in bus.branches_m:
                    self.assertTrue(bus.is_equal(br.bus_m))
                    br.in_service = False
                    side.append(br.bus_k)
            constr1 = pf.Constraint('AC power balance', net)
            constr1.analyze()
            constr1.eval(x0)
            f0 = constr0.f
            f1 = constr1.f
            # Buses untouched by the outages must keep identical mismatches.
            for bus in net.buses:
                if bus not in buses+side:
                    for t in range(self.T):
                        i = bus.dP_index[t]
                        j = bus.dQ_index[t]
                        self.assertLess(np.abs(f0[i]-f1[i]), 1e-8)
                        self.assertLess(np.abs(f0[j]-f1[j]), 1e-8)
            # Outaged buses: mismatch difference equals removed gen injections
            # minus removed branch flows, per period.
            for bus in buses:
                for t in range(self.T):
                    i = bus.dP_index[t]
                    j = bus.dQ_index[t]
                    dp = 0.
                    dq = 0.
                    for gen in bus.generators:
                        self.assertFalse(gen.is_in_service())
                        dp += gen.P[t]
                        dq += gen.Q[t]
                    for br in bus.branches_k:
                        dp -= br.P_km[t]
                        dq -= br.Q_km[t]
                    for br in bus.branches_m:
                        dp -= br.P_mk[t]
                        dq -= br.Q_mk[t]
                    self.assertLess(np.abs(f1[i]+dp-f0[i]), 1e-8)
                    self.assertLess(np.abs(f1[j]+dq-f0[j]), 1e-8)
            # Jacobian check
            pf.tests.utils.check_constraint_Jacobian(self,
                                                     constr1,
                                                     x0,
                                                     np.zeros(0),
                                                     NUM_TRIALS,
                                                     TOL,
                                                     EPS,
                                                     h)
            # Single Hessian check
            pf.tests.utils.check_constraint_single_Hessian(self,
                                                           constr1,
                                                           x0,
                                                           np.zeros(0),
                                                           NUM_TRIALS,
                                                           TOL,
                                                           EPS,
                                                           h)
            # Combined Hessian check
            pf.tests.utils.check_constraint_combined_Hessian(self,
                                                             constr1,
                                                             x0,
                                                             np.zeros(0),
                                                             NUM_TRIALS,
                                                             TOL,
                                                             EPS,
                                                             h)
    def test_constr_REG_VSET(self):
        """Voltage set point regulation constraint (structure and values).

        Checks matrix/vector shapes before and after analyze/eval, the
        expected sparsity counts, the complementarity residuals f against a
        hand-computed soft-max formula, derivative checks, sensitivity
        storage, and the all-generators-out degenerate case.
        """
        # Constants
        h = 1e-8  # finite-difference step for derivative checks
        # Multiperiod
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            # Vars
            net.set_flags('bus',
                          'variable',
                          'not slack',
                          ['voltage magnitude','voltage angle'])
            net.set_flags('generator',
                          'variable',
                          'slack',
                          'active power')
            net.set_flags('generator',
                          'variable',
                          'regulator',
                          'reactive power')
            self.assertEqual(net.num_vars,
                             (2*(net.num_buses-net.get_num_slack_buses()) +
                              net.get_num_slack_gens() +
                              net.get_num_reg_gens())*self.T)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('voltage set point regulation',net)
            self.assertEqual(constr.name,'voltage set point regulation')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # Before analyze(): everything empty.
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,0)
            self.assertEqual(constr.num_extra_vars,0)
            # Expected counts: 4 Jacobian nnz per regulating generator at a
            # non-slack gen-regulated bus; A has 3 nnz per non-slack
            # regulating generator (v, y, z columns).
            Jnnz = 0
            for i in range(net.num_buses):
                bus = net.get_bus(i)
                if bus.is_regulated_by_gen() and not bus.is_slack():
                    for gen in bus.reg_generators:
                        Jnnz += 4
            Annz = 3*(net.get_num_reg_gens()-net.get_num_slack_gens())
            rowsJ = 2*(net.get_num_reg_gens()-net.get_num_slack_gens())
            rowsA = net.get_num_reg_gens()-net.get_num_slack_gens()
            constr.analyze()
            self.assertEqual(constr.J_nnz,Jnnz*self.T)
            self.assertEqual(constr.A_nnz,Annz*self.T)
            self.assertEqual(constr.J_row,rowsJ*self.T)
            self.assertEqual(constr.A_row,rowsA*self.T)
            self.assertEqual(constr.num_extra_vars,rowsJ*self.T)
            # Extra (slack) variables start at zero.
            y_init = constr.init_extra_vars
            self.assertEqual(y_init.size,constr.num_extra_vars)
            self.assertTrue(np.all(y_init == 0.))
            y0 = np.random.rand(constr.num_extra_vars)
            constr.eval(x0,y0)
            # eval() refreshes J counters but not A counters.
            self.assertEqual(constr.J_nnz,Jnnz*self.T)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.J_row,rowsJ*self.T)
            self.assertEqual(constr.A_row,0)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # After analyze()+eval(): shapes include the extra variables.
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(rowsJ*self.T,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(rowsA*self.T,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(rowsJ*self.T,net.num_vars+constr.num_extra_vars))
            self.assertEqual(J.nnz,Jnnz*self.T)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(rowsA*self.T,net.num_vars+constr.num_extra_vars))
            self.assertEqual(A.nnz,Annz*self.T)
            self.assertTrue(not np.any(np.isinf(b)))
            self.assertTrue(not np.any(np.isnan(b)))
            self.assertTrue(not np.any(np.isinf(f)))
            self.assertTrue(not np.any(np.isnan(f)))
            self.assertTrue(not np.any(np.isinf(J.data)))
            self.assertTrue(not np.any(np.isnan(J.data)))
            self.assertTrue(not np.any(np.isinf(A.data)))
            self.assertTrue(not np.any(np.isnan(A.data)))
            # Ax=b check: each A row couples v (coef 1), y (coef -1), z (coef 1).
            self.assertEqual(norm(A.data,1),rowsA*3*self.T)
            self.assertEqual(np.sum(A.data),(net.get_num_reg_gens()-net.get_num_slack_gens())*self.T)
            for k in range(J.shape[0]//2):
                index1 = np.where(A.col == net.num_vars+2*k)[0]
                index2 = np.where(A.col == net.num_vars+2*k+1)[0]
                self.assertEqual(index1.size,1)
                self.assertEqual(index2.size,1)
                self.assertEqual(A.row[index1[0]],A.row[index2[0]])
                index3 = np.where(A.row == A.row[index1[0]])[0]
                self.assertEqual(index3.size,3)
                for i in index3:
                    if A.col[i] == net.num_vars+2*k: # y (extra var, even column)
                        self.assertEqual(A.data[i],-1.)
                    elif A.col[i] == net.num_vars+2*k+1:
                        self.assertEqual(A.data[i],1.) # z (extra var, odd column)
                    else:
                        self.assertEqual(A.data[i],1.) # v (bus voltage magnitude)
            # f check: smoothed complementarity between reactive-power limit
            # slack and the extra variables y, z.
            flags = {}
            eps = 1e-8  # smoothing constant in the soft complementarity
            J_row = 0
            for t in range(self.T):
                for bus in net.buses:
                    if bus.is_regulated_by_gen() and not bus.is_slack():
                        for gen in bus.reg_generators:
                            y = y0[J_row]
                            z = y0[J_row+1]
                            Q = gen.Q[t]
                            Qmax = gen.Q_max
                            Qmin = gen.Q_min
                            CompY = (Q-Qmin)+y-np.sqrt((Q-Qmin)**2.+y**2.+2*eps)
                            CompZ = (Qmax-Q)+z-np.sqrt((Qmax-Q)**2.+z**2.+2*eps)
                            self.assertAlmostEqual(CompY,f[J_row])
                            self.assertAlmostEqual(CompZ,f[J_row+1])
                            J_row += 2
            # Jacobian check
            pf.tests.utils.check_constraint_Jacobian(self,
                                                     constr,
                                                     x0,
                                                     y0,
                                                     NUM_TRIALS,
                                                     TOL,
                                                     EPS,
                                                     h)
            # Single Hessian check
            pf.tests.utils.check_constraint_single_Hessian(self,
                                                           constr,
                                                           x0,
                                                           y0,
                                                           NUM_TRIALS,
                                                           TOL,
                                                           EPS,
                                                           h)
            # Combined Hessian check
            pf.tests.utils.check_constraint_combined_Hessian(self,
                                                             constr,
                                                             x0,
                                                             y0,
                                                             NUM_TRIALS,
                                                             TOL,
                                                             EPS,
                                                             h)
            # Sensitivities: the stored per-bus value should come from the
            # larger-magnitude of the two rows touching the bus's last
            # regulating generator (sensA supplies 10.5 for the A rows).
            net.clear_sensitivities()
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    self.assertEqual(bus.sens_v_set_reg[t],0.)
            sensf = np.zeros(constr.f.size)
            sensA = np.ones(constr.b.size)*10.5
            self.assertEqual(sensf.size,rowsJ*self.T)
            Ji = constr.J.row
            Jj = constr.J.col
            Ai = constr.A.row
            Aj = constr.A.col
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    if bus.is_regulated_by_gen() and not bus.is_slack():
                        indices = Ji[np.where(Jj == bus.reg_generators[-1].index_Q[t])[0]]
                        self.assertEqual(indices.size,2)
                        sensf[indices[0]] = -bus.index-10
                        sensf[indices[1]] = bus.index+11*(bus.index % 2)
            constr.store_sensitivities(sensA,sensf,None,None)
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    if bus.is_regulated_by_gen() and not bus.is_slack():
                        if bus.index % 2 == 1:
                            self.assertEqual(bus.sens_v_set_reg[t],bus.index+11)
                        else:
                            self.assertEqual(bus.sens_v_set_reg[t],-bus.index-10 if bus.index != 0 else 10.5)
            # With outages: no in-service regulating generators means an
            # empty constraint.
            for gen in net.generators:
                gen.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_REG_VSET_with_outages(self):
# Multiperiod
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
self.assertEqual(net.num_periods,self.T)
# Vars
net.set_flags('bus',
'variable',
'not slack',
['voltage magnitude','voltage angle'])
net.set_flags('generator',
'variable',
'slack',
'active power')
net.set_flags('generator',
'variable',
'regulator',
'reactive power')
self.assertEqual(net.num_vars,
(2*(net.num_buses-net.get_num_slack_buses()) +
net.get_num_slack_gens() +
net.get_num_reg_gens())*self.T)
x0 = net.get_var_values()
self.assertTrue(type(x0) is np.ndarray)
self.assertTupleEqual(x0.shape,(net.num_vars,))
constr0 = pf.Constraint('voltage set point regulation', net)
constr0.analyze()
constr0.eval(x0)
for bus in net.buses:
if bus.is_regulated_by_gen():
for branch in bus.branches:
branch.in_service = False
constr1 = pf.Constraint('voltage set point regulation', net)
constr1.analyze()
constr1.eval(x0)
self.assertEqual((constr0.A-constr1.A).tocoo().nnz, 0)
self.assertLess(norm(constr0.b-constr1.b), 1e-8)
self.assertEqual((constr0.J-constr1.J).tocoo().nnz, 0)
self.assertLess(norm(constr0.f-constr1.f), 1e-8)
for bus in net.buses:
if bus.is_regulated_by_gen():
for gen in bus.reg_generators:
gen.in_service = False
self.assertFalse(bus.is_regulated_by_gen(only_in_service=True))
self.assertTrue(bus.is_regulated_by_gen())
constr2 = pf.Constraint('voltage set point regulation', net)
constr2.analyze()
constr2.eval(x0)
self.assertEqual(constr2.A.shape[0], 0)
self.assertEqual(constr2.J.shape[0], 0)
    def test_constr_REG_TRAN(self):
        """Voltage regulation by transformers constraint.

        Verifies shapes/sparsity before and after analyze/eval, the
        smoothed band-violation residuals f against a hand-computed
        formula, derivative checks, sensitivity storage, and the
        all-branches-out degenerate case.
        """
        # Constants
        h = 1e-8      # finite-difference step for derivative checks
        normal = 1e0  # scaling applied to each residual
        eta = 1e-8    # smoothing constant in the soft violation terms
        # Multiperiod
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            # Vars
            net.set_flags('bus',
                          'variable',
                          'regulated by transformer',
                          'voltage magnitude')
            net.set_flags('branch',
                          'variable',
                          'tap changer - v',
                          'tap ratio')
            self.assertEqual(net.num_vars,
                             (net.get_num_buses_reg_by_tran() +
                              net.get_num_tap_changers_v())*self.T)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('voltage regulation by transformers',net)
            self.assertEqual(constr.name,'voltage regulation by transformers')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # Before analyze(): everything empty.
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,0)
            # Expected counts per v-regulating tap changer:
            # 10 Jacobian nnz, 3 A nnz, 4 J rows, 1 A row.
            Jnnz = 10*net.get_num_tap_changers_v()
            Annz = 3*net.get_num_tap_changers_v()
            self.assertGreaterEqual(Jnnz,0)
            self.assertGreaterEqual(Annz,0)
            rowsJ = 4*net.get_num_tap_changers_v()
            rowsA = net.get_num_tap_changers_v()
            self.assertGreaterEqual(rowsJ,0)
            self.assertGreaterEqual(rowsA,0)
            constr.analyze()
            self.assertEqual(constr.J_nnz,Jnnz*self.T)
            self.assertEqual(constr.A_nnz,Annz*self.T)
            self.assertEqual(constr.J_row,rowsJ*self.T)
            self.assertEqual(constr.A_row,rowsA*self.T)
            # Extra (slack) variables start at zero.
            y_init = constr.init_extra_vars
            self.assertEqual(y_init.size,constr.num_extra_vars)
            self.assertTrue(np.all(y_init == 0.))
            constr.eval(x0)
            # eval() refreshes J counters but not A counters.
            self.assertEqual(constr.J_nnz,Jnnz*self.T)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.J_row,rowsJ*self.T)
            self.assertEqual(constr.A_row,0)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # After analyze()+eval(): shapes include the extra variables.
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(rowsJ*self.T,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(rowsA*self.T,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(rowsJ*self.T,net.num_vars+constr.num_extra_vars))
            self.assertEqual(J.nnz,Jnnz*self.T)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(rowsA*self.T,net.num_vars+constr.num_extra_vars))
            self.assertEqual(A.nnz,Annz*self.T)
            self.assertEqual(constr.num_extra_vars,rowsJ*self.T)
            self.assertTrue(not np.any(np.isinf(b)))
            self.assertTrue(not np.any(np.isnan(b)))
            self.assertTrue(not np.any(np.isinf(f)))
            self.assertTrue(not np.any(np.isnan(f)))
            self.assertTrue(not np.any(np.isinf(J.data)))
            self.assertTrue(not np.any(np.isnan(J.data)))
            self.assertTrue(not np.any(np.isinf(A.data)))
            self.assertTrue(not np.any(np.isnan(A.data)))
            y0 = np.random.rand(constr.num_extra_vars)
            # Ax=b check
            self.assertEqual(norm(A.data,1),rowsA*3*self.T)
            self.assertEqual(np.sum(A.data),net.get_num_tap_changers_v()*self.T)
            # f check: four smoothed residuals per tap changer — regulated-bus
            # voltage band (vmin/vmax) and tap-ratio band (tmin/tmax).
            index = 0
            for t in range(self.T):
                for bus in net.buses:
                    for br in bus.branches_k:
                        if br.is_tap_changer_v():
                            self.assertTrue(br.has_flags('variable','tap ratio'))
                            # NOTE: rebinds the loop variable 'bus' to the
                            # regulated bus for the rest of this iteration.
                            bus = br.reg_bus
                            fvmin = ((bus.v_mag[t]-bus.v_min_reg) - np.sqrt((bus.v_mag[t]-bus.v_min_reg)**2. + 2*eta))*normal
                            fvmax = ((bus.v_max_reg-bus.v_mag[t]) - np.sqrt((bus.v_max_reg-bus.v_mag[t])**2. + 2*eta))*normal
                            ftmax = ((br.ratio_max-br.ratio[t]) - np.sqrt((br.ratio_max-br.ratio[t])**2. + 2*eta))*normal
                            ftmin = ((br.ratio[t]-br.ratio_min) - np.sqrt((br.ratio[t]-br.ratio_min)**2. + 2*eta))*normal
                            self.assertLess(np.abs(fvmin-f[index]),1e-10*(1+np.abs(fvmin)))
                            self.assertLess(np.abs(fvmax-f[index+1]),1e-10*(1+np.abs(fvmax)))
                            self.assertLess(np.abs(ftmax-f[index+2]),1e-10*(1+np.abs(ftmax)))
                            self.assertLess(np.abs(ftmin-f[index+3]),1e-10*(1+np.abs(ftmin)))
                            index += 4
            # Jacobian check
            pf.tests.utils.check_constraint_Jacobian(self,
                                                     constr,
                                                     x0,
                                                     y0,
                                                     NUM_TRIALS,
                                                     TOL,
                                                     EPS,
                                                     h)
            # Single Hessian check
            pf.tests.utils.check_constraint_single_Hessian(self,
                                                           constr,
                                                           x0,
                                                           y0,
                                                           NUM_TRIALS,
                                                           TOL,
                                                           EPS,
                                                           h)
            # Combined Hessian check
            pf.tests.utils.check_constraint_combined_Hessian(self,
                                                             constr,
                                                             x0,
                                                             y0,
                                                             NUM_TRIALS,
                                                             TOL,
                                                             EPS,
                                                             h)
            # Sensitivities: each tap changer's four rows carry the same
            # marker value, which must land on the regulated bus.
            net.clear_sensitivities()
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    self.assertEqual(bus.sens_v_reg_by_tran[t],0.)
            sens = np.zeros(constr.f.size)
            counter = 0
            for t in range(self.T):
                for bus in net.buses:
                    for branch in bus.branches_k:
                        if branch.is_tap_changer_v():
                            sens[counter:counter+4] = branch.reg_bus.index*t
                            counter += 4
            self.assertEqual(counter,constr.f.size)
            constr.store_sensitivities(np.zeros(constr.A.shape[0]),sens,None,None)
            for t in range(self.T):
                for bus in net.buses:
                    for branch in bus.branches_k:
                        if branch.is_tap_changer_v():
                            self.assertEqual(branch.reg_bus.sens_v_reg_by_tran[t],branch.reg_bus.index*t)
            # With outages: no in-service branches means an empty constraint.
            for br in net.branches:
                br.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_REG_TRAN_with_outages(self):
# Multiperiod
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
self.assertEqual(net.num_periods,self.T)
# Vars
net.set_flags('bus',
'variable',
'regulated by transformer',
'voltage magnitude')
net.set_flags('branch',
'variable',
'tap changer - v',
'tap ratio')
self.assertEqual(net.num_vars,
(net.get_num_buses_reg_by_tran() +
net.get_num_tap_changers_v())*self.T)
x0 = net.get_var_values()
self.assertTrue(type(x0) is np.ndarray)
self.assertTupleEqual(x0.shape,(net.num_vars,))
constr0 = pf.Constraint('voltage regulation by transformers', net)
constr0.analyze()
constr0.eval(x0)
for bus in net.buses:
if bus.is_regulated_by_tran():
for gen in bus.generators:
gen.in_service = False
constr1 = pf.Constraint('voltage regulation by transformers', net)
constr1.analyze()
constr1.eval(x0)
self.assertEqual((constr0.A-constr1.A).tocoo().nnz, 0)
self.assertLess(norm(constr0.b-constr1.b), 1e-8)
self.assertEqual((constr0.J-constr1.J).tocoo().nnz, 0)
self.assertLess(norm(constr0.f-constr1.f), 1e-8)
for bus in net.buses:
if bus.is_regulated_by_tran():
for branch in bus.reg_trans:
branch.in_service = False
self.assertFalse(bus.is_regulated_by_tran(only_in_service=True))
self.assertTrue(bus.is_regulated_by_tran())
constr2 = pf.Constraint('voltage regulation by transformers', net)
constr2.analyze()
constr2.eval(x0)
self.assertEqual(constr2.A.shape[0], 0)
self.assertEqual(constr2.J.shape[0], 0)
    def test_constr_REG_SHUNT(self):
        """Voltage regulation by shunts constraint.

        Verifies shapes/sparsity before and after analyze/eval, the
        smoothed band-violation residuals f against a hand-computed
        formula, derivative checks, sensitivity storage, and the
        all-shunts-out degenerate case.
        """
        # Constants
        h = 1e-8      # finite-difference step for derivative checks
        normal = 1e0  # scaling applied to each residual
        eta = 1e-8    # smoothing constant in the soft violation terms
        # Multiperiod
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            # Vars
            net.set_flags('bus',
                          'variable',
                          'regulated by shunt',
                          'voltage magnitude')
            net.set_flags('shunt',
                          'variable',
                          'switching - v',
                          'susceptance')
            self.assertEqual(net.num_vars,
                             (net.get_num_buses_reg_by_shunt() +
                              net.get_num_switched_v_shunts())*self.T)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('voltage regulation by shunts',net)
            self.assertEqual(constr.name,'voltage regulation by shunts')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # Before analyze(): everything empty.
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,0)
            # Expected counts per switched-v shunt:
            # 10 Jacobian nnz, 3 A nnz, 4 J rows, 1 A row.
            Jnnz = 10*net.get_num_switched_v_shunts()
            Annz = 3*net.get_num_switched_v_shunts()
            self.assertGreaterEqual(Jnnz,0)
            self.assertGreaterEqual(Annz,0)
            rowsJ = 4*net.get_num_switched_v_shunts()
            rowsA = net.get_num_switched_v_shunts()
            self.assertGreaterEqual(rowsJ,0)
            self.assertGreaterEqual(rowsA,0)
            constr.analyze()
            self.assertEqual(constr.J_nnz,Jnnz*self.T)
            self.assertEqual(constr.A_nnz,Annz*self.T)
            self.assertEqual(constr.J_row,rowsJ*self.T)
            self.assertEqual(constr.A_row,rowsA*self.T)
            # Extra (slack) variables start at zero.
            y_init = constr.init_extra_vars
            self.assertEqual(y_init.size,constr.num_extra_vars)
            self.assertTrue(np.all(y_init == 0.))
            constr.eval(x0)
            # eval() refreshes J counters but not A counters.
            self.assertEqual(constr.J_nnz,Jnnz*self.T)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.J_row,rowsJ*self.T)
            self.assertEqual(constr.A_row,0)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # After analyze()+eval(): shapes include the extra variables,
            # and all row/col indices are in bounds.
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(rowsJ*self.T,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(rowsA*self.T,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(rowsJ*self.T,net.num_vars+constr.num_extra_vars))
            self.assertEqual(J.nnz,Jnnz*self.T)
            self.assertTrue(np.all(J.row <= rowsJ*self.T-1))
            self.assertTrue(np.all(J.col <= net.num_vars+constr.num_extra_vars-1))
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(rowsA*self.T,net.num_vars+constr.num_extra_vars))
            self.assertEqual(A.nnz,Annz*self.T)
            self.assertTrue(np.all(A.row <= rowsA*self.T-1))
            self.assertTrue(np.all(A.col <= net.num_vars+constr.num_extra_vars-1))
            self.assertEqual(constr.num_extra_vars,rowsJ*self.T)
            self.assertTrue(not np.any(np.isinf(b)))
            self.assertTrue(not np.any(np.isnan(b)))
            self.assertTrue(not np.any(np.isinf(f)))
            self.assertTrue(not np.any(np.isnan(f)))
            self.assertTrue(not np.any(np.isinf(J.data)))
            self.assertTrue(not np.any(np.isnan(J.data)))
            self.assertTrue(not np.any(np.isinf(A.data)))
            self.assertTrue(not np.any(np.isnan(A.data)))
            y0 = np.random.rand(constr.num_extra_vars)
            # Ax=b check
            self.assertEqual(norm(A.data,1),rowsA*3*self.T)
            self.assertEqual(np.sum(A.data),net.get_num_switched_v_shunts()*self.T)
            # f check: four smoothed residuals per regulating shunt —
            # regulated-bus voltage band (vmin/vmax) and susceptance band
            # (bmin/bmax).
            index = 0
            for t in range(self.T):
                for bus in net.buses:
                    for s in bus.reg_shunts:
                        self.assertEqual(bus.number,s.reg_bus.number)
                        self.assertTrue(bus.has_flags('variable','voltage magnitude'))
                        self.assertTrue(s.has_flags('variable','susceptance'))
                        fvmin = ((bus.v_mag[t]-bus.v_min_reg) - np.sqrt((bus.v_mag[t]-bus.v_min_reg)**2. + 2.*eta))*normal
                        fvmax = ((bus.v_max_reg-bus.v_mag[t]) - np.sqrt((bus.v_max_reg-bus.v_mag[t])**2. + 2.*eta))*normal
                        fbmax = ((s.b_max-s.b[t]) - np.sqrt((s.b_max-s.b[t])**2. + 2*eta))*normal
                        fbmin = ((s.b[t]-s.b_min) - np.sqrt((s.b[t]-s.b_min)**2. + 2*eta))*normal
                        self.assertLess(np.abs(fvmin-f[index]),1e-10*(1+np.abs(fvmin)))
                        self.assertLess(np.abs(fvmax-f[index+1]),1e-10*(1+np.abs(fvmax)))
                        self.assertLess(np.abs(fbmax-f[index+2]),1e-10*(1+np.abs(fbmax)))
                        self.assertLess(np.abs(fbmin-f[index+3]),1e-10*(1+np.abs(fbmin)))
                        index += 4
            # Jacobian check
            pf.tests.utils.check_constraint_Jacobian(self,
                                                     constr,
                                                     x0,
                                                     y0,
                                                     NUM_TRIALS,
                                                     TOL,
                                                     EPS,
                                                     h)
            # Single Hessian check
            pf.tests.utils.check_constraint_single_Hessian(self,
                                                           constr,
                                                           x0,
                                                           y0,
                                                           NUM_TRIALS,
                                                           TOL,
                                                           EPS,
                                                           h)
            # Combined Hessian check
            pf.tests.utils.check_constraint_combined_Hessian(self,
                                                             constr,
                                                             x0,
                                                             y0,
                                                             NUM_TRIALS,
                                                             TOL,
                                                             EPS,
                                                             h)
            # Sensitivities: each shunt's four rows carry the same marker
            # value, which must land on the regulated bus.
            net.clear_sensitivities()
            for t in range(self.T):
                for i in range(net.num_buses):
                    bus = net.get_bus(i)
                    self.assertEqual(bus.sens_v_reg_by_shunt[t],0.)
            sens = np.zeros(constr.f.size)
            counter = 0
            for t in range(self.T):
                for bus in net.buses:
                    for shunt in bus.reg_shunts:
                        sens[counter:counter+4] = bus.index*t
                        counter += 4
            self.assertEqual(counter,constr.f.size)
            constr.store_sensitivities(np.zeros(constr.A.shape[0]),sens,None,None)
            for t in range(self.T):
                for bus in net.buses:
                    for shunt in bus.reg_shunts:
                        self.assertEqual(bus.sens_v_reg_by_shunt[t],bus.index*t)
            # With outages: no in-service shunts means an empty constraint.
            for shunt in net.shunts:
                shunt.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_REG_SHUNT_with_outages(self):
# Multiperiod
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
self.assertEqual(net.num_periods,self.T)
# Vars
net.set_flags('bus',
'variable',
'regulated by shunt',
'voltage magnitude')
net.set_flags('shunt',
'variable',
'switching - v',
'susceptance')
self.assertEqual(net.num_vars,
(net.get_num_buses_reg_by_shunt() +
net.get_num_switched_v_shunts())*self.T)
x0 = net.get_var_values()
constr0 = pf.Constraint('voltage regulation by shunts', net)
constr0.analyze()
constr0.eval(x0)
for bus in net.buses:
if bus.is_regulated_by_shunt():
for gen in bus.generators:
gen.in_service = False
for branch in bus.branches:
branch.in_service = False
constr1 = pf.Constraint('voltage regulation by shunts', net)
constr1.analyze()
constr1.eval(x0)
self.assertEqual((constr0.A-constr1.A).tocoo().nnz, 0)
self.assertLess(norm(constr0.b-constr1.b), 1e-8)
self.assertEqual((constr0.J-constr1.J).tocoo().nnz, 0)
self.assertLess(norm(constr0.f-constr1.f), 1e-8)
def test_network_state_tag(self):
for case in test_cases.CASES:
net = pf.Parser(case).parse(case, self.T)
if net.num_shunts == 0:
continue
x = np.zeros(0)
constr = pf.Constraint('AC power balance',net)
constr.analyze()
constr.eval(x)
y = net.state_tag
# shunt
for shunt in net.shunts:
shunt.in_service = False
z = net.state_tag
self.assertEqual(y+net.num_shunts, z)
self.assertRaises(pf.ConstraintError, constr.eval, x)
    def test_robustness(self):
        """Constraint robustness to empty networks and stale analysis.

        Checks that constraints on an empty network eval harmlessly, that
        evaluating after the variable set changed (without re-analyzing)
        raises ConstraintError, and that re-analyzing restores consistent
        shapes including per-constraint extra variables.
        """
        for case in test_cases.CASES:
            # Empty network: all constraints stay empty through eval/analyze.
            net = pf.Network(self.T)
            constraints = [pf.Constraint('variable fixing',net),
                           pf.Constraint('generator active power participation',net),
                           pf.Constraint('PVPQ switching',net),
                           pf.Constraint('AC power balance',net),
                           pf.Constraint('DC power balance',net),
                           pf.Constraint('voltage set point regulation',net),
                           pf.Constraint('voltage regulation by transformers',net),
                           pf.Constraint('voltage regulation by shunts',net),
                           pf.Constraint('AC branch flow limits',net)]
            x0 = net.get_var_values()
            for c in constraints:
                self.assertTrue(isinstance(c.b,np.ndarray))
                self.assertTrue(isinstance(c.A,coo_matrix))
                self.assertTrue(isinstance(c.f,np.ndarray))
                self.assertTrue(isinstance(c.J,coo_matrix))
                self.assertEqual(c.b.size,0)
                self.assertEqual(c.A.nnz,0)
                self.assertTupleEqual(c.A.shape,(0,0))
                self.assertEqual(c.f.size,0)
                self.assertEqual(c.J.nnz,0)
                self.assertTupleEqual(c.J.shape,(0,0))
            # eval before analyze is harmless on an empty network.
            list(map(lambda c: c.eval(x0),constraints))
            list(map(lambda c: c.analyze(),constraints))
            list(map(lambda c: c.eval(x0),constraints))
            for c in constraints:
                self.assertTrue(isinstance(c.b,np.ndarray))
                self.assertTrue(isinstance(c.A,coo_matrix))
                self.assertTrue(isinstance(c.f,np.ndarray))
                self.assertTrue(isinstance(c.J,coo_matrix))
                self.assertEqual(c.b.size,0)
                self.assertEqual(c.A.nnz,0)
                self.assertTupleEqual(c.A.shape,(0,0))
                self.assertEqual(c.f.size,0)
                self.assertEqual(c.J.nnz,0)
                self.assertTupleEqual(c.J.shape,(0,0))
            # Network changes: rebuild constraints on a real parsed network.
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            constraints = [pf.Constraint('variable fixing',net),
                           pf.Constraint('generator active power participation',net),
                           pf.Constraint('PVPQ switching',net),
                           pf.Constraint('AC power balance',net),
                           pf.Constraint('DC power balance',net),
                           pf.Constraint('voltage set point regulation',net),
                           pf.Constraint('voltage regulation by transformers',net),
                           pf.Constraint('voltage regulation by shunts',net),
                           pf.Constraint('AC branch flow limits',net)]
            # After updating network (x0 still has the empty-network size,
            # but the parsed network has zero variables, so this matches).
            list(map(lambda c: c.analyze(),constraints))
            list(map(lambda c: c.eval(x0),constraints))
            for c in constraints:
                self.assertTrue(isinstance(c.b,np.ndarray))
                self.assertTrue(isinstance(c.A,coo_matrix))
                self.assertTrue(isinstance(c.f,np.ndarray))
                self.assertTrue(isinstance(c.J,coo_matrix))
            # Add variables: the constraints are now stale (analyzed with
            # zero variables).
            net.set_flags('bus',
                          'variable',
                          'any',
                          ['voltage magnitude','voltage angle'])
            net.set_flags('generator',
                          'variable',
                          'any',
                          ['active power','reactive power'])
            net.set_flags('branch',
                          'variable',
                          'tap changer',
                          'tap ratio')
            net.set_flags('branch',
                          'variable',
                          'phase shifter',
                          'phase shift')
            net.set_flags('shunt',
                          'variable',
                          'switching - v',
                          'susceptance')
            net.set_flags('battery',
                          'variable',
                          'any',
                          ['charging power','energy level'])
            self.assertEqual(net.num_vars,
                             (2*net.num_buses +
                              2*net.num_generators +
                              net.get_num_tap_changers()+
                              net.get_num_phase_shifters()+
                              net.get_num_switched_v_shunts()+
                              3*net.num_batteries)*self.T)
            x0 = net.get_var_values()
            # Before re-analyzing: eval with the new variable set must raise.
            list(map(lambda c: c.clear_error(),constraints))
            for c in constraints:
                self.assertRaises(pf.ConstraintError,c.eval,x0)
            list(map(lambda c: c.clear_error(),constraints))
            # Do it right: re-analyze, then eval succeeds with consistent
            # column counts (network vars plus per-constraint extra vars).
            list(map(lambda c: c.analyze(),constraints))
            list(map(lambda c: c.eval(x0),constraints))
            for c in constraints:
                self.assertTrue(isinstance(c.b,np.ndarray))
                self.assertTrue(isinstance(c.A,coo_matrix))
                self.assertTrue(isinstance(c.f,np.ndarray))
                self.assertTrue(isinstance(c.J,coo_matrix))
                self.assertEqual(c.A.shape[1],net.num_vars+c.num_extra_vars)
                self.assertEqual(c.J.shape[1],net.num_vars+c.num_extra_vars)
                if c.f.size:
                    self.assertTupleEqual(c.get_H_single(0).shape,
                                          (net.num_vars+c.num_extra_vars,net.num_vars+c.num_extra_vars))
                else:
                    self.assertTupleEqual(c.get_H_single(0).shape,(0,0))
    def test_constr_DCPF(self):
        """Test the 'DC power balance' constraint (linear equality A x = b).

        Single-period: builds the constraint on each test case, checks the
        shapes and sparsity of A, b, J, f before and after analyze()/eval(),
        validates A against variable projection matrices, checks stored
        sensitivities, and verifies each bus's active power mismatch by hand.
        Multi-period: repeats the shape/mismatch/sensitivity checks across
        self.T periods, then exercises the constraint with all components
        (and finally all buses) out of service.
        """
        # Single period
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case).get_copy(merge_buses=True)
            self.assertEqual(net.num_periods,1)
            self.assertEqual(net.num_vars,0)
            # Add vargens (one per load bus)
            load_buses = net.get_load_buses()
            net.add_var_generators_from_parameters(load_buses,80.,50.,30.,5,0.05)
            self.assertGreater(net.num_var_generators,0)
            self.assertEqual(net.num_var_generators,len([b for b in net.buses if b.loads]))
            for b in net.buses:
                if b.loads:
                    self.assertGreater(len(b.var_generators),0)
                    for vargen in b.var_generators:
                        self.assertEqual(vargen.bus,b)
            # batteries: flip the charging sign of every other one so both
            # charging directions are covered
            for bat in net.batteries:
                if bat.index % 2 == 0:
                    bat.P *= -1.
            # Variables
            net.set_flags('bus',
                          'variable',
                          'not slack',
                          'voltage angle')
            net.set_flags('generator',
                          'variable',
                          'any',
                          'active power')
            net.set_flags('load',
                          'variable',
                          'any',
                          'active power')
            net.set_flags('variable generator',
                          'variable',
                          'any',
                          'active power')
            net.set_flags('branch',
                          'variable',
                          'phase shifter',
                          'phase shift')
            net.set_flags('battery',
                          'variable',
                          'any',
                          'charging power')
            self.assertEqual(net.num_vars,
                             (net.num_buses-net.get_num_slack_buses() +
                              net.num_generators +
                              net.num_loads +
                              net.num_var_generators +
                              net.get_num_phase_shifters()+
                              2*net.num_batteries))
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('DC power balance',net)
            self.assertEqual(constr.name,'DC power balance')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            # Before analyze(): everything is empty
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,0)
            # r counts branches incident to slack buses, whose angles are not
            # variables and therefore contribute no A entries
            r = 0
            for b in net.buses:
                if b.is_slack():
                    r += len(b.branches)
            # Analyze
            constr.analyze()
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,net.num_buses*1)
            self.assertEqual(constr.A_nnz,
                             (net.num_generators +
                              net.num_loads +
                              net.num_var_generators +
                              4*net.num_branches -
                              2*r +
                              2*net.get_num_phase_shifters()+
                              2*net.num_batteries))
            self.assertTupleEqual(b.shape,(net.num_buses,))
            self.assertTupleEqual(f.shape,(0,))
            self.assertTupleEqual(A.shape,(net.num_buses,net.num_vars))
            self.assertEqual(A.nnz,constr.A_nnz)
            self.assertTupleEqual(J.shape,(0,net.num_vars))
            constr.eval(x0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(A.nnz,
                             (net.num_generators +
                              net.num_loads +
                              net.num_var_generators +
                              4*net.num_branches -
                              2*r +
                              2*net.get_num_phase_shifters()+
                              2*net.num_batteries))
            # Extract pieces of A with per-component projection matrices and
            # check that they recompose A exactly
            P1 = net.get_var_projection('bus','any','voltage angle')
            P2 = net.get_var_projection('generator','any','active power')
            P3 = net.get_var_projection('variable generator','any','active power')
            P4 = net.get_var_projection('branch','any','phase shift')
            P5 = net.get_var_projection('load','any','active power')
            P6 = net.get_var_projection('battery','any','charging power')
            G = A*P2.T
            R = A*P3.T
            Atheta = -A*P1.T
            Aphi = -A*P4.T
            L = -A*P5.T
            B = -A*P6.T
            x = np.random.randn(net.num_vars)
            p = P2*x
            r = P3*x
            theta = P1*x
            phi = P4*x
            l = P5*x
            Pb = P6*x
            self.assertLess(norm((G*p+R*r-Atheta*theta-Aphi*phi-L*l-B*Pb)-A*x),1e-10)
            # Sensitivities (only P balance is populated by this constraint)
            for bus in net.buses:
                self.assertEqual(bus.sens_P_balance,0.)
                self.assertEqual(bus.sens_Q_balance,0.)
            new_sens = np.random.randn(net.num_buses)
            constr.store_sensitivities(new_sens,None,None,None)
            for bus in net.buses:
                self.assertNotEqual(bus.sens_P_balance,0.)
                self.assertEqual(bus.sens_Q_balance,0.)
                self.assertEqual(bus.sens_P_balance,new_sens[bus.index])
            # mismatches: A*x0-b must equal the hand-computed bus injection
            mismatches = A*x0-b
            for bus in net.buses:
                mis = 0
                for gen in bus.generators:
                    mis += gen.P
                for vargen in bus.var_generators:
                    mis += vargen.P
                for load in bus.loads:
                    mis -= load.P
                for bat in bus.batteries:
                    mis -= bat.P
                for br in bus.branches_k:
                    mis -= br.P_km_DC
                for br in bus.branches_m:
                    # NOTE(review): '+= br.P_km_DC' relies on P_mk_DC == -P_km_DC
                    # (lossless DC model); the analogous loops below use
                    # '-= br.P_mk_DC' instead — confirm equivalence is intended
                    mis += br.P_km_DC
                self.assertLess(np.abs(mismatches[bus.index]-mis),1e-8)
            # No variables: re-analyze and check mismatches against constants
            net.clear_flags()
            self.assertEqual(net.num_vars,0)
            # free the constraint matrices/vectors before re-analyzing
            constr.del_matvec()
            constr.analyze()
            f1 = constr.f
            J1 = constr.J
            A1 = constr.A
            b1 = constr.b
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,net.num_buses)
            self.assertEqual(constr.A_nnz,0)
            self.assertTupleEqual(b1.shape,(net.num_buses,))
            self.assertTupleEqual(f1.shape,(0,))
            self.assertTupleEqual(A1.shape,(net.num_buses,net.num_vars))
            self.assertEqual(A1.nnz,constr.A_nnz)
            self.assertTupleEqual(J1.shape,(0,net.num_vars))
            x1 = net.get_var_values()
            self.assertTrue(type(x1) is np.ndarray)
            self.assertTupleEqual(x1.shape,(net.num_vars,))
            mismatches1 = A1*x1-b1
            for bus in net.buses:
                mis = 0
                for gen in bus.generators:
                    mis += gen.P
                for vargen in bus.var_generators:
                    mis += vargen.P
                for load in bus.loads:
                    mis -= load.P
                for bat in bus.batteries:
                    mis -= bat.P
                for br in bus.branches_k:
                    mis -= br.P_km_DC
                for br in bus.branches_m:
                    mis -= br.P_mk_DC
                self.assertLess(np.abs(mismatches1[bus.dP_index]-mis),1e-8)
        # Multi period
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            self.assertEqual(net.num_vars,0)
            # Add vargens
            load_buses = net.get_load_buses()
            net.add_var_generators_from_parameters(load_buses,80.,50.,30.,5,0.05)
            # batteries: random per-period charging powers
            for bat in net.batteries:
                bat.P = np.random.randn(self.T)*10
            # Variables
            net.set_flags('bus',
                          'variable',
                          'not slack',
                          'voltage angle')
            net.set_flags('generator',
                          'variable',
                          'any',
                          'active power')
            net.set_flags('load',
                          'variable',
                          'any',
                          'active power')
            net.set_flags('variable generator',
                          'variable',
                          'any',
                          'active power')
            net.set_flags('branch',
                          'variable',
                          'phase shifter',
                          'phase shift')
            net.set_flags('battery',
                          'variable',
                          'any',
                          'charging power')
            self.assertEqual(net.num_vars,
                             (net.num_buses-net.get_num_slack_buses() +
                              net.num_generators +
                              net.num_loads +
                              net.num_var_generators +
                              net.get_num_phase_shifters()+
                              2*net.num_batteries)*self.T)
            x0 = net.get_var_values()
            # Count branches incident to slack buses (no A entries for them)
            r = 0
            for b in net.buses:
                if b.is_slack():
                    r += len(b.branches)
            # Constraint
            constr = pf.Constraint('DC power balance',net)
            self.assertEqual(constr.name,'DC power balance')
            # Analyze
            constr.analyze()
            A = constr.A
            b = constr.b
            self.assertEqual(constr.A_row, net.num_buses*self.T)
            self.assertEqual(constr.A_nnz,
                             (net.num_generators +
                              net.num_loads +
                              net.num_var_generators +
                              4*net.num_branches -
                              2*r +
                              2*net.get_num_phase_shifters()+
                              2*net.num_batteries)*self.T)
            self.assertTupleEqual(b.shape,(net.num_buses*self.T,))
            self.assertTupleEqual(A.shape,(net.num_buses*self.T,net.num_vars))
            self.assertEqual(A.nnz,constr.A_nnz)
            # Eval
            constr.eval(x0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(A.nnz,
                             (net.num_generators +
                              net.num_loads +
                              net.num_var_generators +
                              4*net.num_branches -
                              2*r +
                              2*net.get_num_phase_shifters()+
                              2*net.num_batteries)*self.T)
            # Mismatches: per-bus, per-period check against hand computation
            mismatches = A*x0-b
            for t in range(self.T):
                for bus in net.buses:
                    mis = 0
                    for gen in bus.generators:
                        mis += gen.P[t]
                    for vargen in bus.var_generators:
                        mis += vargen.P[t]
                    for load in bus.loads:
                        mis -= load.P[t]
                    for bat in bus.batteries:
                        mis -= bat.P[t]
                    for br in bus.branches_k:
                        mis -= br.P_km_DC[t]
                    for br in bus.branches_m:
                        mis -= br.P_mk_DC[t]
                    self.assertLess(np.abs(mismatches[bus.dP_index[t]]-mis),1e-8)
            # No variables
            net.clear_flags()
            self.assertEqual(net.num_vars,0)
            constr.del_matvec()
            constr.analyze()
            A1 = constr.A
            b1 = constr.b
            x1 = net.get_var_values()
            self.assertTupleEqual(x1.shape,(0,))
            mismatches1 = A1*x1-b1
            for t in range(self.T):
                for bus in net.buses:
                    mis = 0
                    for gen in bus.generators:
                        mis += gen.P[t]
                    for vargen in bus.var_generators:
                        mis += vargen.P[t]
                    for load in bus.loads:
                        mis -= load.P[t]
                    for bat in bus.batteries:
                        mis -= bat.P[t]
                    for br in bus.branches_k:
                        mis -= br.P_km_DC[t]
                    for br in bus.branches_m:
                        mis -= br.P_mk_DC[t]
                    self.assertLess(np.abs(mismatches1[bus.dP_index[t]]-mis),1e-8)
            # Sensitivities: one multiplier per bus per period
            net.clear_sensitivities()
            lam = np.random.randn(net.num_buses*net.num_periods)
            self.assertEqual(lam.size, constr.A.shape[0])
            for t in range(net.num_periods):
                for bus in net.buses:
                    self.assertEqual(bus.sens_P_balance[t], 0.)
                    self.assertEqual(bus.sens_Q_balance[t], 0.)
            constr.store_sensitivities(lam, None, None, None)
            for t in range(net.num_periods):
                for bus in net.buses:
                    self.assertEqual(bus.sens_P_balance[t], lam[bus.dP_index[t]])
                    self.assertNotEqual(bus.sens_P_balance[t], 0.)
                    self.assertEqual(bus.sens_Q_balance[t], 0.)
            # with outages except buses: constraint rows remain (one per bus)
            # but all coefficients vanish
            for gen in net.generators:
                gen.in_service = False
            for branch in net.branches:
                branch.in_service = False
            for load in net.loads:
                load.in_service = False
            for bus in net.dc_buses:
                bus.in_service = False
            for branch in net.dc_branches:
                branch.in_service = False
            for conv in net.csc_converters:
                conv.in_service = False
            for conv in net.vsc_converters:
                conv.in_service = False
            for facts in net.facts:
                facts.in_service = False
            for bat in net.batteries:
                bat.in_service = False
            for gen in net.var_generators:
                gen.in_service = False
            for shunt in net.shunts:
                shunt.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], net.num_buses*self.T)
            self.assertEqual(constr.b.size, net.num_buses*self.T)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            # NOTE(review): the trailing ', 0' is passed as the assertTrue msg
            # argument; harmless but likely unintended
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
            # with bus outages: no rows at all
            net.make_all_in_service()
            for bus in net.buses:
                bus.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_DCPF_with_outages(self):
# Multiperiods
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
self.assertEqual(net.num_periods,self.T)
# Vars
net.set_flags('bus',
'variable',
'any',
'voltage angle')
net.set_flags('generator',
'variable',
'any',
'active power')
net.set_flags('load',
'variable',
'any',
'active power')
net.set_flags('branch',
'variable',
'phase shifter',
'phase shift')
self.assertEqual(net.num_vars,
(net.num_buses +
net.num_generators +
net.num_loads +
net.get_num_phase_shifters())*self.T)
x0 = net.get_var_values()
constr0 = pf.Constraint('DC power balance', net)
constr0.analyze()
constr0.eval(x0)
buses = net.buses[:10]
side = []
for bus in buses:
for gen in bus.generators:
gen.in_service = False
for br in bus.branches_k:
self.assertTrue(bus.is_equal(br.bus_k))
br.in_service = False
side.append(br.bus_m)
for br in bus.branches_m:
self.assertTrue(bus.is_equal(br.bus_m))
br.in_service = False
side.append(br.bus_k)
constr1 = pf.Constraint('DC power balance', net)
constr1.analyze()
constr1.eval(x0)
f0 = constr0.A*x0-constr0.b
f1 = constr1.A*x0-constr1.b
for bus in net.buses:
if bus not in buses+side:
for t in range(self.T):
i = bus.dP_index[t]
self.assertLess(np.abs(f0[i]-f1[i]), 1e-8)
for bus in buses:
for t in range(self.T):
i = bus.dP_index[t]
dp = 0.
for gen in bus.generators:
self.assertFalse(gen.is_in_service())
dp += gen.P[t]
for br in bus.branches_k:
dp -= br.P_km_DC[t]
for br in bus.branches_m:
dp -= br.P_mk_DC[t]
self.assertLess(np.abs(f1[i]+dp-f0[i]), 1e-8)
    def test_constr_DC_FLOW_LIM(self):
        """Test the 'DC branch flow limits' constraint (l <= G x <= u).

        Single-period: checks shapes before/after analyze(), verifies the
        COO structure of G entry by entry (slack-bus angles fold into the
        l/u offsets), checks that G*x0 reproduces each branch's DC flow,
        and checks sensitivity storage. Multi-period: checks that each
        period's rows replicate period 0. Finally checks that outaging all
        branches empties the constraint.
        """
        # Single period
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case)
            self.assertEqual(net.num_periods,1)
            self.assertEqual(net.num_vars,0)
            # Variables
            net.set_flags('bus',
                          'variable',
                          'not slack',
                          'voltage angle')
            self.assertEqual(net.num_vars,net.num_buses-net.get_num_slack_buses())
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('DC branch flow limits',net)
            self.assertEqual(constr.name,'DC branch flow limits')
            # Num constr: only branches with a nonzero ratingA get a row
            num_constr = len([br for br in net.branches if br.ratingA != 0.])
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            G = constr.G
            l = constr.l
            u = constr.u
            # Before analyze(): everything is empty
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(0,))
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,0))
            self.assertEqual(G.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,0)
            self.assertEqual(constr.G_row,0)
            # Analyze
            constr.analyze()
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            l = constr.l
            u = constr.u
            G = constr.G
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,0)
            self.assertEqual(constr.G_row,num_constr)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTupleEqual(f.shape,(0,))
            self.assertTupleEqual(l.shape,(num_constr,))
            self.assertTupleEqual(u.shape,(num_constr,))
            self.assertTupleEqual(A.shape,(0,net.num_vars))
            self.assertTupleEqual(J.shape,(0,net.num_vars))
            self.assertTupleEqual(G.shape,(num_constr,net.num_vars))
            self.assertEqual(G.nnz,constr.G_nnz)
            self.assertTrue(np.all(l <= u))
            # Expected nnz: one entry per non-slack endpoint of a rated branch
            num = 0
            for br in net.branches:
                if br.ratingA == 0.:
                    continue
                if not br.bus_k.is_slack():
                    num += 1
                if not br.bus_m.is_slack():
                    num += 1
            self.assertEqual(num,constr.G_nnz)
            # Walk G's COO arrays row by row; slack endpoints contribute to
            # the l/u offsets instead of G entries
            counter = 0
            index = 0
            for bus in net.buses:
                for br in bus.branches_k:
                    if br.ratingA == 0.:
                        continue
                    off = 0
                    if br.bus_k.is_slack():
                        off = br.b*br.bus_k.v_ang
                    else:
                        self.assertEqual(G.row[counter],index)
                        self.assertEqual(G.col[counter],br.bus_k.index_v_ang)
                        self.assertEqual(G.data[counter],-br.b)
                        counter += 1
                    if br.bus_m.is_slack():
                        off = -br.b*br.bus_m.v_ang
                    else:
                        self.assertEqual(G.row[counter],index)
                        self.assertEqual(G.col[counter],br.bus_m.index_v_ang)
                        self.assertEqual(G.data[counter],br.b)
                        counter += 1
                    rating = br.ratingA
                    self.assertEqual(l[index],-rating+off-br.b*br.phase)
                    self.assertEqual(u[index],rating+off-br.b*br.phase)
                    index += 1
            self.assertEqual(counter,G.nnz)
            self.assertEqual(index,G.shape[0])
            # Flow: G*x0 plus the slack/phase offset must equal P_km_DC
            Gx0 = constr.G*x0
            self.assertTupleEqual(Gx0.shape,(num_constr,))
            index = 0
            for bus in net.buses:
                for branch in bus.branches_k:
                    if branch.ratingA == 0.:
                        continue
                    bus1 = branch.bus_k
                    bus2 = branch.bus_m
                    if bus1.is_slack():
                        flow = Gx0[index]-branch.b*(bus1.v_ang-branch.phase)
                    elif bus2.is_slack():
                        flow = Gx0[index]-branch.b*(-bus2.v_ang-branch.phase)
                    else:
                        flow = Gx0[index]-branch.b*(-branch.phase)
                    self.assertLess(np.abs(branch.P_km_DC-flow),1e-10)
                    index += 1
            # Sensitivities: upper/lower bound multipliers per rated branch
            index = 0
            for branch in net.branches:
                self.assertEqual(branch.sens_P_u_bound,0.)
                self.assertEqual(branch.sens_P_l_bound,0.)
            mu = np.random.randn(num_constr)
            pi = np.random.randn(num_constr)
            self.assertEqual(constr.G.shape[0],num_constr)
            constr.store_sensitivities(None,None,mu,pi)
            for bus in net.buses:
                for branch in bus.branches_k:
                    if branch.ratingA == 0.:
                        continue
                    self.assertEqual(branch.sens_P_u_bound,mu[index])
                    self.assertEqual(branch.sens_P_l_bound,pi[index])
                    index += 1
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,0)
            self.assertEqual(constr.G_row,num_constr)
        # Multi period
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            self.assertEqual(net.num_vars,0)
            # Nonzero angles (constant across periods for the replication check)
            for bus in net.buses:
                bus.v_ang = np.random.randn()*np.ones(self.T)
            # Variables
            net.set_flags('bus',
                          'variable',
                          'not slack',
                          'voltage angle')
            self.assertEqual(net.num_vars,(net.num_buses-net.get_num_slack_buses())*self.T)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Num constr
            num_constr = len([br for br in net.branches if br.ratingA != 0.])
            # Constraint
            constr = pf.Constraint('DC branch flow limits',net)
            self.assertEqual(constr.name,'DC branch flow limits')
            constr.analyze()
            G = constr.G
            l = constr.l
            u = constr.u
            self.assertTupleEqual(l.shape,(num_constr*self.T,))
            self.assertTupleEqual(u.shape,(num_constr*self.T,))
            self.assertTupleEqual(G.shape,(num_constr*self.T,net.num_vars))
            # Every period's rows must replicate period 0's
            Projs = []
            for t in range(self.T):
                Projs.append(net.get_var_projection('all','any','all',t,t))
            Gs = [G*P.T for P in Projs]
            x0s = [P*x0 for P in Projs]
            Gx0s = [(Gs[t]*x0s[t])[t*num_constr:(t+1)*num_constr] for t in range(self.T)]
            ls = [l[t*num_constr:(t+1)*num_constr] for t in range(self.T)]
            us = [u[t*num_constr:(t+1)*num_constr] for t in range(self.T)]
            for t in range(self.T):
                self.assertLessEqual(norm(Gx0s[t]-Gx0s[0]),1e-10*norm(Gx0s[0]))
                self.assertLessEqual(norm(ls[t]-ls[0]),1e-10*norm(ls[0]))
                self.assertLessEqual(norm(us[t]-us[0]),1e-10*norm(us[0]))
            # with outages: no branches in service means no rows at all
            for br in net.branches:
                br.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            # NOTE(review): the trailing ', 0' is passed as the assertTrue msg
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_DC_FLOW_LIM_with_outages(self):
# Multi period
for case in test_cases.CASES:
net = pf.Parser(case).parse(case, self.T)
self.assertEqual(net.num_periods, self.T)
self.assertEqual(net.num_vars,0)
# Variables
net.set_flags('bus',
'variable',
'not slack',
'voltage angle')
self.assertEqual(net.num_vars,(net.num_buses-net.get_num_slack_buses())*self.T)
x0 = net.get_var_values()
constr = pf.Constraint('DC branch flow limits', net)
constr.analyze()
constr.eval(x0)
num_constr = len([br for br in net.branches if br.ratingA != 0.])*self.T
self.assertEqual(constr.G.shape[0], num_constr)
for branch in net.branches:
branch.in_service = False
constr.analyze()
self.assertEqual(constr.G.shape[0], 0)
    def test_constr_LINPF(self):
        """Test the 'linearized AC power balance' constraint.

        Checks that, after analyze()/eval(), the linearized constraint's
        A matrix equals the Jacobian J of the nonlinear 'AC power balance'
        constraint evaluated at x0, and b equals J*x0 - f. The comparison
        is also repeated after an eval at a different point and with all
        buses out of service.
        """
        # Multiperiod
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            # load: shift all load powers up so the total is nonnegative
            if sum([l.P[0] for l in net.loads]) < 0:
                lmin = np.min([l.P for l in net.loads])
                for l in net.loads:
                    l.P = l.P + np.abs(lmin)
            # add vargens (one per load bus, with positive Q)
            load_buses = net.get_load_buses()
            net.add_var_generators_from_parameters(load_buses,80.,50.,30.,5,0.05)
            self.assertGreater(net.num_var_generators,0)
            self.assertEqual(net.num_var_generators,len(load_buses))
            for vargen in net.var_generators:
                vargen.Q = np.abs(vargen.P)
                for t in range(self.T):
                    self.assertGreater(vargen.Q[t],0.)
            # Vars
            net.set_flags('bus',
                          'variable',
                          'any',
                          ['voltage magnitude','voltage angle'])
            net.set_flags('generator',
                          'variable',
                          'slack',
                          'active power')
            net.set_flags('generator',
                          'variable',
                          'regulator',
                          'reactive power')
            net.set_flags('branch',
                          'variable',
                          'tap changer',
                          'tap ratio')
            net.set_flags('branch',
                          'variable',
                          'phase shifter',
                          'phase shift')
            net.set_flags('shunt',
                          'variable',
                          'switching - v',
                          'susceptance')
            net.set_flags('variable generator',
                          'variable',
                          'any',
                          ['active power','reactive power'])
            self.assertEqual(net.num_vars,
                             (2*net.get_num_buses() +
                              net.get_num_slack_gens() +
                              net.get_num_reg_gens() +
                              net.get_num_tap_changers() +
                              net.get_num_phase_shifters() +
                              net.get_num_switched_v_shunts() +
                              net.num_var_generators*2)*self.T)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('linearized AC power balance',net)
            self.assertEqual(constr.name,'linearized AC power balance')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            G = constr.G
            # Before analyze(): everything is empty
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,0))
            self.assertEqual(G.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            # Expected per-period nonzeros of the linearized A
            num_Annz = (net.num_buses*4 +
                        net.get_num_branches()*8 +
                        net.get_num_tap_changers()*4 +
                        net.get_num_phase_shifters()*4 +
                        net.get_num_switched_v_shunts() +
                        net.get_num_slack_gens() +
                        net.get_num_reg_gens()+
                        net.num_var_generators*2)
            constr.analyze()
            self.assertEqual(constr.A_nnz,0)
            constr.eval(x0)
            self.assertEqual(constr.A_nnz,0)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            G = constr.G
            # After: 2 rows (P and Q balance) per bus per period
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(2*net.num_buses*self.T,))
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(2*net.num_buses*self.T,net.num_vars))
            self.assertEqual(A.nnz,num_Annz*self.T)
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,net.num_vars))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,net.num_vars))
            self.assertEqual(G.nnz,0)
            self.assertTrue(not np.any(np.isinf(b)))
            self.assertTrue(not np.any(np.isnan(b)))
            # Check with ACPF: A must equal the AC Jacobian at x0
            constrPF = pf.Constraint('AC power balance',net)
            self.assertEqual(constrPF.name,'AC power balance')
            constrPF.analyze()
            constrPF.eval(x0)
            self.assertEqual(A.nnz,constrPF.J.nnz)
            self.assertTrue(np.all(A.row == constrPF.J.row))
            self.assertTrue(np.all(A.col == constrPF.J.col))
            self.assertTrue(np.all(A.data == constrPF.J.data))
            self.assertGreater(norm(A.row),0)
            self.assertGreater(norm(A.col),0)
            self.assertGreater(norm(A.data),0)
            self.assertGreater(norm(b),0)
            self.assertLess(norm(b-(constrPF.J*x0-constrPF.f)),1e-10*(norm(b)+1))
            # After eval at a different point the linearization is unchanged
            constr.eval(np.zeros(x0.size))
            self.assertEqual(constr.A.nnz,constrPF.J.nnz)
            self.assertTrue(np.all(constr.A.row == constrPF.J.row))
            self.assertTrue(np.all(constr.A.col == constrPF.J.col))
            self.assertTrue(np.all(constr.A.data == constrPF.J.data))
            self.assertGreater(norm(constr.A.row),0)
            self.assertGreater(norm(constr.A.col),0)
            self.assertGreater(norm(constr.A.data),0)
            self.assertGreater(norm(constr.b),0)
            self.assertLess(norm(constr.b-(constrPF.J*x0-constrPF.f)),1e-10*(norm(b)+1))
            # with bus outages: no rows at all
            for bus in net.buses:
                bus.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            # NOTE(review): the trailing ', 0' is passed as the assertTrue msg
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_LINPF_with_outages(self):
# Multiperiod
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
self.assertEqual(net.num_periods,self.T)
for gen in net.generators:
gen.in_service = False
for branch in net.branches:
branch.in_service = False
# Vars
net.set_flags('bus',
'variable',
'any',
['voltage magnitude','voltage angle'])
net.set_flags('generator',
'variable',
'slack',
'active power')
net.set_flags('generator',
'variable',
'regulator',
'reactive power')
net.set_flags('branch',
'variable',
'tap changer',
'tap ratio')
net.set_flags('branch',
'variable',
'phase shifter',
'phase shift')
net.set_flags('shunt',
'variable',
'switching - v',
'susceptance')
net.set_flags('variable generator',
'variable',
'any',
['active power','reactive power'])
self.assertEqual(net.num_vars,
(2*net.get_num_buses() +
net.get_num_slack_gens(True) +
net.get_num_reg_gens(True) +
net.get_num_tap_changers(True) +
net.get_num_phase_shifters(True) +
net.get_num_switched_v_shunts() +
net.num_var_generators*2)*self.T)
constr = pf.Constraint('linearized AC power balance',net)
constr.analyze()
x0 = net.get_var_values()
constrPF = pf.Constraint('AC power balance',net)
constrPF.analyze()
constrPF.eval(x0)
self.assertEqual(constr.A.nnz,constrPF.J.nnz)
self.assertTrue(np.all(constr.A.row == constrPF.J.row))
self.assertTrue(np.all(constr.A.col == constrPF.J.col))
self.assertTrue(np.all(constr.A.data == constrPF.J.data))
if net.num_shunts:
self.assertGreater(norm(constr.A.row),0)
self.assertGreater(norm(constr.A.col),0)
self.assertGreater(norm(constr.A.data),0)
self.assertGreater(norm(constr.b),0)
self.assertLess(norm(constr.b-(constrPF.J*x0-constrPF.f)),1e-10*(norm(constr.b)+1))
    def test_constr_GEN_RAMP(self):
        """Test the 'generator ramp limits' constraint (l <= G x <= u).

        For each non-slack generator and period t, a row limits the ramp
        P[t]-P[t-1] (with P[-1] = P_prev) to [-dP_max, dP_max]. The test
        walks G's COO structure column by column: a generator's P[t]
        appears with +1. in the row for ramp t and with -1. in the row for
        ramp t+1 (except at t = T-1, which has a single +1. entry). Also
        checks that outaging all generators empties the constraint.
        """
        # Multi period
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            self.assertEqual(net.num_vars,0)
            # Gens: randomize ramp limits and previous/current powers
            for gen in net.generators:
                gen.dP_max = np.random.rand()*100.
                gen.P_prev = np.random.rand()*10.
                gen.P = np.random.rand()*20
            # Vars
            net.set_flags('generator',
                          'variable',
                          'not slack',
                          'active power')
            num = net.num_generators-net.get_num_slack_gens()
            self.assertEqual(net.num_vars,num*self.T)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('generator ramp limits',net)
            self.assertEqual(constr.name,'generator ramp limits')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            l = constr.l
            G = constr.G
            u = constr.u
            # Before analyze(): everything is empty
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(0,))
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(0,))
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,0))
            self.assertEqual(G.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            constr.analyze()
            self.assertEqual(constr.A_nnz,0)
            constr.eval(x0)
            self.assertEqual(constr.A_nnz,0)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            l = constr.l
            G = constr.G
            u = constr.u
            # After: one row per (non-slack gen, period); the first period's
            # row has 1 entry, each later period's row has 2
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,net.num_vars))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,net.num_vars))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(num*self.T,))
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(num*self.T,))
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(num*self.T,net.num_vars))
            self.assertEqual(G.nnz,num*(1 + (self.T-1)*2))
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            for t in range(self.T):
                for gen in net.generators:
                    if not gen.is_slack():
                        # ac: all G entries in the column of variable P[t]
                        ac = np.where(G.col == gen.index_P[t])[0]
                        # Last time: P[T-1] only appears in its own ramp row
                        if t == self.T-1:
                            self.assertEqual(ac.size,1)
                            i = G.row[ac[0]]
                            self.assertEqual(G.data[ac[0]],1.)
                            self.assertEqual(l[i],-gen.dP_max)
                            self.assertEqual(u[i],gen.dP_max)
                            ar = np.where(G.row == i)[0]
                            self.assertEqual(ar.size,2)
                            for j in ar:
                                if G.col[j] == gen.index_P[t]:
                                    pass
                                else:
                                    self.assertEqual(G.col[j],gen.index_P[t-1])
                                    self.assertEqual(G.data[j],-1.)
                        # Not last time: P[t] appears in two rows
                        else:
                            self.assertEqual(ac.size,2)
                            for i in ac:
                                self.assertEqual(G.col[i],gen.index_P[t])
                                # entry where P[t] is subtracted: the row for
                                # ramp P[t+1]-P[t], whose other entry is +1.
                                if G.data[i] == -1.:
                                    self.assertEqual(l[G.row[i]],-gen.dP_max)
                                    self.assertEqual(u[G.row[i]],gen.dP_max)
                                    ar = np.where(G.row == G.row[i])[0]
                                    self.assertEqual(ar.size,2)
                                    for j in ar:
                                        if G.col[j] == gen.index_P[t]:
                                            pass
                                        else:
                                            self.assertEqual(G.col[j],gen.index_P[t+1])
                                            self.assertEqual(G.data[j],1.)
                                # entry where P[t] is added: the row for ramp
                                # P[t]-P[t-1]; at t==0 the P_prev constant
                                # shifts the bounds
                                else:
                                    if t == 0:
                                        self.assertEqual(l[G.row[i]],-gen.dP_max+gen.P_prev)
                                        self.assertEqual(u[G.row[i]],gen.dP_max+gen.P_prev)
                                    else:
                                        self.assertEqual(l[G.row[i]],-gen.dP_max)
                                        self.assertEqual(u[G.row[i]],gen.dP_max)
                                    ar = np.where(G.row == G.row[i])[0]
                                    self.assertEqual(ar.size,2)
                                    for j in ar:
                                        if G.col[j] == gen.index_P[t]:
                                            pass
                                        else:
                                            self.assertEqual(G.col[j],gen.index_P[t-1])
                                            self.assertEqual(G.data[j],-1.)
            # with outages: no generators in service means no rows
            for gen in net.generators:
                gen.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            # NOTE(review): the trailing ', 0' is passed as the assertTrue msg
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_GEN_RAMP_with_outages(self):
# Multi period
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
# Vars
net.set_flags('generator',
'variable',
'not slack',
'active power')
num = net.num_generators-net.get_num_slack_gens()
self.assertEqual(net.num_vars,num*self.T)
x0 = net.get_var_values()
# Constraint
constr = pf.Constraint('generator ramp limits',net)
constr.analyze()
self.assertEqual(constr.A.shape[0], 0)
self.assertGreater(constr.G.shape[0], 0)
for gen in net.generators:
gen.in_service = False
constr.analyze()
self.assertEqual(constr.A.shape[0], 0)
self.assertEqual(constr.G.shape[0], 0)
    def test_constr_AC_FLOW_LIM(self):
        """Test the nonlinear 'AC branch flow limits' constraint.

        Checks matrix/vector shapes before and after analyze/eval, the
        extra-variable bookkeeping (one slack per branch-direction limit),
        row-info strings, Hessian lower-triangular structure, projections,
        consistency of f with branch current magnitudes, finite-difference
        Jacobian/Hessian checks, sensitivity storage, and behavior when all
        branches are on outage.  A second single-period pass repeats the
        single/combined Hessian finite-difference checks.
        """
        # Constants
        h = 1e-11
        tol = 1e-2
        eps = 1.1 # %
        param = 1e-6  # smoothing parameter passed to get_i_km_mag/get_i_mk_mag
        # Multiperiod
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T).get_copy(merge_buses=True)
            self.assertEqual(net.num_periods,self.T)
            # Vars
            net.set_flags('bus',
                          'variable',
                          'any',
                          ['voltage magnitude','voltage angle'])
            net.set_flags('branch',
                          'variable',
                          'tap changer',
                          'tap ratio')
            net.set_flags('branch',
                          'variable',
                          'phase shifter',
                          'phase shift')
            self.assertEqual(net.num_vars,
                             (2*net.get_num_buses() +
                              net.get_num_tap_changers() +
                              net.get_num_phase_shifters())*self.T)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constr: only branches with nonzero ratingA contribute rows
            constr = pf.Constraint('AC branch flow limits',net)
            self.assertEqual(constr.name,'AC branch flow limits')
            constr.analyze()
            num_constr = len([br for br in net.branches if br.ratingA != 0.])*2*net.num_periods
            self.assertTupleEqual(constr.f.shape,(num_constr,))
            self.assertEqual(constr.J_row,num_constr)
            # zero ratings: give every branch a rating so all contribute below
            for br in net.branches:
                if br.ratingA == 0.:
                    br.ratingA = 100.
            # Constraint (fresh object after rating changes)
            constr = pf.Constraint('AC branch flow limits',net)
            self.assertEqual(constr.name,'AC branch flow limits')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            G = constr.G
            l = constr.l
            u = constr.u
            # Before
            self.assertEqual(constr.num_extra_vars,0)
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,0))
            self.assertEqual(G.nnz,0)
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(0,))
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(0,))
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,0)
            self.assertEqual(constr.G_row,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            self.assertEqual(constr.num_extra_vars,0)
            # Two rows (km and mk directions) per branch per period
            num_constr = net.get_num_branches()*2*self.T
            num_Jnnz = (net.get_num_branches()*8 +
                        net.get_num_tap_changers()*2 +
                        net.get_num_phase_shifters()*2)*self.T+num_constr
            constr.analyze()
            self.assertEqual(num_Jnnz,constr.J_nnz)
            self.assertEqual(num_constr,constr.G_nnz)
            self.assertEqual(num_constr,constr.J_row)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            G = constr.G
            l = constr.l
            u = constr.u
            # After analyze: one extra (slack) variable per row
            self.assertEqual(constr.num_extra_vars,num_constr)
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(num_constr,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(num_constr,net.num_vars+num_constr))
            self.assertEqual(J.nnz,num_Jnnz)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,net.num_vars+num_constr))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(num_constr,net.num_vars+num_constr))
            self.assertEqual(G.nnz,num_constr)
            # G is an identity over the extra variables
            self.assertTrue(np.all(G.row == np.array(range(num_constr))))
            self.assertTrue(np.all(G.col == np.array(range(net.num_vars,net.num_vars+num_constr))))
            self.assertTrue(np.all(G.row == G.col-net.num_vars))
            self.assertTrue(np.all(G.data == 1.))
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(num_constr,))
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(num_constr,))
            # Bounds are +/- ratingA per branch-direction row
            J_row = 0
            for t in range(net.num_periods):
                for bus in net.buses:
                    for branch in bus.branches_k:
                        #i = t*net.num_branches*2+2*branch.index
                        self.assertEqual(u[J_row],branch.ratingA)
                        self.assertEqual(u[J_row+1],branch.ratingA)
                        self.assertEqual(l[J_row],-branch.ratingA)
                        self.assertEqual(l[J_row+1],-branch.ratingA)
                        J_row += 2
            # Row info
            index = 0
            for t in range(net.num_periods):
                for bus in net.buses:
                    for branch in bus.branches_k:
                        if branch.ratingA != 0:
                            skmJ = constr.get_J_row_info_string(index)
                            smkJ = constr.get_J_row_info_string(index+1)
                            self.assertEqual(skmJ,"AC branch flow limits:branch:%d:%s:%d" %(branch.index,"km",t))
                            self.assertEqual(smkJ,"AC branch flow limits:branch:%d:%s:%d" %(branch.index,"mk",t))
                            skmG = constr.get_G_row_info_string(index)
                            smkG = constr.get_G_row_info_string(index+1)
                            self.assertEqual(skmG,"AC branch flow limits:branch:%d:%s:%d" %(branch.index,"km",t))
                            self.assertEqual(smkG,"AC branch flow limits:branch:%d:%s:%d" %(branch.index,"mk",t))
                            index += 2
            # Hessian structure (lower triangular)
            for i in range(constr.J.shape[0]):
                H = constr.get_H_single(i)
                self.assertTupleEqual(H.shape,(net.num_vars+num_constr,net.num_vars+num_constr))
                self.assertTrue(np.all(H.row >= H.col))
            Hcomb = constr.H_combined
            H_comb_nnz = 2*(net.num_branches*10 +
                            net.get_num_tap_changers()*5+
                            net.get_num_phase_shifters()*5)*self.T
            self.assertTupleEqual(Hcomb.shape,(net.num_vars+num_constr,net.num_vars+num_constr))
            self.assertTrue(np.all(Hcomb.row >= Hcomb.col))
            self.assertEqual(Hcomb.nnz,H_comb_nnz)
            # Extra vars initialized to zero
            y_init = constr.init_extra_vars
            self.assertEqual(y_init.size,constr.num_extra_vars)
            self.assertEqual(y_init.size,constr.f.size)
            self.assertTrue(np.all(y_init == 0.))
            constr.eval(x0)
            y0 = np.random.randn(num_constr)
            constr.eval(x0,y0)
            self.assertEqual(num_constr,constr.J_row)
            self.assertEqual(0,constr.G_nnz)
            self.assertEqual(num_Jnnz,constr.J_nnz)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            G = constr.G
            l = constr.l
            u = constr.u
            constr.combine_H(np.ones(f.size),False)
            Hcomb = constr.H_combined
            # After eval
            self.assertTrue(not np.any(np.isinf(f)))
            self.assertTrue(not np.any(np.isnan(f)))
            # Projections onto original vs extra variables
            P1 = constr.get_var_projection()
            P2 = constr.get_extra_var_projection()
            self.assertTrue(isinstance(P1,coo_matrix))
            self.assertTrue(isinstance(P2,coo_matrix))
            self.assertEqual(P1.shape[0],net.num_vars)
            self.assertEqual(P2.shape[0],constr.num_extra_vars)
            self.assertEqual(P1.shape[1],net.num_vars+constr.num_extra_vars)
            self.assertEqual(P2.shape[1],net.num_vars+constr.num_extra_vars)
            self.assertEqual(P1.nnz,net.num_vars)
            self.assertEqual(P2.nnz,constr.num_extra_vars)
            self.assertLess(np.linalg.norm(x0-P1*np.hstack((x0,y0))),1e-12)
            self.assertLess(np.linalg.norm(y0-P2*np.hstack((x0,y0))),1e-12)
            # Cross check current magnitudes: f + y should match |i| per direction
            J_row = 0
            for t in range(net.num_periods):
                for bus in net.buses:
                    for branch in bus.branches_k:
                        Pkm = branch.get_P_km()[t]
                        Qkm = branch.get_Q_km()[t]
                        Pmk = branch.get_P_mk()[t]
                        Qmk = branch.get_Q_mk()[t]
                        vk = branch.bus_k.v_mag[t]
                        vm = branch.bus_m.v_mag[t]
                        ikmmag = branch.get_i_km_mag(eps=param)[t]
                        imkmag = branch.get_i_mk_mag(eps=param)[t]
                        error_km = 100.*np.abs(ikmmag-f[J_row]-y0[J_row])/max([ikmmag,tol])
                        error_mk = 100.*np.abs(imkmag-f[J_row+1]-y0[J_row+1])/max([imkmag,tol])
                        self.assertLess(error_km,eps)
                        self.assertLess(error_mk,eps)
                        J_row += 2
            # Jacobian check
            pf.tests.utils.check_constraint_Jacobian(self,
                                                     constr,
                                                     x0,
                                                     y0,
                                                     NUM_TRIALS,
                                                     TOL,
                                                     EPS,
                                                     h)
            # Single Hessian check
            pf.tests.utils.check_constraint_single_Hessian(self,
                                                           constr,
                                                           x0,
                                                           y0,
                                                           NUM_TRIALS,
                                                           TOL,
                                                           EPS,
                                                           h)
            # Combined Hessian check 1
            h = 1e-12
            pf.tests.utils.check_constraint_combined_Hessian(self,
                                                             constr,
                                                             x0,
                                                             y0,
                                                             NUM_TRIALS,
                                                             TOL,
                                                             EPS,
                                                             h)
            # Combined Hessian check 2: combined == sum of coeff-weighted singles
            coeff = np.random.randn(constr.f.shape[0])
            constr.eval(x0,y0)
            constr.combine_H(coeff,False)
            H = constr.H_combined.copy()
            H_manual = 0
            for i in range(constr.f.size):
                Hi = constr.get_H_single(i)
                H_manual = H_manual + coeff[i]*Hi
            diff = coo_matrix(H_manual-H)
            self.assertLess(norm(diff.data)/norm(H.data),1e-12)
            # Sensitivities: largest |mu| of the two directions is stored
            net.clear_sensitivities()
            for t in range(net.num_periods):
                for branch in net.branches:
                    self.assertEqual(branch.sens_i_mag_u_bound[t], 0.)
            mu = np.random.randn(constr.J.shape[0])
            self.assertEqual(mu.size, constr.G.shape[0])
            constr.store_sensitivities(None, np.zeros(mu.size), mu, np.zeros(mu.size))
            G_row = 0
            for t in range(net.num_periods):
                for bus in net.buses:
                    for branch in bus.branches_k:
                        if np.abs(mu[G_row]) > np.abs(mu[G_row+1]):
                            self.assertEqual(branch.sens_i_mag_u_bound[t], mu[G_row])
                        else:
                            self.assertEqual(branch.sens_i_mag_u_bound[t], mu[G_row+1])
                        G_row += 2
            # with outages: all branches out -> constraint collapses to empty
            for br in net.branches:
                br.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
        # Single period
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,1).get_copy(merge_buses=True)
            self.assertEqual(net.num_periods,1)
            net.set_flags('bus',['variable','bounded'],'any','voltage magnitude')
            net.set_flags('bus','variable','not slack','voltage angle')
            self.assertEqual(net.num_vars,2*net.num_buses-net.get_num_slack_buses())
            if len([b for b in net.branches if b.ratingA != 0.]) == 0:
                continue
            constr = pf.Constraint('AC branch flow limits',net)
            constr.analyze()
            self.assertGreater(constr.num_extra_vars,0)
            # Single Hessian check (finite differences on random rows)
            x0 = net.get_var_values()
            y0 = np.zeros(constr.num_extra_vars)
            constr.eval(x0,y0)
            for i in range(10):
                j = np.random.randint(0,constr.f.size)
                constr.eval(x0,y0)
                g0 = constr.J.tocsr()[j,:].toarray().flatten()
                H0lt = constr.get_H_single(j).copy()
                self.assertTrue(np.all(H0lt.row >= H0lt.col)) # lower triangular
                H0 = (H0lt + H0lt.T) - triu(H0lt)
                d = np.random.randn(net.num_vars+constr.num_extra_vars)
                x = x0 + h*d[:net.num_vars]
                y = y0 + h*d[net.num_vars:]
                constr.eval(x,y)
                g1 = constr.J.tocsr()[j,:].toarray().flatten()
                Hd_exact = H0*d
                Hd_approx = (g1-g0)/h
                error = 100.*norm(Hd_exact-Hd_approx)/np.maximum(norm(Hd_exact),tol)
                self.assertLessEqual(error,EPS)
            # Combined Hessian check (finite differences along random directions)
            x0 = net.get_var_values()
            y0 = np.zeros(constr.num_extra_vars)
            lam = np.random.randn(constr.f.size)
            constr.eval(x0,y0)
            constr.combine_H(lam)
            h = 1e-11
            F0 = np.dot(constr.f,lam)
            GradF0 = constr.J.T*lam
            HessF0lt = constr.H_combined.copy()
            self.assertTrue(np.all(HessF0lt.row >= HessF0lt.col)) # lower triangular
            HessF0 = (HessF0lt + HessF0lt.T - triu(HessF0lt))
            for i in range(10):
                d = np.random.randn(x0.size+y0.size)
                x = x0 + h*d[:x0.size]
                y = y0 + h*d[x0.size:]
                constr.eval(x,y)
                F1 = np.dot(constr.f,lam)
                GradF1 = constr.J.T*lam
                Jd_exact = np.dot(GradF0,d)
                Jd_approx = (F1-F0)/h
                Hd_exact = HessF0*d
                Hd_approx = (GradF1-GradF0)/h
                errorJ = 100.*norm(Jd_exact-Jd_approx)/norm(Jd_exact)
                errorH = 100.*norm(Hd_exact-Hd_approx)/norm(Hd_exact)
                self.assertLess(errorJ,EPS)
                self.assertLess(errorH,EPS)
def test_constr_AC_FLOW_LIM_with_outages(self):
# Constants
h = 1e-11
tol = 1e-2
eps = 1.1 # %
# Multiperiod
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
self.assertEqual(net.num_periods,self.T)
# Vars
net.set_flags('bus',
'variable',
'any',
['voltage magnitude','voltage angle'])
net.set_flags('branch',
'variable',
'tap changer',
'tap ratio')
net.set_flags('branch',
'variable',
'phase shifter',
'phase shift')
self.assertEqual(net.num_vars,
(2*net.get_num_buses() +
net.get_num_tap_changers() +
net.get_num_phase_shifters())*self.T)
x0 = net.get_var_values()
self.assertTrue(type(x0) is np.ndarray)
self.assertTupleEqual(x0.shape,(net.num_vars,))
for branch in net.branches:
branch.in_service = False
# Constr
constr = pf.Constraint('AC branch flow limits',net)
constr.analyze()
constr.eval(x0)
self.assertEqual(constr.f.size, 0)
self.assertTupleEqual(constr.J.shape, (0, net.num_vars))
self.assertEqual(constr.l.size, 0)
self.assertEqual(constr.u.size, 0)
self.assertTupleEqual(constr.G.shape, (0, net.num_vars))
# Jacobian check
pf.tests.utils.check_constraint_Jacobian(self,
constr,
x0,
np.zeros(0),
NUM_TRIALS,
TOL,
EPS,
h)
# Sigle Hessian check
pf.tests.utils.check_constraint_single_Hessian(self,
constr,
x0,
np.zeros(0),
NUM_TRIALS,
TOL,
EPS,
h)
# Combined Hessian check 1
h = 1e-12
pf.tests.utils.check_constraint_combined_Hessian(self,
constr,
x0,
np.zeros(0),
NUM_TRIALS,
TOL,
EPS,
h)
    def test_constr_DUMMY(self):
        """Test the Python-implemented 'dummy DC power balance' constraint.

        Verifies that pf.constraints.DummyDCPF produces exactly the same
        A matrix and b vector as the built-in 'DC power balance' constraint,
        both after analyze and after eval, and that it collapses to an empty
        constraint when every bus is on outage.
        """
        # Multiperiod
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            # Too big (skip large cases to keep the test fast)
            if net.num_buses > 1000:
                continue
            # Add vargens (one per bus that has loads)
            load_buses = net.get_load_buses()
            net.add_var_generators_from_parameters(load_buses,80.,50.,30.,5,0.05)
            self.assertGreater(net.num_var_generators,0)
            self.assertEqual(net.num_var_generators,len([b for b in net.buses if b.loads]))
            for b in net.buses:
                if b.loads:
                    self.assertGreater(len(b.var_generators),0)
                    for vargen in b.var_generators:
                        self.assertEqual(vargen.bus,b)
            # batteries: flip the sign of every other battery's power
            for bat in net.batteries:
                if bat.index % 2 == 0:
                    bat.P *= -1.
            # Variables
            net.set_flags('bus',
                          'variable',
                          'not slack',
                          'voltage angle')
            net.set_flags('generator',
                          'variable',
                          'any',
                          'active power')
            net.set_flags('load',
                          'variable',
                          'any',
                          'active power')
            net.set_flags('variable generator',
                          'variable',
                          'any',
                          'active power')
            net.set_flags('branch',
                          'variable',
                          'phase shifter',
                          'phase shift')
            net.set_flags('battery',
                          'variable',
                          'any',
                          'charging power')
            self.assertEqual(net.num_vars,
                             (net.num_buses-net.get_num_slack_buses() +
                              net.num_generators +
                              net.num_loads +
                              net.num_var_generators +
                              net.get_num_phase_shifters()+
                              2*net.num_batteries)*net.num_periods)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Ref constraint (built-in C implementation)
            constrREF = pf.Constraint('DC power balance',net)
            self.assertEqual(constrREF.name,'DC power balance')
            # Dummy constraint (Python implementation)
            constr = pf.constraints.DummyDCPF(net)
            self.assertEqual(constr.name,'dummy DC power balance')
            self.assertEqual(constr.A_row,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.A_row,constrREF.A_row)
            self.assertEqual(constr.A_nnz,constrREF.A_nnz)
            self.assertEqual(constr.b.size,0)
            self.assertEqual(constr.A.shape[0],0)
            self.assertEqual(constr.A.shape[1],0)
            self.assertEqual(constr.A.nnz,0)
            constrREF.analyze()
            constr.analyze()
            # After analyze: identical structure and values
            self.assertEqual(constr.A_row,net.num_buses*self.T)
            self.assertGreater(constr.A_nnz,0)
            self.assertEqual(constr.A_row,constrREF.A_row)
            self.assertEqual(constr.A_nnz,constrREF.A_nnz)
            self.assertTrue(np.all(constr.b == constrREF.b))
            self.assertTrue(np.all(constr.A.row == constrREF.A.row))
            self.assertTrue(np.all(constr.A.col == constrREF.A.col))
            self.assertTrue(np.all(constr.A.data == constrREF.A.data))
            self.assertTupleEqual(constr.l.shape,(0,))
            self.assertTupleEqual(constr.u.shape,(0,))
            self.assertTupleEqual(constr.f.shape,(0,))
            self.assertTupleEqual(constr.G.shape,(0,net.num_vars))
            self.assertTupleEqual(constr.J.shape,(0,net.num_vars))
            constrREF.eval(net.get_var_values())
            constr.eval(net.get_var_values())
            self.assertTrue(np.all(constr.b == constrREF.b))
            self.assertTrue(np.all(constr.A.row == constrREF.A.row))
            self.assertTrue(np.all(constr.A.col == constrREF.A.col))
            self.assertTrue(np.all(constr.A.data == constrREF.A.data))
            # with bus outages: constraint collapses to empty
            for bus in net.buses:
                bus.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
    def test_constr_BAT_DYN(self):
        """Test the 'battery dynamics' linear equality constraint.

        Adds batteries to every generator bus of a 5-period network and
        checks the structure of A and b: one initial-energy equation plus
        one energy-update equation per battery per period
        (E_{t+1} - E_t - eta_c*Pc_t + (1/eta_d)*Pd_t = 0, with E_init and
        E_final pinning the boundary periods).  Also checks that the
        constraint collapses to empty when all batteries are on outage.
        """
        # Multi period
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,5)
            self.assertEqual(net.num_periods,5)
            self.assertEqual(net.num_vars,0)
            # Add batteries
            gen_buses = net.get_generator_buses()
            net.add_batteries_from_parameters(gen_buses,20.,40.,0.8,0.7)
            self.assertEqual(net.num_batteries,len(gen_buses))
            self.assertGreater(net.num_batteries,0)
            # Vars: Pc, Pd, and E per battery per period -> 3 vars each
            net.set_flags('battery',
                          'variable',
                          'any',
                          ['charging power','energy level'])
            self.assertEqual(net.num_vars,5*3*net.num_batteries)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('battery dynamics',net)
            self.assertEqual(constr.name,'battery dynamics')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            l = constr.l
            G = constr.G
            u = constr.u
            # Before
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(0,))
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(0,))
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,0))
            self.assertEqual(G.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            constr.analyze()
            # (5+1) rows per battery: init eq + one update eq per period
            self.assertEqual(constr.A_row,(5+1)*net.num_batteries)
            self.assertEqual(constr.A_nnz,5*4*net.num_batteries)
            self.assertEqual(constr.G_nnz,0)
            constr.eval(x0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            l = constr.l
            G = constr.G
            u = constr.u
            # After
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(6*net.num_batteries,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,net.num_vars))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(6*net.num_batteries,net.num_vars))
            self.assertEqual(A.nnz,5*4*net.num_batteries)
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(0,))
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(0,))
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,net.num_vars))
            self.assertEqual(G.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            # Check per-battery, per-period structure of A and b
            for t in range(5):
                for bat in net.batteries:
                    self.assertTrue(bat.has_flags('variable',['charging power','energy level']))
                    aPc = np.where(A.col == bat.index_Pc[t])[0]
                    aPd = np.where(A.col == bat.index_Pd[t])[0]
                    aE = np.where(A.col == bat.index_E[t])[0]
                    if t < 5-1:
                        aEE = np.where(A.col == bat.index_E[t+1])[0]
                    self.assertEqual(aPc.size,1)
                    self.assertEqual(aPd.size,1)
                    eq_row = A.row[aPc[0]]
                    self.assertEqual(eq_row,A.row[aPd[0]])
                    self.assertEqual(A.data[aPc[0]],-bat.eta_c)
                    self.assertEqual(A.data[aPd[0]],1./bat.eta_d)
                    if t == 0:
                        self.assertEqual(aE.size,2)
                        # init eq
                        j = aE[0]
                        self.assertEqual(A.data[j],1.)
                        self.assertEqual(b[A.row[j]],bat.E_init)
                        self.assertEqual(np.where(A.row == A.row[j])[0].size,1)
                        # update eq E_{t+1} - E_t - eta_c Pc_t + (1/eta_d) Pd_t = 0
                        j = aE[1]
                        self.assertEqual(A.data[j],-1.)
                        self.assertEqual(b[A.row[j]],0.)
                        self.assertEqual(np.where(A.row == A.row[j])[0].size,4)
                        self.assertEqual(A.row[j],eq_row)
                        self.assertEqual(A.row[j],A.row[aEE[0]])
                    elif t < 5-1:
                        self.assertEqual(aE.size,2)
                        # update eq E_t - E_{t-1} - eta_c Pc_{t-1} + (1/eta_d) Pd_{t-1} = 0
                        j = aE[0]
                        self.assertEqual(A.data[j],1.)
                        self.assertEqual(b[A.row[j]],0.)
                        self.assertEqual(np.where(A.row == A.row[j])[0].size,4)
                        self.assertNotEqual(A.row[j],eq_row)
                        self.assertNotEqual(A.row[j],A.row[aEE[0]])
                        # update eq E_{t+1} - E_t - eta_c Pc_t + (1/eta_d) Pd_t = 0
                        j = aE[1]
                        self.assertEqual(A.data[j],-1.)
                        self.assertEqual(b[A.row[j]],0.)
                        self.assertEqual(np.where(A.row == A.row[j])[0].size,4)
                        self.assertEqual(A.row[j],eq_row)
                        self.assertEqual(A.row[j],A.row[aEE[0]])
                    else:
                        self.assertEqual(aE.size,2)
                        # update eq E_t - E_{t-1} - eta_c Pc_{t-1} + (1/eta_d) Pd_{t-1} = 0
                        j = aE[0]
                        self.assertEqual(A.data[j],1.)
                        self.assertEqual(b[A.row[j]],0.)
                        self.assertEqual(np.where(A.row == A.row[j])[0].size,4)
                        self.assertNotEqual(A.row[j],eq_row)
                        # update eq - E_t - eta_c Pc_t + (1/eta_d) Pd_t = -E_final
                        j = aE[1]
                        self.assertEqual(A.data[j],-1.)
                        self.assertEqual(b[A.row[j]],-bat.E_final)
                        self.assertEqual(np.where(A.row == A.row[j])[0].size,3)
                        self.assertEqual(A.row[j],eq_row)
            # with outages: all batteries out -> empty constraint
            for bat in net.batteries:
                bat.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_BAT_DYN_with_outages(self):
# Multi period
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,5)
# Add battries
gen_buses = net.get_generator_buses()
net.add_batteries_from_parameters(gen_buses,20.,40.,0.8,0.7)
self.assertEqual(net.num_batteries,len(gen_buses))
self.assertGreater(net.num_batteries,0)
# Vars
net.set_flags('battery',
'variable',
'any',
['charging power','energy level'])
self.assertEqual(net.num_vars,5*3*net.num_batteries)
x0 = net.get_var_values()
# Constraint
constr0 = pf.Constraint('battery dynamics',net)
constr0.analyze()
for branch in net.branches:
branch.in_service = False
for gen in net.generators:
gen.in_service = False
constr1 = pf.Constraint('battery dynamics',net)
constr1.analyze()
self.assertEqual((constr1.A-constr0.A).tocoo().nnz, 0)
self.assertEqual((constr1.G-constr0.G).tocoo().nnz, 0)
self.assertLess(norm(constr1.b-constr0.b), 1e-8)
self.assertLess(norm(constr1.l-constr0.u), 1e-8)
self.assertLess(norm(constr1.l-constr0.u), 1e-8)
    def test_constr_LOAD_PF(self):
        """Test the 'load constant power factor' linear equality constraint.

        With load P and Q as variables and random target power factors,
        checks that each row of A ties Q to P with coefficient
        -/+ sqrt((1-gamma^2)/gamma^2) (sign depending on sign(P*Q)),
        that A*x = b holds once Q is set consistently, and that the
        constraint collapses to empty when all loads are on outage.
        """
        # Multi period
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            self.assertEqual(net.num_vars,0)
            # Powers (make all load P strictly positive)
            for load in net.loads:
                load.P = np.random.rand(net.num_periods)
                self.assertTrue(np.all(load.P > 0))
            # Target power factors (random in (0,1))
            for load in net.loads:
                load.target_power_factor = np.random.rand()
                self.assertTrue(0 < load.target_power_factor < 1.)
            # Vars
            net.set_flags('load',
                          'variable',
                          'any',
                          ['active power','reactive power'])
            self.assertEqual(net.num_vars,2*net.num_loads*self.T)
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('load constant power factor',net)
            self.assertEqual(constr.name,'load constant power factor')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            l = constr.l
            G = constr.G
            u = constr.u
            # Before
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(0,))
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(0,))
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,0))
            self.assertEqual(G.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            constr.analyze()
            # One row per load per period, two entries (P and Q) per row
            self.assertEqual(constr.A_nnz,2*net.num_loads*self.T)
            self.assertEqual(constr.A_row,net.num_loads*self.T)
            constr.eval(x0)
            self.assertEqual(constr.A_nnz,0)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            l = constr.l
            G = constr.G
            u = constr.u
            # After
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(net.num_loads*self.T,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,net.num_vars))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(net.num_loads*self.T,net.num_vars))
            self.assertEqual(A.nnz,2*net.num_loads*self.T)
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(0,))
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(0,))
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,net.num_vars))
            self.assertEqual(G.nnz,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            # Per-row check; also sets load.Q consistent with the target pf
            for load in net.loads:
                for t in range(net.num_periods):
                    indices = np.where(A.col == load.index_P[t])[0]
                    self.assertEqual(indices.size,1)
                    row = A.row[indices[0]]
                    indices = np.where(A.row == row)[0]
                    self.assertEqual(indices.size,2)
                    for i in indices:
                        if A.col[i] == load.index_P[t]:
                            gamma = load.target_power_factor
                            factor = np.sqrt((1.-gamma**2.)/(gamma**2.))
                            # set Q so that the target power factor is met exactly
                            load.Q[t] = np.abs(load.P[t])*factor*(1. if load.Q[t] >= 0 else -1.)
                            self.assertLess(np.abs(gamma-load.power_factor[t]),1e-12)
                            if load.P[t]*load.Q[t] >= 0:
                                self.assertAlmostEqual(A.data[i],-factor)
                                self.assertLess(np.abs(-factor*load.P[t]+load.Q[t]),1e-12)
                            else:
                                self.assertAlmostEqual(A.data[i],factor)
                                self.assertLess(np.abs(factor*load.P[t]+load.Q[t]),1e-12)
                        else:
                            self.assertEqual(A.col[i],load.index_Q[t])
                            self.assertEqual(A.data[i],1.)
            # With consistent Q values, the equality A*x = b is satisfied
            x = net.get_var_values()
            self.assertLess(np.linalg.norm(constr.A*x-constr.b),1e-10)
            for load in net.loads:
                for t in range(net.num_periods):
                    self.assertAlmostEqual(load.power_factor[t],load.target_power_factor)
            # with outages: all loads out -> empty constraint
            for load in net.loads:
                load.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
def test_constr_LOAD_PF_with_outages(self):
# Multi period
for case in test_cases.CASES:
net = pf.Parser(case).parse(case,self.T)
# Vars
net.set_flags('load',
'variable',
'any',
['active power','reactive power'])
self.assertEqual(net.num_vars,2*net.num_loads*self.T)
# Constraint
constr0 = pf.Constraint('load constant power factor',net)
constr0.analyze()
for branch in net.branches:
branch.in_service = False
for gen in net.generators:
gen.in_service = False
constr1 = pf.Constraint('load constant power factor',net)
constr1.analyze()
self.assertEqual((constr1.A-constr0.A).tocoo().nnz, 0)
self.assertEqual((constr1.G-constr0.G).tocoo().nnz, 0)
self.assertLess(norm(constr1.b-constr0.b), 1e-8)
self.assertLess(norm(constr1.l-constr0.u), 1e-8)
self.assertLess(norm(constr1.l-constr0.u), 1e-8)
    def test_constr_AC_LIN_FLOW_LIM(self):
        """Test the 'linearized AC branch flow limits' constraint.

        Checks the error paths (tap changers / phase shifters as variables,
        and unbounded voltage magnitudes both raise ConstraintError), the
        G/l/u structure after a successful analyze, and the empty constraint
        when every branch is on outage.
        """
        # Multiperiod
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case,self.T)
            self.assertEqual(net.num_periods,self.T)
            # Vars
            net.set_flags('bus',
                          'variable',
                          'any',
                          'voltage magnitude')
            net.set_flags('bus',
                          'variable',
                          'not slack',
                          'voltage angle')
            net.set_flags('branch',
                          'variable',
                          'tap changer',
                          'tap ratio')
            net.set_flags('branch',
                          'variable',
                          'phase shifter',
                          'phase shift')
            self.assertEqual(net.num_vars,
                             (2*net.get_num_buses()-net.get_num_slack_buses() +
                              net.get_num_tap_changers() +
                              net.get_num_phase_shifters())*self.T)
            # Zero ratings: give every branch a rating
            for br in net.branches:
                if br.ratingA == 0.:
                    br.ratingA = 100.
            x0 = net.get_var_values()
            self.assertTrue(type(x0) is np.ndarray)
            self.assertTupleEqual(x0.shape,(net.num_vars,))
            # Constraint
            constr = pf.Constraint('linearized AC branch flow limits',net)
            self.assertEqual(constr.name,'linearized AC branch flow limits')
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            G = constr.G
            l = constr.l
            u = constr.u
            # Before
            self.assertEqual(constr.num_extra_vars,0)
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,0))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(0,0))
            self.assertEqual(G.nnz,0)
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(0,))
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(0,))
            self.assertEqual(constr.J_row,0)
            self.assertEqual(constr.A_row,0)
            self.assertEqual(constr.G_row,0)
            self.assertEqual(constr.J_nnz,0)
            self.assertEqual(constr.A_nnz,0)
            self.assertEqual(constr.G_nnz,0)
            self.assertEqual(constr.num_extra_vars,0)
            # Tap ratios and phase shifts as variables are not supported
            if net.get_num_tap_changers()+net.get_num_phase_shifters() > 0:
                self.assertRaises(pf.ConstraintError,constr.analyze)
                constr.clear_error()
                continue
            # No voltage magnitude bounds -> error
            self.assertRaises(pf.ConstraintError,constr.analyze)
            # NOTE(review): assertRaisesRegexp is a deprecated alias of
            # assertRaisesRegex (alias removed in Python 3.12) -- confirm
            # supported Python versions before renaming.
            self.assertRaisesRegexp(pf.ConstraintError,
                                    "AC_LIN_FLOW_LIM constraint requires variable voltage magnitudes to be bounded",
                                    constr.analyze)
            constr.clear_error()
            net.set_flags('bus',
                          'bounded',
                          'any',
                          'voltage magnitude')
            self.assertEqual(net.num_bounded,net.num_buses*self.T)
            constr.analyze()
            self.assertGreaterEqual(constr.G_nnz,constr.G_row)
            f = constr.f
            J = constr.J
            A = constr.A
            b = constr.b
            G = constr.G
            l = constr.l
            u = constr.u
            # After analyze: only G/l/u are populated
            self.assertEqual(constr.num_extra_vars,0)
            self.assertTrue(type(f) is np.ndarray)
            self.assertTupleEqual(f.shape,(0,))
            self.assertTrue(type(b) is np.ndarray)
            self.assertTupleEqual(b.shape,(0,))
            self.assertTrue(type(J) is coo_matrix)
            self.assertTupleEqual(J.shape,(0,net.num_vars))
            self.assertEqual(J.nnz,0)
            self.assertTrue(type(A) is coo_matrix)
            self.assertTupleEqual(A.shape,(0,net.num_vars))
            self.assertEqual(A.nnz,0)
            self.assertTrue(type(G) is coo_matrix)
            self.assertTupleEqual(G.shape,(constr.G_row,net.num_vars))
            self.assertFalse(np.any(np.isnan(G.data)))
            self.assertTrue(type(u) is np.ndarray)
            self.assertTupleEqual(u.shape,(constr.G_row,))
            self.assertFalse(np.any(np.isnan(u)))
            self.assertTrue(type(l) is np.ndarray)
            self.assertTupleEqual(l.shape,(constr.G_row,))
            self.assertTrue(np.all(l == -1e8))
            # with outages: all branches out -> empty constraint
            for br in net.branches:
                br.in_service = False
            constr.analyze()
            constr.eval(net.get_var_values())
            self.assertEqual(constr.A.nnz, 0)
            self.assertEqual(constr.A.shape[0], 0)
            self.assertEqual(constr.b.size, 0)
            self.assertTrue(np.all(constr.b == 0))
            self.assertEqual(constr.J.nnz, 0)
            self.assertEqual(constr.J.shape[0], 0)
            self.assertEqual(constr.f.size, 0)
            self.assertTrue(np.all(constr.f == 0.))
            self.assertTrue(np.all(constr.J.data == 0.))
            self.assertEqual(constr.G.nnz, 0)
            self.assertEqual(constr.G.shape[0], 0)
            self.assertEqual(constr.u.size, 0)
            self.assertEqual(constr.l.size, 0)
            self.assertEqual(constr.H_combined.nnz, 0)
            self.assertTrue(np.all(constr.H_nnz == 0), 0)
    def test_nonlinear_constr_creation(self):
        """Exercise the low-level setters/getters of a Constraint object.

        Uses the 'variable fixing' constraint as a representative type and
        checks that the counters (J_row, J_nnz), the function vector (f),
        the Jacobian (J), the per-index H matrices, and the H_nnz counter
        array can all be written and read back consistently.
        """
        # Single period
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case)
            self.assertEqual(net.num_periods,1)
            constr = pf.Constraint("variable fixing",net)
            # J row counter starts at zero and is directly writable
            self.assertEqual(constr.J_row,0)
            constr.J_row = 19
            self.assertEqual(constr.J_row,19)
            # J_nnz counter behaves the same way
            self.assertEqual(constr.J_nnz,0)
            constr.J_nnz = 17
            self.assertEqual(constr.J_nnz,17)
            # f: constraint function value vector (empty until set)
            f = constr.f
            self.assertEqual(f.size,0)
            a = np.random.randn(15)
            constr.set_f(a)
            self.assertEqual(constr.f.size,15)
            self.assertTrue(np.all(constr.f == a))
            # J: constraint Jacobian, exposed as a scipy coo_matrix;
            # set_J must round-trip rows/cols/data exactly
            J = constr.J
            self.assertTupleEqual(J.shape,(0,0))
            self.assertEqual(J.nnz,0)
            Jm = coo_matrix(np.random.randn(4,3))
            constr.set_J(Jm)
            self.assertTrue(isinstance(constr.J,coo_matrix))
            self.assertTupleEqual(constr.J.shape,Jm.shape)
            self.assertTrue(np.all(constr.J.row == Jm.row))
            self.assertEqual(constr.J.nnz,Jm.nnz)
            self.assertTrue(np.all(constr.J.col == Jm.col))
            self.assertTrue(np.all(constr.J.data == Jm.data))
            # H array: storage for per-index H matrices must be allocated first
            self.assertEqual(constr.H_array_size,0)
            constr.allocate_H_array(100)
            self.assertEqual(constr.H_array_size,100)
            # H single: individual entries are empty until explicitly set,
            # then round-trip exactly like J does
            H = constr.get_H_single(5)
            self.assertTrue(isinstance(H,coo_matrix))
            self.assertEqual(H.nnz,0)
            self.assertTupleEqual(H.shape,(0,0))
            A = coo_matrix(np.random.randn(5,4))
            constr.set_H_single(5,A)
            H = constr.get_H_single(5)
            self.assertTrue(isinstance(H,coo_matrix))
            self.assertTupleEqual(A.shape,H.shape)
            self.assertTrue(np.all(A.row == H.row))
            self.assertEqual(A.nnz,H.nnz)
            self.assertTrue(np.all(A.col == H.col))
            self.assertTrue(np.all(A.data == H.data))
            # H_nnz: int32 counter array, writable in place through the property
            constr.set_H_nnz(np.zeros(50,dtype='int32'))
            H_nnz = constr.H_nnz
            self.assertTrue(isinstance(H_nnz,np.ndarray))
            self.assertEqual(H_nnz.dtype,np.dtype('int32'))
            self.assertEqual(H_nnz.size,50)
            for i in range(50):
                self.assertEqual(H_nnz[i],0)
            # writes through constr.H_nnz must be visible via the earlier
            # reference, i.e. the property returns a view, not a copy
            constr.H_nnz[10] = 2
            self.assertEqual(H_nnz[10],2)
    def test_robustness_with_outages(self):
        """Check constraint analyze/eval consistency across service changes.

        Each constraint caches analysis results tied to the network's
        state_tag; after any in-service change, eval must raise
        ConstraintError until analyze is re-run.
        """
        for case in test_cases.CASES:
            net = pf.Parser(case).parse(case, self.T)
            constraints = [pf.Constraint('variable bounds', net),
                           pf.Constraint('variable fixing', net),
                           pf.Constraint('battery dynamics', net),
                           pf.Constraint('generator active power participation', net),
                           pf.Constraint('PVPQ switching', net),
                           pf.Constraint('AC power balance', net), # nonlinear
                           pf.Constraint('DC power balance', net),
                           pf.Constraint('linearized AC power balance', net),
                           pf.Constraint('voltage set point regulation', net), # nonlinear
                           pf.Constraint('voltage regulation by transformers', net), # nonlinear
                           pf.Constraint('voltage regulation by shunts', net), # nonlinear
                           pf.Constraint('AC branch flow limits', net), # nonlinear
                           pf.Constraint('DC branch flow limits', net),
                           pf.Constraint('generator ramp limits', net),
                           pf.Constraint('load constant power factor', net)]
            # Add variables so the constraints have something to act on
            net.set_flags('bus',
                          'variable',
                          'any',
                          ['voltage magnitude','voltage angle'])
            net.set_flags('generator',
                          'variable',
                          'any',
                          ['active power','reactive power'])
            net.set_flags('branch',
                          'variable',
                          'tap changer',
                          'tap ratio')
            net.set_flags('branch',
                          'variable',
                          'phase shifter',
                          'phase shift')
            net.set_flags('shunt',
                          'variable',
                          'switching - v',
                          'susceptance')
            net.set_flags('battery',
                          'variable',
                          'any',
                          ['charging power','energy level'])
            # sanity check: total variable count matches the flags set above
            self.assertEqual(net.num_vars,
                             (2*net.num_buses +
                              2*net.num_generators +
                              net.get_num_tap_changers()+
                              net.get_num_phase_shifters()+
                              net.get_num_switched_v_shunts()+
                              3*net.num_batteries)*self.T)
            x0 = net.get_var_values()
            net.make_all_in_service()
            # Analyze without outages
            for c in constraints:
                c.analyze()
            # Eval without outages: tags match, eval succeeds
            for c in constraints:
                self.assertEqual(c.state_tag, net.state_tag)
                c.eval(x0)
            # Take all generators and branches out of service
            for gen in net.generators:
                gen.in_service = False
            for branch in net.branches:
                branch.in_service = False
            # Eval with outages: cached analysis is now stale, eval must fail
            for c in constraints:
                self.assertNotEqual(c.state_tag, net.state_tag)
                self.assertRaises(pf.ConstraintError,
                                  c.eval,
                                  x0)
            # Analyze with outages
            for c in constraints:
                c.analyze()
            # Eval with outages: re-analysis refreshed the tag, eval succeeds
            for c in constraints:
                self.assertEqual(c.state_tag, net.state_tag)
                c.eval(x0)
            net.make_all_in_service()
            # Eval without outages: state changed again, eval must fail again
            for c in constraints:
                self.assertNotEqual(c.state_tag, net.state_tag)
                self.assertRaises(pf.ConstraintError,
                                  c.eval,
                                  x0)
    def tearDown(self):
        """No per-test cleanup is needed; present for unittest symmetry."""
        pass
| 43.84967 | 144 | 0.482273 |
36c4569159ee9d9d0f00b3844753d392f8bbe571 | 1,822 | py | Python | python/general-python/agol-helper/community.py | claudeshyaka-esri/developer-support | 016940d74f92a78f362900ab5329aa88c27d0a43 | [
"Apache-2.0"
] | 272 | 2015-02-11T16:26:39.000Z | 2022-03-31T08:47:33.000Z | python/general-python/agol-helper/community.py | claudeshyaka-esri/developer-support | 016940d74f92a78f362900ab5329aa88c27d0a43 | [
"Apache-2.0"
] | 254 | 2015-02-11T01:12:35.000Z | 2021-04-22T22:14:20.000Z | python/general-python/agol-helper/community.py | claudeshyaka-esri/developer-support | 016940d74f92a78f362900ab5329aa88c27d0a43 | [
"Apache-2.0"
] | 211 | 2015-02-10T00:09:07.000Z | 2022-02-24T12:27:40.000Z | from agol import AGOL
class community(AGOL):
    """
    Community object that contains operations related to users and groups, \
    and inherits properties from the AGOL object.

    Fix: removed a stray debug ``print(url)`` left in ``groupSearch`` and
    factored the identical org-scoped search payload out of the two search
    methods.
    """

    def _orgSearchData(self):
        """Return the shared query payload for org-scoped searches."""
        return {'token': self.token,
                'f': 'json',
                'num': 15,
                'q': 'orgid:{orgID}'.format(orgID = self.orgID)}

    def groupSearch(self):
        """
        The Group Search operation searches for groups in the portal:
        http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#//02r3000000m1000000
        """
        url = "http://{short}.maps.arcgis.com/sharing/rest/community/groups".format(short = self.short)
        jsonResponse = self.sendRequest(url, self._orgSearchData())
        return jsonResponse

    def userSearch(self):
        """
        The User Search operation searches for users in the portal.
        http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/User_Search/02r3000000m6000000/
        """
        url = "http://{short}.maps.arcgis.com/sharing/rest/community/users".format(short = self.short)
        jsonResponse = self.sendRequest(url, self._orgSearchData())
        return jsonResponse

    def communitySelf(self):
        """
        This resource allows discovery of the current authenticated user identified by the token.
        http://resources.arcgis.com/en/help/arcgis-rest-api/index.html#/Self/02r300000079000000/
        """
        url = "http://{short}.maps.arcgis.com/sharing/rest/community/self".format(short = self.short)
        data = {'token': self.token,
                'f': 'json'}
        jsonResponse = self.sendRequest(url, data)
        return jsonResponse
| 39.608696 | 103 | 0.603183 |
b5e256209acf7db9e6256aa4236d2076146e516b | 1,922 | py | Python | trust-devices.py | slayerizer/intrusion-detection | 8515b1d86eae6bf7fbce444d2ec7bec97e209a3a | [
"MIT"
] | 42 | 2019-03-17T00:57:48.000Z | 2020-06-21T18:27:13.000Z | trust-devices.py | LRA-QC/intrusion-detection | 8515b1d86eae6bf7fbce444d2ec7bec97e209a3a | [
"MIT"
] | 1 | 2019-03-17T15:10:21.000Z | 2019-04-01T18:17:40.000Z | trust-devices.py | LRA-QC/intrusion-detection | 8515b1d86eae6bf7fbce444d2ec7bec97e209a3a | [
"MIT"
] | 4 | 2019-03-17T05:05:07.000Z | 2019-03-19T14:15:59.000Z | #!/usr/bin/python3
"""Device intrusion script
This script will import devices listed in the second argumment into the database specified in the first argument.
The detect.py script will detect devices in your network and generate a list of devices in detects in devices.mac.
You can modify it if needed and import it with this script.
Feel free to make adjustment to the list before importing it.
example:
sudo ./trust-devices.py data.db devices.mac
To flush the whitelist table and startover : specify --flush at the end
example:
sudo ./trust-devices.py data.db devices.mac --flush
Author: Luc Raymond lucraymond@gmail.com
License: MIT
Requirements : nmap and python
"""
import os
import subprocess
import xml.etree.ElementTree as ET
import re
import datetime
import sys
import sqlite3
def unlink(filename):
    """Remove *filename* when it is an existing regular file; otherwise no-op."""
    if not os.path.isfile(filename):
        return
    os.remove(filename)
if len(sys.argv) < 3:
    print("Syntax\n\t./trust.py <database> <devices.mac> [--flush]")
else:
    conn = sqlite3.connect(sys.argv[1])
    if conn:
        # Optional --flush: drop the whitelist and reclaim space
        if len(sys.argv) == 4 and sys.argv[3] == "--flush":
            print("- Flushing whitelist")
            conn.execute('drop table whitelist;')
            conn.commit()
            conn.execute('vacuum;')
        print("- Creating table (if needed)")
        conn.execute('CREATE TABLE IF NOT EXISTS whitelist (mac text, description text, primary key (mac));')
        # Fixes: the original called `f.close` without parentheses (a no-op,
        # so the file was never closed) - a with-block closes it reliably;
        # it also built SQL via string interpolation (breaks on quotes in the
        # description) and crashed with IndexError on lines lacking a '|'.
        with open(sys.argv[2], "r") as f:
            print("- processing whitelist")
            for r in f:
                m = r.strip().split("|", 1)
                if len(m) == 2:
                    # Parameterized insert: immune to quoting problems.
                    conn.execute("insert or ignore into whitelist values (?, ?);",
                                 (m[0], m[1]))
        conn.commit()
        conn.close()
    else:
        print("Error creating/accessing the database")
1dfcdac6efd0866fc0fa85eb1ba67a3fb82acbe3 | 1,900 | py | Python | tests/test_service_customer_groups.py | jeroenubbink/commercetools-python-sdk | ee27768d6fdde3e12618059891d1d4f75dd61390 | [
"MIT"
] | null | null | null | tests/test_service_customer_groups.py | jeroenubbink/commercetools-python-sdk | ee27768d6fdde3e12618059891d1d4f75dd61390 | [
"MIT"
] | 1 | 2019-07-15T07:27:06.000Z | 2019-07-15T07:27:06.000Z | tests/test_service_customer_groups.py | jeroenubbink/commercetools-python-sdk | ee27768d6fdde3e12618059891d1d4f75dd61390 | [
"MIT"
] | null | null | null | from commercetools import types
def test_get_by_id(client):
    """A created customer group can be fetched back by its id."""
    created = client.customer_groups.create(
        draft=types.CustomerGroupDraft(group_name="test customer group")
    )
    assert created.id
    fetched = client.customer_groups.get_by_id(created.id)
    assert fetched
def test_get_by_key(client):
    """A created customer group can be fetched back by its key."""
    created = client.customer_groups.create(
        draft=types.CustomerGroupDraft(
            group_name="test customer group", key="test-customer-group"
        )
    )
    assert created.key
    fetched = client.customer_groups.get_by_key(created.key)
    assert fetched
def test_query(client):
    """Querying returns all groups and honours the limit parameter."""
    for key in ("test-customer-group-1", "test-customer-group-2"):
        client.customer_groups.create(draft=types.CustomerGroupDraft(key=key))

    page = client.customer_groups.query(sort="id asc", limit=10)
    assert len(page.results) == 2
    assert page.total == 2

    page = client.customer_groups.query(sort=["id asc", "name asc"], limit=1)
    assert len(page.results) == 1
    assert page.total == 2
def test_delete_by_id(client):
    """A customer group can be deleted by id at its current version."""
    group = client.customer_groups.create(
        draft=types.CustomerGroupDraft(
            group_name="test customer group", key="test-customer-group"
        )
    )
    assert group.id
    deleted = client.customer_groups.delete_by_id(group.id, version=group.version)
    assert deleted
def test_delete_by_key(client):
    """A customer group can be deleted by key at its current version."""
    group = client.customer_groups.create(
        draft=types.CustomerGroupDraft(
            group_name="test customer group", key="test-customer-group"
        )
    )
    assert group.key
    deleted = client.customer_groups.delete_by_key(group.key, version=group.version)
    assert deleted
| 29.230769 | 79 | 0.708947 |
0bea2bdcef55ebbbaebac304e62a16ae84fde0ad | 13,221 | py | Python | gsxws/repairs.py | wgoldm/py-gsxws | 213979f2c3d52c9b4f6a55f6848edc1941db6093 | [
"BSD-2-Clause"
] | null | null | null | gsxws/repairs.py | wgoldm/py-gsxws | 213979f2c3d52c9b4f6a55f6848edc1941db6093 | [
"BSD-2-Clause"
] | null | null | null | gsxws/repairs.py | wgoldm/py-gsxws | 213979f2c3d52c9b4f6a55f6848edc1941db6093 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"gsxws/repairs.py"
import sys
import logging
from core import GsxObject, GsxError, validate
from lookups import Lookup
# (code, human-readable label) choice tuples used throughout the GSX repair
# APIs. The labels are runtime strings and are kept verbatim (including the
# 'Non-Replinished' spelling, which presumably mirrors GSX wording).

# Repair type codes accepted by GSX.
REPAIR_TYPES = (
    ('CA', 'Carry-In/Non-Replinished'),
    ('NE', 'Return Before Replace'),
    ('NT', 'No Trouble Found'),
    ('ON', 'Onsite (Indirect/Direct)'),
    ('RR', 'Repair Or Replace/Whole Unit Mail-In'),
    ('WH', 'Mail-In'),
)

# Workflow states a repair can be in.
REPAIR_STATUSES = (
    ('AWTP', 'Awaiting Parts'),
    ('AWTR', 'Parts Allocated'),
    ('BEGR', 'In Repair'),
    ('RFPU', 'Ready for Pickup'),
)

# Warranty/coverage classification codes.
COVERAGE_STATUSES = (
    ('CC', 'Custom Bid Contracts'),
    ('CS', 'Customer Satisfaction'),
    ('DO', 'DOA Coverage'),
    ('LI', 'Apple Limited Warranty'),
    ('MU', 'Missing Upon First Use'),
    ('OO', 'Out of Warranty (no coverage)'),
    ('PA', 'AppleCare Parts Agreement'),
    ('PP', 'AppleCare Protection Plan'),
    ('QP', 'Quality Program'),
    ('RA', 'AppleCare Repair Agreement'),
    ('RE', 'Repeat Service'),
    ('G9', 'Pending Coverage Check'),
    ('TC', 'Edu/Govt Warranty (Australia)'),
    ('PT', 'Additional Part Coverage'),
    ('EC', 'Additional Service Coverage'),
    ('C1', 'NEW - AppleCare Protection Plan'),
    ('VW', 'Consumer Law Coverage'),
)
class SymptomIssue(GsxObject):
    """
    Wrapper for the Reported Symptom/Issue API. Given valid input data,
    GSX returns the list of applicable symptom and issue codes; otherwise
    it returns an error message.
    """
    _namespace = "asp:"

    def fetch(self):
        """Return ``(code, description)`` tuples for all symptoms and issues."""
        self._submit("requestData", "ReportedSymptomIssue",
                     "ReportedSymptomIssueResponse")
        response = self._req.objects.reportedSymptomIssueResponse
        # GSX occasionally returns an empty payload here.
        if response is None:
            raise GsxError('Symptom/Issue code search failed')
        codes = []
        if response.symptoms is not None:
            codes.extend((s.reportedSymptomCode, s.reportedSymptomDesc)
                         for s in response.symptoms)
        if response.issues is not None:
            codes.extend((i.reportedIssueCode, i.reportedIssueDesc)
                         for i in response.issues)
        return codes
class CompTiaCode(GsxObject):
    """
    Data type used to provide comptia codes (symptom classification)
    on a repair.
    """
    comptiaCode = ""      # CompTIA code identifying the symptom
    comptiaModifier = ""  # modifier qualifying the code
    comptiaGroup = ""     # component group the code belongs to
    technicianNote = ""   # free-form note from the technician
class Customer(GsxObject):
    """
    Customer address for GSX

    >>> Customer(adressLine1='blaa')._data
    {'adressLine1': 'blaa'}
    """
    # NOTE(review): 'adressLine1' is misspelled but presumably mirrors the
    # GSX API field name - confirm before renaming.
    city = ""
    region = ""
    country = ""
    state = "ZZ"  # "ZZ" looks like a "no state" placeholder - confirm against GSX docs
    zipCode = ""
    lastName = ""
    firstName = ""
    adressLine1 = ""
    emailAddress = ""
    primaryPhone = ""
class RepairOrderLine(GsxObject):
    """A single part line on a repair order."""
    partNumber = ""
    comptiaCode = ""
    comptiaModifier = ""
class ComponentCheck(GsxObject):
    """Input data for a component/serial-number check."""
    component = ""
    serialNumber = ""
class ServicePart(GsxObject):
    "A generic service part (for PartInfo and whatnot)"
    def __init__(self, number, *args, **kwargs):
        """Initialize the part, validating *number* as a GSX part number.

        Raises ValueError when the number fails core.validate's
        "partNumber" pattern.
        """
        super(ServicePart, self).__init__(*args, **kwargs)
        if not validate(number, "partNumber"):
            raise ValueError("Invalid part number: %s" % number)
        self.partNumber = number
class Repair(GsxObject):
    "Base class for the different GSX Repair types"
    def __init__(self, number=None, **kwargs):
        """Initialize the repair; *number* is the GSX dispatch ID, if known."""
        self._namespace = "asp:"
        super(Repair, self).__init__(**kwargs)
        if number is not None:
            self.dispatchId = number

    def update_sn(self, parts):
        """
        Description
        The Update Serial Number API allows the service providers to update
        the module serial numbers.
        Context:
        The API is not applicable for whole unit replacement
        serial number entry (see KGB serial update).

        >>> Repair('G135762375').update_sn(ServicePart('661-4964', oldSerialNumber='W882300FK22YA'))
        Traceback (most recent call last):
        ...
        GsxError: This repair cannot be updated.
        """
        self.partInfo = parts
        # This request wants the number under "repairConfirmationNumber",
        # so move it out of "dispatchId" before submitting.
        if hasattr(self, "dispatchId"):
            self.repairConfirmationNumber = self.dispatchId
            del self._data['dispatchId']
        return self._submit("repairData", "UpdateSerialNumber", "repairConfirmation")

    def update_kgb_sn(self, sn):
        """
        Description:
        The KGB Serial Number Update API is always to be used on
        whole unit repairs that are in a released state.
        This API allows users to provide the KGB serial number for the
        whole unit exchange repairs. It also checks for the privilege
        to create/ update whole unit exchange repairs
        before updating the whole unit exchange repair.
        Context:
        The API is to be used on whole unit repairs that are in a released state.
        This API can be invoked only after carry-in repair creation API.
        """
        self.serialNumber = sn
        # Same field rename as in update_sn().
        if hasattr(self, "dispatchId"):
            self.repairConfirmationNumber = self.dispatchId
            del self._data['dispatchId']
        return self._submit("UpdateKGBSerialNumberRequest",
                            "UpdateKGBSerialNumber",
                            "UpdateKGBSerialNumberResponse")

    def lookup(self):
        """
        Description:
        The Repair Lookup API mimics the front-end repair search functionality.
        It fetches up to 2500 repairs in a given criteria.
        Subsequently, the extended Repair Status API can be used
        to retrieve more details of the repair.

        >>> Repair(repairStatus='Open').lookup() #doctest: +ELLIPSIS
        {'customerName': 'Lepalaan,Filipp',...
        """
        # Lookup lives in the "core" namespace, unlike the other calls.
        self._namespace = "core:"
        return Lookup(**self._data).repairs()

    def delete(self):
        """
        The Delete Repair API allows the service providers to delete
        the existing GSX Initiated Carry-In, Return Before Replace & Onsite repairs
        which are in Declined-Rejected By TSPS Approver state,
        that do not have an active repair id.
        """
        pass  # TODO: not implemented

    def mark_complete(self, numbers=None):
        """
        The Mark Repair Complete API allows a single or an array of
        repair confirmation numbers to be submitted to GSX to be marked as complete.
        """
        # Default to this repair's own dispatch ID when no numbers are given.
        self.repairConfirmationNumbers = numbers or self.dispatchId
        return self._submit("MarkRepairCompleteRequest",
                            "MarkRepairComplete",
                            "MarkRepairCompleteResponse")

    def status(self, numbers=None):
        """
        The Repair Status API retrieves the status
        for the submitted repair confirmation number(s).

        >>> Repair('G135773004').status().repairStatus
        u'Closed and Completed'
        """
        self.repairConfirmationNumbers = self.dispatchId
        status = self._submit("RepairStatusRequest", "RepairStatus", "repairStatus")
        # Cache the latest status on the instance for later use.
        self.repairStatus = status.repairStatus
        self._status = status
        return status

    def details(self):
        """
        The Repair Details API includes the shipment information
        similar to the Repair Lookup API.

        >>> Repair('G135773004').details() #doctest: +ELLIPSIS
        {'isACPlusConsumed': 'N', 'configuration': 'IPAD 3RD GEN,WIFI+CELLULAR,16GB,BLACK',...
        """
        self._namespace = "core:"
        details = self._submit("RepairDetailsRequest", "RepairDetails", "lookupResponseData")
        # fix tracking URL, if available: GSX returns a template URL with a
        # <<TRKNO>> placeholder; parts without tracking info are skipped.
        for i, p in enumerate(details.partsInfo):
            try:
                url = p.carrierURL.replace('<<TRKNO>>', str(p.deliveryTrackingNumber))
                details.partsInfo[i].carrierURL = url
            except AttributeError:
                pass  # part has no carrierURL/tracking number
        self._details = details
        return details
class CannotDuplicateRepair(Repair):
    """
    The Create CND Repair API allows Service Providers to create a repair
    whenever the reported issue cannot be duplicated, and the repair
    requires no parts replacement.

    Valid CND reason codes:
    N01 Unable to Replicate
    N02 Software Update/Issue
    N03 Cable/Component Reseat
    N05 SMC Reset
    N06 PRAM Reset
    N07 Third Party Part
    N99 Other
    """
class CarryInRepair(Repair):
    """
    GSX validates the information and if all of the validations go through,
    it obtains a quote for the repair and creates the carry-in repair

    >>> CarryInRepair(requestReviewByApple=True).requestReviewByApple
    'Y'
    """
    def create(self):
        """
        GSX validates the information and if all of the validations go through,
        it obtains a quote for the repair and creates the carry-in repair.
        """
        self._namespace = "emea:"
        result = self._submit("repairData", "CreateCarryIn", "repairConfirmation")
        # A nested repairConfirmation carrying messages indicates failure.
        if hasattr(result, 'repairConfirmation'):
            if hasattr(result.repairConfirmation, 'messages'):
                raise GsxError(result.repairConfirmation.messages)
        # Remember the new confirmation number for subsequent calls.
        self.dispatchId = result.confirmationNumber
        return result

    def update(self, newdata):
        """
        The Update Carry-In Repair API allows the service providers
        to update the existing open carry-in repairs.
        This API assists in addition/deletion of parts and addition of notes
        to a repair. On successful update, the repair confirmation number and
        quote for any newly added parts would be returned.
        In case of any validation error or unsuccessful update, a fault code is issued.
        """
        self._namespace = "asp:"
        # The update request expects "repairConfirmationNumber" rather than
        # "dispatchId"; rename once.
        if not hasattr(self, "repairConfirmationNumber"):
            self.repairConfirmationNumber = self.dispatchId
            del self._data['dispatchId']
        # Merge old and new data (old data should have Dispatch ID)
        self._data.update(newdata)
        return self._submit("repairData", "UpdateCarryIn", "repairConfirmation")

    def set_techid(self, new_techid):
        """Assign the repair to a different technician ID."""
        return self.update({'technicianId': new_techid})

    def set_status(self, new_status):
        """Move the repair to *new_status* (see REPAIR_STATUSES codes)."""
        return self.update({'statusCode': new_status})
class IndirectOnsiteRepair(Repair):
    """
    The Create Indirect Onsite Repair API is designed to create the indirect onsite repairs.
    When a service provider travels to the customer location to perform repair
    on a unit eligible for onsite service, they create an indirect repair.
    Once the repair is submitted, it is assigned a confirmation number,
    which is a reference number to identify the repair.
    """
    def create(self):
        """Submit this repair to GSX and return the confirmation data."""
        self._namespace = "asp:"
        # Carry-In and OnSite use different field names! Translate any
        # Carry-In style attribute into its OnSite equivalent.
        field_renames = (
            ("shipTo", "shippingLocation"),
            ("poNumber", "purchaseOrderNumber"),
            ("diagnosedByTechId", "technicianName"),
            ("requestReviewByApple", "requestReview"),
        )
        for old_name, new_name in field_renames:
            if hasattr(self, old_name):
                setattr(self, new_name, getattr(self, old_name))
                del self._data[old_name]
        return self._submit("repairData", "CreateIndirectOnsiteRepair",
                            "repairConfirmation")
class RepairOrReplace(Repair):
    """
    The Create Repair or Replace API allows users to submit Repair-Or-Replace information to GSX.
    The submissions creates a GSX RoR Repair in the system.
    """
    # (code, label) choices for the damage/coverage classification of a RoR repair.
    COVERAGE_OPTIONS = (
        ('N', 'No Damage'),
        ('A1', 'Battery Service'),
        ('A2', 'Returnable Damage'),
        ('A3', 'Non-returnable Damage'),
        ('X', 'Non-returnable damage covered by AppleCare+'),
        ('RPL', 'Replace'),
        ('Z', 'Override to Out of Warranty when part is covered by Variable Warranty'),
    )
    def create(self):
        """Submit this repair-or-replace to GSX and return the confirmation data."""
        self._namespace = "asp:"
        return self._submit("repairData", "CreateRepairOrReplace", "repairConfirmation")
class WholeUnitExchange(Repair):
    """
    The Create Whole Unit Exchange API allows the service providers to send
    all the information required to create a whole unit exchange repair.
    GSX validates the information and if all the validations go through,
    it obtains a quote for repair and creates the whole unit exchange repair.
    The quote is sent as part of the response.
    If a validation error occurs, a fault code is issued.
    """
    def create(self):
        """Submit this whole-unit exchange to GSX and return the confirmation data."""
        self._namespace = "asp:"
        return self._submit("repairData", "CreateWholeUnitExchange", "repairConfirmation")
class MailInRepair(Repair):
    """
    This API allows the submission of Mail-In Repair information into GSX,
    resulting in the creation of a GSX Mail-In Repair.
    """
    def create(self):
        """Submit this mail-in repair to GSX and return the confirmation data."""
        self._namespace = "asp:"
        return self._submit("repairData", "CreateMailInRepair", "repairConfirmation")
if __name__ == '__main__':
    # Manual test entry point: connect to GSX with credentials from the
    # command line, then run this module's doctests against the live service.
    import doctest
    from core import connect
    logging.basicConfig(level=logging.DEBUG)
    connect(*sys.argv[1:])
    doctest.testmod()
| 33.386364 | 100 | 0.635504 |
912b489455f4f7c3389b7e1ad86969de022da6ea | 5,654 | py | Python | dentalapp-backend/dentalapp/restapi/serializers.py | PavelescuVictor/DentalApplicationReact | e35e8f04c39fc52bb49d99ffd573c5ab8cc05316 | [
"MIT"
] | null | null | null | dentalapp-backend/dentalapp/restapi/serializers.py | PavelescuVictor/DentalApplicationReact | e35e8f04c39fc52bb49d99ffd573c5ab8cc05316 | [
"MIT"
] | null | null | null | dentalapp-backend/dentalapp/restapi/serializers.py | PavelescuVictor/DentalApplicationReact | e35e8f04c39fc52bb49d99ffd573c5ab8cc05316 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator, UniqueValidator
from .models import Doctor, DoctorDetails, Patient, PatientDetails, Order, OrderType, OrderStatus, OrderTypeEntry, OrderColor
# ModelSerializer is from django rest framework
def validateNumber(number):
    """Validate a phone number by digit count (10 = mobile, 6 = landline).

    Raises serializers.ValidationError for any other length.

    Bug fixed: the original condition was ``len != 10 or len != 6``, which
    is true for every possible input (no length equals both), so every
    number - valid or not - was rejected.
    """
    if len(str(number)) not in (10, 6):
        raise serializers.ValidationError(
            "Cannot add Doctor. Reason: Phone number is not valid! Number format: xxxx xxx xxx).")
class DoctorSerializer(serializers.ModelSerializer):
    """Serializer for Doctor records; (firstName, lastName) must be unique."""
    class Meta:
        model = Doctor
        fields = [
            'id', 'firstName', 'lastName', 'createdBy', 'updatedBy', 'createdAt', 'updatedAt'
        ]
        # Reject duplicate doctors with a domain-specific error message.
        validators = [
            UniqueTogetherValidator(
                queryset=Doctor.objects.all(), fields=['firstName', 'lastName'], message="Cannot add doctor. Reason: Doctor already exists!"
            )
        ]
class DoctorDetailsSerializer(serializers.ModelSerializer):
    """Serializer for per-doctor extra info (cabinet, phone)."""
    class Meta:
        model = DoctorDetails
        fields = [
            'id', 'doctorId', 'cabinet', 'phone', 'createdBy', 'updatedBy', 'createdAt', 'updatedAt'
        ]
class PatientSerializer(serializers.ModelSerializer):
    """Serializer for Patient records; (firstName, lastName) must be unique."""
    class Meta:
        model = Patient
        fields = [
            'id', 'firstName', 'lastName', 'createdBy', 'updatedBy', 'createdAt', 'updatedAt'
        ]
        # Reject duplicate patients with a domain-specific error message.
        validators = [
            UniqueTogetherValidator(
                queryset=Patient.objects.all(), fields=['firstName', 'lastName'], message="Cannot add patient. Reason: Patient already exists!"
            )
        ]
class PatientDetailsSerializer(serializers.ModelSerializer):
    """Serializer for per-patient extra info (phone, free-form details).

    Bug fixed: ``Meta.model`` pointed at ``Patient`` although the listed
    fields ('patientId', 'phone', 'details') live on ``PatientDetails``
    (mirrors DoctorDetailsSerializer / DoctorDetails).
    """
    class Meta:
        model = PatientDetails
        fields = [
            'id', 'patientId', 'phone', 'details', 'createdBy', 'updatedBy', 'createdAt', 'updatedAt'
        ]
class OrderSerializer(serializers.ModelSerializer):
    """Serializer for Order records with read-only display names for relations."""
    # Human-readable names derived from the related objects' __str__;
    # read-only, so writes still go through the FK id fields below.
    doctorName = serializers.CharField(
        source='doctor.__str__', read_only=True)
    patientName = serializers.CharField(
        source='patient.__str__', read_only=True)
    createdByName = serializers.CharField(
        source='createdBy.__str__', read_only=True
    )
    updatedByName = serializers.CharField(
        source='updatedBy.__str__', read_only=True)
    class Meta:
        model = Order
        fields = [
            'id', 'doctor', 'doctorName', 'patient', 'patientName', 'redo', 'paid', 'createdBy', 'createdByName', 'updatedBy', 'updatedByName', 'createdAt', 'updatedAt'
        ]
class OrderTypeSerializer(serializers.ModelSerializer):
    """Serializer for OrderType; the type name must be globally unique."""
    # Validating data
    type = serializers.CharField(validators=[UniqueValidator(queryset=OrderType.objects.all(
    ), message="Cannot add order type. Reason: Order type already exists.")])
    class Meta:
        model = OrderType
        fields = [
            'id', 'type', 'createdBy', 'updatedBy', 'createdAt', 'updatedAt'
        ]
class OrderStatusSerializer(serializers.ModelSerializer):
    """Serializer for OrderStatus; the status name must be globally unique."""
    # Validating data
    status = serializers.CharField(validators=[UniqueValidator(queryset=OrderStatus.objects.all(
    ), message="Cannot add order status. Reason: Order status already exists.")])
    class Meta:
        model = OrderStatus
        fields = [
            'id', 'status', 'createdBy', 'updatedBy', 'createdAt', 'updatedAt'
        ]
class OrderColorSerializer(serializers.ModelSerializer):
    """Serializer for OrderColor; the color name must be globally unique.

    Bug fixed: ``Meta.model`` was ``OrderStatus`` (copy-paste from
    OrderStatusSerializer) even though the field list and the uniqueness
    validator both target ``OrderColor``.
    """
    # Validating data
    color = serializers.CharField(validators=[UniqueValidator(queryset=OrderColor.objects.all(
    ), message="Cannot add order color. Reason: Order color already exists.")])
    class Meta:
        model = OrderColor
        fields = [
            'id', 'color', 'createdBy', 'updatedBy', 'createdAt', 'updatedAt'
        ]
class OrderTypeEntrySerializer(serializers.ModelSerializer):
    # Serializer for the OrderTypeEntry
    """Serializer for a single type line of an order.

    Exposes read-only display fields (names, price-per-unit) next to the
    writable foreign-key ids, and enforces one entry per (order, type) pair.
    """
    # Read-only display values derived from the related objects.
    typeName = serializers.CharField(
        source='type.__str__', read_only=True
    )
    typePPU = serializers.CharField(
        source='type.ppu', read_only=True
    )
    colorName = serializers.CharField(
        source='color.__str__', read_only=True
    )
    statusName = serializers.CharField(
        source='status.__str__', read_only=True
    )
    createdByName = serializers.CharField(
        source='createdBy.__str__', read_only=True
    )
    updatedByName = serializers.CharField(
        source='updatedBy.__str__', read_only=True)
    class Meta:
        model = OrderTypeEntry
        fields = [
            'id', 'order', 'color', 'colorName', 'type', 'typeName', 'typePPU', 'status', 'statusName', 'unitCount', 'warranty', 'createdBy', 'createdByName', 'updatedBy', 'updatedByName', 'createdAt', 'updatedAt'
        ]
        # All relational/audit fields are mandatory on write.
        extra_kwargs = {
            'order': {
                'read_only': False,
                'required': True,
            },
            'color': {
                'required': True,
            },
            'type': {
                'required': True,
            },
            'status': {
                'required': True,
            },
            'createdBy': {
                'required': True,
            },
            'updatedBy': {
                'required': True,
            }
        }
        # At most one entry of a given type per order.
        validators = [
            UniqueTogetherValidator(
                queryset=OrderTypeEntry.objects.all(), fields=['order', 'type'], message="Cannot add Type. Reason: Order type already exists."
            )
        ]
| 31.764045 | 214 | 0.591086 |
29c8bfc8356b0582d204b0527dc79a540a9eaf70 | 8,103 | py | Python | run.py | obss/turkish-question-generation | 5440a5e4bc9b89c322ffb502fbce1b60b396f235 | [
"MIT"
] | 18 | 2021-11-18T16:24:37.000Z | 2022-03-08T11:31:32.000Z | run.py | obss/turkish-question-generation | 5440a5e4bc9b89c322ffb502fbce1b60b396f235 | [
"MIT"
] | null | null | null | run.py | obss/turkish-question-generation | 5440a5e4bc9b89c322ffb502fbce1b60b396f235 | [
"MIT"
] | null | null | null | import json
import logging
import os
from typing import Tuple
import torch
import transformers
from transformers import Trainer as HFTrainer
from transformers import set_seed
from transformers.hf_argparser import DataClass
from transformers.optimization import Adafactor, AdamW
from transformers.trainer import Trainer
from core.argument_parsers import parser
from core.collator import T2TDataCollator
from core.evaluate import evaluate_on_train_end
from hf.model import BertModel, MT5Model
from prepare_data import main as prepare_data
from utils.file import save_experiment_config
from utils.neptune import init_neptune, log_to_neptune
from utils.wandb import init_wandb, log_to_wandb
def setup_logger(args: DataClass) -> logging.Logger:
    """Configure root logging for this run and return a module logger.

    The main process (local_rank -1 or 0) logs at $LOGLEVEL (default INFO);
    other distributed workers only log warnings and above.
    """
    is_main_process = args.local_rank in [-1, 0]
    if is_main_process:
        level = os.environ.get("LOGLEVEL", "INFO").upper()
    else:
        level = logging.WARN
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=level,
    )
    logger = logging.getLogger(__name__)
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        args.local_rank,
        args.device,
        args.n_gpu,
        bool(args.local_rank != -1),
        args.fp16,
    )
    logger.info("Training/evaluation parameters %s", args)
    return logger
def check_output(args: DataClass, logger: logging.Logger = None) -> None:
    """Fail fast when training would clobber a non-empty output directory.

    Raises ValueError when args.output_dir exists, is non-empty, training is
    requested and --overwrite_output_dir was not given; otherwise a no-op.
    """
    out_dir = args.output_dir
    would_clobber = (
        args.do_train
        and not args.overwrite_output_dir
        and os.path.exists(out_dir)
        and bool(os.listdir(out_dir))
    )
    if would_clobber:
        raise ValueError(
            f"Output directory ({out_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome."
        )
def load_datasets(args: DataClass, train: bool, eval: bool, logger: logging.Logger) -> Tuple:
    """Load the cached train/validation datasets named in *args*.

    Returns a (train_dataset, valid_dataset) tuple; either element is None
    when its corresponding flag is False.
    """
    logger.info("loading dataset")
    train_dataset = None
    valid_dataset = None
    if train:
        train_dataset = torch.load(args.train_file_path)
    if eval:
        valid_dataset = torch.load(args.valid_file_path)
    logger.info("finished loading dataset")
    return train_dataset, valid_dataset
def main(args_file_path: str = None):
    """Run the full pipeline: parse args, prepare data, train and/or evaluate.

    *args_file_path* optionally points at a JSON args file; otherwise the
    command line is parsed. Experiment tracking (wandb/neptune) is enabled
    per the parsed arguments.
    """
    model_args, data_args, training_args = parser(args_file_path)
    # check for output_dir with given arguments.
    check_output(training_args)
    logger = setup_logger(training_args)
    # set seed for reproducibility
    set_seed(training_args.seed)
    # initialize experiment tracking; at eval-only time the existing wandb
    # run is resumed via its id instead of starting a new one
    report_to = []
    if training_args.do_train:
        wandb_status, wandb = init_wandb(project=model_args.wandb_project, name=training_args.run_name)
    else:
        wandb_status, wandb = init_wandb(
            project=model_args.wandb_project, name=training_args.run_name, id=model_args.wandb_id
        )
    neptune_status, neptune = init_neptune(
        project=model_args.neptune_project, api_token=model_args.neptune_api_token, name=training_args.run_name
    )
    if wandb_status:
        report_to.append("wandb")
    if neptune_status:
        report_to.append("neptune")
    training_args.report_to = report_to
    # disable wandb console logs
    logging.getLogger("wandb.run_manager").setLevel(logging.WARNING)
    # optionally (re)build the cached datasets before loading them
    if data_args.prepare_data:
        prepare_data(args_file_path)
    # load model (mt5 or bert wrapper around the HF model + tokenizer)
    if model_args.model_type == "mt5":
        model = MT5Model(
            model_name_or_path=model_args.model_name_or_path,
            tokenizer_name_or_path=model_args.tokenizer_path,
            freeze_embeddings=training_args.freeze_embeddings,
            cache_dir=model_args.cache_dir,
            use_cuda=True,
        )
    elif model_args.model_type == "bert":
        model = BertModel(
            model_name_or_path=model_args.model_name_or_path,
            tokenizer_name_or_path=model_args.tokenizer_path,
            freeze_embeddings=training_args.freeze_embeddings,
            cache_dir=model_args.cache_dir,
            use_cuda=True,
        )
    train_dataset, valid_dataset = load_datasets(
        data_args, train=training_args.do_train, eval=training_args.do_eval, logger=logger
    )
    # set optimizer
    if training_args.adafactor:
        # as adviced in https://huggingface.co/transformers/main_classes/optimizer_schedules.html#adafactor-pytorch
        optimizer = Adafactor(
            model.model.parameters(),
            scale_parameter=False,
            relative_step=False,
            warmup_init=False,
            weight_decay=training_args.weight_decay,
            lr=training_args.learning_rate,
        )
    else:
        optimizer = AdamW(
            model.model.parameters(), weight_decay=training_args.weight_decay, lr=training_args.learning_rate
        )
    if model_args.model_type == "mt5":
        # initialize data_collator (mt5 only; bert uses Trainer's default)
        data_collator = T2TDataCollator(
            tokenizer=model.tokenizer, mode="training", using_tpu=training_args.tpu_num_cores is not None
        )
    # fix https://discuss.huggingface.co/t/mt5-fine-tuning-keyerror-source-ids/5257/2
    training_args.remove_unused_columns = False if model_args.model_type == "mt5" else True
    # export experiment config
    save_experiment_config(model_args, data_args, training_args)
    # start training
    if training_args.do_train:
        # init trainer (mt5 needs the custom collator; bert does not)
        if model_args.model_type == "mt5":
            trainer: Trainer = HFTrainer(
                model=model.model,
                args=training_args,
                train_dataset=train_dataset,
                eval_dataset=valid_dataset,
                data_collator=data_collator,
                optimizers=(optimizer, None),
            )
        elif model_args.model_type == "bert":
            trainer: Trainer = HFTrainer(
                model=model.model,
                args=training_args,
                train_dataset=train_dataset,
                eval_dataset=valid_dataset,
                optimizers=(optimizer, None),
            )
        # perform training, resuming when model_name_or_path is a checkpoint dir
        trainer.train(
            resume_from_checkpoint=model_args.model_name_or_path
            if os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        model.tokenizer.save_pretrained(training_args.output_dir)
    # start evaluation (main process only)
    if training_args.do_eval and training_args.local_rank in [-1, 0]:
        # arrange neptune/wandb loggers: after training, fish the live
        # handles out of the trainer's callbacks...
        if training_args.do_train:
            for callback in trainer.callback_handler.callbacks:
                if isinstance(callback, transformers.integrations.WandbCallback):
                    wandb = callback._wandb
            for callback in trainer.callback_handler.callbacks:
                if isinstance(callback, transformers.integrations.NeptuneCallback):
                    neptune_run = callback._neptune_run
        # ...otherwise (eval-only) re-attach to the existing runs by id
        if not training_args.do_train:
            if "neptune" in report_to:
                neptune_run = neptune.init(
                    project=os.getenv("NEPTUNE_PROJECT"),
                    api_token=os.getenv("NEPTUNE_API_TOKEN"),
                    mode=os.getenv("NEPTUNE_CONNECTION_MODE", "async"),
                    name=os.getenv("NEPTUNE_RUN_NAME", None),
                    run=model_args.neptune_run,
                )
            elif "wandb" in report_to:
                wandb.init(project=model_args.wandb_project, name=model_args.run_name, id=model_args.wandb_id)
        # calculate evaluation results
        overall_results = evaluate_on_train_end(model_args, training_args)
        # log to neptune/wandb
        if "neptune" in report_to:
            log_to_neptune(neptune_run, overall_results)
        if "wandb" in report_to:
            log_to_wandb(wandb, overall_results)
def _mp_fn(index):
    """Entry point for torch_xla's xla_spawn; ``index`` is the process index (unused)."""
    # For xla_spawn (TPUs)
    main()
def run_multi(args_dict):
    """Serialize ``args_dict`` to ``args.json`` and invoke :func:`main` with that file."""
    serialized = json.dumps(args_dict)
    with open("args.json", "w") as handle:
        handle.write(serialized)
    main(args_file="args.json")
if __name__ == "__main__":
    # Standard CLI entry point.
    main()
| 35.077922 | 124 | 0.664692 |
5a579657e4c6b84dc45ca13d243fbe40b50fad9b | 219 | py | Python | varnish/datadog_checks/varnish/__init__.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 4 | 2021-06-21T19:21:49.000Z | 2021-06-23T21:21:55.000Z | varnish/datadog_checks/varnish/__init__.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2018-08-15T05:50:17.000Z | 2018-08-15T05:50:17.000Z | varnish/datadog_checks/varnish/__init__.py | seants/integrations-core | 1e5548915fc24f1bbd095e845f0940c22992b09c | [
"BSD-3-Clause"
] | 1 | 2021-06-21T19:21:51.000Z | 2021-06-21T19:21:51.000Z | # (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .__about__ import __version__
from .varnish import Varnish
# Names re-exported as the public API of this package.
__all__ = [
    "__version__",
    "Varnish"
]
| 19.909091 | 59 | 0.716895 |
c798916c74cc81165a4b42925e89940f14b3cf1f | 750 | py | Python | setup.py | iWeeti/Lavalink.py | c47149c0f6dcdd11cd57ee10b6f92ef932c027ea | [
"MIT"
] | null | null | null | setup.py | iWeeti/Lavalink.py | c47149c0f6dcdd11cd57ee10b6f92ef932c027ea | [
"MIT"
] | null | null | null | setup.py | iWeeti/Lavalink.py | c47149c0f6dcdd11cd57ee10b6f92ef932c027ea | [
"MIT"
] | null | null | null | import re
from setuptools import setup
# Read the version string out of lavalink/__init__.py so it is defined in
# exactly one place.
version = ''
with open('lavalink/__init__.py') as f:
    version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]', f.read(), re.MULTILINE).group(1)
# Fail fast if the regex did not match a version assignment.
if not version:
    raise RuntimeError('version is not set')
setup(
    name='lavalink',
    packages=['lavalink'],
    version=version,
    description='A lavalink interface built for discord.py', # TODO: Change this if I successfully generic-ify the client
    author='Devoxin',
    author_email='luke@serux.pro',
    url='https://github.com/Devoxin/Lavalink.py',
    download_url='https://github.com/Devoxin/Lavalink.py/archive/{}.tar.gz'.format(version),
    keywords=['lavalink'],
    include_package_data=True,
    install_requires=['aiohttp']
)
| 28.846154 | 122 | 0.676 |
87f9eaa209277c6258dd061de7281be8852ea386 | 6,924 | py | Python | pandas/tests/indexes/interval/test_interval_tree.py | LauraCollard/pandas | b1c3a9031569334cafc4e8d45d35408421f7dea4 | [
"BSD-3-Clause"
] | 4 | 2020-01-22T20:07:06.000Z | 2021-01-23T23:34:28.000Z | pandas/tests/indexes/interval/test_interval_tree.py | LauraCollard/pandas | b1c3a9031569334cafc4e8d45d35408421f7dea4 | [
"BSD-3-Clause"
] | 16 | 2021-03-19T09:44:52.000Z | 2022-03-12T00:22:14.000Z | pandas/tests/indexes/interval/test_interval_tree.py | LauraCollard/pandas | b1c3a9031569334cafc4e8d45d35408421f7dea4 | [
"BSD-3-Clause"
] | 9 | 2020-02-05T10:24:12.000Z | 2020-02-10T13:08:50.000Z | from itertools import permutations
import numpy as np
import pytest
from pandas._libs.interval import IntervalTree
from pandas import compat
import pandas.util.testing as tm
def skipif_32bit(param):
    """
    Wrap *param* with a skip-mark for 32bit systems. Specifically used
    here to skip leaf_size parameters related to GH 23440.
    """
    skip_mark = pytest.mark.skipif(
        compat.is_platform_32bit(),
        reason="GH 23440: int type mismatch on 32bit",
    )
    return pytest.param(param, marks=skip_mark)
@pytest.fixture(
    scope="class", params=["int32", "int64", "float32", "float64", "uint64"]
)
def dtype(request):
    """Fixture yielding the dtype names the IntervalTree tests run against."""
    return request.param
@pytest.fixture(params=[skipif_32bit(1), skipif_32bit(2), 10])
def leaf_size(request):
    """
    Fixture to specify IntervalTree leaf_size parameter; to be used with the
    tree fixture.
    """
    # The small leaf sizes are skipped on 32bit platforms (GH 23440).
    return request.param
@pytest.fixture(
    params=[
        np.arange(5, dtype="int64"),
        np.arange(5, dtype="int32"),
        np.arange(5, dtype="uint64"),
        np.arange(5, dtype="float64"),
        np.arange(5, dtype="float32"),
        np.array([0, 1, 2, 3, 4, np.nan], dtype="float64"),
        np.array([0, 1, 2, 3, 4, np.nan], dtype="float32"),
    ]
)
def tree(request, leaf_size):
    """IntervalTree fixture over several dtypes; each interval spans left to left + 2."""
    left = request.param
    return IntervalTree(left, left + 2, leaf_size=leaf_size)
class TestIntervalTree:
    """Tests for pandas._libs.interval.IntervalTree lookup and overlap logic."""
    def test_get_loc(self, tree):
        # Intervals are [x, x + 2]; value 2 falls in both the first and second.
        result = tree.get_loc(1)
        expected = np.array([0], dtype="intp")
        tm.assert_numpy_array_equal(result, expected)
        result = np.sort(tree.get_loc(2))
        expected = np.array([0, 1], dtype="intp")
        tm.assert_numpy_array_equal(result, expected)
        with pytest.raises(KeyError, match="-1"):
            tree.get_loc(-1)
    def test_get_indexer(self, tree):
        result = tree.get_indexer(np.array([1.0, 5.5, 6.5]))
        expected = np.array([0, 4, -1], dtype="intp")
        tm.assert_numpy_array_equal(result, expected)
        # 3.0 is contained in more than one interval -> not a unique match.
        with pytest.raises(
            KeyError, match="'indexer does not intersect a unique set of intervals'"
        ):
            tree.get_indexer(np.array([3.0]))
    def test_get_indexer_non_unique(self, tree):
        indexer, missing = tree.get_indexer_non_unique(np.array([1.0, 2.0, 6.5]))
        result = indexer[:1]
        expected = np.array([0], dtype="intp")
        tm.assert_numpy_array_equal(result, expected)
        result = np.sort(indexer[1:3])
        expected = np.array([0, 1], dtype="intp")
        tm.assert_numpy_array_equal(result, expected)
        result = np.sort(indexer[3:])
        expected = np.array([-1], dtype="intp")
        tm.assert_numpy_array_equal(result, expected)
        result = missing
        expected = np.array([2], dtype="intp")
        tm.assert_numpy_array_equal(result, expected)
    def test_duplicates(self, dtype):
        # Three identical intervals: get_loc returns all, get_indexer raises.
        left = np.array([0, 0, 0], dtype=dtype)
        tree = IntervalTree(left, left + 1)
        result = np.sort(tree.get_loc(0.5))
        expected = np.array([0, 1, 2], dtype="intp")
        tm.assert_numpy_array_equal(result, expected)
        with pytest.raises(
            KeyError, match="'indexer does not intersect a unique set of intervals'"
        ):
            tree.get_indexer(np.array([0.5]))
        indexer, missing = tree.get_indexer_non_unique(np.array([0.5]))
        result = np.sort(indexer)
        expected = np.array([0, 1, 2], dtype="intp")
        tm.assert_numpy_array_equal(result, expected)
        result = missing
        expected = np.array([], dtype="intp")
        tm.assert_numpy_array_equal(result, expected)
    def test_get_loc_closed(self, closed):
        # Open endpoints must raise KeyError; closed ones must match.
        tree = IntervalTree([0], [1], closed=closed)
        for p, errors in [(0, tree.open_left), (1, tree.open_right)]:
            if errors:
                with pytest.raises(KeyError, match=str(p)):
                    tree.get_loc(p)
            else:
                result = tree.get_loc(p)
                expected = np.array([0], dtype="intp")
                tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "leaf_size", [skipif_32bit(1), skipif_32bit(10), skipif_32bit(100), 10000]
    )
    def test_get_indexer_closed(self, closed, leaf_size):
        x = np.arange(1000, dtype="float64")
        found = x.astype("intp")
        not_found = (-1 * np.ones(1000)).astype("intp")
        tree = IntervalTree(x, x + 0.5, closed=closed, leaf_size=leaf_size)
        tm.assert_numpy_array_equal(found, tree.get_indexer(x + 0.25))
        expected = found if tree.closed_left else not_found
        tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.0))
        expected = found if tree.closed_right else not_found
        tm.assert_numpy_array_equal(expected, tree.get_indexer(x + 0.5))
    @pytest.mark.parametrize(
        "left, right, expected",
        [
            (np.array([0, 1, 4]), np.array([2, 3, 5]), True),
            (np.array([0, 1, 2]), np.array([5, 4, 3]), True),
            (np.array([0, 1, np.nan]), np.array([5, 4, np.nan]), True),
            (np.array([0, 2, 4]), np.array([1, 3, 5]), False),
            (np.array([0, 2, np.nan]), np.array([1, 3, np.nan]), False),
        ],
    )
    @pytest.mark.parametrize("order", map(list, permutations(range(3))))
    def test_is_overlapping(self, closed, order, left, right, expected):
        # GH 23309
        tree = IntervalTree(left[order], right[order], closed=closed)
        result = tree.is_overlapping
        assert result is expected
    @pytest.mark.parametrize("order", map(list, permutations(range(3))))
    def test_is_overlapping_endpoints(self, closed, order):
        """shared endpoints are marked as overlapping"""
        # GH 23309
        left, right = np.arange(3), np.arange(1, 4)
        tree = IntervalTree(left[order], right[order], closed=closed)
        result = tree.is_overlapping
        expected = closed == "both"
        assert result is expected
    @pytest.mark.parametrize(
        "left, right",
        [
            (np.array([], dtype="int64"), np.array([], dtype="int64")),
            (np.array([0], dtype="int64"), np.array([1], dtype="int64")),
            (np.array([np.nan]), np.array([np.nan])),
            (np.array([np.nan] * 3), np.array([np.nan] * 3)),
        ],
    )
    def test_is_overlapping_trivial(self, closed, left, right):
        # GH 23309
        tree = IntervalTree(left, right, closed=closed)
        assert tree.is_overlapping is False
    @pytest.mark.skipif(compat.is_platform_32bit(), reason="GH 23440")
    def test_construction_overflow(self):
        # GH 25485
        left, right = np.arange(101), [np.iinfo(np.int64).max] * 101
        tree = IntervalTree(left, right)
        # pivot should be average of left/right medians
        result = tree.root.pivot
        expected = (50 + np.iinfo(np.int64).max) / 2
        assert result == expected
| 34.969697 | 84 | 0.606153 |
c5e6755b58433628106637fb59005cce58d6962c | 16,481 | py | Python | gammapy/datasets/evaluator.py | AtreyeeS/gammapy | a3b47c3da08900a833f0360e0374203e054cadfc | [
"BSD-3-Clause"
] | null | null | null | gammapy/datasets/evaluator.py | AtreyeeS/gammapy | a3b47c3da08900a833f0360e0374203e054cadfc | [
"BSD-3-Clause"
] | 1 | 2017-07-04T15:03:24.000Z | 2017-09-11T08:44:14.000Z | gammapy/datasets/evaluator.py | AtreyeeS/gammapy | a3b47c3da08900a833f0360e0374203e054cadfc | [
"BSD-3-Clause"
] | 1 | 2022-03-05T15:56:38.000Z | 2022-03-05T15:56:38.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import logging
import numpy as np
import astropy.units as u
from astropy.utils import lazyproperty
from astropy.coordinates.angle_utilities import angular_separation
from regions import CircleSkyRegion
from gammapy.maps import Map
from gammapy.modeling.models import (
TemplateNPredModel,
PointSpatialModel,
)
# Containment fraction used when building PSF kernels.
PSF_CONTAINMENT = 0.999
# Extra margin added to the model evaluation radius for cutouts and
# position-drift checks.
CUTOUT_MARGIN = 0.1 * u.deg
log = logging.getLogger(__name__)
class MapEvaluator:
    """Sky model evaluation on maps.
    This evaluates a sky model on a 3D map and convolves with the IRFs,
    and returns a map of the predicted counts.
    Note that background counts are not added.
    For now, we only make it work for 3D WCS maps with an energy axis.
    No HPX, no other axes, those can be added later here or via new
    separate model evaluator classes.
    Parameters
    ----------
    model : `~gammapy.modeling.models.SkyModel`
        Sky model
    exposure : `~gammapy.maps.Map`
        Exposure map
    psf : `~gammapy.irf.PSFKernel`
        PSF kernel
    edisp : `~gammapy.irf.EDispKernel`
        Energy dispersion
    mask : `~gammapy.maps.Map`
        Mask to apply to the likelihood for fitting.
    gti : `~gammapy.data.GTI`
        GTI of the observation or union of GTI if it is a stacked observation
    evaluation_mode : {"local", "global"}
        Model evaluation mode.
        The "local" mode evaluates the model components on smaller grids to save computation time.
        This mode is recommended for local optimization algorithms.
        The "global" evaluation mode evaluates the model components on the full map.
        This mode is recommended for global optimization algorithms.
    use_cache : bool
        Use npred caching.
    """
    def __init__(
        self,
        model,
        exposure=None,
        psf=None,
        edisp=None,
        gti=None,
        mask=None,
        evaluation_mode="local",
        use_cache=True,
    ):
        self.model = model
        self.exposure = exposure
        self.psf = psf
        self.edisp = edisp
        self.mask = mask
        self.gti = gti
        self.use_cache = use_cache
        # NOTE(review): _init_position is never read within this class — confirm
        # external use before removing.
        self._init_position = None
        # Whether the model overlaps the dataset mask; refined in update().
        self.contributes = True
        # Scalar PSF containment used for point sources in circular regions.
        self.psf_containment = None
        if evaluation_mode not in {"local", "global"}:
            raise ValueError(f"Invalid evaluation_mode: {evaluation_mode!r}")
        self.evaluation_mode = evaluation_mode
        # TODO: this is preliminary solution until we have further unified the model handling
        if (
            isinstance(self.model, TemplateNPredModel)
            or self.model.spatial_model is None
            or self.model.evaluation_radius is None
        ):
            self.evaluation_mode = "global"
        # define cached computations
        self._cached_parameter_values = None
        self._cached_parameter_values_previous = None
        self._cached_parameter_values_spatial = None
        self._cached_position = (0, 0)
        self._computation_cache = None
        self._neval = 0 # for debugging
        self._renorm = 1
        self._spatial_oversampling_factor = 1
        if self.exposure is not None:
            if not self.geom.is_region or self.geom.region is not None:
                self.update_spatial_oversampling_factor(self.geom)
    def reset_cache_properties(self):
        """Reset cached properties."""
        # Deleting the lazyproperty values forces recomputation on next access.
        del self._compute_npred
        del self._compute_flux_spatial
    @property
    def geom(self):
        """True energy map geometry (`~gammapy.maps.Geom`)"""
        return self.exposure.geom
    @property
    def needs_update(self):
        """Check whether the model component has drifted away from its support."""
        # TODO: simplify and clean up
        if isinstance(self.model, TemplateNPredModel):
            return False
        elif self.exposure is None:
            return True
        elif self.geom.is_region:
            return False
        elif self.evaluation_mode == "global" or self.model.evaluation_radius is None:
            return False
        elif not self.parameters_spatial_changed(reset=False):
            return False
        else:
            return self.irf_position_changed
    @property
    def psf_width(self):
        """Width of the PSF"""
        if self.psf is not None:
            psf_width = np.max(self.psf.psf_kernel_map.geom.width)
        else:
            psf_width = 0 * u.deg
        return psf_width
    def use_psf_containment(self, geom):
        """Use psf containment for point sources and circular regions"""
        if not geom.is_region:
            return False
        is_point_model = isinstance(self.model.spatial_model, PointSpatialModel)
        is_circle_region = isinstance(geom.region, CircleSkyRegion)
        return is_point_model & is_circle_region
    @property
    def cutout_width(self):
        """Cutout width for the model component"""
        # PSF width plus the evaluation radius (+ margin) on both sides.
        return self.psf_width + 2 * (self.model.evaluation_radius + CUTOUT_MARGIN)
    def update(self, exposure, psf, edisp, geom, mask):
        """Update MapEvaluator, based on the current position of the model component.
        Parameters
        ----------
        exposure : `~gammapy.maps.Map`
            Exposure map.
        psf : `gammapy.irf.PSFMap`
            PSF map.
        edisp : `gammapy.irf.EDispMap`
            Edisp map.
        geom : `WcsGeom`
            Counts geom
        mask : `~gammapy.maps.Map`
            Mask to apply to the likelihood for fitting.
        """
        # TODO: simplify and clean up
        log.debug("Updating model evaluator")
        # lookup edisp
        if edisp:
            energy_axis = geom.axes["energy"]
            self.edisp = edisp.get_edisp_kernel(
                self.model.position, energy_axis=energy_axis
            )
        # lookup psf
        if psf and self.model.spatial_model:
            if self.apply_psf_after_edisp:
                geom = geom.as_energy_true
            else:
                geom = exposure.geom
            if self.use_psf_containment(geom=geom):
                energy_true = geom.axes["energy_true"].center.reshape((-1, 1, 1))
                self.psf_containment = psf.containment(
                    energy_true=energy_true, rad=geom.region.radius
                )
            else:
                if geom.is_region or geom.is_hpx:
                    geom = geom.to_wcs_geom()
                self.psf = psf.get_psf_kernel(
                    position=self.model.position, geom=geom, containment=PSF_CONTAINMENT
                )
        if self.evaluation_mode == "local":
            self.contributes = self.model.contributes(mask=mask, margin=self.psf_width)
            if self.contributes:
                self.exposure = exposure.cutout(
                    position=self.model.position, width=self.cutout_width, odd_npix=True
                )
        else:
            self.exposure = exposure
        if self.contributes:
            if not self.geom.is_region or self.geom.region is not None:
                self.update_spatial_oversampling_factor(self.geom)
        # Any cached npred/flux is stale after an IRF/geometry update.
        self.reset_cache_properties()
        self._computation_cache = None
        self._cached_parameter_previous = None
    def update_spatial_oversampling_factor(self, geom):
        """Update spatial oversampling_factor for model evaluation"""
        res_scale = self.model.evaluation_bin_size_min
        res_scale = res_scale.to_value("deg") if res_scale is not None else 0
        if geom.is_region or geom.is_hpx:
            geom = geom.to_wcs_geom()
        if res_scale != 0:
            factor = int(np.ceil(np.max(geom.pixel_scales.deg) / res_scale))
            self._spatial_oversampling_factor = factor
    def compute_dnde(self):
        """Compute model differential flux at map pixel centers.
        Returns
        -------
        model_map : `~gammapy.maps.Map`
            Sky cube with data filled with evaluated model values.
            Units: ``cm-2 s-1 TeV-1 deg-2``
        """
        return self.model.evaluate_geom(self.geom, self.gti)
    def compute_flux(self, *arg):
        """Compute flux"""
        # *arg is ignored; it allows uniform chained calls in methods_sequence.
        return self.model.integrate_geom(self.geom, self.gti)
    def compute_flux_psf_convolved(self, *arg):
        """Compute psf convolved and temporal model corrected flux."""
        value = self.compute_flux_spectral()
        if self.model.spatial_model:
            if self.psf_containment is not None:
                value = value * self.psf_containment
            else:
                value = value * self.compute_flux_spatial()
        if self.model.temporal_model:
            value *= self.compute_temporal_norm()
        return Map.from_geom(geom=self.geom, data=value.value, unit=value.unit)
    def compute_flux_spatial(self):
        """Compute spatial flux using caching"""
        if self.parameters_spatial_changed() or not self.use_cache:
            del self._compute_flux_spatial
        return self._compute_flux_spatial
    @lazyproperty
    def _compute_flux_spatial(self):
        """Compute spatial flux
        Returns
        ----------
        value: `~astropy.units.Quantity`
            Psf-corrected, integrated flux over a given region.
        """
        if self.geom.is_region:
            # We don't estimate spatial contributions if no psf are defined
            if self.geom.region is None or self.psf is None:
                return 1
            wcs_geom = self.geom.to_wcs_geom(width_min=self.cutout_width).to_image()
            if self.psf and self.model.apply_irf["psf"]:
                values = self._compute_flux_spatial_geom(wcs_geom)
            else:
                values = self.model.spatial_model.integrate_geom(
                    wcs_geom, oversampling_factor=1
                )
                axes = [self.geom.axes["energy_true"].squash()]
                values = values.to_cube(axes=axes)
            weights = wcs_geom.region_weights(regions=[self.geom.region])
            value = (values.quantity * weights).sum(axis=(1, 2), keepdims=True)
        else:
            value = self._compute_flux_spatial_geom(self.geom)
        return value
    def _compute_flux_spatial_geom(self, geom):
        """Compute spatial flux oversampling geom if necessary"""
        if not self.model.spatial_model.is_energy_dependent:
            geom = geom.to_image()
        value = self.model.spatial_model.integrate_geom(geom)
        if self.psf and self.model.apply_irf["psf"]:
            value = self.apply_psf(value)
        return value
    def compute_flux_spectral(self):
        """Compute spectral flux"""
        energy = self.geom.axes["energy_true"].edges
        value = self.model.spectral_model.integral(
            energy[:-1],
            energy[1:],
        )
        if self.geom.is_hpx:
            return value.reshape((-1, 1))
        else:
            return value.reshape((-1, 1, 1))
    def compute_temporal_norm(self):
        """Compute temporal norm"""
        integral = self.model.temporal_model.integral(
            self.gti.time_start, self.gti.time_stop
        )
        return np.sum(integral)
    def apply_exposure(self, flux):
        """Compute npred cube
        For now just divide flux cube by exposure
        """
        npred = (flux.quantity * self.exposure.quantity).to_value("")
        return Map.from_geom(self.geom, data=npred, unit="")
    def apply_psf(self, npred):
        """Convolve npred cube with PSF"""
        tmp = npred.convolve(self.psf)
        # Clip negative convolution artifacts to zero.
        tmp.data[tmp.data < 0.0] = 0
        return tmp
    def apply_edisp(self, npred):
        """Convolve map data with energy dispersion.
        Parameters
        ----------
        npred : `~gammapy.maps.Map`
            Predicted counts in true energy bins
        Returns
        -------
        npred_reco : `~gammapy.maps.Map`
            Predicted counts in reco energy bins
        """
        return npred.apply_edisp(self.edisp)
    @lazyproperty
    def _compute_npred(self):
        """Compute npred"""
        if isinstance(self.model, TemplateNPredModel):
            npred = self.model.evaluate()
        else:
            if not self.parameter_norm_only_changed:
                # Run the full IRF pipeline, threading the result through each step.
                for method in self.methods_sequence:
                    values = method(self._computation_cache)
                    self._computation_cache = values
                npred = self._computation_cache
            else:
                # Only the norm changed: rescale the cached npred instead.
                npred = self._computation_cache * self.renorm()
        return npred
    @property
    def apply_psf_after_edisp(self):
        """Whether the model requests PSF application after energy dispersion."""
        if not isinstance(self.model, TemplateNPredModel):
            return self.model.apply_irf.get("psf_after_edisp")
    def compute_npred(self):
        """Evaluate model predicted counts.
        Returns
        -------
        npred : `~gammapy.maps.Map`
            Predicted counts on the map (in reco energy bins)
        """
        if self.parameters_changed or not self.use_cache:
            del self._compute_npred
        return self._compute_npred
    @property
    def parameters_changed(self):
        """Parameters changed"""
        values = self.model.parameters.value
        # TODO: possibly allow for a tolerance here?
        changed = ~np.all(self._cached_parameter_values == values)
        if changed:
            self._cached_parameter_values = values
        return changed
    @property
    def parameter_norm_only_changed(self):
        """Only norm parameter changed"""
        norm_only_changed = False
        idx = self._norm_idx
        values = self.model.parameters.value
        if idx and self._computation_cache is not None:
            changed = self._cached_parameter_values_previous == values
            norm_only_changed = sum(changed) == 1 and changed[idx]
        if not norm_only_changed:
            self._cached_parameter_values_previous = values
        return norm_only_changed
    def parameters_spatial_changed(self, reset=True):
        """Parameters changed
        Parameters
        ----------
        reset : bool
            Reset cached values
        Returns
        -------
        changed : bool
            Whether spatial parameters changed.
        """
        values = self.model.spatial_model.parameters.value
        # TODO: possibly allow for a tolerance here?
        changed = ~np.all(self._cached_parameter_values_spatial == values)
        if changed and reset:
            self._cached_parameter_values_spatial = values
        return changed
    @property
    def irf_position_changed(self):
        """Position for IRF changed"""
        # Here we do not use SkyCoord.separation to improve performance
        # (it avoids equivalence comparisons for frame and units)
        lon_cached, lat_cached = self._cached_position
        lon, lat = self.model.position_lonlat
        separation = angular_separation(lon, lat, lon_cached, lat_cached)
        changed = separation > (self.model.evaluation_radius + CUTOUT_MARGIN).to_value(
            u.rad
        )
        if changed:
            self._cached_position = lon, lat
        return changed
    @lazyproperty
    def _norm_idx(self):
        """Index of the single norm-like parameter, or None if not unique."""
        names = self.model.parameters.names
        ind = [idx for idx, name in enumerate(names) if name in ["norm", "amplitude"]]
        if len(ind) == 1:
            return ind[0]
        else:
            return None
    def renorm(self):
        """Ratio of the current norm parameter value to the cached one."""
        value = self.model.parameters.value[self._norm_idx]
        value_cached = self._cached_parameter_values_previous[self._norm_idx]
        return value / value_cached
    @lazyproperty
    def methods_sequence(self):
        """Ordered list of IRF-application steps used by ``_compute_npred``."""
        if self.apply_psf_after_edisp:
            methods = [
                self.compute_flux,
                self.apply_exposure,
                self.apply_edisp,
                self.apply_psf,
            ]
            if not self.psf or not self.model.apply_irf["psf"]:
                methods.remove(self.apply_psf)
        else:
            methods = [
                self.compute_flux_psf_convolved,
                self.apply_exposure,
                self.apply_edisp,
            ]
        if not self.model.apply_irf["exposure"]:
            methods.remove(self.apply_exposure)
        if not self.model.apply_irf["edisp"]:
            methods.remove(self.apply_edisp)
        return methods
| 33.028056 | 98 | 0.608398 |
e3552593b394d9415acc174ae0293211e8951cd8 | 6,495 | py | Python | src/simple_volume_adjuster.py | jzucker2/RufusRaspberry | ee554ee0608688b85fa864c55921fef705c3aa55 | [
"MIT"
] | 1 | 2021-03-06T02:38:43.000Z | 2021-03-06T02:38:43.000Z | src/simple_volume_adjuster.py | jzucker2/RufusRaspberry | ee554ee0608688b85fa864c55921fef705c3aa55 | [
"MIT"
] | null | null | null | src/simple_volume_adjuster.py | jzucker2/RufusRaspberry | ee554ee0608688b85fa864c55921fef705c3aa55 | [
"MIT"
] | null | null | null | import logging
from functools import reduce
from dataclasses import dataclass
from enum import Enum
from datetime import datetime, timedelta
import threading
from .activities import ActivityName
log = logging.getLogger(__name__)
# Raised when a rotation value or direction is not one of the expected cases.
class RotationDirectionException(Exception): pass
class RotationDirection(Enum):
    """Direction of a rotary-encoder turn; values are the signed unit step."""

    CLOCKWISE = 1
    COUNTER_CLOCKWISE = -1
    NO_OP = 0

    @property
    def activity_name(self):
        """Activity to trigger for this direction (None when nothing happened)."""
        if self is RotationDirection.CLOCKWISE:
            return ActivityName.MASTER_VOLUME_UP
        if self is RotationDirection.COUNTER_CLOCKWISE:
            return ActivityName.MASTER_VOLUME_DOWN
        if self is RotationDirection.NO_OP:
            return None
        raise RotationDirectionException(f'Unexpected direction: {self}')
class VolumeDomain(Enum):
    """Scope of a volume change: the whole system or the local device."""

    GLOBAL = 1
    LOCAL = 0

    def __repr__(self):
        return 'VolumeDomain => {} ({})'.format(self.name, self.value)
@dataclass
class RotationEvent:
    """A single rotary-encoder event: raw value, creation time and volume domain."""

    value: int  # raw encoder delta; only its sign matters for the direction
    created: datetime  # UTC timestamp assigned by create_event()
    domain: VolumeDomain = VolumeDomain.LOCAL

    @classmethod
    def create_event(cls, value, domain=VolumeDomain.LOCAL):
        """Build an event stamped with the current UTC time."""
        return cls(value, datetime.utcnow(), domain=domain)

    @property
    def direction(self) -> RotationDirection:
        """Map the sign of ``value`` onto a RotationDirection."""
        if self.value > 0:
            return RotationDirection.CLOCKWISE
        if self.value < 0:
            return RotationDirection.COUNTER_CLOCKWISE
        if self.value == 0:
            return RotationDirection.NO_OP
        raise RotationDirectionException(f'Unexpected value: {self.value}')

    @property
    def activity_name(self) -> ActivityName:
        """Activity to trigger for this event's direction (None for NO_OP)."""
        return self.direction.activity_name

    def __int__(self):
        # The RotationDirection enum values are exactly the +1/-1/0 the old
        # if/elif chain returned, so delegate instead of duplicating the mapping.
        return self.direction.value
# Base error for volume adjusters. NOTE(review): "Volumer" is a typo kept for
# backward compatibility with existing callers.
class AbstractVolumerAdjusterException(Exception): pass
# Raised when an operation needs at least one buffered event and none exist.
class NoEventsAbstractVolumeAdjusterException(AbstractVolumerAdjusterException): pass
class AbstractVolumeAdjuster(object):
    """Base class that buffers rotation events and sends volume/mute requests
    through a rufus client; subclasses decide when the requests fire."""
    def __init__(self, rufus_client, local_volume_activity_name, local_mute_activity_name, traffic_lights=None, debug=False, reverse_rotary_encoder=False):
        # Buffered RotationEvent instances awaiting aggregation.
        self.events = []
        self.rufus_client = rufus_client
        self.traffic_lights = traffic_lights
        self.debug = debug
        self.local_volume_activity_name = local_volume_activity_name
        self.local_mute_activity_name = local_mute_activity_name
        # When True, the sign of every adjustment is flipped before sending.
        self.reverse_rotary_encoder = reverse_rotary_encoder
    def clear_events(self):
        """Drop all buffered events."""
        self.events = []
    def add_event(self, value, domain=None) -> RotationEvent:
        """Create, buffer and return a RotationEvent for the given raw value."""
        event = RotationEvent.create_event(value, domain=domain)
        self.events.append(event)
        return event
    def get_volume_activity_for_domain(self, domain):
        """Pick the global or local volume activity for the given domain."""
        if domain == VolumeDomain.GLOBAL:
            return ActivityName.GLOBAL_VOLUME_ADJUSTMENT
        return self.local_volume_activity_name
    def get_mute_activity_for_domain(self, domain):
        """Pick the global or local mute activity for the given domain."""
        if domain == VolumeDomain.GLOBAL:
            return ActivityName.GLOBAL_MUTE_TOGGLE
        return self.local_mute_activity_name
    def adjust_volume(self, value, domain=VolumeDomain.LOCAL):
        """Send a signed volume adjustment request and return the response."""
        activity_name = self.get_volume_activity_for_domain(domain)
        log.info(f'About to adjust volume ({value}) for domain: {domain}')
        if self.reverse_rotary_encoder:
            log.info('We have `reverse_rotary_encoder` set to True! So first we reverse the volume adjustment')
            value = -value
            log.info(f'Updated value is now =========> {value}')
        response = self.rufus_client.perform_perform_full_activity(activity_name, custom_value=value, debug=self.debug, traffic_lights=self.traffic_lights)
        log.info(f'For volume adjustment, got: {response}')
        return response
    def toggle_mute(self, domain=VolumeDomain.LOCAL):
        """Send a mute-toggle request for the given domain and return the response."""
        activity_name = self.get_mute_activity_for_domain(domain)
        log.info(f'About to toggle mute for domain: {domain}')
        response = self.rufus_client.perform_perform_full_activity(activity_name, debug=self.debug, traffic_lights=self.traffic_lights)
        log.info(f'For volume adjustment, got: {response}')
        return response
    def last_event_datetime(self):
        """Return the creation time of the most recent event; raise if none."""
        if len(self.events):
            event = self.events[-1]
            return event.created
        raise NoEventsAbstractVolumeAdjusterException('No events!')
    def last_event_info(self):
        """Return {'created', 'domain'} for the most recent event; raise if none."""
        if len(self.events):
            event = self.events[-1]
            return {
                'created': event.created,
                'domain': event.domain,
            }
        raise NoEventsAbstractVolumeAdjusterException('No events!')
class SimpleVolumeAdjuster(AbstractVolumeAdjuster):
    """Debounced volume adjuster: buffers encoder events and sends a single
    aggregated adjustment request once the rotation pauses."""
    def __init__(self, rufus_client, local_volume_activity_name, local_mute_activity_name, traffic_lights=None, debug=False, reverse_rotary_encoder=False):
        super(SimpleVolumeAdjuster, self).__init__(rufus_client, local_volume_activity_name, local_mute_activity_name, traffic_lights=traffic_lights, debug=debug, reverse_rotary_encoder=reverse_rotary_encoder)
        # Pending debounce timer; replaced on every new event.
        self.timer = None
    @property
    def request_delay(self):
        """Seconds to wait after the last event before attempting a request."""
        return 1
    @property
    def event_debounce_duration(self):
        """Minimum quiet period (seconds) required before a request is sent."""
        return 1
    def add_event(self, value, domain=None):
        """Buffer the event and (re)arm the debounce timer."""
        super(SimpleVolumeAdjuster, self).add_event(value, domain=domain)
        if self.timer:
            # Restart the countdown: only the pause after the *last* event counts.
            self.timer.cancel()
        self.timer = threading.Timer(self.request_delay, self.simple_volume_request)
        self.timer.start()
    def simple_volume_request(self):
        """Timer callback: send one aggregated adjustment if input has settled."""
        now = datetime.utcnow()
        last_event_info = self.last_event_info()
        time_difference = now - last_event_info['created']
        if time_difference < timedelta(seconds=self.event_debounce_duration):
            log.info(f'Only been {time_difference} so no request yet, returning ...')
            return
        domain = last_event_info['domain']
        total_volume = self.get_total_adjustment()
        log.info(f'Got total_volume: {total_volume} for intended domain: {domain}')
        self.clear_events()
        if total_volume == 0:
            return
        self.adjust_volume(total_volume, domain=domain)
    def get_total_adjustment(self):
        """Return the net signed step count over all buffered events as an int.

        Bug fix: the previous ``reduce(lambda x, y: int(x) + int(y), self.events)``
        returned the bare ``RotationEvent`` (not an int) when exactly one event
        was buffered, and raised ``TypeError`` on an empty buffer. ``sum`` over
        ``int(event)`` always yields an int (0 when there are no events).
        """
        return sum(int(event) for event in self.events)
| 36.284916 | 209 | 0.692995 |
cdfbec18f65b27b4c5ddf6c9415c5347784cb3c7 | 4,794 | py | Python | osu-ac/loader.py | spook0/osu-anticheat | 23b153c741aef94a97769bee4cc75bfb6942b7b0 | [
"MIT"
] | null | null | null | osu-ac/loader.py | spook0/osu-anticheat | 23b153c741aef94a97769bee4cc75bfb6942b7b0 | [
"MIT"
] | null | null | null | osu-ac/loader.py | spook0/osu-anticheat | 23b153c741aef94a97769bee4cc75bfb6942b7b0 | [
"MIT"
] | null | null | null | import requests
from datetime import datetime
import time
import base64
from config import API_SCORES_ALL, API_SCORES_USER, API_REPLAY
def api(function):
    """
    Decorator that refreshes the ratelimit window before the wrapped api call:
    if more than RATELIMIT_RESET seconds have passed since the first request of
    the cycle, Loader.start_time is reset to the current datetime.
    """
    # Local import keeps this fix self-contained within the decorator.
    from functools import wraps

    @wraps(function)  # preserve the wrapped function's name and docstring
    def wrapper(*args, **kwargs):
        # check if we've refreshed our ratelimits yet.
        # Use total_seconds() (not .seconds) so the days component of the
        # elapsed time is counted — start_time begins at datetime.min, where
        # .seconds alone would be a meaningless sub-day remainder.
        difference = datetime.now() - Loader.start_time
        if difference.total_seconds() > Loader.RATELIMIT_RESET:
            Loader.start_time = datetime.now()
        return function(*args, **kwargs)
    return wrapper
class Loader():
    """
    Manages interactions with the osu api - if the api ratelimits the key we wait until we refresh our ratelimits
    and retry the request.
    This class is not meant to be instantiated, instead only static methods and class variables used.
    This is because we only use one api key for the entire project, and making all methods static provides
    cleaner access than passing around a single Loader class.
    """

    RATELIMIT_RESET = 60 # time in seconds until the api refreshes our ratelimits
    start_time = datetime.min # when we started our requests cycle

    def __init__(self):
        """
        This class should never be instantiated. All methods are static.
        """
        raise Exception("This class is not meant to be instantiated. Use the static methods instead.")

    @staticmethod
    @api
    def users_info(map_id, num=50):
        """
        Returns a dict mapping the user_id to their replay_id for the top given number of replays

        Args:
            String map_id: The map id to get a list of users from.
            Integer num: The number of ids to fetch. Defaults to 50.

        Raises:
            Exception: If num is outside the 1-100 range the api accepts.
        """
        if(num > 100 or num < 1):
            raise Exception("The number of top plays to fetch must be between 1 and 100 inclusive!")
        response = requests.get(API_SCORES_ALL.format(map_id, num)).json()
        if(Loader.check_response(response)):
            # We were ratelimited: wait out the window and retry once recursively.
            Loader.enforce_ratelimit()
            return Loader.users_info(map_id, num=num)
        info = {x["user_id"]: x["score_id"] for x in response} # map user id to score id
        return info

    @staticmethod
    @api
    def user_info(map_id, user_id):
        """
        Returns a dict mapping a user_id to their replay_id for the given user on the given map.

        Args:
            String map_id: The map id to get the replay_id from.
            String user_id: The user id to get the replay_id from.
        """
        response = requests.get(API_SCORES_USER.format(map_id, user_id)).json()
        if(Loader.check_response(response)):
            Loader.enforce_ratelimit()
            return Loader.user_info(map_id, user_id)
        info = {x["user_id"]: x["score_id"] for x in response} # map user id to score id, should only be one response
        return info

    @staticmethod
    @api
    def replay_data(map_id, user_id):
        """
        Queries the api for replay data from the given user on the given map.

        Args:
            String map_id: The map id to get the replay off of.
            String user_id: The user id to get the replay of.

        Returns:
            The lzma bytes (b64 decoded response) returned by the api.
        """
        print("Requesting replay by {} on map {}".format(user_id, map_id))
        response = requests.get(API_REPLAY.format(map_id, user_id)).json()
        if(Loader.check_response(response)):
            Loader.enforce_ratelimit()
            return Loader.replay_data(map_id, user_id)
        return base64.b64decode(response["content"])

    @staticmethod
    def check_response(response):
        """
        Checks the given api response for a ratelimit error.

        Args:
            String response: The response to check.

        Returns:
            True if the key is ratelimited, False otherwise.
        """
        if("error" in response):
            return True
        else:
            return False

    @staticmethod
    def enforce_ratelimit():
        """
        Enforces the ratelimit by sleeping the thread until it's safe to make requests again.
        """
        difference = datetime.now() - Loader.start_time
        # Bug fix: use total_seconds() instead of .seconds so elapsed times over
        # a day (e.g. the datetime.min sentinel start_time) are not wrapped back
        # into the 0-86399 range, which could cause a spurious sleep at startup.
        seconds_passed = difference.total_seconds()
        if(seconds_passed > Loader.RATELIMIT_RESET):
            return
        # sleep the remainder of the reset cycle so we guarantee it's been that long since the first request
        sleep_seconds = Loader.RATELIMIT_RESET - seconds_passed
        print("Ratelimited. Sleeping for {} seconds".format(sleep_seconds))
        time.sleep(sleep_seconds)
| 33.760563 | 117 | 0.64685 |
f0a58228cf41a07180d042966851a220d31d434f | 7,480 | py | Python | mmdet/models/roi_heads/bbox_heads/obb/obb_convfc_bbox_head.py | vpeopleonatank/OBBDetection | 86fb098d8d2ff3fc3cc447714d89a44c0939614a | [
"Apache-2.0"
] | 274 | 2021-04-06T15:46:06.000Z | 2022-03-31T02:00:10.000Z | mmdet/models/roi_heads/bbox_heads/obb/obb_convfc_bbox_head.py | LiWentomng/OBBDetection | bf0dda9553fcccd4a08dc6c0d2ab9c9af1efee63 | [
"Apache-2.0"
] | 136 | 2021-07-11T11:26:54.000Z | 2022-03-31T02:45:34.000Z | mmdet/models/roi_heads/bbox_heads/obb/obb_convfc_bbox_head.py | LiWentomng/OBBDetection | bf0dda9553fcccd4a08dc6c0d2ab9c9af1efee63 | [
"Apache-2.0"
] | 84 | 2021-05-29T06:58:14.000Z | 2022-03-31T07:44:10.000Z | import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models.builder import HEADS
from .obbox_head import OBBoxHead
@HEADS.register_module()
class OBBConvFCBBoxHead(OBBoxHead):
    r"""More general bbox head, with shared conv and fc layers and two optional
    separated branches.

    .. code-block:: none

                                    /-> cls convs -> cls fcs -> cls
        shared convs -> shared fcs
                                    \-> reg convs -> reg fcs -> reg
    """  # noqa: W605

    def __init__(self,
                 num_shared_convs=0,
                 num_shared_fcs=0,
                 num_cls_convs=0,
                 num_cls_fcs=0,
                 num_reg_convs=0,
                 num_reg_fcs=0,
                 conv_out_channels=256,
                 fc_out_channels=1024,
                 conv_cfg=None,
                 norm_cfg=None,
                 *args,
                 **kwargs):
        super(OBBConvFCBBoxHead, self).__init__(*args, **kwargs)
        # The head must contain at least one conv or fc layer somewhere.
        assert (num_shared_convs + num_shared_fcs + num_cls_convs +
                num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
        # Branch-specific convs are only allowed when there are no shared
        # fcs: forward() flattens features before the fc stage, so convs
        # cannot be applied afterwards.
        if num_cls_convs > 0 or num_reg_convs > 0:
            assert num_shared_fcs == 0
        # A disabled branch (no cls / no reg output) must not be given layers.
        if not self.with_cls:
            assert num_cls_convs == 0 and num_cls_fcs == 0
        if not self.with_reg:
            assert num_reg_convs == 0 and num_reg_fcs == 0
        self.num_shared_convs = num_shared_convs
        self.num_shared_fcs = num_shared_fcs
        self.num_cls_convs = num_cls_convs
        self.num_cls_fcs = num_cls_fcs
        self.num_reg_convs = num_reg_convs
        self.num_reg_fcs = num_reg_fcs
        self.conv_out_channels = conv_out_channels
        self.fc_out_channels = fc_out_channels
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg

        # add shared convs and fcs
        self.shared_convs, self.shared_fcs, last_layer_dim = \
            self._add_conv_fc_branch(
                self.num_shared_convs, self.num_shared_fcs, self.in_channels,
                True)
        self.shared_out_channels = last_layer_dim

        # add cls specific branch
        self.cls_convs, self.cls_fcs, self.cls_last_dim = \
            self._add_conv_fc_branch(
                self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)

        # add reg specific branch
        self.reg_convs, self.reg_fcs, self.reg_last_dim = \
            self._add_conv_fc_branch(
                self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)

        # No fc stage flattened the features yet, so the final conv output
        # keeps its spatial extent; scale the dims by the RoI feature area.
        if self.num_shared_fcs == 0 and not self.with_avg_pool:
            if self.num_cls_fcs == 0:
                self.cls_last_dim *= self.roi_feat_area
            if self.num_reg_fcs == 0:
                self.reg_last_dim *= self.roi_feat_area

        self.relu = nn.ReLU(inplace=True)
        # reconstruct fc_cls and fc_reg since input channels are changed
        if self.with_cls:
            # num_classes + 1 outputs — presumably the extra slot is the
            # background class (standard mmdet convention; confirm in OBBoxHead).
            self.fc_cls = nn.Linear(self.cls_last_dim, self.num_classes + 1)
        if self.with_reg:
            out_dim_reg = self.reg_dim if self.reg_class_agnostic else \
                self.reg_dim * self.num_classes
            self.fc_reg = nn.Linear(self.reg_last_dim, out_dim_reg)

    def _add_conv_fc_branch(self,
                            num_branch_convs,
                            num_branch_fcs,
                            in_channels,
                            is_shared=False):
        """Add shared or separable branch

        convs -> avg pool (optional) -> fcs

        Returns a (convs, fcs, out_dim) triple where out_dim is the channel
        dimension produced by the last layer of the branch.
        """
        last_layer_dim = in_channels
        # add branch specific conv layers
        branch_convs = nn.ModuleList()
        if num_branch_convs > 0:
            for i in range(num_branch_convs):
                conv_in_channels = (
                    last_layer_dim if i == 0 else self.conv_out_channels)
                branch_convs.append(
                    ConvModule(
                        conv_in_channels,
                        self.conv_out_channels,
                        3,
                        padding=1,
                        conv_cfg=self.conv_cfg,
                        norm_cfg=self.norm_cfg))
            last_layer_dim = self.conv_out_channels
        # add branch specific fc layers
        branch_fcs = nn.ModuleList()
        if num_branch_fcs > 0:
            # for shared branch, only consider self.with_avg_pool
            # for separated branches, also consider self.num_shared_fcs
            if (is_shared
                    or self.num_shared_fcs == 0) and not self.with_avg_pool:
                # The first fc consumes the flattened spatial feature map.
                last_layer_dim *= self.roi_feat_area
            for i in range(num_branch_fcs):
                fc_in_channels = (
                    last_layer_dim if i == 0 else self.fc_out_channels)
                branch_fcs.append(
                    nn.Linear(fc_in_channels, self.fc_out_channels))
            last_layer_dim = self.fc_out_channels
        return branch_convs, branch_fcs, last_layer_dim

    def init_weights(self):
        """Initialize fc weights with Xavier; convs are handled by ConvModule."""
        super(OBBConvFCBBoxHead, self).init_weights()
        # conv layers are already initialized by ConvModule
        for module_list in [self.shared_fcs, self.cls_fcs, self.reg_fcs]:
            for m in module_list.modules():
                if isinstance(m, nn.Linear):
                    nn.init.xavier_uniform_(m.weight)
                    nn.init.constant_(m.bias, 0)

    def forward(self, x):
        """Run shared convs/fcs, then the cls and reg branches.

        Returns:
            tuple: (cls_score, bbox_pred); either element is None when the
            corresponding branch is disabled.
        """
        # shared part
        if self.num_shared_convs > 0:
            for conv in self.shared_convs:
                x = conv(x)

        if self.num_shared_fcs > 0:
            if self.with_avg_pool:
                x = self.avg_pool(x)

            x = x.flatten(1)

            for fc in self.shared_fcs:
                x = self.relu(fc(x))
        # separate branches
        x_cls = x
        x_reg = x

        for conv in self.cls_convs:
            x_cls = conv(x_cls)
        # dim() > 2 means the features are still spatial (no shared fc ran),
        # so pool/flatten before the branch fcs.
        if x_cls.dim() > 2:
            if self.with_avg_pool:
                x_cls = self.avg_pool(x_cls)
            x_cls = x_cls.flatten(1)
        for fc in self.cls_fcs:
            x_cls = self.relu(fc(x_cls))

        for conv in self.reg_convs:
            x_reg = conv(x_reg)
        if x_reg.dim() > 2:
            if self.with_avg_pool:
                x_reg = self.avg_pool(x_reg)
            x_reg = x_reg.flatten(1)
        for fc in self.reg_fcs:
            x_reg = self.relu(fc(x_reg))

        cls_score = self.fc_cls(x_cls) if self.with_cls else None
        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
        return cls_score, bbox_pred
@HEADS.register_module()
class OBBShared2FCBBoxHead(OBBConvFCBBoxHead):
    """Common layout: no branch layers, just two shared fully-connected layers."""

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        # Pin every conv/fc count except the two shared fcs to zero.
        super(OBBShared2FCBBoxHead, self).__init__(
            *args,
            num_shared_convs=0,
            num_shared_fcs=2,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            **kwargs)
@HEADS.register_module()
class OBBShared4Conv1FCBBoxHead(OBBConvFCBBoxHead):
    """Layout with four shared convs followed by a single shared fc."""

    def __init__(self, fc_out_channels=1024, *args, **kwargs):
        # Only the shared stack carries layers; both branches stay empty.
        super(OBBShared4Conv1FCBBoxHead, self).__init__(
            *args,
            num_shared_convs=4,
            num_shared_fcs=1,
            num_cls_convs=0,
            num_cls_fcs=0,
            num_reg_convs=0,
            num_reg_fcs=0,
            fc_out_channels=fc_out_channels,
            **kwargs)
| 36.31068 | 79 | 0.568583 |
1ff2fdf50d3fcdb7047165964c37084706abe336 | 2,282 | py | Python | pygam_paramters_impact_with_pdp.py | mbronis/pyGAM | 23a482af1ccb8e81eeaadee3f5600bfcfa5f920d | [
"MIT"
] | 1 | 2019-11-12T21:13:32.000Z | 2019-11-12T21:13:32.000Z | pygam_paramters_impact_with_pdp.py | mbronis/pyGAM | 23a482af1ccb8e81eeaadee3f5600bfcfa5f920d | [
"MIT"
] | null | null | null | pygam_paramters_impact_with_pdp.py | mbronis/pyGAM | 23a482af1ccb8e81eeaadee3f5600bfcfa5f920d | [
"MIT"
] | null | null | null |
'''
TODO:
0) proper import and naming ot terms
00) usage of 0 variance terms
1) fit gam on continous and categorical data
2) integers as numerical and categorical data
3) gridsearch on lambdas
4) grid search on type of terms
Insights:
-all data required in numerical format (ndarray),
factors need to be transformed to integers
'''
#import pygam lib
import numpy as np
import pandas as pd
from pygam.datasets import wage
#utils
def crude_strCat_to_int(ar, findex):
    """
    Integer-encode a string-valued column of a 2-D array, in place.

    If the values in column ``findex`` are strings, each unique value is
    replaced by its index in the lexicographically sorted set of unique
    values (via ``np.unique(..., return_inverse=True)``).  Non-string
    columns are left untouched.

    Args:
        ar: 2-D numpy array (object dtype) holding the feature matrix.
        findex: index of the column to encode.
    """
    # Guard against empty input: the sniff below reads ar[0, findex],
    # which would raise IndexError on a zero-row array.
    if ar.shape[0] == 0:
        return
    # Sniff the first entry only; columns are assumed homogeneous.
    if isinstance(ar[0, findex], str):
        ar[:, findex] = np.unique(ar[:, findex], return_inverse=True)[1]
# load dataset (as pd.DataFrame) => describe features
df=wage(return_X_y=False)
df.describe(include='all')

'''
type of terms:
1) int/category ['year']
2) int ['age']
3) continous ['logwage']
4) category ['sex','maritl','race','education','religion','jobclass','health','health_ins']
'''

#prep X and y
# Only a 3-feature subset is modeled; 'education' (index 2) is the
# categorical feature whose term type is varied below.
features=['year', 'age', 'education']
X=df[features].values
# 'education' is a string column: encode it to integers in place.
crude_strCat_to_int(X,2)
y=df['wage'].values

# test different types of term on categorical feature
# term types: spline (default), linear effect, factor, spline with categorical dtype
from pygam import LinearGAM, s,f,l

# Same model for features 0 and 1 (splines); only the term on feature 2 varies.
gam1=LinearGAM(s(0)+s(1)+s(2)).fit(X,y)
gam2=LinearGAM(s(0)+s(1)+l(2)).fit(X,y)
gam3=LinearGAM(s(0)+s(1)+f(2)).fit(X,y)
gam4=LinearGAM(s(0)+s(1)+s(2, dtype='categorical')).fit(X,y)

# Keep the fitted models and their labels aligned by index for plotting.
gams=[gam1,gam2,gam3,gam4]
terms_names=['spline','linear','factor','categorical spline']

###########################################
#compare pdp
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (28, 28)

# Grid of partial-dependence plots: one row per term type, one column per
# feature; rows share a y-axis so the term effects are visually comparable.
fig, axs = plt.subplots(4, X.shape[1], sharey='row')
for r, axr in enumerate(axs):
    gam=gams[r]
    for i, ax in enumerate(axr):
        XX=gam.generate_X_grid(term=i, meshgrid=True)
        # 95% confidence band around the partial dependence curve.
        pdep, confi = gam.partial_dependence(term=i, X=XX, meshgrid=True, width=.95)
        ax.plot(XX[0], pdep)
        ax.plot(XX[0], confi[:, 0], c='grey', ls='--')
        ax.plot(XX[0], confi[:, 1], c='grey', ls='--')
        # Label columns once (top row) and rows once (first column).
        if r==0:
            ax.set_title(features[i])
        if i==0:
            ax.set_ylabel(terms_names[r], size='large')
880e831e3aa051d68ec8d8273c9bc44bca862741 | 3,791 | py | Python | assignment1/q3_sgd.py | HeroKillerEver/cs224n-spring-2017 | 52e82f93ddc7192263930ecd90df999c775d1238 | [
"MIT"
] | 3 | 2018-01-29T04:26:48.000Z | 2021-09-16T12:38:01.000Z | assignment1/q3_sgd.py | HeroKillerEver/cs224n-spring-2017 | 52e82f93ddc7192263930ecd90df999c775d1238 | [
"MIT"
] | null | null | null | assignment1/q3_sgd.py | HeroKillerEver/cs224n-spring-2017 | 52e82f93ddc7192263930ecd90df999c775d1238 | [
"MIT"
] | 3 | 2019-01-04T05:48:57.000Z | 2020-08-03T19:31:15.000Z | #!/usr/bin/env python
# Save parameters every a few SGD iterations as fail-safe
SAVE_PARAMS_EVERY = 5000
import glob
import random
import numpy as np
import os.path as op
import cPickle as pickle
def load_saved_params():
    """
    A helper function that loads previously saved parameters and resets
    iteration start.

    Scans the working directory for "saved_params_<iter>.npy" checkpoints
    and loads the one with the highest iteration number.

    Returns:
        (st, params, state): highest saved iteration (0 if none), the
        pickled parameters, and the pickled random state (both None when
        no checkpoint exists).
    """
    st = 0
    for f in glob.glob("saved_params_*.npy"):
        iter = int(op.splitext(op.basename(f))[0].split("_")[2])
        if iter > st:
            st = iter

    if st > 0:
        # Pickle payloads must be read in binary mode: text mode corrupts
        # protocol >= 1 data and breaks on Windows newline translation.
        with open("saved_params_%d.npy" % st, "rb") as f:
            params = pickle.load(f)
            state = pickle.load(f)
        return st, params, state
    else:
        return st, None, None
def save_params(iter, params):
    """Checkpoint *params* and the RNG state for iteration *iter*.

    Writes two consecutive pickles (parameters, then random.getstate())
    to "saved_params_<iter>.npy" so load_saved_params can restore both.
    """
    # Pickle requires a binary-mode file: "w" corrupts protocol >= 1 data
    # and breaks on Windows newline translation.
    with open("saved_params_%d.npy" % iter, "wb") as f:
        pickle.dump(params, f)
        pickle.dump(random.getstate(), f)
def sgd(f, x0, step, iterations, postprocessing=None, useSaved=False,
        PRINT_EVERY=10):
    """ Stochastic Gradient Descent

    Implement the stochastic gradient descent method in this function.

    Arguments:
    f -- the function to optimize, it should take a single
         argument and yield two outputs, a cost and the gradient
         with respect to the arguments
    x0 -- the initial point to start SGD from
    step -- the step size for SGD
    iterations -- total iterations to run SGD for
    postprocessing -- postprocessing function for the parameters
                      if necessary. In the case of word2vec we will need to
                      normalize the word vectors to have unit length.
    PRINT_EVERY -- specifies how many iterations to output loss

    Return:
    x -- the parameter value after SGD finishes
    """

    # Anneal learning rate every several iterations
    ANNEAL_EVERY = 20000

    if useSaved:
        start_iter, oldx, state = load_saved_params()
        if start_iter > 0:
            x0 = oldx
            step *= 0.5 ** (start_iter / ANNEAL_EVERY)

        if state:
            random.setstate(state)
    else:
        start_iter = 0

    x = x0

    if not postprocessing:
        postprocessing = lambda x: x

    expcost = None

    # range instead of xrange: identical semantics on Python 2, required on 3.
    for iter in range(start_iter + 1, iterations + 1):
        cost, grad = f(x)
        x -= step * grad
        # Bug fix: the result of postprocessing was previously discarded,
        # which silently skipped postprocessors that return a new array
        # (e.g. row normalization). Keep x when the postprocessor mutates
        # in place and returns None.
        processed = postprocessing(x)
        if processed is not None:
            x = processed

        if iter % PRINT_EVERY == 0:
            if not expcost:
                expcost = cost
            else:
                # Exponential moving average of the cost for smoother logs.
                expcost = .95 * expcost + .05 * cost
            print("iter %d: %f" % (iter, expcost))

        # Check useSaved first so the save path (and its module-level
        # constant) is only touched when checkpointing is enabled.
        if useSaved and iter % SAVE_PARAMS_EVERY == 0:
            save_params(iter, x)

        if iter % ANNEAL_EVERY == 0:
            step *= 0.5

    return x
def sanity_check():
    """Smoke-test sgd on f(x) = x**2 from several starting points."""
    # Cost and gradient of the 1-D quadratic: (x^2, 2x); minimum at 0.
    quad = lambda x: (np.sum(x ** 2), x * 2)

    print "Running sanity checks..."
    t1 = sgd(quad, 0.5, 0.01, 1000, PRINT_EVERY=100)
    print "test 1 result:", t1
    assert abs(t1) <= 1e-6

    t2 = sgd(quad, 0.0, 0.01, 1000, PRINT_EVERY=100)
    print "test 2 result:", t2
    assert abs(t2) <= 1e-6

    t3 = sgd(quad, -1.5, 0.01, 1000, PRINT_EVERY=100)
    print "test 3 result:", t3
    assert abs(t3) <= 1e-6

    print ""
def your_sanity_checks():
    """
    Use this space add any additional sanity checks by running:
        python q3_sgd.py
    This function will not be called by the autograder, nor will
    your additional tests be graded.
    """
    print "Running your sanity checks..."
    ### YOUR CODE HERE
    # Placeholder: students replace this with their own checks.
    raise NotImplementedError
    ### END YOUR CODE
if __name__ == "__main__":
    # Run the built-in checks when this file is executed directly.
    sanity_check()
    # your_sanity_checks()
| 26.326389 | 75 | 0.600633 |
f8de76d20d2f1d124a495a1b9a7c5c4462c719dd | 19,517 | py | Python | examples/linformer/linformer_src/modules/multihead_linear_attention.py | indra622/FBK-fairseq | 4357af09ef2ad1594f75a5b7bcc02d5b10cad2e5 | [
"MIT"
] | 2 | 2021-09-14T06:42:08.000Z | 2021-11-09T21:15:18.000Z | examples/linformer/linformer_src/modules/multihead_linear_attention.py | indra622/FBK-fairseq | 4357af09ef2ad1594f75a5b7bcc02d5b10cad2e5 | [
"MIT"
] | null | null | null | examples/linformer/linformer_src/modules/multihead_linear_attention.py | indra622/FBK-fairseq | 4357af09ef2ad1594f75a5b7bcc02d5b10cad2e5 | [
"MIT"
] | 3 | 2021-09-06T10:18:39.000Z | 2021-12-29T10:52:51.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Dict, Optional, Tuple
import torch
import torch.nn.functional as F
from fairseq import utils
from fairseq.incremental_decoding_utils import with_incremental_state
from fairseq.modules.quant_noise import quant_noise
from torch import Tensor, nn
from torch.nn import Parameter
@with_incremental_state
class MultiheadLinearAttention(nn.Module):
    """Multi-headed linformer attention.

    Projects the key and values down to the compressed dimension, before computing self-attention.

    See "Linformer: Self-Attention with Linear Complexity" for more details.
    """

    def __init__(
        self,
        embed_dim,
        num_heads,
        kdim=None,
        vdim=None,
        dropout=0.0,
        bias=True,
        add_bias_kv=False,
        add_zero_attn=False,
        self_attention=False,
        encoder_decoder_attention=False,
        q_noise=0.0,
        qn_block_size=8,
        compressed=1,
        max_seq_len=256,
        shared_kv_compressed=0,
        shared_compress_layer=None,
        freeze_compress=0,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        # Key/value dims default to embed_dim when not given separately.
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), "embed_dim must be divisible by num_heads"
        # 1/sqrt(d_head) scaling applied to q before the dot product.
        self.scaling = self.head_dim ** -0.5

        self.self_attention = self_attention
        self.encoder_decoder_attention = encoder_decoder_attention

        assert not self.self_attention or self.qkv_same_dim, (
            "Self-attention requires query, key and " "value to be of the same size"
        )

        # Input projections, optionally wrapped with quantization noise.
        self.k_proj = quant_noise(
            nn.Linear(self.kdim, embed_dim, bias=bias), q_noise, qn_block_size
        )
        self.v_proj = quant_noise(
            nn.Linear(self.vdim, embed_dim, bias=bias), q_noise, qn_block_size
        )
        self.q_proj = quant_noise(
            nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
        )

        # used for compress sequence to subsequence
        if shared_compress_layer is None:
            # Per-layer compression: project the time axis from max_seq_len
            # down to max_seq_len // compressed.
            self.compress_seq_len = max_seq_len // compressed
            self.compress_k = nn.Linear(max_seq_len, self.compress_seq_len, bias=False)
            if shared_kv_compressed == 0:
                self.compress_v = nn.Linear(
                    max_seq_len, self.compress_seq_len, bias=False
                )
            self.layerwise_sharing = False
        else:
            # NOTE(review): this attribute is only assigned on this branch;
            # nothing in this file reads it — looks vestigial, confirm.
            self.shared_compression_among_layers = True
            # The shared layer is wrapped in a plain list so nn.Module does
            # not register it again as a submodule of every attention layer.
            self.compress_k = [shared_compress_layer]
            if shared_kv_compressed == 0:
                self.compress_v = [shared_compress_layer]
            self.layerwise_sharing = True
        self.shared_kv_compressed = shared_kv_compressed

        self.out_proj = quant_noise(
            nn.Linear(embed_dim, embed_dim, bias=bias), q_noise, qn_block_size
        )

        if add_bias_kv:
            self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))
            self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        self.reset_parameters()
        if freeze_compress == 1:
            # Keep the length-compression projections fixed during training.
            self.compress_k.weight.requires_grad = False
            if shared_kv_compressed == 0:
                self.compress_v.weight.requires_grad = False

        self.onnx_trace = False

    def prepare_for_onnx_export_(self):
        # Switch forward() into ONNX-trace-friendly code paths.
        self.onnx_trace = True

    def reset_parameters(self):
        """(Re-)initialize projection weights with Xavier uniform."""
        if self.qkv_same_dim:
            # Empirically observed the convergence to be much better with
            # the scaled initialization
            nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))
            nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))
            if (
                not self.layerwise_sharing
            ):  # otherwise, we already initialize the parameters
                nn.init.xavier_uniform_(self.compress_k.weight, gain=1 / math.sqrt(2))
                if self.shared_kv_compressed == 0:
                    nn.init.xavier_uniform_(
                        self.compress_v.weight, gain=1 / math.sqrt(2)
                    )
        else:
            nn.init.xavier_uniform_(self.k_proj.weight)
            nn.init.xavier_uniform_(self.v_proj.weight)
            nn.init.xavier_uniform_(self.q_proj.weight)
            if (
                not self.layerwise_sharing
            ):  # otherwise, we already initialize the parameters
                nn.init.xavier_uniform_(self.compress_k.weight)
                if self.shared_kv_compressed == 0:
                    nn.init.xavier_uniform_(self.compress_v.weight)

        nn.init.xavier_uniform_(self.out_proj.weight)
        if self.out_proj.bias is not None:
            nn.init.constant_(self.out_proj.bias, 0.0)
        if self.bias_k is not None:
            nn.init.xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            nn.init.xavier_normal_(self.bias_v)

    def forward(
        self,
        query,
        key: Optional[Tensor],
        value: Optional[Tensor],
        key_padding_mask: Optional[Tensor] = None,
        incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,
        need_weights: bool = True,
        static_kv: bool = False,
        attn_mask: Optional[Tensor] = None,
        before_softmax: bool = False,
        need_head_weights: bool = False,
    ) -> Tuple[Tensor, Optional[Tensor]]:
        """Input shape: Time x Batch x Channel

        Args:
            key_padding_mask (ByteTensor, optional): mask to exclude
                keys that are pads, of shape `(batch, src_len)`, where
                padding elements are indicated by 1s.
            need_weights (bool, optional): return the attention weights,
                averaged over heads (default: False).
            attn_mask (ByteTensor, optional): typically used to
                implement causal attention, where the mask prevents the
                attention from looking forward in time (default: None).
            before_softmax (bool, optional): return the raw attention
                weights and values before the attention softmax.
            need_head_weights (bool, optional): return the attention
                weights for each head. Implies *need_weights*. Default:
                return the average attention weights over all heads.
        """
        if need_head_weights:
            need_weights = True

        # Unwrap the compression layer(s): when shared across layers they
        # are stored inside a one-element list (see __init__).
        if self.layerwise_sharing:
            compress_k = self.compress_k[0]
            if self.shared_kv_compressed == 0:
                compress_v = self.compress_v[0]
        else:
            compress_k = self.compress_k
            if self.shared_kv_compressed == 0:
                compress_v = self.compress_v

        tgt_len, bsz, embed_dim = query.size()
        assert embed_dim == self.embed_dim
        assert list(query.size()) == [tgt_len, bsz, embed_dim]

        if incremental_state is not None:
            saved_state = self._get_input_buffer(incremental_state)
            if saved_state is not None and "prev_key" in saved_state:
                # previous time steps are cached - no need to recompute
                # key and value if they are static
                if static_kv:
                    assert self.encoder_decoder_attention and not self.self_attention
                    key = value = None
        else:
            saved_state = None

        if self.self_attention:
            q = self.q_proj(query)

            # Linformer: compress the time axis (T -> compressed length)
            # before projecting keys; only the first tgt_len columns of the
            # compression weight are used for shorter sequences.
            k_input = query.permute(1, 2, 0).contiguous()  # B * C * T
            k_input = (
                F.linear(k_input, compress_k.weight[:, 0:tgt_len])
                .permute(2, 0, 1)
                .contiguous()
            )
            k = self.k_proj(k_input)

            v_input = query.permute(1, 2, 0).contiguous()  # B * C * T
            if self.shared_kv_compressed == 0:
                v_input = (
                    F.linear(v_input, compress_v.weight[:, 0:tgt_len])
                    .permute(2, 0, 1)
                    .contiguous()
                )
            if self.shared_kv_compressed == 1:  # use shared kv compressed linear layer
                v_input = (
                    F.linear(v_input, compress_k.weight[:, 0:tgt_len])
                    .permute(2, 0, 1)
                    .contiguous()
                )
            v = self.v_proj(v_input)
        elif self.encoder_decoder_attention:
            # encoder-decoder attention
            q = self.q_proj(query)
            if key is None:
                assert value is None
                k = v = None
            else:
                k = self.k_proj(key)
                v = self.v_proj(key)

        else:
            assert key is not None and value is not None
            q = self.q_proj(query)
            k = self.k_proj(key)
            v = self.v_proj(value)
        q *= self.scaling

        if self.bias_k is not None:
            assert self.bias_v is not None
            # Append the learned bias key/value as one extra source position.
            k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )
            if key_padding_mask is not None:
                key_padding_mask = torch.cat(
                    [
                        key_padding_mask,
                        key_padding_mask.new_zeros(key_padding_mask.size(0), 1),
                    ],
                    dim=1,
                )

        # Fold heads into the batch dim: (len, bsz, embed) ->
        # (bsz * num_heads, len, head_dim) for batched bmm.
        q = (
            q.contiguous()
            .view(tgt_len, bsz * self.num_heads, self.head_dim)
            .transpose(0, 1)
        )
        if k is not None:
            k = (
                k.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )
        if v is not None:
            v = (
                v.contiguous()
                .view(-1, bsz * self.num_heads, self.head_dim)
                .transpose(0, 1)
            )

        if saved_state is not None:
            # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
            if "prev_key" in saved_state:
                _prev_key = saved_state["prev_key"]
                assert _prev_key is not None
                prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    k = prev_key
                else:
                    assert k is not None
                    k = torch.cat([prev_key, k], dim=1)
            if "prev_value" in saved_state:
                _prev_value = saved_state["prev_value"]
                assert _prev_value is not None
                prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
                if static_kv:
                    v = prev_value
                else:
                    assert v is not None
                    v = torch.cat([prev_value, v], dim=1)
            prev_key_padding_mask: Optional[Tensor] = None
            if "prev_key_padding_mask" in saved_state:
                prev_key_padding_mask = saved_state["prev_key_padding_mask"]
            assert k is not None and v is not None
            key_padding_mask = MultiheadLinearAttention._append_prev_key_padding_mask(
                key_padding_mask=key_padding_mask,
                prev_key_padding_mask=prev_key_padding_mask,
                batch_size=bsz,
                src_len=k.size(1),
                static_kv=static_kv,
            )

            # Write the (possibly extended) k/v/mask back into the cache.
            saved_state["prev_key"] = k.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_value"] = v.view(bsz, self.num_heads, -1, self.head_dim)
            saved_state["prev_key_padding_mask"] = key_padding_mask
            # In this branch incremental_state is never None
            assert incremental_state is not None
            incremental_state = self._set_input_buffer(incremental_state, saved_state)
        assert k is not None
        src_len = k.size(1)

        if self.add_zero_attn:
            assert v is not None
            # Add one all-zero source position that attention can fall back on.
            src_len += 1
            k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)
            v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)
            if attn_mask is not None:
                attn_mask = torch.cat(
                    [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1
                )

        attn_weights = torch.bmm(q, k.transpose(1, 2))
        attn_weights = MultiheadLinearAttention.apply_sparse_mask(
            attn_weights, tgt_len, src_len, bsz
        )

        assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]

        if attn_mask is not None:
            attn_mask = attn_mask.unsqueeze(0)
            if self.onnx_trace:
                attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)
            attn_weights += attn_mask

        if before_softmax:
            return attn_weights, v

        attn_weights_float = utils.softmax(
            attn_weights, dim=-1, onnx_trace=self.onnx_trace
        )
        attn_weights = attn_weights_float.type_as(attn_weights)
        attn_probs = F.dropout(
            attn_weights,
            p=self.dropout,
            training=self.training,
        )
        assert v is not None
        attn = torch.bmm(attn_probs, v)
        assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]
        if self.onnx_trace and attn.size(1) == 1:
            # when ONNX tracing a single decoder step (sequence length == 1)
            # the transpose is a no-op copy before view, thus unnecessary
            attn = attn.contiguous().view(tgt_len, bsz, embed_dim)
        else:
            attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
        attn = self.out_proj(attn)
        attn_weights: Optional[Tensor] = None
        if need_weights:
            attn_weights = attn_weights_float.view(
                bsz, self.num_heads, tgt_len, src_len
            ).transpose(1, 0)
            if not need_head_weights:
                # average attention weights over heads
                attn_weights = attn_weights.mean(dim=0)

        return attn, attn_weights

    @staticmethod
    def _append_prev_key_padding_mask(
        key_padding_mask: Optional[Tensor],
        prev_key_padding_mask: Optional[Tensor],
        batch_size: int,
        src_len: int,
        static_kv: bool,
    ) -> Optional[Tensor]:
        """Combine the cached padding mask with the current step's mask."""
        # saved key padding masks have shape (bsz, seq_len)
        if prev_key_padding_mask is not None and static_kv:
            new_key_padding_mask = prev_key_padding_mask
        elif prev_key_padding_mask is not None and key_padding_mask is not None:
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1
            )
        # During incremental decoding, as the padding token enters and
        # leaves the frame, there will be a time when prev or current
        # is None
        elif prev_key_padding_mask is not None:
            # Pad the missing current-step portion with zeros (no padding).
            filler = torch.zeros(
                (batch_size, src_len - prev_key_padding_mask.size(1)),
                device=prev_key_padding_mask.device,
            )
            new_key_padding_mask = torch.cat(
                [prev_key_padding_mask.float(), filler.float()], dim=1
            )
        elif key_padding_mask is not None:
            filler = torch.zeros(
                (batch_size, src_len - key_padding_mask.size(1)),
                device=key_padding_mask.device,
            )
            new_key_padding_mask = torch.cat(
                [filler.float(), key_padding_mask.float()], dim=1
            )
        else:
            new_key_padding_mask = prev_key_padding_mask
        return new_key_padding_mask

    @torch.jit.export
    def reorder_incremental_state(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        new_order: Tensor,
    ):
        """Reorder buffered internal state (for incremental generation)."""
        input_buffer = self._get_input_buffer(incremental_state)
        if input_buffer is not None:
            for k in input_buffer.keys():
                input_buffer_k = input_buffer[k]
                if input_buffer_k is not None:
                    # Cross-attention buffers already match the new batch
                    # order in this case, so nothing needs reordering.
                    if self.encoder_decoder_attention and input_buffer_k.size(
                        0
                    ) == new_order.size(0):
                        break
                    input_buffer[k] = input_buffer_k.index_select(0, new_order)
            incremental_state = self._set_input_buffer(incremental_state, input_buffer)
        return incremental_state

    def _get_input_buffer(
        self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]
    ) -> Dict[str, Optional[Tensor]]:
        # Fetch this module's cached attn state; empty dict when absent.
        result = self.get_incremental_state(incremental_state, "attn_state")
        if result is not None:
            return result
        else:
            empty_result: Dict[str, Optional[Tensor]] = {}
            return empty_result

    def _set_input_buffer(
        self,
        incremental_state: Dict[str, Dict[str, Optional[Tensor]]],
        buffer: Dict[str, Optional[Tensor]],
    ):
        # Store this module's attn state back into the incremental cache.
        return self.set_incremental_state(incremental_state, "attn_state", buffer)

    # NOTE: defined without @staticmethod/self but always invoked as
    # MultiheadLinearAttention.apply_sparse_mask(...), so it behaves as a
    # static no-op hook that subclasses may override.
    def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):
        return attn_weights

    def upgrade_state_dict_named(self, state_dict, name):
        """Split legacy fused in_proj_weight/bias into q/k/v projections."""
        prefix = name + "." if name != "" else ""
        items_to_add = {}
        keys_to_remove = []
        for k in state_dict.keys():
            if k.endswith(prefix + "in_proj_weight"):
                # in_proj_weight used to be q + k + v with same dimensions
                dim = int(state_dict[k].shape[0] / 3)
                items_to_add[prefix + "q_proj.weight"] = state_dict[k][:dim]
                items_to_add[prefix + "k_proj.weight"] = state_dict[k][dim : 2 * dim]
                items_to_add[prefix + "v_proj.weight"] = state_dict[k][2 * dim :]

                keys_to_remove.append(k)

                k_bias = prefix + "in_proj_bias"
                if k_bias in state_dict.keys():
                    dim = int(state_dict[k].shape[0] / 3)
                    items_to_add[prefix + "q_proj.bias"] = state_dict[k_bias][:dim]
                    items_to_add[prefix + "k_proj.bias"] = state_dict[k_bias][
                        dim : 2 * dim
                    ]
                    items_to_add[prefix + "v_proj.bias"] = state_dict[k_bias][2 * dim :]

                    keys_to_remove.append(prefix + "in_proj_bias")

        for k in keys_to_remove:
            del state_dict[k]

        for key, value in items_to_add.items():
            state_dict[key] = value
| 39.749491 | 98 | 0.574935 |
5bd3852298e5b6a58c4c363c041983004b7c0170 | 1,478 | bzl | Python | tools/snapshot.bzl | xn3cr0nx/entangled | 92d03837a4e966600729521f9315750d509084bc | [
"Apache-2.0"
] | null | null | null | tools/snapshot.bzl | xn3cr0nx/entangled | 92d03837a4e966600729521f9315750d509084bc | [
"Apache-2.0"
] | null | null | null | tools/snapshot.bzl | xn3cr0nx/entangled | 92d03837a4e966600729521f9315750d509084bc | [
"Apache-2.0"
] | null | null | null | load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
SNAPSHOT_REPOSITORY = "https://raw.githubusercontent.com/iotaledger/snapshots/master"
SNAPSHOT_TIMESTAMP_MAINNET = "20181222"
SNAPSHOT_TIMESTAMP_TESTNET = "20180329"
def fetch_snapshot_files():
    """Registers http_file repositories for the pinned IOTA snapshot artifacts."""
    # (repo name, network, snapshot timestamp, file, sha256)
    snapshot_files = [
        ("snapshot_conf_mainnet", "mainnet", SNAPSHOT_TIMESTAMP_MAINNET, "snapshot.json",
         "e51ca0324704d79655962b63c90f76bfbae12f35f72124d7608b57fdeafd5caf"),
        ("snapshot_sig_mainnet", "mainnet", SNAPSHOT_TIMESTAMP_MAINNET, "snapshot.sig",
         "603f34ea67b03a058bc33c216d0778743072f398afb42e63604ce636b1465cd2"),
        ("snapshot_mainnet", "mainnet", SNAPSHOT_TIMESTAMP_MAINNET, "snapshot.txt",
         "f1879f2dc823064aeff256e7777d614d340fd7280d7a1421739bc3d394c2ed8b"),
        ("snapshot_conf_testnet", "testnet", SNAPSHOT_TIMESTAMP_TESTNET, "snapshot.json",
         "afec30dfa697852ee12d8c183f86c9d01882c4f9d7d8c4cc58a94488b6d73068"),
        ("snapshot_testnet", "testnet", SNAPSHOT_TIMESTAMP_TESTNET, "snapshot.txt",
         "713bbe5e986bbaccae44800aac2f5bdd0f3edc1246e31660a19a8be8265b63b4"),
    ]
    for name, network, timestamp, filename, sha256 in snapshot_files:
        http_file(
            name = name,
            urls = [SNAPSHOT_REPOSITORY + "/" + network + "/" + timestamp + "/" + filename],
            sha256 = sha256,
        )
| 44.787879 | 99 | 0.716509 |
7705f7d9c8e06c76e98fd28276f85fe3fe1202e5 | 1,000 | py | Python | inlineplz/parsers/gherkinlint.py | kyleburton/inline-plz | 701b13e17ea0c3849b96c4746d7c0d2672fc78e4 | [
"ISC"
] | null | null | null | inlineplz/parsers/gherkinlint.py | kyleburton/inline-plz | 701b13e17ea0c3849b96c4746d7c0d2672fc78e4 | [
"ISC"
] | null | null | null | inlineplz/parsers/gherkinlint.py | kyleburton/inline-plz | 701b13e17ea0c3849b96c4746d7c0d2672fc78e4 | [
"ISC"
] | null | null | null | from __future__ import absolute_import
from __future__ import unicode_literals
import traceback
import dirtyjson as json
from inlineplz.parsers.base import ParserBase
class GherkinLintParser(ParserBase):
    """Parse json gherkin-lint output."""

    def parse(self, lint_data):
        """Translate gherkin-lint JSON output into (path, line, message) tuples."""
        results = set()
        try:
            for report in json.loads(lint_data):
                path = report.get("filePath")
                if not (report.get("errors") and path):
                    continue
                for error in report["errors"]:
                    try:
                        results.add((path, error["line"], error["message"]))
                    except (ValueError, KeyError):
                        print("Invalid message: {0}".format(error))
        except ValueError:
            # Unparseable lint output: log it and return whatever we have.
            print(traceback.format_exc())
            print(lint_data)
        return results
| 32.258065 | 73 | 0.548 |
4591939016acbf0d7446066692d0cd4efe2705fe | 48 | py | Python | src/genie/libs/parser/iosxe/tests/ShowSdwanVersion/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/iosxe/tests/ShowSdwanVersion/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/iosxe/tests/ShowSdwanVersion/cli/equal/golden_output_expected.py | balmasea/genieparser | d1e71a96dfb081e0a8591707b9d4872decd5d9d3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | expected_output = {"version": "10.145.1.0.214"}
| 24 | 47 | 0.666667 |
c0895c5b81fdda7d52c10e680827f50749204b7b | 698 | py | Python | cav_box/kafka/src/server.py | usdot-fhwa-stol/cav-education | 6e3698c6322a8115dac9437a6dbd6cadde891477 | [
"Apache-2.0"
] | null | null | null | cav_box/kafka/src/server.py | usdot-fhwa-stol/cav-education | 6e3698c6322a8115dac9437a6dbd6cadde891477 | [
"Apache-2.0"
] | 2 | 2021-01-29T21:47:23.000Z | 2021-02-09T14:30:02.000Z | cav_box/kafka/src/server.py | usdot-fhwa-stol/cav-education | 6e3698c6322a8115dac9437a6dbd6cadde891477 | [
"Apache-2.0"
] | 5 | 2021-01-19T21:47:22.000Z | 2021-06-07T13:19:08.000Z | import socket
import socketserver
from tcp_handler.handler import TCPHandler
import os
import logging
if __name__ == "__main__":
    # Log verbosity is environment-driven (LOGLEVEL, default WARNING) so
    # deployments can tune it without code changes.
    LOGLEVEL = os.environ.get('LOGLEVEL', 'WARNING').upper()
    logging.basicConfig(level=LOGLEVEL)

    # Bind address/port also come from the environment; by default listen on
    # all interfaces, port 8882.
    DSRC_LISTENER_HOST = os.getenv('DSRC_LISTENER_HOST', "0.0.0.0")
    DSRC_LISTENER_PORT = os.getenv('DSRC_LISTENER_PORT', 8882)
    HOST, PORT = DSRC_LISTENER_HOST, int(DSRC_LISTENER_PORT)

    # Create the TCP server bound to HOST:PORT; TCPHandler services each connection.
    with socketserver.TCPServer((HOST, PORT), TCPHandler) as server:
        # Activate the server; this will keep running until you
        # interrupt the program with Ctrl-C
        server.serve_forever()
a6613cebcb76c92ab1f8105df96557908c9f683f | 4,827 | py | Python | samcli/commands/deploy/guided_config.py | michael-k/aws-sam-cli | a8525fc8157d507c4b102477ded4d221deaed145 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | samcli/commands/deploy/guided_config.py | michael-k/aws-sam-cli | a8525fc8157d507c4b102477ded4d221deaed145 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | samcli/commands/deploy/guided_config.py | michael-k/aws-sam-cli | a8525fc8157d507c4b102477ded4d221deaed145 | [
"BSD-2-Clause",
"Apache-2.0"
] | null | null | null | """
Set of Utilities to deal with reading/writing to configuration file during sam deploy
"""
import click
from samcli.cli.context import get_cmd_names
from samcli.commands.deploy.exceptions import GuidedDeployFailedError
from samcli.lib.config.samconfig import SamConfig, DEFAULT_ENV, DEFAULT_CONFIG_FILE_NAME
class GuidedConfig:
    """Reads and persists ``sam deploy --guided`` answers in samconfig.toml."""

    def __init__(self, template_file, section):
        # template_file: path to the SAM template; used to locate samconfig.toml.
        # section: config-file section the deploy parameters are written under.
        self.template_file = template_file
        self.section = section

    def get_config_ctx(self, config_file=None):
        """Return the current click context and a SamConfig handle.

        The config directory comes from the click context when the CLI set
        one; otherwise it is derived from the template file's location.
        """
        ctx = click.get_current_context()

        samconfig_dir = getattr(ctx, "samconfig_dir", None)
        samconfig = SamConfig(
            config_dir=samconfig_dir if samconfig_dir else SamConfig.config_dir(template_file_path=self.template_file),
            filename=config_file or DEFAULT_CONFIG_FILE_NAME,
        )
        return ctx, samconfig

    def read_config_showcase(self, config_file=None):
        """Print whether the config file exists and whether it parses cleanly.

        Raises:
            GuidedDeployFailedError: the file exists but is not valid TOML.
        """
        _, samconfig = self.get_config_ctx(config_file)

        status = "Found" if samconfig.exists() else "Not found"
        msg = (
            "Syntax invalid in samconfig.toml; save values "
            "through sam deploy --guided to overwrite file with a valid set of values."
        )
        config_sanity = samconfig.sanity_check()
        click.secho("\nConfiguring SAM deploy\n======================", fg="yellow")
        click.echo(f"\n\tLooking for config file [{config_file}] : {status}")
        if samconfig.exists():
            click.echo("\tReading default arguments : {}".format("Success" if config_sanity else "Failure"))

        # Only fail when a config file is present but unparseable; a missing
        # file is fine (guided deploy will create one).
        if not config_sanity and samconfig.exists():
            raise GuidedDeployFailedError(msg)

    def save_config(
        self,
        parameter_overrides,
        config_env=DEFAULT_ENV,
        config_file=None,
        signing_profiles=None,
        image_repositories=None,
        **kwargs,
    ):
        """Persist guided-deploy answers to the config file.

        ``kwargs`` carries the remaining deploy options; list/tuple values
        are stored space-joined, and falsy values are skipped entirely.
        """
        ctx, samconfig = self.get_config_ctx(config_file)

        cmd_names = get_cmd_names(ctx.info_name, ctx)

        for key, value in kwargs.items():
            if isinstance(value, (list, tuple)):
                value = " ".join(val for val in value)
            if value:
                samconfig.put(cmd_names, self.section, key, value, env=config_env)

        self._save_parameter_overrides(cmd_names, config_env, parameter_overrides, samconfig)
        self._save_image_repositories(cmd_names, config_env, samconfig, image_repositories)
        self._save_signing_profiles(cmd_names, config_env, samconfig, signing_profiles)

        samconfig.flush()

        click.echo("\n\tSaved arguments to config file")
        click.echo("\tRunning 'sam deploy' for future deployments will use the parameters saved above.")
        click.echo("\tThe above parameters can be changed by modifying samconfig.toml")
        click.echo(
            "\tLearn more about samconfig.toml syntax at "
            "\n\thttps://docs.aws.amazon.com/serverless-application-model/latest/"
            "developerguide/serverless-sam-cli-config.html\n"
        )

    def _save_signing_profiles(self, cmd_names, config_env, samconfig, signing_profiles):
        # Serialized as space-joined "func=\"profile\"" or
        # "func=\"profile:owner\"" pairs.
        if signing_profiles:
            _params = []
            for key, value in signing_profiles.items():
                if value.get("profile_owner", None):
                    signing_profile_with_owner = f"{value['profile_name']}:{value['profile_owner']}"
                    _params.append(f"{key}={self.quote_parameter_values(signing_profile_with_owner)}")
                else:
                    _params.append(f"{key}={self.quote_parameter_values(value['profile_name'])}")
            if _params:
                samconfig.put(cmd_names, self.section, "signing_profiles", " ".join(_params), env=config_env)

    def _save_parameter_overrides(self, cmd_names, config_env, parameter_overrides, samconfig):
        # Values may be plain strings or {"Value": ..., "Hidden": bool}
        # dicts; hidden entries are never written to disk.
        if parameter_overrides:
            _params = []
            for key, value in parameter_overrides.items():
                if isinstance(value, dict):
                    if not value.get("Hidden"):
                        _params.append(f"{key}={self.quote_parameter_values(value.get('Value'))}")
                else:
                    _params.append(f"{key}={self.quote_parameter_values(value)}")
            if _params:
                samconfig.put(cmd_names, self.section, "parameter_overrides", " ".join(_params), env=config_env)

    def _save_image_repositories(self, cmd_names, config_env, samconfig, image_repositories):
        # Stored as a list of "function_id=repository_uri" strings.
        if image_repositories:
            _image_repositories = [f"{key}={value}" for key, value in image_repositories.items()]
            samconfig.put(cmd_names, self.section, "image_repositories", _image_repositories, env=config_env)

    def quote_parameter_values(self, parameter_value):
        """Wrap a value in double quotes so it round-trips through TOML/CLI."""
        return '"{}"'.format(parameter_value)
| 43.881818 | 119 | 0.653201 |
c88dafccb186d433fae0e1bcb3847fa32b38035d | 3,909 | py | Python | consulalerting/utilities.py | jrxFive/consulalerting | 714fb2e7f76267864551dd9f3925bfe0325f62c2 | [
"MIT"
] | 10 | 2015-08-25T21:12:58.000Z | 2021-04-17T06:45:19.000Z | consulalerting/utilities.py | jrxFive/Consul-Alerting | 714fb2e7f76267864551dd9f3925bfe0325f62c2 | [
"MIT"
] | 6 | 2015-11-02T00:56:38.000Z | 2016-04-12T20:13:08.000Z | consulalerting/utilities.py | jrxFive/Consul-Alerting | 714fb2e7f76267864551dd9f3925bfe0325f62c2 | [
"MIT"
] | 5 | 2015-10-28T00:56:49.000Z | 2021-04-17T06:45:22.000Z | import consulate
import hashlib
import requests
import json
import settings
from ConsulHealthStruct import ConsulHealthStruct
def currentState():
    """Return the current health state of every check ("any") from Consul.

    Implicitly returns None when the catalog cannot be fetched.
    """
    try:
        current = settings.consul.health.state("any")
        settings.logger.debug("CurrentConsulHealth={health}".format(health=current))
        return current
    except KeyError:
        # Fixed log message: "Could no obtain" typo and the missing space
        # before "ConsulURI" (the two literals used to concatenate with no
        # separator).
        settings.logger.error("Message=Could not obtain current catalog from consul "
                              "ConsulURI={u}".format(u=settings.consul._base_uri))
def priorState(key):
    """Return the previously stored health catalog JSON under *key*.

    Falls back to an empty list when the key is missing or the stored value
    is not valid JSON (best-effort: first run has no prior state).
    """
    try:
        prior = settings.consul.kv[key]
        settings.logger.debug("PriorConsulHealth={health}".format(
            health=prior))
        return json.loads(prior)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit are
        # no longer swallowed, while keeping the best-effort fallback.
        prior = []
        settings.logger.warn("Message=No previous prior catalog health found from "
                             "ConsulURI={l}".format(l=key))
        return prior
def getCheckTags(key):
    """Return the JSON-decoded health-check tags stored under *key*.

    Falls back to an empty list when the key is missing or unparseable.
    """
    try:
        tags = settings.consul.kv[key]
        settings.logger.debug("HealthCheckTags={tags}".format(
            tags=tags))
        return json.loads(tags)
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit are
        # no longer swallowed, while keeping the best-effort fallback.
        tags = []
        settings.logger.warn("Message=Could not obtain system check tags from "
                             "ConsulURI={l}".format(l=key))
        return tags
def createSession():
    # Short-TTL Consul session whose keys/locks are deleted when it expires,
    # so a crashed alerting run cannot hold the lock forever.
    return settings.consul.session.create(ttl='10s', delay='0s', behavior='delete')
def getHash(currentState):
    """Return the hex MD5 digest of ``str(currentState)``.

    NOTE(review): the parameter name shadows the module-level
    currentState() function; kept for backward compatibility with any
    keyword callers.
    """
    # .encode() makes this work on Python 3, where md5() requires bytes;
    # for the ASCII repr produced by str() on Python 2 it is a no-op.
    return hashlib.md5(str(currentState).encode('utf-8')).hexdigest()
def checkForKey(key):
    # Membership test against the Consul KV store.
    return key in settings.consul.kv
def putKey(key, value):
    """Best-effort write of *value* to the Consul KV store.

    Failures are deliberately tolerated (alerting should not crash on a
    transient KV error), but they are now logged instead of silently
    dropped by a bare ``except: pass``.
    """
    try:
        settings.consul.kv[key] = value
    except Exception:
        settings.logger.warn(
            "Message=Could not put key ConsulURI={l}".format(l=key))
def acquireLock(key, session_id):
    # True when *session_id* now holds the lock at *key*.
    return settings.consul.kv.acquire_lock(key, session_id)
def releaseLock(key, session_id):
    # Counterpart of acquireLock(); releases the lock held at *key*.
    return settings.consul.kv.release_lock(key, session_id)
def getBlacklist(key):
    """Return the node blacklist stored under *key*, or [] on failure.

    Also treats malformed JSON (ValueError from json.loads) as "no
    blacklist", matching priorState()/getCheckTags(); previously a bad
    value would raise out of this helper.
    """
    try:
        bl = settings.consul.kv[key]
        settings.logger.debug("Key={k} Tags={t}".format(k=key, t=bl))
        return json.loads(bl)
    except (KeyError, ValueError):
        settings.logger.warn("Message=Could not obtain node blacklist from "
                             "ConsulURI={location}".format(location=key))
        return []
def createConsulHealthList(object_list):
    """
    Creates a list of ConsulHealthStruct
    """
    try:
        # Each element must be a mapping of ConsulHealthStruct fields;
        # anything else makes the ** expansion raise TypeError.
        object_list = [ConsulHealthStruct(**obj) for obj in object_list]

        settings.logger.debug("ConsulHealthList={lo}".format(lo=object_list))

        return object_list

    except TypeError:
        settings.logger.error("Message=createConsulHealthList failed, "
                              "object_list needs to be iterable")
        raise
def getHashStateSet(object_list, state):
    """
    Used to compare prior node state to current
    """
    # Set comprehension instead of set([...]); same elements, same type.
    return {hash(entry) for entry in object_list if entry.Status == state}
def getObjectListByState(object_list, state):
    """
    Filter a list of ConsulHealtNodeStruct by state

    States: passing,warning,critical,unknown

    Returns a list on both Python 2 and 3; the previous filter() call
    returned a lazy iterator under Python 3, surprising callers that
    expected a list (per this docstring).
    """
    return [obj for obj in object_list if obj.Status == state]
def common_notifiers(obj, kv_tags_dictname, kv_dict):
    # Notifier names configured under kv_dict[kv_tags_dictname] that also
    # appear among the object's tags.
    configured = set(kv_dict[kv_tags_dictname])
    return configured.intersection(obj.Tags)
def load_plugin(KV_LOCATION, tags_dictname=None):
    """Load a notifier plugin config from Consul KV, lower-casing its keys.

    If *tags_dictname* is given, the keys of that nested dict are
    lower-cased as well.
    """
    # get request to 0.0.0.0:8500/v1/kv/notify/<plugin_name>
    # which routes to consul master
    plugin = json.loads(settings.consul.kv[KV_LOCATION])

    # Convert Keys to lower case
    plugin = _dict_keys_to_low(plugin)

    if tags_dictname:
        # .items() works on Python 2 and 3 (was py2-only .iteritems()).
        plugin[tags_dictname] = dict((key.lower(), value) for key,
                                     value in plugin[tags_dictname].items())
    return plugin
def _dict_keys_to_low(dictionary):
dict_keys_lowercase = dict((key.lower(), value)
for key, value in dictionary.iteritems())
return dict_keys_lowercase
| 24.898089 | 84 | 0.651062 |
4b4c8fab786e0e826b5a76980dc69112b93e7255 | 5,613 | py | Python | DeepEBM/ESM/data.py | taufikxu/FD-ScoreMatching | 9df0789bb98bb798b3de57072f63ee4b2f19947f | [
"MIT"
] | 12 | 2020-05-23T10:02:12.000Z | 2021-03-25T19:54:00.000Z | DeepEBM/ESM/data.py | taufikxu/FD-ScoreMatching | 9df0789bb98bb798b3de57072f63ee4b2f19947f | [
"MIT"
] | 6 | 2021-03-19T15:30:28.000Z | 2022-03-12T00:51:16.000Z | DeepEBM/ESM/data.py | taufikxu/FD-ScoreMatching | 9df0789bb98bb798b3de57072f63ee4b2f19947f | [
"MIT"
] | 4 | 2020-11-04T03:52:45.000Z | 2021-12-28T16:07:08.000Z | import torch
import numpy as np
from torchvision import transforms, datasets
import PIL.Image
import torch.nn.functional as F
def inf_train_gen_imagenet(batch_size, flip=True, train=True, infinity=True):
    """Yield batches of ImageNet images (labels dropped), center-cropped to 128.

    flip: add a random horizontal flip to the transform.
    train: select the "train" or "val" split.
    infinity: loop over the dataset forever instead of one epoch.
    """
    if flip:
        transf = transforms.Compose(
            [
                transforms.CenterCrop(128),
                transforms.RandomHorizontalFlip(p=0.5),
                transforms.ToTensor(),
            ]
        )
    else:
        transf = transforms.ToTensor()

    if train is True:
        split = "train"
    else:
        split = "val"

    loader = torch.utils.data.DataLoader(
        # Bug fix: `split` was computed but the loader hard-coded
        # split="train", so train=False still yielded training images.
        datasets.ImageNet("/home/LargeData/ImageNet/", split=split, transform=transf),
        batch_size,
        drop_last=True,
        shuffle=True,
        num_workers=8,
    )
    if infinity is True:
        while True:
            for img, labels in loader:
                yield img
    else:
        for img, labels in loader:
            yield img
def inf_train_gen_cifar(batch_size, flip=True, train=True, infinity=True):
    """Yield batches of CIFAR-10 images (labels dropped).

    flip: add a random horizontal flip; train/infinity pick the split and
    whether to loop over the data forever.
    """
    if flip:
        transf = transforms.Compose(
            [transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor()]
        )
    else:
        transf = transforms.ToTensor()

    dataset = datasets.CIFAR10(
        "/home/LargeData/cifar/", train=train, download=True, transform=transf
    )
    loader = torch.utils.data.DataLoader(
        dataset, batch_size, drop_last=True, shuffle=True, num_workers=8
    )

    if infinity is True:
        while True:
            for img, _labels in loader:
                yield img
    else:
        for img, _labels in loader:
            yield img
class NumpyImageDataset(torch.utils.data.Dataset):
    """Dataset over an in-memory array of images stored channel-first (CHW)."""

    def __init__(self, imgs, transform=None):
        self.imgs = imgs
        self.transform = transform

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, index):
        # Stored layout is CHW; PIL expects HWC, so transpose before wrapping.
        arr = np.array(self.imgs[index]).transpose([1, 2, 0])
        img = PIL.Image.fromarray(arr)
        if self.transform is not None:
            img = self.transform(img)
        return img
def inf_train_gen_celeba(batch_size, flip=True, train=True, infinity=True):
    """Yield 64x64 CelebA image batches from the preprocessed .npy dump.

    NOTE(review): `train` is accepted for signature parity with the other
    generators but is unused here — the .npy dump has no split.
    """
    if flip:
        transf = transforms.Compose(
            [transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor()]
        )
    else:
        transf = transforms.ToTensor()

    dataset = NumpyImageDataset(np.load("/home/LargeData/celebA_64x64.npy"), transf)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size, drop_last=True, shuffle=True, num_workers=8
    )

    if infinity is True:
        while True:
            for img in loader:
                yield img
    else:
        for img in loader:
            yield img
def inf_train_gen_celeba32(batch_size, flip=True, train=True, infinity=True):
    """Yield CelebA batches downsampled 64x64 -> 32x32 by strided slicing.

    NOTE(review): `train` is accepted for signature parity but unused.
    """
    if flip:
        transf = transforms.Compose(
            [transforms.RandomHorizontalFlip(p=0.5), transforms.ToTensor()]
        )
    else:
        transf = transforms.ToTensor()

    dataset = NumpyImageDataset(np.load("/home/LargeData/celebA_64x64.npy"), transf)
    loader = torch.utils.data.DataLoader(
        dataset, batch_size, drop_last=True, shuffle=True, num_workers=8
    )

    if infinity is True:
        while True:
            for img in loader:
                # Every second pixel in H and W: cheap 2x downsample.
                yield img[:, :, ::2, ::2]
    else:
        for img in loader:
            yield img[:, :, ::2, ::2]
def inf_train_gen_mnist(batch_size, train=True, infinity=True):
    """Yield MNIST image batches, zero-padded from 28x28 to 32x32."""
    dataset = datasets.MNIST(
        "/home/LargeData/", train=train, download=True, transform=transforms.ToTensor()
    )
    loader = torch.utils.data.DataLoader(
        dataset, batch_size, drop_last=True, shuffle=True, num_workers=8
    )

    if infinity is True:
        while True:
            for img, _labels in loader:
                # Pad left/right/top/bottom by 2 so images are 32x32.
                yield F.pad(img, [2, 2, 2, 2])
    else:
        for img, _labels in loader:
            yield F.pad(img, [2, 2, 2, 2])
def inf_train_gen_fashionmnist(batch_size, train=True, infinity=True):
    """Yield FashionMNIST image batches, zero-padded from 28x28 to 32x32."""
    dataset = datasets.FashionMNIST(
        "/home/LargeData/", train=train, download=True, transform=transforms.ToTensor()
    )
    loader = torch.utils.data.DataLoader(
        dataset, batch_size, drop_last=True, shuffle=True, num_workers=8
    )

    if infinity is True:
        while True:
            for img, _labels in loader:
                # Pad left/right/top/bottom by 2 so images are 32x32.
                yield F.pad(img, [2, 2, 2, 2])
    else:
        for img, _labels in loader:
            yield F.pad(img, [2, 2, 2, 2])
def inf_train_gen_svhn(batch_size, train=True, infinity=True):
    """Yield SVHN image batches (labels dropped); split picked by `train`."""
    split = "train" if train is True else "test"
    dataset = datasets.SVHN(
        "/home/LargeData/svhn", split=split, download=True, transform=transforms.ToTensor()
    )
    loader = torch.utils.data.DataLoader(
        dataset, batch_size, drop_last=True, shuffle=True, num_workers=8
    )

    if infinity is True:
        while True:
            for img, _labels in loader:
                yield img
    else:
        for img, _labels in loader:
            yield img
| 26.601896 | 88 | 0.563157 |
20aeb7ac5c4b8fd70d09fd289d09318856df3c52 | 8,041 | py | Python | genomics/sequence.py | IC-Induja/genomics | 84bba3b54dc8f1f81c611b405070d10fc797ab0b | [
"MIT"
] | null | null | null | genomics/sequence.py | IC-Induja/genomics | 84bba3b54dc8f1f81c611b405070d10fc797ab0b | [
"MIT"
] | null | null | null | genomics/sequence.py | IC-Induja/genomics | 84bba3b54dc8f1f81c611b405070d10fc797ab0b | [
"MIT"
] | null | null | null | """ Contains the base Sequence class, along with its derivatives:
DNASequence, RNASequence, and AminoAcidSequence
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from .constants import CODON_TABLE
class Sequence(object):
    """Abstract sequence class.

    # Properties
        id (str): Identifier. Must be unique within a genome. If none is provided, uses a uuid.
        id_type (str): Source of identifier.
        data (str): Sequence contents.
        start_index (int): Index representing first position in data. Default 1 (as 1-indexing is common in biology).
    """
    # NOTE: HOW to handle sequences w/ multiple (2) strands? find/count/etc don't work...

    # TODO: implement
    def __init__(self, seq, **kwargs):
        allowed_kwargs = {'id', 'id_type', 'index'}
        raise NotImplementedError()

    # TODO: implement
    @classmethod
    def from_file(cls, filepath, format=None, **kwargs):
        """Reads sequence data from file.

        Accepts FASTA (.fa/.fasta, first record only for multiline FASTA),
        Genbank (.gbk), GFF (.gff/.gff3), 2bit, nibble (.nib) and plain
        text (.txt). Annotation data in the file is ignored.

        Fix: `cls` added — @classmethod passes the class as the first
        positional argument, which previously bound to `filepath`.

        # Arguments
            filepath (str): Location of file (path or URL).
            format (str): Format of contents; inferred from extension if omitted.

        # Returns
            Sequence: sequence contained in file
        """
        allowed_kwargs = {'accepted_chars', 'id', 'id_type', 'index'}
        raise NotImplementedError()

    # TODO: implement
    @classmethod
    def from_db(cls, id, db="ncbi"):
        """Construct a sequence by fetching *id* from a public database
        (NCBI, EMBL, or DDBJ). (`cls` added, as in from_file.)

        # Returns
            Sequence: Sequence retrieved from database.
        """
        raise NotImplementedError()

    def __len__(self):
        """Sequence length.

        # Returns
            int: length
        """
        return len(self.data)

    def __repr__(self):
        # Fix: was `str(__len__(self))` — a NameError at call time; use len().
        # NOTE(review): the slicing (self[0:20]) still requires a
        # __getitem__, which subclasses must provide.
        return "Sequence<id:" + self.id + " " + str(len(self)) + "bp " \
            + " " + self[0:20] + "..." + self[-20:-1] + ">"

    # TODO: implement
    # NOTE: should we allow for subsequences to keep their original indexing? or
    # should we have all the sequence methods take a start and stop argument?
    def get_subsequence(self, **kwargs):
        """Gets subset of sequence (inclusive of both start and end indices).

        # Arguments
            start (int): index of first element; defaults to sequence start.
            stop (int): index of last element; defaults to sequence end.
            reindex (bool): if true, re-index the subsequence from 1;
                otherwise keep the original positions.
        """
        allowed_kwargs = {'start', 'stop', 'reindex', 'index'}
        raise NotImplementedError()

    # TODO: implement
    def find(self, query, strand=None):
        """locates query in sequence.

        # Arguments
            query (string): string to search for

        # Returns
            Region: location of first match on sequence
        """
        raise NotImplementedError()

    # TODO: implement
    def count(self, query):
        raise NotImplementedError()

    # TODO: implement
    def find_pattern(self, pattern, all=False):
        raise NotImplementedError()

    # TODO: implement
    def count_pattern(self, pattern, include_overlapping=False):
        raise NotImplementedError()

    def compute_distance(self, regionA, regionB, fn="levhenstein"):
        raise NotImplementedError()

    def apply_variant(self, variant):
        raise NotImplementedError()
def compute_sequence_distance(sequenceA, sequenceB, fn="levhenstein"):
    # Module-level whole-sequence distance (cf. Sequence.compute_distance,
    # which works on regions). "levhenstein" (sic) names the intended
    # Levenshtein metric.
    raise NotImplementedError()
class DNASequence(Sequence):
    """DNA sequence (optionally double-stranded).

    # Properties
        id (str): Identifier. Must be unique within a genome. If none is provided, uses a uuid.
        id_type (str): Source of identifier.
        data (str): Sequence contents.
        start_index (int): Index representing first position in data. Default 1 (as 1-indexing is common in biology).
        is_double_stranded (bool): Indicates whether sequence has complementary strand. Default True.
    """
    # TODO: implement
    def __init__(self, seq, **kwargs):
        raise NotImplementedError()

    # TODO: implement
    def __repr__(self):
        raise NotImplementedError()

    # NOTE: see if find can find overlapping matches
    # TODO: implement
    def find(self, query, strand=None):
        """locates query in sequence.

        # Arguments
            query (string): string to search for
            strand (string): One of {'+', '-'}. If provided, find will only search
                the given strand.

        # Returns
            Region: location of first match on sequence
        """
        raise NotImplementedError()

    # TODO: implement
    def count(self, query):
        raise NotImplementedError()

    # TODO: implement
    def find_pattern(self, pattern, all=False):
        raise NotImplementedError()

    # TODO: implement
    def count_pattern(self, pattern, include_overlapping=False):
        raise NotImplementedError()

    # TODO: implement
    def get_dna(self, region):
        raise NotImplementedError()

    # TODO: implement
    def get_rna(self, region): ## CDS??
        raise NotImplementedError()

    # TODO: implement
    def get_aa(self, region, codon_table=CODON_TABLE):
        # Translation needs whole codons: region length must divide by 3.
        if len(region) % 3 != 0:
            raise ValueError('Length of region must be multiple of 3')
        raise NotImplementedError()

    # TODO: implement
    def to_rna(self):
        raise NotImplementedError()

    def to_aa(self, codon_table=CODON_TABLE):
        # Same whole-codon requirement as get_aa, applied to the full sequence.
        if len(self) % 3 != 0:
            raise ValueError('Length of sequence must be multiple of 3')
        raise NotImplementedError()

    def get_genes(self):
        raise NotImplementedError()

    def get_transcripts(self):
        raise NotImplementedError()
class RNASequence(Sequence):
    """RNA sequence. All methods are unimplemented stubs.

    NOTE(review): the translation accessor is named get_amino_acids here
    but get_aa on DNASequence — presumably these should match; confirm
    before implementing.
    """
    def __init__(self, seq, **kwargs):
        raise NotImplementedError()

    def __repr__(self):
        raise NotImplementedError()

    def get_dna(self, region):
        raise NotImplementedError()

    def get_rna(self, region):
        raise NotImplementedError()

    def get_amino_acids(self, region, codon_table=CODON_TABLE):
        # Whole-codon requirement, consistent with DNASequence.get_aa.
        if len(region) % 3 != 0:
            raise ValueError('Length of region must be multiple of 3') # mention how many were left
        raise NotImplementedError()

    def to_dna(self):
        raise NotImplementedError()

    def to_aa(self, codon_table=CODON_TABLE):
        if len(self) % 3 != 0:
            raise ValueError('Length of sequence must be multiple of 3')
        raise NotImplementedError()
class AminoAcidSequence(Sequence):
    """Protein (amino-acid) sequence. All methods are unimplemented stubs."""
    def __init__(self, seq, **kwargs):
        raise NotImplementedError()

    def __repr__(self):
        raise NotImplementedError()

    def get_aa(self, start, stop, index=1):
        raise NotImplementedError()

    def reverse_translate(self, output_type, **kwargs):
        # Back-translation to DNA/RNA; codon choice will be driven by the
        # codon_table and/or codon_usage keywords.
        # NOTE(review): unfinished stub — no raise/return yet, so calling
        # it silently returns None.
        allowed_kwargs = {'codon_table', 'codon_usage'}
| 30.69084 | 113 | 0.64395 |
eb4a8865fd178ce8106c423fdcb3358c654ca6b3 | 10,807 | py | Python | salt/renderers/gpg.py | edusperoni/salt | c9bfb00c2a81a9d4734fa7d1aa80e893d5ef790b | [
"Apache-2.0"
] | 1 | 2017-06-26T18:14:56.000Z | 2017-06-26T18:14:56.000Z | salt/renderers/gpg.py | edusperoni/salt | c9bfb00c2a81a9d4734fa7d1aa80e893d5ef790b | [
"Apache-2.0"
] | 1 | 2015-10-05T22:03:10.000Z | 2015-10-05T22:03:10.000Z | salt/renderers/gpg.py | edusperoni/salt | c9bfb00c2a81a9d4734fa7d1aa80e893d5ef790b | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
r'''
Renderer that will decrypt GPG ciphers
Any key in the SLS file can be a GPG cipher, and this renderer will decrypt it
before passing it off to Salt. This allows you to safely store secrets in
source control, in such a way that only your Salt master can decrypt them and
distribute them only to the minions that need them.
The typical use-case would be to use ciphers in your pillar data, and keep a
secret key on your master. You can put the public key in source control so that
developers can add new secrets quickly and easily.
This renderer requires the gpg_ binary. No python libraries are required as of
the 2015.8.0 release.
.. _gpg: https://gnupg.org
Setup
-----
To set things up, first generate a keypair. On the master, run the following:
.. code-block:: bash
# mkdir -p /etc/salt/gpgkeys
# chmod 0700 /etc/salt/gpgkeys
# gpg --gen-key --homedir /etc/salt/gpgkeys
Do not supply a password for the keypair, and use a name that makes sense for
your application. Be sure to back up the ``gpgkeys`` directory someplace safe!
.. note::
Unfortunately, there are some scenarios - for example, on virtual machines
which don’t have real hardware - where insufficient entropy causes key
generation to be extremely slow. In these cases, there are usually means of
increasing the system entropy. On virtualised Linux systems, this can often
be achieved by installing the ``rng-tools`` package.
Export the Public Key
---------------------
.. code-block:: bash
# gpg --homedir /etc/salt/gpgkeys --armor --export <KEY-NAME> > exported_pubkey.gpg
Import the Public Key
---------------------
To encrypt secrets, copy the public key to your local machine and run:
.. code-block:: bash
$ gpg --import exported_pubkey.gpg
To generate a cipher from a secret:
.. code-block:: bash
$ echo -n "supersecret" | gpg --armor --batch --trust-model always --encrypt -r <KEY-name>
To apply the renderer on a file-by-file basis add the following line to the
top of any pillar with gpg data in it:
.. code-block:: yaml
#!yaml|gpg
Now with your renderer configured, you can include your ciphers in your pillar
data like so:
.. code-block:: yaml
#!yaml|gpg
a-secret: |
-----BEGIN PGP MESSAGE-----
Version: GnuPG v1
hQEMAweRHKaPCfNeAQf9GLTN16hCfXAbPwU6BbBK0unOc7i9/etGuVc5CyU9Q6um
QuetdvQVLFO/HkrC4lgeNQdM6D9E8PKonMlgJPyUvC8ggxhj0/IPFEKmrsnv2k6+
cnEfmVexS7o/U1VOVjoyUeliMCJlAz/30RXaME49Cpi6No2+vKD8a4q4nZN1UZcG
RhkhC0S22zNxOXQ38TBkmtJcqxnqT6YWKTUsjVubW3bVC+u2HGqJHu79wmwuN8tz
m4wBkfCAd8Eyo2jEnWQcM4TcXiF01XPL4z4g1/9AAxh+Q4d8RIRP4fbw7ct4nCJv
Gr9v2DTF7HNigIMl4ivMIn9fp+EZurJNiQskLgNbktJGAeEKYkqX5iCuB1b693hJ
FKlwHiJt5yA8X2dDtfk8/Ph1Jx2TwGS+lGjlZaNqp3R1xuAZzXzZMLyZDe5+i3RJ
skqmFTbOiA===Eqsm
-----END PGP MESSAGE-----
.. _encrypted-cli-pillar-data:
Encrypted CLI Pillar Data
-------------------------
.. versionadded:: 2016.3.0
Functions like :py:func:`state.highstate <salt.modules.state.highstate>` and
:py:func:`state.sls <salt.modules.state.sls>` allow for pillar data to be
passed on the CLI.
.. code-block:: bash
salt myminion state.highstate pillar="{'mypillar': 'foo'}"
Starting with the 2016.3.0 release of Salt, it is now possible for this pillar
data to be GPG-encrypted, and to use the GPG renderer to decrypt it.
Replacing Newlines
******************
To pass encrypted pillar data on the CLI, the ciphertext must have its newlines
replaced with a literal backslash-n (``\n``), as newlines are not supported
within Salt CLI arguments. There are a number of ways to do this:
With awk or Perl:
.. code-block:: bash
# awk
ciphertext=`echo -n "supersecret" | gpg --armor --batch --trust-model always --encrypt -r user@domain.com | awk '{printf "%s\\n",$0} END {print ""}'`
# Perl
ciphertext=`echo -n "supersecret" | gpg --armor --batch --trust-model always --encrypt -r user@domain.com | perl -pe 's/\n/\\n/g'`
With Python:
.. code-block:: python
import subprocess
secret, stderr = subprocess.Popen(
['gpg', '--armor', '--batch', '--trust-model', 'always', '--encrypt',
'-r', 'user@domain.com'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate(input='supersecret')
if secret:
print(secret.replace('\n', r'\n'))
else:
raise ValueError('No ciphertext found: {0}'.format(stderr))
.. code-block:: bash
ciphertext=`python /path/to/script.py`
The ciphertext can be included in the CLI pillar data like so:
.. code-block:: bash
salt myminion state.sls secretstuff pillar_enc=gpg pillar="{secret_pillar: '$ciphertext'}"
The ``pillar_enc=gpg`` argument tells Salt that there is GPG-encrypted pillar
data, so that the CLI pillar data is passed through the GPG renderer, which
will iterate recursively though the CLI pillar dictionary to decrypt any
encrypted values.
Encrypting the Entire CLI Pillar Dictionary
*******************************************
If several values need to be encrypted, it may be more convenient to encrypt
the entire CLI pillar dictionary. Again, this can be done in several ways:
With awk or Perl:
.. code-block:: bash
# awk
ciphertext=`echo -n "{'secret_a': 'CorrectHorseBatteryStaple', 'secret_b': 'GPG is fun!'}" | gpg --armor --batch --trust-model always --encrypt -r user@domain.com | awk '{printf "%s\\n",$0} END {print ""}'`
# Perl
ciphertext=`echo -n "{'secret_a': 'CorrectHorseBatteryStaple', 'secret_b': 'GPG is fun!'}" | gpg --armor --batch --trust-model always --encrypt -r user@domain.com | perl -pe 's/\n/\\n/g'`
With Python:
.. code-block:: python
import subprocess
pillar_data = {'secret_a': 'CorrectHorseBatteryStaple',
'secret_b': 'GPG is fun!'}
secret, stderr = subprocess.Popen(
['gpg', '--armor', '--batch', '--trust-model', 'always', '--encrypt',
'-r', 'user@domain.com'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate(input=repr(pillar_data))
if secret:
print(secret.replace('\n', r'\n'))
else:
raise ValueError('No ciphertext found: {0}'.format(stderr))
.. code-block:: bash
ciphertext=`python /path/to/script.py`
With the entire pillar dictionary now encrypted, it can be included in the CLI
pillar data like so:
.. code-block:: bash
salt myminion state.sls secretstuff pillar_enc=gpg pillar="$ciphertext"
'''
# Import python libs
from __future__ import absolute_import, print_function, unicode_literals
import os
import re
import logging
from subprocess import Popen, PIPE
# Import salt libs
import salt.utils.path
import salt.utils.stringio
import salt.syspaths
from salt.exceptions import SaltRenderError
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
# Matches one armored PGP message; non-greedy with DOTALL so the armored
# body may span multiple lines and several messages in one string each
# match separately.
GPG_CIPHERTEXT = re.compile(
    r'-----BEGIN PGP MESSAGE-----.*?-----END PGP MESSAGE-----', re.DOTALL)
def _get_gpg_exec():
    '''
    Locate the ``gpg`` binary on PATH, raising SaltRenderError when it is
    not installed.
    '''
    gpg_exec = salt.utils.path.which('gpg')
    if not gpg_exec:
        raise SaltRenderError('GPG unavailable')
    return gpg_exec
def _get_key_dir():
    '''
    return the location of the GPG key directory
    '''
    gpg_keydir = None
    # Prefer the loader-provided config.get when available (it may be
    # absent during early rendering).
    if 'config.get' in __salt__:
        gpg_keydir = __salt__['config.get']('gpg_keydir')
    if not gpg_keydir:
        # Fall back to opts, defaulting to <config_dir>/gpgkeys where
        # config_dir itself defaults to the directory of the conf file.
        gpg_keydir = __opts__.get(
            'gpg_keydir',
            os.path.join(
                __opts__.get(
                    'config_dir',
                    os.path.dirname(__opts__['conf_file']),
                ),
                'gpgkeys'
            ))
    return gpg_keydir
def _decrypt_ciphertext(cipher):
    '''
    Given a block of ciphertext as a string, and a gpg object, try to decrypt
    the cipher and return the decrypted string. If the cipher cannot be
    decrypted, log the error, and return the ciphertext back out.
    '''
    if six.PY3:
        # communicate() on PY3 needs bytes when writing to a binary pipe.
        cipher = cipher.encode(__salt_system_encoding__)
    cmd = [_get_gpg_exec(), '--homedir', _get_key_dir(), '--status-fd', '2',
           '--no-tty', '-d']
    proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=False)
    decrypted_data, decrypt_error = proc.communicate(input=cipher)
    if not decrypted_data:
        # Empty stdout means decryption failed; warn with gpg's stderr and
        # hand the ciphertext back unchanged so rendering can continue.
        if six.PY3:
            cipher = cipher.decode(__salt_system_encoding__)
        log.warning(
            'Could not decrypt cipher %s, received: %s',
            cipher,
            decrypt_error
        )
        return cipher
    else:
        if six.PY3 and isinstance(decrypted_data, bytes):
            decrypted_data = decrypted_data.decode(__salt_system_encoding__)
        return six.text_type(decrypted_data)
def _decrypt_ciphertexts(cipher, translate_newlines=False):
    # Decrypt every armored PGP block embedded in *cipher*; when none are
    # found, fall back to treating the whole string as one ciphertext.
    if translate_newlines:
        # CLI-passed pillar data carries literal "\n" instead of newlines.
        cipher = cipher.replace(r'\n', '\n')
    ret, num = GPG_CIPHERTEXT.subn(lambda m: _decrypt_ciphertext(m.group()), cipher)
    if num > 0:
        # Strip trailing newlines: a value written as a YAML multiline
        # scalar would otherwise keep an unexpected trailing newline.
        return ret.rstrip('\n')
    else:
        # Possibly just encrypted data without begin/end marks
        return _decrypt_ciphertext(cipher)
def _decrypt_object(obj, translate_newlines=False):
    '''
    Recursively walk *obj*, decrypting every GPG ciphertext found in any
    string (or readable StringIO) it contains. Dicts and lists are updated
    in place; other types are returned untouched.
    '''
    if salt.utils.stringio.is_readable(obj):
        return _decrypt_object(obj.getvalue(), translate_newlines)
    if isinstance(obj, six.string_types):
        return _decrypt_ciphertexts(obj, translate_newlines=translate_newlines)
    if isinstance(obj, dict):
        for key in obj:
            obj[key] = _decrypt_object(obj[key],
                                       translate_newlines=translate_newlines)
        return obj
    if isinstance(obj, list):
        for idx, item in enumerate(obj):
            obj[idx] = _decrypt_object(item,
                                       translate_newlines=translate_newlines)
        return obj
    return obj
def render(gpg_data, saltenv='base', sls='', argline='', **kwargs):
    '''
    Create a gpg object given a gpg_keydir, and then use it to try to decrypt
    the data to be rendered.
    '''
    # Fail early if gpg is missing. NOTE(review): _get_gpg_exec() itself
    # raises when gpg is absent, so the `if not` branch is belt-and-braces.
    if not _get_gpg_exec():
        raise SaltRenderError('GPG unavailable')
    log.debug('Reading GPG keys from: %s', _get_key_dir())

    # translate_newlines is set for CLI-passed pillar data, where real
    # newlines were replaced with literal "\n" (see module docstring).
    translate_newlines = kwargs.get('translate_newlines', False)
    return _decrypt_object(gpg_data, translate_newlines=translate_newlines)
| 31.692082 | 210 | 0.67373 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.