hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfdf1b07a4c2d38b9a7262a8c7774fc21e0d31e
| 3,370
|
py
|
Python
|
src/btengine/simulationfunctions.py
|
wthamisupposedtowritethere/Simple-Backtest-Environment
|
4a17fda4e4206a1cfc5f65a4a710a1b8a2578260
|
[
"MIT"
] | null | null | null |
src/btengine/simulationfunctions.py
|
wthamisupposedtowritethere/Simple-Backtest-Environment
|
4a17fda4e4206a1cfc5f65a4a710a1b8a2578260
|
[
"MIT"
] | null | null | null |
src/btengine/simulationfunctions.py
|
wthamisupposedtowritethere/Simple-Backtest-Environment
|
4a17fda4e4206a1cfc5f65a4a710a1b8a2578260
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun May 30 20:10:11 2021
This script contains functions used to perform the simulation.
@author: Anthony
@project: Systematic strategies in the context of cryptocurrencies trading.
@subproject: Backtesting Engine
@version: 1.0.0
CHANGELOG:
1.0.0
- File created with main functions
This script requires that `pandas`, `numpy`, `scipy.stats` be installed within
the Python environment you are running this script in.
This file can also be imported as a module and contains the following
methods:
* SelectionRules - Save an object in pickle format at the desired path.
THIS FILE IS PROTECTED BY GNU General Public License v3.0
ANY INFRINGEMENT TO THE LICENSE MIGHT AND WILL RESULT IN LEGAL ACTIONS.
"""
import numpy as np
import pandas as pd
from scipy.stats import norm
def get_drift(data, return_type='log'):
    """Return the drift term (mean return minus half the variance).

    Parameters
    ----------
    data : pandas.Series or pandas.DataFrame
        Price history; per-period returns are derived from consecutive rows.
    return_type : str, optional
        ``'log'`` for log returns, ``'simple'`` for arithmetic returns.

    Returns
    -------
    float or numpy.ndarray
        A scalar for Series input; an array of per-column drifts for
        DataFrame input.

    Raises
    ------
    NotImplementedError
        If ``return_type`` is neither ``'log'`` nor ``'simple'``.
    """
    if return_type == 'log':
        # log(1 + pct_change) == log(p_t / p_{t-1}); first row is NaN and
        # is skipped by pandas' mean()/var().
        lr = np.log(1 + data.pct_change())
    elif return_type == 'simple':
        lr = (data / data.shift(1)) - 1
    else:
        raise NotImplementedError("[-] The type " + return_type + " has not been implemented yet.")
    # Ito-style correction: mu - sigma^2 / 2
    drift = lr.mean() - lr.var() / 2
    # DataFrame input yields a Series of drifts -> unwrap to an ndarray;
    # Series input yields a plain float without a ``.values`` attribute.
    # (Was a bare ``except:`` which would also mask unrelated errors.)
    try:
        return drift.values
    except AttributeError:
        return drift
def daily_returns(data, days, iterations, return_type='log', vol_multiplier=1):
    """Draw a ``(days, iterations)`` matrix of simulated daily gross returns.

    Returns are sampled from a drifted normal in return space and
    exponentiated, i.e. ``exp(drift + sigma * z)`` with ``z ~ N(0, 1)``,
    so every entry is a strictly positive multiplicative factor.

    Parameters
    ----------
    data : pandas.Series or pandas.DataFrame
        Price history used to estimate drift and volatility.
    days : int
        Number of simulated periods (rows of the output).
    iterations : int
        Number of simulated paths (columns of the output).
    return_type : str, optional
        ``'log'`` or ``'simple'`` (must match what :func:`get_drift` accepts).
    vol_multiplier : float, optional
        Scaling factor applied to the estimated volatility.

    Returns
    -------
    numpy.ndarray
        Array of shape ``(days, iterations)`` of gross daily returns.
    """
    # get_drift() validates return_type and raises NotImplementedError first.
    drift = get_drift(data, return_type)
    # Compute the per-period returns once (the original duplicated this
    # computation inside each try/except branch).
    if return_type == 'log':
        r = np.log(1 + data.pct_change())
    elif return_type == 'simple':
        r = (data / data.shift(1)) - 1
    else:
        # Defensive: unreachable in practice because get_drift() already
        # raised for unknown types; kept for consistency with get_drift().
        raise NotImplementedError("[-] The type " + return_type + " has not been implemented yet.")
    sigma = r.std()
    # DataFrame -> Series of std devs (unwrap); Series -> plain float.
    # (Was a bare ``except:`` which would also mask unrelated errors.)
    try:
        stv = sigma.values * vol_multiplier
    except AttributeError:
        stv = sigma * vol_multiplier
    # Drifted (log-)normal draw mapped to gross returns.
    dr = np.exp(drift + stv * norm.ppf(np.random.rand(days, iterations)))
    return dr
def simulate(data, days, iterations, return_type='log', vol_multiplier=1):
    """Simulate ``iterations`` price paths of length ``days``.

    The first row of every path is the last observed price in ``data``;
    each subsequent row compounds the previous one by a simulated daily
    gross return drawn by :func:`daily_returns`.

    Parameters
    ----------
    data : pandas.Series or pandas.DataFrame
        Price history; only its return statistics and last price are used.
    days : int
        Number of simulated periods (rows).
    iterations : int
        Number of simulated paths (columns).
    return_type : str, optional
        ``'log'`` or ``'simple'``.
    vol_multiplier : float, optional
        Scaling factor applied to the estimated volatility.

    Returns
    -------
    pandas.DataFrame
        Simulated prices, shape ``(days, iterations)``.
    """
    gross = daily_returns(data, days, iterations, return_type, vol_multiplier)
    # Neutralise the first draw so row 0 is exactly the last observed
    # price, then compound down the time axis in one vectorised pass.
    factors = gross.copy()
    factors[0] = 1.0
    paths = data.iloc[-1] * np.cumprod(factors, axis=0)
    return pd.DataFrame(paths)
"""
def monte_carlo(tickers, data, days_forecast, iterations, start_date = '2000-1-1', return_type = 'log', vol_multiplier = 1):
simulations = {}
indices = pd.date_range(returns.index[-1] + timedelta(1), returns.index[-1] + timedelta(days_to_forecast * 2), freq=BDay())[:days_to_forecast + 1]
for t in tqdm(range(len(tickers))):
y = simulate(data.iloc[:,t], (days_forecast+1), iterations, return_type, vol_multiplier = 1)
y.index = indices
simulations[tickers[t]] = y
return simulations
ret_sim_df = monte_carlo(returns.columns, returns, days_forecast= days_to_forecast, iterations=simulation_trials, start_date=start)
"""
| 30.36036
| 155
| 0.626113
|
acfdf270c9b2d6ea3c9c284708cf77a60e968634
| 7,737
|
py
|
Python
|
testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_SetApMacAddressControlMode_WhitelistFilter.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_SetApMacAddressControlMode_WhitelistFilter.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
testscripts/RDKB/component/WIFIHAL/TS_WIFIHAL_SetApMacAddressControlMode_WhitelistFilter.py
|
cablelabs/tools-tdkb
|
1fd5af0f6b23ce6614a4cfcbbaec4dde430fad69
|
[
"Apache-2.0"
] | null | null | null |
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2018 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
'''
<?xml version="1.0" encoding="UTF-8"?><xml>
<id/>
<version>2</version>
<name>TS_WIFIHAL_SetApMacAddressControlMode_WhitelistFilter</name>
<primitive_test_id/>
<primitive_test_name>WIFIHAL_GetOrSetParamIntValue</primitive_test_name>
<primitive_test_version>3</primitive_test_version>
<status>FREE</status>
<synopsis>To set and get the mac address filter control mode with filter as white list</synopsis>
<groups_id/>
<execution_time>1</execution_time>
<long_duration>false</long_duration>
<advanced_script>false</advanced_script>
<remarks/>
<skip>false</skip>
<box_types>
<box_type>Broadband</box_type>
</box_types>
<rdk_versions>
<rdk_version>RDKB</rdk_version>
</rdk_versions>
<test_cases>
<test_case_id>TC_WIFIHAL_176</test_case_id>
<test_objective>To set and get the mac address filter control mode with filter as white list</test_objective>
<test_type>Positive</test_type>
<test_setup>Broadband</test_setup>
<pre_requisite>1.Ccsp Components should be in a running state else invoke cosa_start.sh manually that includes all the ccsp components and TDK Component
2.TDK Agent should be in running state or invoke it through StartTdk.sh script</pre_requisite>
<api_or_interface_used>wifi_getApMacAddressControlMode()
wifi_setApMacAddressControlMode()</api_or_interface_used>
<input_parameters>methodName : getApMacAddressControlMode
methodName : setApMacAddressControlMode
ApIndex : 0 and 1
filterMode = 1</input_parameters>
<automation_approch>1. Load wifihal module
2. Using WIFIHAL_GetOrSetParamIntValue invoke wifi_getApMacAddressControlMode() and save the get value
3. Using WIFIHAL_GetOrSetParamIntValue invoke wifi_setApMacAddressControlMode() and set filtermode as 1(white list)
4. Invoke wifi_getApMacAddressControlMode() to get the previously set value.
5. Compare the above two results. If the two values are same return SUCCESS else return FAILURE
6. Revert the MacAddressControlMode back to initial value
7. Unload wifihal module</automation_approch>
<except_output>Set and get values of MacAddressControlMode should be the same</except_output>
<priority>High</priority>
<test_stub_interface>WIFIHAL</test_stub_interface>
<test_script>TS_WIFIHAL_SetApMacAddressControlMode_WhitelistFilter</test_script>
<skipped>No</skipped>
<release_version/>
<remarks/>
</test_cases>
<script_tags/>
</xml>
'''
# use tdklib library, which provides a wrapper for tdk testcase script
# NOTE(review): this is a Python 2 TDK template script (print statements,
# ``<ipaddress>``/``<port>`` placeholders substituted by the test harness);
# it is not directly runnable as written.
import tdklib;
from wifiUtility import *;
#Test component to be tested
obj = tdklib.TDKScriptingLibrary("wifihal","1");
#IP and Port of box, No need to change,
#This will be replaced with corresponding Box Ip and port while executing script
ip = <ipaddress>
port = <port>
# NOTE(review): the test-case id below carries a "2.4GHz" prefix that is not
# present in the XML <name> header of this file -- confirm which is intended.
obj.configureTestCase(ip,port,'TS_WIFIHAL_2.4GHzSetApMacAddressControlMode_WhitelistFilter');
loadmodulestatus =obj.getLoadModuleResult();
print "[LIB LOAD STATUS] : %s" %loadmodulestatus
if "SUCCESS" in loadmodulestatus.upper():
    obj.setLoadModuleStatus("SUCCESS");
    # Exercise access points 0 and 1.
    for apIndex in range(0,2):
        expectedresult="SUCCESS";
        getMethod = "getApMacAddressControlMode"
        primitive = 'WIFIHAL_GetOrSetParamIntValue'
        #Calling the method to execute wifi_getApMacAddressControlMode()
        tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, 0, getMethod)
        if expectedresult in actualresult:
            # Remember the initial mode so it can be restored at the end.
            initMode = details.split(":")[1].strip()
            expectedresult="SUCCESS";
            setMethod = "setApMacAddressControlMode"
            primitive = 'WIFIHAL_GetOrSetParamIntValue'
            #0 == filter disabled, 1 == filter as whitelist, 2 == filter as blacklist
            setMode = 1
            #Calling the method to execute wifi_setApMacAddressControlMode()
            tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, setMode, setMethod)
            if expectedresult in actualresult:
                expectedresult="SUCCESS";
                getMethod = "getApMacAddressControlMode"
                primitive = 'WIFIHAL_GetOrSetParamIntValue'
                #Calling the method to execute wifi_getApMacAddressControlMode()
                tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, 0, getMethod)
                if expectedresult in actualresult:
                    # Read back the mode and compare with what was set.
                    finalMode = details.split(":")[1].strip()
                    if int(finalMode) == setMode:
                        print "TEST STEP: Setting the MacAddress filter ControlMode with filter as whitelist for apIndex %s"%apIndex
                        print "EXPECTED RESULT: Set and get values should be the same"
                        print "ACTUAL RESULT : Set and get values are the same"
                        print "Set value: %s"%setMode
                        print "Get value: %s"%finalMode
                        print "TEST EXECUTION RESULT :SUCCESS"
                        tdkTestObj.setResultStatus("SUCCESS");
                    else:
                        print "TEST STEP: Setting the MacAddress filter ControlMode filter as whitelist for apIndex %s"%apIndex
                        print "EXPECTED RESULT: Set and get values should be the same"
                        print "ACTUAL RESULT : Set and get values are NOT the same"
                        print "Set value: %s"%setMode
                        print "Get value: %s"%finalMode
                        print "TEST EXECUTION RESULT :FAILURE"
                        tdkTestObj.setResultStatus("FAILURE");
                    #Revert back to initial value
                    setMethod = "setApMacAddressControlMode"
                    primitive = 'WIFIHAL_GetOrSetParamIntValue'
                    setMode = int(initMode)
                    tdkTestObj, actualresult, details = ExecuteWIFIHalCallMethod(obj, primitive, apIndex, setMode, setMethod)
                    if expectedresult in actualresult:
                        tdkTestObj.setResultStatus("SUCCESS");
                        print "Successfully reverted back to inital value"
                    else:
                        tdkTestObj.setResultStatus("FAILURE");
                        print "Unable to revert to initial value"
                else:
                    tdkTestObj.setResultStatus("FAILURE");
                    print "getApMacAddressControlMode() function call failed after set operation"
            else:
                tdkTestObj.setResultStatus("FAILURE");
                print "setApMacAddressControlMode() function call failed"
        else:
            tdkTestObj.setResultStatus("FAILURE");
            print "getApMacAddressControlMode() function call failed"
    obj.unloadModule("wifihal");
else:
    print "Failed to load wifi module";
    obj.setLoadModuleStatus("FAILURE");
| 46.608434
| 157
| 0.669381
|
acfdf2d5560d3ad221f524f04b7522504b29381e
| 91,706
|
py
|
Python
|
venv/Lib/site-packages/astropy/table/tests/test_table.py
|
KwanYu/Airbnb-Backend
|
61b4c89f891378181447fc251fa0d1c2c5f435de
|
[
"MIT"
] | 2
|
2020-08-25T13:55:00.000Z
|
2020-08-25T16:36:03.000Z
|
downloadable-site-packages/astropy/table/tests/test_table.py
|
ProjectZeroDays/Pyto
|
d5d77f3541f329bbb28142d18606b22f115b7df6
|
[
"MIT"
] | null | null | null |
downloadable-site-packages/astropy/table/tests/test_table.py
|
ProjectZeroDays/Pyto
|
d5d77f3541f329bbb28142d18606b22f115b7df6
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import gc
import sys
import copy
from io import StringIO
from collections import OrderedDict
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from astropy.io import fits
from astropy.table import Table, QTable, MaskedColumn, TableReplaceWarning
from astropy.tests.helper import (assert_follows_unicode_guidelines,
ignore_warnings, catch_warnings)
from astropy.coordinates import SkyCoord
from astropy.utils.data import get_pkg_data_filename
from astropy import table
from astropy import units as u
from astropy.time import Time, TimeDelta
from .conftest import MaskedTable, MIXIN_COLS
try:
with ignore_warnings(DeprecationWarning):
# Ignore DeprecationWarning on pandas import in Python 3.5--see
# https://github.com/astropy/astropy/issues/4380
import pandas # pylint: disable=W0611
except ImportError:
HAS_PANDAS = False
else:
HAS_PANDAS = True
class SetupData:
    """Lazily-built fixture data shared by the test classes below.

    ``_setup`` records the concrete (Table, Column) classes supplied by the
    ``table_types`` fixture; each property then builds and caches a matching
    column (``a``..``d``, ``obj``) or table (``t``) on first access.
    Properties implicitly return None if ``_setup`` was never called.
    """

    def _setup(self, table_types):
        # Capture the classes under test (plain or masked variants).
        self._table_type = table_types.Table
        self._column_type = table_types.Column

    @property
    def a(self):
        # int column 'a' with a print format and mutable list-valued meta.
        if self._column_type is not None:
            if not hasattr(self, '_a'):
                self._a = self._column_type(
                    [1, 2, 3], name='a', format='%d',
                    meta={'aa': [0, 1, 2, 3, 4]})
            return self._a

    @property
    def b(self):
        # int column 'b' with scalar meta.
        if self._column_type is not None:
            if not hasattr(self, '_b'):
                self._b = self._column_type(
                    [4, 5, 6], name='b', format='%d', meta={'aa': 1})
            return self._b

    @property
    def c(self):
        if self._column_type is not None:
            if not hasattr(self, '_c'):
                self._c = self._column_type([7, 8, 9], 'c')
            return self._c

    @property
    def d(self):
        # Contains a duplicate value (7) -- useful for grouping/sort tests.
        if self._column_type is not None:
            if not hasattr(self, '_d'):
                self._d = self._column_type([7, 8, 7], 'd')
            return self._d

    @property
    def obj(self):
        # object-dtype column holding mixed int/str values.
        if self._column_type is not None:
            if not hasattr(self, '_obj'):
                self._obj = self._column_type([1, 'string', 3], 'obj', dtype='O')
            return self._obj

    @property
    def t(self):
        # Two-column table built from ``a`` and ``b``.
        if self._table_type is not None:
            if not hasattr(self, '_t'):
                self._t = self._table_type([self.a, self.b])
            return self._t
@pytest.mark.usefixtures('table_types')
class TestSetTableColumn(SetupData):
    """Row and column assignment through ``t[...] = value`` item access."""

    def test_set_row(self, table_types):
        """Set a row from a tuple of values"""
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t[1] = (20, 21)
        assert t['a'][0] == 1
        assert t['a'][1] == 20
        assert t['a'][2] == 3
        assert t['b'][0] == 4
        assert t['b'][1] == 21
        assert t['b'][2] == 6

    def test_set_row_existing(self, table_types):
        """Set a row from another existing row"""
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t[0] = t[1]
        assert t[0][0] == 2
        assert t[0][1] == 5

    def test_set_row_fail_1(self, table_types):
        """Set a row from an incorrectly-sized or typed set of values"""
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        with pytest.raises(ValueError):
            t[1] = (20, 21, 22)
        with pytest.raises(ValueError):
            t[1] = 0

    def test_set_row_fail_2(self, table_types):
        """Set a row from an incorrectly-typed tuple of values"""
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        with pytest.raises(ValueError):
            t[1] = ('abc', 'def')

    def test_set_new_col_new_table(self, table_types):
        """Create a new column in empty table using the item access syntax"""
        self._setup(table_types)
        t = table_types.Table()
        t['aa'] = self.a
        # Test that the new column name is 'aa' and that the values match
        assert np.all(t['aa'] == self.a)
        assert t.colnames == ['aa']

    def test_set_new_col_new_table_quantity(self, table_types):
        """Create a new column (from a quantity) in empty table using the item access syntax"""
        self._setup(table_types)
        t = table_types.Table()
        t['aa'] = np.array([1, 2, 3]) * u.m
        assert np.all(t['aa'] == np.array([1, 2, 3]))
        assert t['aa'].unit == u.m
        # Scalar quantity broadcasts to the table length.
        t['bb'] = 3 * u.m
        assert np.all(t['bb'] == 3)
        assert t['bb'].unit == u.m

    def test_set_new_col_existing_table(self, table_types):
        """Create a new column in an existing table using the item access syntax"""
        self._setup(table_types)
        t = table_types.Table([self.a])
        # Add a column
        t['bb'] = self.b
        assert np.all(t['bb'] == self.b)
        assert t.colnames == ['a', 'bb']
        assert t['bb'].meta == self.b.meta
        assert t['bb'].format == self.b.format
        # Add another column
        t['c'] = t['a']
        assert np.all(t['c'] == t['a'])
        assert t.colnames == ['a', 'bb', 'c']
        assert t['c'].meta == t['a'].meta
        assert t['c'].format == t['a'].format
        # Add a multi-dimensional column
        t['d'] = table_types.Column(np.arange(12).reshape(3, 2, 2))
        assert t['d'].shape == (3, 2, 2)
        assert t['d'][0, 0, 1] == 1
        # Add column from a list
        t['e'] = ['hello', 'the', 'world']
        assert np.all(t['e'] == np.array(['hello', 'the', 'world']))
        # Make sure setting existing column still works
        t['e'] = ['world', 'hello', 'the']
        assert np.all(t['e'] == np.array(['world', 'hello', 'the']))
        # Add a column via broadcasting
        t['f'] = 10
        assert np.all(t['f'] == 10)
        # Add a column from a Quantity
        t['g'] = np.array([1, 2, 3]) * u.m
        assert np.all(t['g'].data == np.array([1, 2, 3]))
        assert t['g'].unit == u.m
        # Add a column from a (scalar) Quantity
        t['g'] = 3 * u.m
        assert np.all(t['g'].data == 3)
        assert t['g'].unit == u.m

    def test_set_new_unmasked_col_existing_table(self, table_types):
        """Create a new column in an existing table using the item access syntax"""
        self._setup(table_types)
        t = table_types.Table([self.a])  # masked or unmasked
        b = table.Column(name='b', data=[1, 2, 3])  # unmasked
        t['b'] = b
        assert np.all(t['b'] == b)

    def test_set_new_masked_col_existing_table(self, table_types):
        """Create a new column in an existing table using the item access syntax"""
        self._setup(table_types)
        t = table_types.Table([self.a])  # masked or unmasked
        b = table.MaskedColumn(name='b', data=[1, 2, 3])  # masked
        t['b'] = b
        assert np.all(t['b'] == b)

    def test_set_new_col_existing_table_fail(self, table_types):
        """Generate failure when creating a new column using the item access syntax"""
        self._setup(table_types)
        t = table_types.Table([self.a])
        # Wrong size
        with pytest.raises(ValueError):
            t['b'] = [1, 2]
@pytest.mark.usefixtures('table_types')
class TestEmptyData():
    """Creating columns in an empty table, with and without explicit length."""

    def test_1(self, table_types):
        # Explicit length allocates a zero-filled column of that size.
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', dtype=int, length=100))
        assert len(t['a']) == 100

    def test_2(self, table_types):
        # Length plus per-row shape.
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', dtype=int, shape=(3, ), length=100))
        assert len(t['a']) == 100

    def test_3(self, table_types):
        t = table_types.Table()  # length is not given
        t.add_column(table_types.Column(name='a', dtype=int))
        assert len(t['a']) == 0

    def test_4(self, table_types):
        t = table_types.Table()  # length is not given
        t.add_column(table_types.Column(name='a', dtype=int, shape=(3, 4)))
        assert len(t['a']) == 0

    def test_5(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a'))  # dtype is not specified
        assert len(t['a']) == 0

    def test_add_via_setitem_and_slice(self, table_types):
        """Test related to #3023 where a MaskedColumn is created with name=None
        and then gets changed to name='a'. After PR #2790 this test fails
        without the #3023 fix."""
        t = table_types.Table()
        t['a'] = table_types.Column([1, 2, 3])
        t2 = t[:]
        assert t2.colnames == t.colnames
@pytest.mark.usefixtures('table_types')
class TestNewFromColumns():
    """Constructing a Table directly from a list of Column objects."""

    def test_simple(self, table_types):
        cols = [table_types.Column(name='a', data=[1, 2, 3]),
                table_types.Column(name='b', data=[4, 5, 6], dtype=np.float32)]
        t = table_types.Table(cols)
        assert np.all(t['a'].data == np.array([1, 2, 3]))
        assert np.all(t['b'].data == np.array([4, 5, 6], dtype=np.float32))
        # Element access preserves the column dtype.
        assert type(t['b'][1]) is np.float32

    def test_from_np_array(self, table_types):
        # An explicit Column dtype overrides the dtype of the input ndarray.
        cols = [table_types.Column(name='a', data=np.array([1, 2, 3], dtype=np.int64),
                                   dtype=np.float64),
                table_types.Column(name='b', data=np.array([4, 5, 6], dtype=np.float32))]
        t = table_types.Table(cols)
        assert np.all(t['a'] == np.array([1, 2, 3], dtype=np.float64))
        assert np.all(t['b'] == np.array([4, 5, 6], dtype=np.float32))
        assert type(t['a'][1]) is np.float64
        assert type(t['b'][1]) is np.float32

    def test_size_mismatch(self, table_types):
        # Columns of different lengths cannot form a table.
        cols = [table_types.Column(name='a', data=[1, 2, 3]),
                table_types.Column(name='b', data=[4, 5, 6, 7])]
        with pytest.raises(ValueError):
            table_types.Table(cols)

    def test_name_none(self, table_types):
        """Column with name=None can init a table whether or not names are supplied"""
        c = table_types.Column(data=[1, 2], name='c')
        d = table_types.Column(data=[3, 4])
        t = table_types.Table([c, d], names=(None, 'd'))
        assert t.colnames == ['c', 'd']
        t = table_types.Table([c, d])
        assert t.colnames == ['c', 'col1']
@pytest.mark.usefixtures('table_types')
class TestReverse():
    """In-place row reversal via ``Table.reverse``."""

    def test_reverse(self, table_types):
        t = table_types.Table([[1, 2, 3],
                               ['a', 'b', 'cc']])
        t.reverse()
        assert np.all(t['col0'] == np.array([3, 2, 1]))
        assert np.all(t['col1'] == np.array(['cc', 'b', 'a']))
        # A no-copy view of the reversed table sees the reversed data.
        t2 = table_types.Table(t, copy=False)
        assert np.all(t2['col0'] == np.array([3, 2, 1]))
        assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
        t2 = table_types.Table(t, copy=True)
        assert np.all(t2['col0'] == np.array([3, 2, 1]))
        assert np.all(t2['col1'] == np.array(['cc', 'b', 'a']))
        # Sorting the copy restores the original ordering.
        t2.sort('col0')
        assert np.all(t2['col0'] == np.array([1, 2, 3]))
        assert np.all(t2['col1'] == np.array(['a', 'b', 'cc']))

    def test_reverse_big(self, table_types):
        # Large table exercises the bulk (non-trivial) reversal path.
        x = np.arange(10000)
        y = x + 1
        t = table_types.Table([x, y], names=('x', 'y'))
        t.reverse()
        assert np.all(t['x'] == x[::-1])
        assert np.all(t['y'] == y[::-1])

    def test_reverse_mixin(self):
        """Test reverse for a mixin with no item assignment, fix for #9836"""
        sc = SkyCoord([1, 2], [3, 4], unit='deg')
        t = Table([[2, 1], sc], names=['a', 'sc'])
        t.reverse()
        assert np.all(t['a'] == [1, 2])
        assert np.allclose(t['sc'].ra.to_value('deg'), [2, 1])
@pytest.mark.usefixtures('table_types')
class TestColumnAccess():
    """Column lookup by name and iteration over columns."""

    def test_1(self, table_types):
        # Missing column raises KeyError, even on an empty table.
        t = table_types.Table()
        with pytest.raises(KeyError):
            t['a']

    def test_2(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[1, 2, 3]))
        assert np.all(t['a'] == np.array([1, 2, 3]))
        with pytest.raises(KeyError):
            t['b']  # column does not exist

    def test_itercols(self, table_types):
        # itercols yields the columns in declaration order.
        names = ['a', 'b', 'c']
        t = table_types.Table([[1], [2], [3]], names=names)
        for name, col in zip(names, t.itercols()):
            assert name == col.name
            assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures('table_types')
class TestAddLength(SetupData):
    """Length validation when adding a column to a non-empty table."""

    def test_right_length(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a])
        t.add_column(self.b)

    def test_too_long(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a])
        with pytest.raises(ValueError):
            t.add_column(table_types.Column(name='b', data=[4, 5, 6, 7]))  # data too long

    def test_too_short(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a])
        with pytest.raises(ValueError):
            t.add_column(table_types.Column(name='b', data=[4, 5]))  # data too short
@pytest.mark.usefixtures('table_types')
class TestAddPosition(SetupData):
    """Positional ``add_column`` via an explicit index argument."""

    def test_1(self, table_types):
        self._setup(table_types)
        t = table_types.Table()
        t.add_column(self.a, 0)

    def test_2(self, table_types):
        self._setup(table_types)
        t = table_types.Table()
        t.add_column(self.a, 1)

    def test_3(self, table_types):
        # Negative index is accepted for an empty table.
        self._setup(table_types)
        t = table_types.Table()
        t.add_column(self.a, -1)

    def test_5(self, table_types):
        # index_column of a missing name raises ValueError.
        self._setup(table_types)
        t = table_types.Table()
        with pytest.raises(ValueError):
            t.index_column('b')

    def test_6(self, table_types):
        # Default position appends at the end.
        self._setup(table_types)
        t = table_types.Table()
        t.add_column(self.a)
        t.add_column(self.b)
        assert t.columns.keys() == ['a', 'b']

    def test_7(self, table_types):
        # Inserting at an existing column's index shifts it right.
        self._setup(table_types)
        t = table_types.Table([self.a])
        t.add_column(self.b, t.index_column('a'))
        assert t.columns.keys() == ['b', 'a']

    def test_8(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a])
        t.add_column(self.b, t.index_column('a') + 1)
        assert t.columns.keys() == ['a', 'b']

    def test_9(self, table_types):
        self._setup(table_types)
        t = table_types.Table()
        t.add_column(self.a)
        t.add_column(self.b, t.index_column('a') + 1)
        t.add_column(self.c, t.index_column('b'))
        assert t.columns.keys() == ['a', 'c', 'b']

    def test_10(self, table_types):
        # Index captured before later insertions refers to the old layout.
        self._setup(table_types)
        t = table_types.Table()
        t.add_column(self.a)
        ia = t.index_column('a')
        t.add_column(self.b, ia + 1)
        t.add_column(self.c, ia)
        assert t.columns.keys() == ['c', 'a', 'b']
@pytest.mark.usefixtures('table_types')
class TestAddName(SetupData):
    """Name handling (override and defaults) in ``add_column``."""

    def test_override_name(self, table_types):
        self._setup(table_types)
        t = table_types.Table()
        # Check that we can override the name of the input column in the Table
        t.add_column(self.a, name='b')
        t.add_column(self.b, name='a')
        assert t.columns.keys() == ['b', 'a']
        # Check that we did not change the name of the input column
        assert self.a.info.name == 'a'
        assert self.b.info.name == 'b'
        # Now test with an input column from another table
        t2 = table_types.Table()
        t2.add_column(t['a'], name='c')
        assert t2.columns.keys() == ['c']
        # Check that we did not change the name of the input column
        assert t.columns.keys() == ['b', 'a']
        # Check that we can give a name if none was present
        col = table_types.Column([1, 2, 3])
        t.add_column(col, name='c')
        assert t.columns.keys() == ['b', 'a', 'c']

    def test_default_name(self, table_types):
        # Unnamed column defaults to 'col<N>'.
        t = table_types.Table()
        col = table_types.Column([1, 2, 3])
        t.add_column(col)
        assert t.columns.keys() == ['col0']
@pytest.mark.usefixtures('table_types')
class TestInitFromTable(SetupData):
    """Initializing a table from the columns of another table copies data."""

    def test_from_table_cols(self, table_types):
        """Ensure that using cols from an existing table gives
        a clean copy.
        """
        self._setup(table_types)
        t = self.t
        cols = t.columns
        # Construct Table with cols via Table._new_from_cols
        t2a = table_types.Table([cols['a'], cols['b'], self.c])
        # Construct with add_column
        t2b = table_types.Table()
        t2b.add_column(cols['a'])
        t2b.add_column(cols['b'])
        t2b.add_column(self.c)
        # Mutate the source table after the copies are made ...
        t['a'][1] = 20
        t['b'][1] = 21
        # ... and mutate the copies (data and meta).
        for t2 in [t2a, t2b]:
            t2['a'][2] = 10
            t2['b'][2] = 11
            t2['c'][2] = 12
            t2.columns['a'].meta['aa'][3] = 10
            # Neither direction of mutation leaks into the other table.
            assert np.all(t['a'] == np.array([1, 20, 3]))
            assert np.all(t['b'] == np.array([4, 21, 6]))
            assert np.all(t2['a'] == np.array([1, 2, 10]))
            assert np.all(t2['b'] == np.array([4, 5, 11]))
            assert np.all(t2['c'] == np.array([7, 8, 12]))
            assert t2['a'].name == 'a'
            assert t2.columns['a'].meta['aa'][3] == 10
            assert t.columns['a'].meta['aa'][3] == 3
@pytest.mark.usefixtures('table_types')
class TestAddColumns(SetupData):
    """``add_columns`` batch insertion, ordering, and duplicate handling."""

    def test_add_columns1(self, table_types):
        self._setup(table_types)
        t = table_types.Table()
        t.add_columns([self.a, self.b, self.c])
        assert t.colnames == ['a', 'b', 'c']

    def test_add_columns2(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.add_columns([self.c, self.d])
        assert t.colnames == ['a', 'b', 'c', 'd']
        assert np.all(t['c'] == np.array([7, 8, 9]))

    def test_add_columns3(self, table_types):
        # indexes refer to positions in the table *before* insertion.
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.add_columns([self.c, self.d], indexes=[1, 0])
        assert t.colnames == ['d', 'a', 'c', 'b']

    def test_add_columns4(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.add_columns([self.c, self.d], indexes=[0, 0])
        assert t.colnames == ['c', 'd', 'a', 'b']

    def test_add_columns5(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.add_columns([self.c, self.d], indexes=[2, 2])
        assert t.colnames == ['a', 'b', 'c', 'd']

    def test_add_columns6(self, table_types):
        """Check that we can override column names."""
        self._setup(table_types)
        t = table_types.Table()
        t.add_columns([self.a, self.b, self.c], names=['b', 'c', 'a'])
        assert t.colnames == ['b', 'c', 'a']

    def test_add_columns7(self, table_types):
        """Check that default names are used when appropriate."""
        t = table_types.Table()
        col0 = table_types.Column([1, 2, 3])
        col1 = table_types.Column([4, 5, 3])
        t.add_columns([col0, col1])
        assert t.colnames == ['col0', 'col1']

    def test_add_duplicate_column(self, table_types):
        self._setup(table_types)
        t = table_types.Table()
        t.add_column(self.a)
        # Adding a same-named column fails unless rename_duplicate=True,
        # which appends a '_<n>' suffix instead.
        with pytest.raises(ValueError):
            t.add_column(table_types.Column(name='a', data=[0, 1, 2]))
        t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
                     rename_duplicate=True)
        t.add_column(self.b)
        t.add_column(self.c)
        assert t.colnames == ['a', 'a_1', 'b', 'c']
        t.add_column(table_types.Column(name='a', data=[0, 1, 2]),
                     rename_duplicate=True)
        assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2']
        # test adding column from a separate Table
        t1 = table_types.Table()
        t1.add_column(self.a)
        with pytest.raises(ValueError):
            t.add_column(t1['a'])
        t.add_column(t1['a'], rename_duplicate=True)
        t1['a'][0] = 100  # Change original column
        assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3']
        assert t1.colnames == ['a']
        # Check new column didn't change (since name conflict forced a copy)
        assert t['a_3'][0] == self.a[0]
        # Check that rename_duplicate=True is ok if there are no duplicates
        t.add_column(table_types.Column(name='q', data=[0, 1, 2]),
                     rename_duplicate=True)
        assert t.colnames == ['a', 'a_1', 'b', 'c', 'a_2', 'a_3', 'q']

    def test_add_duplicate_columns(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b, self.c])
        with pytest.raises(ValueError):
            t.add_columns([table_types.Column(name='a', data=[0, 1, 2]), table_types.Column(name='b', data=[0, 1, 2])])
        t.add_columns([table_types.Column(name='a', data=[0, 1, 2]),
                       table_types.Column(name='b', data=[0, 1, 2])],
                      rename_duplicate=True)
        t.add_column(self.d)
        assert t.colnames == ['a', 'b', 'c', 'a_1', 'b_1', 'd']
@pytest.mark.usefixtures('table_types')
class TestAddRow(SetupData):
    """Tests for ``Table.add_row()`` and ``Table.insert_row()``."""
    @property
    def b(self):
        # Lazily-built float column fixture, cached on the instance.
        if self._column_type is not None:
            if not hasattr(self, '_b'):
                self._b = self._column_type(name='b', data=[4.0, 5.1, 6.2])
            return self._b
    @property
    def c(self):
        # Lazily-built string column fixture, cached on the instance.
        if self._column_type is not None:
            if not hasattr(self, '_c'):
                self._c = self._column_type(name='c', data=['7', '8', '9'])
            return self._c
    @property
    def d(self):
        # Lazily-built 2-d column fixture (3 rows of shape (2,)), cached.
        if self._column_type is not None:
            if not hasattr(self, '_d'):
                self._d = self._column_type(name='d', data=[[1, 2], [3, 4], [5, 6]])
            return self._d
    @property
    def t(self):
        # Three-column table fixture built from the a, b, c columns.
        if self._table_type is not None:
            if not hasattr(self, '_t'):
                self._t = self._table_type([self.a, self.b, self.c])
            return self._t
    def test_add_none_to_empty_table(self, table_types):
        # add_row() with no argument appends a row of zeros / empty strings.
        self._setup(table_types)
        t = table_types.Table(names=('a', 'b', 'c'), dtype=('(2,)i', 'S4', 'O'))
        t.add_row()
        assert np.all(t['a'][0] == [0, 0])
        assert t['b'][0] == ''
        assert t['c'][0] == 0
        t.add_row()
        assert np.all(t['a'][1] == [0, 0])
        assert t['b'][1] == ''
        assert t['c'][1] == 0
    def test_add_stuff_to_empty_table(self, table_types):
        self._setup(table_types)
        t = table_types.Table(names=('a', 'b', 'obj'), dtype=('(2,)i', 'S8', 'O'))
        t.add_row([[1, 2], 'hello', 'world'])
        assert np.all(t['a'][0] == [1, 2])
        assert t['b'][0] == 'hello'
        assert t['obj'][0] == 'world'
        # Make sure it is not repeating last row but instead
        # adding zeros (as documented)
        t.add_row()
        assert np.all(t['a'][1] == [0, 0])
        assert t['b'][1] == ''
        assert t['obj'][1] == 0
    def test_add_table_row(self, table_types):
        # A Row from another column-compatible table can be appended.
        self._setup(table_types)
        t = self.t
        t['d'] = self.d
        t2 = table_types.Table([self.a, self.b, self.c, self.d])
        t.add_row(t2[0])
        assert len(t) == 4
        assert np.all(t['a'] == np.array([1, 2, 3, 1]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
        assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
        assert np.all(t['d'] == np.array([[1, 2], [3, 4], [5, 6], [1, 2]]))
    def test_add_table_row_obj(self, table_types):
        # Object-dtype columns accept arbitrary objects in the new row.
        self._setup(table_types)
        t = table_types.Table([self.a, self.b, self.obj])
        t.add_row([1, 4.0, [10]])
        assert len(t) == 4
        assert np.all(t['a'] == np.array([1, 2, 3, 1]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 4.0]))
        assert np.all(t['obj'] == np.array([1, 'string', 3, [10]], dtype='O'))
    def test_add_qtable_row_multidimensional(self):
        # QTable converts the row's quantity to the column unit (km -> m).
        q = [[1, 2], [3, 4]] * u.m
        qt = table.QTable([q])
        qt.add_row(([5, 6] * u.km,))
        assert np.all(qt['col0'] == [[1, 2], [3, 4], [5000, 6000]] * u.m)
    def test_add_with_tuple(self, table_types):
        self._setup(table_types)
        t = self.t
        t.add_row((4, 7.2, '1'))
        assert len(t) == 4
        assert np.all(t['a'] == np.array([1, 2, 3, 4]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
        assert np.all(t['c'] == np.array(['7', '8', '9', '1']))
    def test_add_with_list(self, table_types):
        self._setup(table_types)
        t = self.t
        t.add_row([4, 7.2, '10'])
        assert len(t) == 4
        assert np.all(t['a'] == np.array([1, 2, 3, 4]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
        assert np.all(t['c'] == np.array(['7', '8', '9', '10']))
    def test_add_with_dict(self, table_types):
        # Keys missing from the dict get a masked value (masked table)
        # or a zero/empty fill value (unmasked table).
        self._setup(table_types)
        t = self.t
        t.add_row({'a': 4, 'b': 7.2})
        assert len(t) == 4
        assert np.all(t['a'] == np.array([1, 2, 3, 4]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 7.2]))
        if t.masked:
            assert np.all(t['c'] == np.array(['7', '8', '9', '7']))
        else:
            assert np.all(t['c'] == np.array(['7', '8', '9', '']))
    def test_add_with_none(self, table_types):
        self._setup(table_types)
        t = self.t
        t.add_row()
        assert len(t) == 4
        assert np.all(t['a'].data == np.array([1, 2, 3, 0]))
        assert np.allclose(t['b'], np.array([4.0, 5.1, 6.2, 0.0]))
        assert np.all(t['c'].data == np.array(['7', '8', '9', '']))
    def test_add_missing_column(self, table_types):
        self._setup(table_types)
        t = self.t
        with pytest.raises(ValueError):
            t.add_row({'bad_column': 1})
    def test_wrong_size_tuple(self, table_types):
        self._setup(table_types)
        t = self.t
        with pytest.raises(ValueError):
            t.add_row((1, 2))
    def test_wrong_vals_type(self, table_types):
        self._setup(table_types)
        t = self.t
        with pytest.raises(TypeError):
            t.add_row(1)
    def test_add_row_failures(self, table_types):
        # A failed add_row must leave the table completely unchanged.
        self._setup(table_types)
        t = self.t
        t_copy = table_types.Table(t, copy=True)
        # Wrong number of columns
        try:
            t.add_row([1, 2, 3, 4])
        except ValueError:
            pass
        assert len(t) == 3
        assert np.all(t.as_array() == t_copy.as_array())
        # Wrong data type
        try:
            t.add_row(['one', 2, 3])
        except ValueError:
            pass
        assert len(t) == 3
        assert np.all(t.as_array() == t_copy.as_array())
    def test_insert_table_row(self, table_types):
        """
        Light testing of Table.insert_row() method. The deep testing is done via
        the add_row() tests which calls insert_row(index=len(self), ...), so
        here just test that the added index parameter is handled correctly.
        """
        self._setup(table_types)
        row = (10, 40.0, 'x', [10, 20])
        for index in range(-3, 4):
            indices = np.insert(np.arange(3), index, 3)
            t = table_types.Table([self.a, self.b, self.c, self.d])
            t2 = t.copy()
            t.add_row(row)  # By now we know this works
            t2.insert_row(index, row)
            for name in t.colnames:
                if t[name].dtype.kind == 'f':
                    assert np.allclose(t[name][indices], t2[name])
                else:
                    assert np.all(t[name][indices] == t2[name])
        # Out-of-range indices must raise rather than wrap or clip.
        for index in (-4, 4):
            t = table_types.Table([self.a, self.b, self.c, self.d])
            with pytest.raises(IndexError):
                t.insert_row(index, row)
@pytest.mark.usefixtures('table_types')
class TestTableColumn(SetupData):
    """A column retrieved from a table is a live view onto its data."""
    def test_column_view(self, table_types):
        """Mutating a fetched column must be reflected in the parent table."""
        self._setup(table_types)
        tbl = self.t
        col_view = tbl.columns['a']
        col_view[2] = 10
        assert tbl['a'][2] == 10
@pytest.mark.usefixtures('table_types')
class TestArrayColumns(SetupData):
    """Adding multidimensional columns produces the expected shapes."""
    def test_1d(self, table_types):
        """Per-row shape (2,) gives a table column of shape (3, 2)."""
        self._setup(table_types)
        extra = table_types.Column(name='b', dtype=int, shape=(2, ), length=3)
        tbl = table_types.Table([self.a])
        tbl.add_column(extra)
        assert tbl['b'].shape == (3, 2)
        assert tbl['b'][0].shape == (2, )
    def test_2d(self, table_types):
        """Per-row shape (2, 4) gives a table column of shape (3, 2, 4)."""
        self._setup(table_types)
        extra = table_types.Column(name='b', dtype=int, shape=(2, 4), length=3)
        tbl = table_types.Table([self.a])
        tbl.add_column(extra)
        assert tbl['b'].shape == (3, 2, 4)
        assert tbl['b'][0].shape == (2, 4)
    def test_3d(self, table_types):
        """Per-row shape (2, 4, 6) gives a table column of shape (3, 2, 4, 6)."""
        self._setup(table_types)
        tbl = table_types.Table([self.a])
        extra = table_types.Column(name='b', dtype=int, shape=(2, 4, 6), length=3)
        tbl.add_column(extra)
        assert tbl['b'].shape == (3, 2, 4, 6)
        assert tbl['b'][0].shape == (2, 4, 6)
@pytest.mark.usefixtures('table_types')
class TestRemove(SetupData):
    """Tests for removing columns and rows (remove_* methods and del)."""
    @property
    def t(self):
        # Single-column table fixture, cached on the instance.
        if self._table_type is not None:
            if not hasattr(self, '_t'):
                self._t = self._table_type([self.a])
            return self._t
    @property
    def t2(self):
        # Three-column table fixture, cached on the instance.
        if self._table_type is not None:
            if not hasattr(self, '_t2'):
                self._t2 = self._table_type([self.a, self.b, self.c])
            return self._t2
    def test_1(self, table_types):
        # Removing the only column leaves a valid zero-column table.
        self._setup(table_types)
        self.t.remove_columns('a')
        assert self.t.columns.keys() == []
        assert self.t.as_array().size == 0
        # Regression test for gh-8640
        assert not self.t
        assert isinstance(self.t == None, np.ndarray)
        assert (self.t == None).size == 0
    def test_2(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.remove_columns('a')
        assert self.t.columns.keys() == ['b']
        assert self.t.dtype.names == ('b',)
        assert np.all(self.t['b'] == np.array([4, 5, 6]))
    def test_3(self, table_types):
        """Check remove_columns works for a single column with a name of
        more than one character.  Regression test against #2699"""
        self._setup(table_types)
        self.t['new_column'] = self.t['a']
        assert 'new_column' in self.t.columns.keys()
        self.t.remove_columns('new_column')
        assert 'new_column' not in self.t.columns.keys()
    def test_remove_nonexistent_row(self, table_types):
        self._setup(table_types)
        with pytest.raises(IndexError):
            self.t.remove_row(4)
    def test_remove_row_0(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        self.t.remove_row(0)
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['b'] == np.array([5, 6]))
    def test_remove_row_1(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        self.t.remove_row(1)
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['a'] == np.array([1, 3]))
    def test_remove_row_2(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        self.t.remove_row(2)
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['c'] == np.array([7, 8]))
    def test_remove_row_slice(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        self.t.remove_rows(slice(0, 2, 1))
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['c'] == np.array([9]))
    def test_remove_row_list(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        self.t.remove_rows([0, 2])
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['c'] == np.array([8]))
    def test_remove_row_preserves_meta(self, table_types):
        # Column meta and table dtype must survive row removal.
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.remove_rows([0, 2])
        assert self.t['a'].meta == {'aa': [0, 1, 2, 3, 4]}
        assert self.t.dtype == np.dtype([('a', 'int'),
                                         ('b', 'int')])
    def test_delitem_row(self, table_types):
        # ``del t[i]`` is equivalent to remove_row(i).
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        del self.t[1]
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['a'] == np.array([1, 3]))
    @pytest.mark.parametrize("idx", [[0, 2], np.array([0, 2])])
    def test_delitem_row_list(self, table_types, idx):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        del self.t[idx]
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['c'] == np.array([8]))
    def test_delitem_row_slice(self, table_types):
        self._setup(table_types)
        self.t.add_column(self.b)
        self.t.add_column(self.c)
        del self.t[0:2]
        assert self.t.colnames == ['a', 'b', 'c']
        assert np.all(self.t['c'] == np.array([9]))
    def test_delitem_row_fail(self, table_types):
        self._setup(table_types)
        with pytest.raises(IndexError):
            del self.t[4]
    def test_delitem_row_float(self, table_types):
        # Float row indices are rejected.
        self._setup(table_types)
        with pytest.raises(IndexError):
            del self.t[1.]
    def test_delitem1(self, table_types):
        # ``del t['name']`` removes a column.
        self._setup(table_types)
        del self.t['a']
        assert self.t.columns.keys() == []
        assert self.t.as_array().size == 0
        # Regression test for gh-8640
        assert not self.t
        assert isinstance(self.t == None, np.ndarray)
        assert (self.t == None).size == 0
    def test_delitem2(self, table_types):
        self._setup(table_types)
        del self.t2['b']
        assert self.t2.colnames == ['a', 'c']
    def test_delitems(self, table_types):
        # A tuple of names deletes several columns at once.
        self._setup(table_types)
        del self.t2['a', 'b']
        assert self.t2.colnames == ['c']
    def test_delitem_fail(self, table_types):
        self._setup(table_types)
        with pytest.raises(KeyError):
            del self.t['d']
@pytest.mark.usefixtures('table_types')
class TestKeep(SetupData):
    """Tests for ``Table.keep_columns()``."""
    def test_1(self, table_types):
        """Keeping an empty list of columns empties the table."""
        self._setup(table_types)
        tbl = table_types.Table([self.a, self.b])
        tbl.keep_columns([])
        assert tbl.columns.keys() == []
        assert tbl.as_array().size == 0
        # Regression test for gh-8640
        assert not tbl
        assert isinstance(tbl == None, np.ndarray)
        assert (tbl == None).size == 0
    def test_2(self, table_types):
        """Keeping a single column drops every other column."""
        self._setup(table_types)
        tbl = table_types.Table([self.a, self.b])
        tbl.keep_columns('b')
        assert tbl.columns.keys() == ['b']
        assert tbl.dtype.names == ('b',)
        assert np.all(tbl['b'] == np.array([4, 5, 6]))
@pytest.mark.usefixtures('table_types')
class TestRename(SetupData):
    """Tests for renaming table columns."""
    def test_1(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a])
        t.rename_column('a', 'b')
        assert t.columns.keys() == ['b']
        assert t.dtype.names == ('b',)
        assert np.all(t['b'] == np.array([1, 2, 3]))
    def test_2(self, table_types):
        # A name freed by the first rename ('a') can be reused by the second.
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t.rename_column('a', 'c')
        t.rename_column('b', 'a')
        assert t.columns.keys() == ['c', 'a']
        assert t.dtype.names == ('c', 'a')
        if t.masked:
            # The mask's structured dtype must track the rename too.
            assert t.mask.dtype.names == ('c', 'a')
        assert np.all(t['c'] == np.array([1, 2, 3]))
        assert np.all(t['a'] == np.array([4, 5, 6]))
    def test_rename_by_attr(self, table_types):
        # Setting Column.name directly also renames inside the parent table.
        self._setup(table_types)
        t = table_types.Table([self.a, self.b])
        t['a'].name = 'c'
        t['b'].name = 'a'
        assert t.columns.keys() == ['c', 'a']
        assert t.dtype.names == ('c', 'a')
        assert np.all(t['c'] == np.array([1, 2, 3]))
        assert np.all(t['a'] == np.array([4, 5, 6]))
    def test_rename_columns(self, table_types):
        self._setup(table_types)
        t = table_types.Table([self.a, self.b, self.c])
        t.rename_columns(('a', 'b', 'c'), ('aa', 'bb', 'cc'))
        assert t.colnames == ['aa', 'bb', 'cc']
        t.rename_columns(['bb', 'cc'], ['b', 'c'])
        assert t.colnames == ['aa', 'b', 'c']
        # ('aa') is just the string 'aa', not a tuple -> TypeError.
        with pytest.raises(TypeError):
            t.rename_columns(('aa'), ['a'])
        # Mismatched name-list lengths -> ValueError.
        with pytest.raises(ValueError):
            t.rename_columns(['a'], ['b', 'c'])
@pytest.mark.usefixtures('table_types')
class TestSort():
    """Tests for ``Table.sort()`` and ``Table.argsort()``."""
    def test_single(self, table_types):
        # Sorting on one key reorders all columns consistently.
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
        t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
        assert np.all(t['a'] == np.array([2, 1, 3]))
        assert np.all(t['b'] == np.array([6, 5, 4]))
        t.sort('a')
        assert np.all(t['a'] == np.array([1, 2, 3]))
        assert np.all(t['b'] == np.array([5, 6, 4]))
        assert np.all(t['c'] == np.array([[3, 4],
                                          [1, 2],
                                          [4, 5]]))
        t.sort('b')
        assert np.all(t['a'] == np.array([3, 1, 2]))
        assert np.all(t['b'] == np.array([4, 5, 6]))
        assert np.all(t['c'] == np.array([[4, 5],
                                          [3, 4],
                                          [1, 2]]))
    def test_single_reverse(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4]))
        t.add_column(table_types.Column(name='c', data=[(1, 2), (3, 4), (4, 5)]))
        assert np.all(t['a'] == np.array([2, 1, 3]))
        assert np.all(t['b'] == np.array([6, 5, 4]))
        t.sort('a', reverse=True)
        assert np.all(t['a'] == np.array([3, 2, 1]))
        assert np.all(t['b'] == np.array([4, 6, 5]))
        assert np.all(t['c'] == np.array([[4, 5],
                                          [1, 2],
                                          [3, 4]]))
        t.sort('b', reverse=True)
        assert np.all(t['a'] == np.array([2, 1, 3]))
        assert np.all(t['b'] == np.array([6, 5, 4]))
        assert np.all(t['c'] == np.array([[1, 2],
                                          [3, 4],
                                          [4, 5]]))
    def test_single_big(self, table_types):
        """Sort a big-ish table with a non-trivial sort order"""
        x = np.arange(10000)
        y = np.sin(x)
        t = table_types.Table([x, y], names=('x', 'y'))
        t.sort('y')
        idx = np.argsort(y)
        assert np.all(t['x'] == x[idx])
        assert np.all(t['y'] == y[idx])
    @pytest.mark.parametrize('reverse', [True, False])
    def test_empty_reverse(self, table_types, reverse):
        # Sorting an empty table must not raise, in either direction.
        t = table_types.Table([[], []], dtype=['f4', 'U1'])
        t.sort('col1', reverse=reverse)
    def test_multiple(self, table_types):
        # Multi-key sort accepts both lists and tuples of column names.
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
        assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
        assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
        t.sort(['a', 'b'])
        assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
        assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
        t.sort(['b', 'a'])
        assert np.all(t['a'] == np.array([2, 1, 3, 1, 3, 2]))
        assert np.all(t['b'] == np.array([3, 4, 4, 5, 5, 6]))
        t.sort(('a', 'b'))
        assert np.all(t['a'] == np.array([1, 1, 2, 2, 3, 3]))
        assert np.all(t['b'] == np.array([4, 5, 3, 6, 4, 5]))
    def test_multiple_reverse(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
        assert np.all(t['a'] == np.array([2, 1, 3, 2, 3, 1]))
        assert np.all(t['b'] == np.array([6, 5, 4, 3, 5, 4]))
        t.sort(['a', 'b'], reverse=True)
        assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
        assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
        t.sort(['b', 'a'], reverse=True)
        assert np.all(t['a'] == np.array([2, 3, 1, 3, 1, 2]))
        assert np.all(t['b'] == np.array([6, 5, 5, 4, 4, 3]))
        t.sort(('a', 'b'), reverse=True)
        assert np.all(t['a'] == np.array([3, 3, 2, 2, 1, 1]))
        assert np.all(t['b'] == np.array([5, 4, 6, 3, 5, 4]))
    def test_multiple_with_bytes(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
        t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
        t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
        t.sort(['name', 'firstname'])
        assert np.all([t['firstname'] == np.array([b"John", b"Jo", b"Max"])])
        assert np.all([t['name'] == np.array([b"Jackson", b"Miller", b"Miller"])])
        assert np.all([t['tel'] == np.array([19, 15, 12])])
    def test_multiple_with_unicode(self, table_types):
        # Before Numpy 1.6.2, sorting with multiple column names
        # failed when a unicode column was present.
        t = table_types.Table()
        t.add_column(table_types.Column(
            name='firstname',
            data=[str(x) for x in ["Max", "Jo", "John"]]))
        t.add_column(table_types.Column(
            name='name',
            data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
        t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
        t.sort(['name', 'firstname'])
        assert np.all([t['firstname'] == np.array(
            [str(x) for x in ["John", "Jo", "Max"]])])
        assert np.all([t['name'] == np.array(
            [str(x) for x in ["Jackson", "Miller", "Miller"]])])
        assert np.all([t['tel'] == np.array([19, 15, 12])])
    def test_argsort(self, table_types):
        # argsort() must agree with the structured-array argsort.
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
        assert np.all(t.argsort() == t.as_array().argsort())
        i0 = t.argsort('a')
        i1 = t.as_array().argsort(order=['a'])
        assert np.all(t['a'][i0] == t['a'][i1])
        i0 = t.argsort(['a', 'b'])
        i1 = t.as_array().argsort(order=['a', 'b'])
        assert np.all(t['a'][i0] == t['a'][i1])
        assert np.all(t['b'][i0] == t['b'][i1])
    def test_argsort_reverse(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='a', data=[2, 1, 3, 2, 3, 1]))
        t.add_column(table_types.Column(name='b', data=[6, 5, 4, 3, 5, 4]))
        assert np.all(t.argsort(reverse=True) == np.array([4, 2, 0, 3, 1, 5]))
        i0 = t.argsort('a', reverse=True)
        i1 = np.array([4, 2, 3, 0, 5, 1])
        assert np.all(t['a'][i0] == t['a'][i1])
        i0 = t.argsort(['a', 'b'], reverse=True)
        i1 = np.array([4, 2, 0, 3, 1, 5])
        assert np.all(t['a'][i0] == t['a'][i1])
        assert np.all(t['b'][i0] == t['b'][i1])
    def test_argsort_bytes(self, table_types):
        t = table_types.Table()
        t.add_column(table_types.Column(name='firstname', data=[b"Max", b"Jo", b"John"]))
        t.add_column(table_types.Column(name='name', data=[b"Miller", b"Miller", b"Jackson"]))
        t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
        assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
    def test_argsort_unicode(self, table_types):
        # Before Numpy 1.6.2, sorting with multiple column names
        # failed when a unicode column was present.
        t = table_types.Table()
        t.add_column(table_types.Column(
            name='firstname',
            data=[str(x) for x in ["Max", "Jo", "John"]]))
        t.add_column(table_types.Column(
            name='name',
            data=[str(x) for x in ["Miller", "Miller", "Jackson"]]))
        t.add_column(table_types.Column(name='tel', data=[12, 15, 19]))
        assert np.all(t.argsort(['name', 'firstname']) == np.array([2, 1, 0]))
    def test_rebuild_column_view_then_rename(self, table_types):
        """
        Issue #2039 where renaming fails after any method that calls
        _rebuild_table_column_view (this includes sort and add_row).
        """
        t = table_types.Table([[1]], names=('a',))
        assert t.colnames == ['a']
        assert t.dtype.names == ('a',)
        t.add_row((2,))
        assert t.colnames == ['a']
        assert t.dtype.names == ('a',)
        t.rename_column('a', 'b')
        assert t.colnames == ['b']
        assert t.dtype.names == ('b',)
        t.sort('b')
        assert t.colnames == ['b']
        assert t.dtype.names == ('b',)
        t.rename_column('b', 'c')
        assert t.colnames == ['c']
        assert t.dtype.names == ('c',)
@pytest.mark.usefixtures('table_types')
class TestIterator():
    """Row iteration over a table matches iteration over its source array."""
    def test_iterator(self, table_types):
        d = np.array([(2, 1),
                      (3, 6),
                      (4, 5)], dtype=[('a', 'i4'), ('b', 'i4')])
        t = table_types.Table(d)
        if t.masked:
            # Comparing a masked Row against a plain ndarray row raises.
            with pytest.raises(ValueError):
                t[0] == d[0]
        else:
            for row, np_row in zip(t, d):
                assert np.all(row == np_row)
@pytest.mark.usefixtures('table_types')
class TestSetMeta():
    """Table.meta preserves the insertion order of its keys."""
    def test_set_meta(self, table_types):
        tbl = table_types.Table(names=('a', 'b'))
        for key in ('a', 'b', 'c', 'd'):
            tbl.meta[key] = 1
        assert list(tbl.meta.keys()) == ['a', 'b', 'c', 'd']
@pytest.mark.usefixtures('table_types')
class TestConvertNumpyArray():
    """Conversion of a Table to a numpy structured array via np.array()."""
    def test_convert_numpy_array(self, table_types):
        d = table_types.Table([[1, 2], [3, 4]], names=('a', 'b'))
        np_data = np.array(d)
        if table_types.Table is not MaskedTable:
            assert np.all(np_data == d.as_array())
        # np.array() must produce a new array, not expose internals.
        assert np_data is not d.as_array()
        assert d.colnames == list(np_data.dtype.names)
        np_data = np.array(d, copy=False)
        if table_types.Table is not MaskedTable:
            assert np.all(np_data == d.as_array())
        assert d.colnames == list(np_data.dtype.names)
        # Requesting an incompatible dtype is an error.
        with pytest.raises(ValueError):
            np_data = np.array(d, dtype=[('c', 'i8'), ('d', 'i8')])
    def test_as_array_byteswap(self, table_types):
        """Test for https://github.com/astropy/astropy/pull/4080"""
        byte_orders = ('>', '<')
        native_order = byte_orders[sys.byteorder == 'little']
        for order in byte_orders:
            col = table_types.Column([1.0, 2.0], name='a', dtype=order + 'f8')
            t = table_types.Table([col])
            arr = t.as_array()
            # Default as_array() normalizes to native byte order.
            assert arr['a'].dtype.byteorder in (native_order, '=')
            arr = t.as_array(keep_byteorder=True)
            if order == native_order:
                assert arr['a'].dtype.byteorder in (order, '=')
            else:
                assert arr['a'].dtype.byteorder == order
    def test_byteswap_fits_array(self, table_types):
        """
        Test for https://github.com/astropy/astropy/pull/4080, demonstrating
        that FITS tables are converted to native byte order.
        """
        non_native_order = ('>', '<')[sys.byteorder != 'little']
        filename = get_pkg_data_filename('data/tb.fits',
                                         'astropy.io.fits.tests')
        t = table_types.Table.read(filename)
        arr = t.as_array()
        for idx in range(len(arr.dtype)):
            assert arr.dtype[idx].byteorder != non_native_order
        with fits.open(filename, character_as_bytes=True) as hdul:
            data = hdul[1].data
            for colname in data.columns.names:
                assert np.all(data[colname] == arr[colname])
            arr2 = t.as_array(keep_byteorder=True)
            for colname in data.columns.names:
                assert (data[colname].dtype.byteorder ==
                        arr2[colname].dtype.byteorder)
def _assert_copies(t, t2, deep=True):
    """Assert that ``t2`` is a copy of ``t``: identical names, data and meta,
    with column memory independent when ``deep`` and shared otherwise."""
    assert t.colnames == t2.colnames
    np.testing.assert_array_equal(t.as_array(), t2.as_array())
    assert t.meta == t2.meta
    for orig_col, copy_col in zip(t.columns.values(), t2.columns.values()):
        shared = np.may_share_memory(orig_col, copy_col)
        if deep:
            assert not shared
        else:
            assert shared
def test_copy():
    """Table.copy() produces a deep copy by default."""
    original = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
    duplicate = original.copy()
    _assert_copies(original, duplicate)
def test_copy_masked():
    """Table.copy() on a masked table deep-copies data, mask and meta."""
    t = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'], masked=True,
                    meta={'name': 'test'})
    # Bug fix: this line previously read ``t['x'].mask == [...]`` — a no-op
    # comparison whose result was discarded — so the mask was never actually
    # set and the copy was never exercised with masked values.  Assign it.
    t['x'].mask = [True, False, True]
    t2 = t.copy()
    _assert_copies(t, t2)
def test_copy_protocol():
    """copy.copy() gives a shallow copy; copy.deepcopy() gives a deep one."""
    original = table.Table([[1, 2, 3], [2, 3, 4]], names=['x', 'y'])
    shallow = copy.copy(original)
    deep = copy.deepcopy(original)
    _assert_copies(original, shallow, deep=False)
    _assert_copies(original, deep)
def test_disallow_inequality_comparisons():
    """
    Regression test for #828 - disallow comparison operators on whole Table
    """
    empty = table.Table()
    # Each ordering operator against a scalar must raise TypeError.
    checks = [lambda t: t > 2,
              lambda t: t < 1.1,
              lambda t: t >= 5.5,
              lambda t: t <= -1.1]
    for check in checks:
        with pytest.raises(TypeError):
            check(empty)
def test_values_equal_part1():
    """Element-wise ``Table.values_equal()`` comparisons and error cases."""
    col1 = [1, 2]
    col2 = [1.0, 2.0]
    col3 = ['a', 'b']
    t1 = table.Table([col1, col2, col3], names=['a', 'b', 'c'])
    t2 = table.Table([col1, col2], names=['a', 'b'])
    t3 = table.table_helpers.simple_table()
    tm = t1.copy()
    tm['time'] = Time([1, 2], format='cxcsec')
    tm1 = tm.copy()
    tm1['time'][0] = np.ma.masked
    tq = table.table_helpers.simple_table()
    tq['quantity'] = [1., 2., 3.]*u.m
    tsk = table.table_helpers.simple_table()
    tsk['sk'] = SkyCoord(1, 2, unit='deg')
    with pytest.raises(ValueError, match='cannot compare tables with different column names'):
        t2.values_equal(t1)
    with pytest.raises(ValueError, match='unable to compare column a'):
        # Shape mismatch
        t3.values_equal(t1)
    with pytest.raises(ValueError, match='unable to compare column c'):
        # Type mismatch in column c causes FutureWarning
        t1.values_equal(2)
    with pytest.raises(ValueError, match='unable to compare column c'):
        t1.values_equal([1, 2])
    with pytest.raises(TypeError, match='comparison for column sk'):
        tsk.values_equal(tsk)
    # Table compared with itself: all True.
    eq = t2.values_equal(t2)
    for col in eq.colnames:
        assert np.all(eq[col] == [True, True])
    # tm1 has a masked time element; each row still compares as True here.
    eq1 = tm1.values_equal(tm)
    for col in eq1.colnames:
        assert np.all(eq1[col] == [True, True])
    eq2 = tq.values_equal(tq)
    for col in eq2.colnames:
        assert np.all(eq2[col] == [True, True, True])
    # Scalar operand broadcasts across all columns and rows.
    eq3 = t2.values_equal(2)
    for col in eq3.colnames:
        assert np.all(eq3[col] == [False, True])
    eq4 = t2.values_equal([1, 2])
    for col in eq4.colnames:
        assert np.all(eq4[col] == [True, True])
    # Compare table to its first row
    t = table.Table(rows=[(1, 'a'),
                          (1, 'b')])
    eq = t.values_equal(t[0])
    assert np.all(eq['col0'] == [True, True])
    assert np.all(eq['col1'] == [True, False])
def test_rows_equal():
    """Row-wise ``==`` / ``!=`` against rows, tables and structured arrays."""
    t = table.Table.read([' a b c d',
                          ' 2 c 7.0 0',
                          ' 2 b 5.0 1',
                          ' 2 b 6.0 2',
                          ' 2 a 4.0 3',
                          ' 0 a 0.0 4',
                          ' 1 b 3.0 5',
                          ' 1 a 2.0 6',
                          ' 1 a 1.0 7'],
                         format='ascii')
    # All rows are equal
    assert np.all(t == t)
    # Assert no rows are different
    assert not np.any(t != t)
    # Check equality result for a given row
    assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
    # Check inequality result for a given row
    assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
    # t2 differs from t in rows 2, 4 and 6.
    t2 = table.Table.read([' a b c d',
                           ' 2 c 7.0 0',
                           ' 2 b 5.0 1',
                           ' 3 b 6.0 2',
                           ' 2 a 4.0 3',
                           ' 0 a 1.0 4',
                           ' 1 b 3.0 5',
                           ' 1 c 2.0 6',
                           ' 1 a 1.0 7',
                           ], format='ascii')
    # In the above cases, Row.__eq__ gets called, but now need to make sure
    # Table.__eq__ also gets called.
    assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
    assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
    # Check that comparing to a structured array works
    assert np.all((t == t2.as_array()) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
    assert np.all((t.as_array() == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
def test_equality_masked():
    """Row-wise comparisons on a masked table, including the mask's effect."""
    t = table.Table.read([' a b c d',
                          ' 2 c 7.0 0',
                          ' 2 b 5.0 1',
                          ' 2 b 6.0 2',
                          ' 2 a 4.0 3',
                          ' 0 a 0.0 4',
                          ' 1 b 3.0 5',
                          ' 1 a 2.0 6',
                          ' 1 a 1.0 7',
                          ], format='ascii')
    # Make into masked table
    t = table.Table(t, masked=True)
    # All rows are equal
    assert np.all(t == t)
    # Assert no rows are different
    assert not np.any(t != t)
    # Check equality result for a given row
    assert np.all((t == t[3]) == np.array([0, 0, 0, 1, 0, 0, 0, 0], dtype=bool))
    # Check inequality result for a given row
    assert np.all((t != t[3]) == np.array([1, 1, 1, 0, 1, 1, 1, 1], dtype=bool))
    t2 = table.Table.read([' a b c d',
                           ' 2 c 7.0 0',
                           ' 2 b 5.0 1',
                           ' 3 b 6.0 2',
                           ' 2 a 4.0 3',
                           ' 0 a 1.0 4',
                           ' 1 b 3.0 5',
                           ' 1 c 2.0 6',
                           ' 1 a 1.0 7',
                           ], format='ascii')
    # In the above cases, Row.__eq__ gets called, but now need to make sure
    # Table.__eq__ also gets called.
    assert np.all((t == t2) == np.array([1, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
    assert np.all((t != t2) == np.array([0, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
    # Check that masking a value causes the row to differ
    t.mask['a'][0] = True
    assert np.all((t == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
    assert np.all((t != t2) == np.array([1, 0, 1, 0, 1, 0, 1, 0], dtype=bool))
    # Check that comparing to a structured array works
    assert np.all((t == t2.as_array()) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
@pytest.mark.xfail
def test_equality_masked_bug():
    """
    This highlights a Numpy bug. Once it works, it can be moved into the
    test_equality_masked test. Related Numpy bug report:
    https://github.com/numpy/numpy/issues/3840
    """
    t = table.Table.read([' a b c d',
                          ' 2 c 7.0 0',
                          ' 2 b 5.0 1',
                          ' 2 b 6.0 2',
                          ' 2 a 4.0 3',
                          ' 0 a 0.0 4',
                          ' 1 b 3.0 5',
                          ' 1 a 2.0 6',
                          ' 1 a 1.0 7',
                          ], format='ascii')
    t = table.Table(t, masked=True)
    t2 = table.Table.read([' a b c d',
                           ' 2 c 7.0 0',
                           ' 2 b 5.0 1',
                           ' 3 b 6.0 2',
                           ' 2 a 4.0 3',
                           ' 0 a 1.0 4',
                           ' 1 b 3.0 5',
                           ' 1 c 2.0 6',
                           ' 1 a 1.0 7',
                           ], format='ascii')
    # Structured-array-vs-masked-table comparison is the failing direction.
    assert np.all((t.as_array() == t2) == np.array([0, 1, 0, 1, 0, 1, 0, 1], dtype=bool))
# Check that the meta descriptor is working as expected. The MetaBaseTest class
# takes care of defining all the tests, and we simply have to define the class
# and any minimal set of args to pass.
from astropy.utils.tests.test_metadata import MetaBaseTest
class TestMetaTable(MetaBaseTest):
    # MetaBaseTest supplies the full suite of meta-descriptor tests; we only
    # need to declare the class under test and its constructor args.
    test_class = table.Table
    args = ()
def test_unicode_content():
    """Unicode column data shows up in both str() and bytes() of a Table."""
    # If we don't have unicode literals then return
    # NOTE(review): Python 2 relic — on Python 3 str is never bytes, so this
    # guard can never trigger and could be removed.
    if isinstance('', bytes):
        return
    # Define unicode literals
    string_a = 'астрономическая питона'
    string_b = 'миллиарды световых лет'
    a = table.Table(
        [[string_a, 2],
         [string_b, 3]],
        names=('a', 'b'))
    assert string_a in str(a)
    # This only works because the coding of this file is utf-8, which
    # matches the default encoding of Table.__str__
    assert string_a.encode('utf-8') in bytes(a)
def test_unicode_policy():
    """A table read from ASCII data follows astropy's unicode guidelines."""
    lines = [' a b c d',
             ' 2 c 7.0 0',
             ' 2 b 5.0 1',
             ' 2 b 6.0 2',
             ' 2 a 4.0 3',
             ' 0 a 0.0 4',
             ' 1 b 3.0 5',
             ' 1 a 2.0 6',
             ' 1 a 1.0 7',
             ]
    t = table.Table.read(lines, format='ascii')
    assert_follows_unicode_guidelines(t)
@pytest.mark.parametrize('uni', ['питона', 'ascii'])
def test_unicode_bytestring_conversion(table_types, uni):
    """
    Test converting columns to all unicode or all bytestring. This
    makes two columns, one which is unicode (str in Py3) and one which
    is bytes (UTF-8 encoded). There are two code paths in the conversions,
    a faster one where the data are actually ASCII and a slower one where
    UTF-8 conversion is required. This tests both via the ``uni`` param.
    """
    byt = uni.encode('utf-8')
    t = table_types.Table([[byt], [uni], [1]], dtype=('S', 'U', 'i'))
    assert t['col0'].dtype.kind == 'S'
    assert t['col1'].dtype.kind == 'U'
    assert t['col2'].dtype.kind == 'i'
    # Attach per-column description and meta so we can verify they survive.
    t['col0'].description = 'col0'
    t['col1'].description = 'col1'
    t['col0'].meta['val'] = 'val0'
    t['col1'].meta['val'] = 'val1'
    # Unicode to bytestring
    t1 = t.copy()
    t1.convert_unicode_to_bytestring()
    assert t1['col0'].dtype.kind == 'S'
    assert t1['col1'].dtype.kind == 'S'
    assert t1['col2'].dtype.kind == 'i'
    # Meta made it through
    assert t1['col0'].description == 'col0'
    assert t1['col1'].description == 'col1'
    assert t1['col0'].meta['val'] == 'val0'
    assert t1['col1'].meta['val'] == 'val1'
    # Need to de-fang the automatic unicode sandwiching of Table
    assert np.array(t1['col0'])[0] == byt
    assert np.array(t1['col1'])[0] == byt
    assert np.array(t1['col2'])[0] == 1
    # Bytestring to unicode
    t1 = t.copy()
    t1.convert_bytestring_to_unicode()
    assert t1['col0'].dtype.kind == 'U'
    assert t1['col1'].dtype.kind == 'U'
    assert t1['col2'].dtype.kind == 'i'
    # Meta made it through
    assert t1['col0'].description == 'col0'
    assert t1['col1'].description == 'col1'
    assert t1['col0'].meta['val'] == 'val0'
    assert t1['col1'].meta['val'] == 'val1'
    # No need to de-fang the automatic unicode sandwiching of Table here, but
    # do just for consistency to prove things are working.
    assert np.array(t1['col0'])[0] == uni
    assert np.array(t1['col1'])[0] == uni
    assert np.array(t1['col2'])[0] == 1
def test_table_deletion():
    """
    Regression test for the reference cycle discussed in
    https://github.com/astropy/astropy/issues/2877
    """
    deleted = set()
    # A special table subclass which leaves a record when it is finalized
    class TestTable(table.Table):
        def __del__(self):
            deleted.add(id(self))
    t = TestTable({'a': [1, 2, 3]})
    the_id = id(t)
    # The column holds a back-reference to its parent table; this cycle
    # must still be collectable.
    assert t['a'].parent_table is t
    del t
    # Cleanup
    gc.collect()
    assert the_id in deleted
def test_nested_iteration():
    """
    Regression test for issue 3358 where nested iteration over a single table fails.
    """
    t = table.Table([[0, 1]], names=['a'])
    # The inner iteration runs over the same table object while the outer
    # iteration is still in progress — exactly the failing scenario.
    pairs = [(outer['a'], inner['a']) for outer in t for inner in t]
    assert pairs == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
    """Empty, 0-d and 1-d numpy arrays as Table initializers."""
    empty = table_types.Table(np.array([]))
    assert len(empty.columns) == 0
    # A 0-d array cannot initialize a table.
    with pytest.raises(ValueError):
        table_types.Table(np.array(0))
    # A 1-d array of N values becomes N columns.
    one_d = table_types.Table(np.array([1, 2, 3]))
    assert len(one_d.columns) == 3
@pytest.mark.skipif('not HAS_PANDAS')
class TestPandas:
def test_simple(self):
t = table.Table()
for endian in ['<', '>']:
for kind in ['f', 'i']:
for byte in ['2', '4', '8']:
dtype = np.dtype(endian + kind + byte)
x = np.array([1, 2, 3], dtype=dtype)
t[endian + kind + byte] = x
t['u'] = ['a', 'b', 'c']
t['s'] = ['a', 'b', 'c']
d = t.to_pandas()
for column in t.columns:
if column == 'u':
assert np.all(t['u'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
elif column == 's':
assert np.all(t['s'] == np.array(['a', 'b', 'c']))
assert d[column].dtype == np.dtype("O") # upstream feature of pandas
else:
# We should be able to compare exact values here
assert np.all(t[column] == d[column])
if t[column].dtype.byteorder in ('=', '|'):
assert d[column].dtype == t[column].dtype
else:
assert d[column].dtype == t[column].byteswap().newbyteorder().dtype
# Regression test for astropy/astropy#1156 - the following code gave a
# ValueError: Big-endian buffer not supported on little-endian
# compiler. We now automatically swap the endian-ness to native order
# upon adding the arrays to the data frame.
d[['<i4', '>i4']]
d[['<f4', '>f4']]
t2 = table.Table.from_pandas(d)
for column in t.columns:
if column in ('u', 's'):
assert np.all(t[column] == t2[column])
else:
assert_allclose(t[column], t2[column])
if t[column].dtype.byteorder in ('=', '|'):
assert t[column].dtype == t2[column].dtype
else:
assert t[column].byteswap().newbyteorder().dtype == t2[column].dtype
def test_2d(self):
t = table.Table()
t['a'] = [1, 2, 3]
t['b'] = np.ones((3, 2))
with pytest.raises(ValueError,
match='Cannot convert a table with multidimensional columns'):
t.to_pandas()
def test_mixin_pandas(self):
t = table.QTable()
for name in sorted(MIXIN_COLS):
if name != 'ndarray':
t[name] = MIXIN_COLS[name]
t['dt'] = TimeDelta([0, 2, 4, 6], format='sec')
tp = t.to_pandas()
t2 = table.Table.from_pandas(tp)
assert np.allclose(t2['quantity'], [0, 1, 2, 3])
assert np.allclose(t2['longitude'], [0., 1., 5., 6.])
assert np.allclose(t2['latitude'], [5., 6., 10., 11.])
assert np.allclose(t2['skycoord.ra'], [0, 1, 2, 3])
assert np.allclose(t2['skycoord.dec'], [0, 1, 2, 3])
assert np.allclose(t2['arraywrap'], [0, 1, 2, 3])
assert np.allclose(t2['earthlocation.y'], [0, 110708, 547501, 654527], rtol=0, atol=1)
# For pandas, Time, TimeDelta are the mixins that round-trip the class
assert isinstance(t2['time'], Time)
assert np.allclose(t2['time'].jyear, [2000, 2001, 2002, 2003])
assert np.all(t2['time'].isot == ['2000-01-01T12:00:00.000',
'2000-12-31T18:00:00.000',
'2002-01-01T00:00:00.000',
'2003-01-01T06:00:00.000'])
assert t2['time'].format == 'isot'
# TimeDelta
assert isinstance(t2['dt'], TimeDelta)
assert np.allclose(t2['dt'].value, [0, 2, 4, 6])
assert t2['dt'].format == 'sec'
def test_to_pandas_index(self):
import pandas as pd
row_index = pd.RangeIndex(0, 2, 1)
tm_index = pd.DatetimeIndex(['1998-01-01', '2002-01-01'],
dtype='datetime64[ns]',
name='tm', freq=None)
tm = Time([1998, 2002], format='jyear')
x = [1, 2]
t = table.QTable([tm, x], names=['tm', 'x'])
tp = t.to_pandas()
assert np.all(tp.index == row_index)
tp = t.to_pandas(index='tm')
assert np.all(tp.index == tm_index)
t.add_index('tm')
tp = t.to_pandas()
assert np.all(tp.index == tm_index)
# Make sure writing to pandas didn't hack the original table
assert t['tm'].info.indices
tp = t.to_pandas(index=True)
assert np.all(tp.index == tm_index)
tp = t.to_pandas(index=False)
assert np.all(tp.index == row_index)
with pytest.raises(ValueError) as err:
t.to_pandas(index='not a column')
assert 'index must be None, False' in str(err.value)
def test_mixin_pandas_masked(self):
tm = Time([1, 2, 3], format='cxcsec')
dt = TimeDelta([1, 2, 3], format='sec')
tm[1] = np.ma.masked
dt[1] = np.ma.masked
t = table.QTable([tm, dt], names=['tm', 'dt'])
tp = t.to_pandas()
assert np.all(tp['tm'].isnull() == [False, True, False])
assert np.all(tp['dt'].isnull() == [False, True, False])
t2 = table.Table.from_pandas(tp)
assert np.all(t2['tm'].mask == tm.mask)
assert np.ma.allclose(t2['tm'].jd, tm.jd, rtol=1e-14, atol=1e-14)
assert np.all(t2['dt'].mask == dt.mask)
assert np.ma.allclose(t2['dt'].jd, dt.jd, rtol=1e-14, atol=1e-14)
def test_from_pandas_index(self):
tm = Time([1998, 2002], format='jyear')
x = [1, 2]
t = table.Table([tm, x], names=['tm', 'x'])
tp = t.to_pandas(index='tm')
t2 = table.Table.from_pandas(tp)
assert t2.colnames == ['x']
t2 = table.Table.from_pandas(tp, index=True)
assert t2.colnames == ['tm', 'x']
assert np.allclose(t2['tm'].jyear, tm.jyear)
def test_masking(self):
t = table.Table(masked=True)
t['a'] = [1, 2, 3]
t['a'].mask = [True, False, True]
t['b'] = [1., 2., 3.]
t['b'].mask = [False, False, True]
t['u'] = ['a', 'b', 'c']
t['u'].mask = [False, True, False]
t['s'] = ['a', 'b', 'c']
t['s'].mask = [False, True, False]
# https://github.com/astropy/astropy/issues/7741
t['Source'] = [2584290278794471936, 2584290038276303744,
2584288728310999296]
t['Source'].mask = [False, False, False]
with pytest.warns(TableReplaceWarning,
match="converted column 'a' from integer to float"):
d = t.to_pandas()
t2 = table.Table.from_pandas(d)
for name, column in t.columns.items():
assert np.all(column.data == t2[name].data)
if hasattr(t2[name], 'mask'):
assert np.all(column.mask == t2[name].mask)
# Masked integer type comes back as float. Nothing we can do about this.
if column.dtype.kind == 'i':
if np.any(column.mask):
assert t2[name].dtype.kind == 'f'
else:
assert t2[name].dtype.kind == 'i'
assert_array_equal(column.data,
t2[name].data.astype(column.dtype))
else:
if column.dtype.byteorder in ('=', '|'):
assert column.dtype == t2[name].dtype
else:
assert column.byteswap().newbyteorder().dtype == t2[name].dtype
@pytest.mark.usefixtures('table_types')
class TestReplaceColumn(SetupData):
def test_fail_replace_column(self, table_types):
"""Raise exception when trying to replace column via table.columns object"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
with pytest.raises(ValueError, match=r"Cannot replace column 'a'. Use Table.replace_column.. instead."):
t.columns['a'] = [1, 2, 3]
with pytest.raises(ValueError, match=r"column name not there is not in the table"):
t.replace_column('not there', [1, 2, 3])
with pytest.raises(ValueError, match=r"length of new column must match table length"):
t.replace_column('a', [1, 2])
def test_replace_column(self, table_types):
"""Replace existing column with a new column"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
ta = t['a']
tb = t['b']
vals = [1.2, 3.4, 5.6]
for col in (vals,
table_types.Column(vals),
table_types.Column(vals, name='a'),
table_types.Column(vals, name='b')):
t.replace_column('a', col)
assert np.all(t['a'] == vals)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].meta == {}
assert t['a'].format is None
# Special case: replacing the only column can resize table
del t['b']
assert len(t) == 3
t['a'] = [1, 2]
assert len(t) == 2
def test_replace_index_column(self, table_types):
"""Replace index column and generate expected exception"""
self._setup(table_types)
t = table_types.Table([self.a, self.b])
t.add_index('a')
with pytest.raises(ValueError) as err:
t.replace_column('a', [1, 2, 3])
assert err.value.args[0] == 'cannot replace a table index column'
def test_replace_column_no_copy(self):
t = Table([[1, 2], [3, 4]], names=['a', 'b'])
a = np.array([1.5, 2.5])
t.replace_column('a', a, copy=False)
assert t['a'][0] == a[0]
t['a'][0] = 10
assert t['a'][0] == a[0]
def test_replace_with_masked_col_with_units_in_qtable(self):
"""This is a small regression from #8902"""
t = QTable([[1, 2], [3, 4]], names=['a', 'b'])
t['a'] = MaskedColumn([5, 6], unit='m')
assert isinstance(t['a'], u.Quantity)
class Test__Astropy_Table__():
"""
Test initializing a Table subclass from a table-like object that
implements the __astropy_table__ interface method.
"""
class SimpleTable:
def __init__(self):
self.columns = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9] * u.m]
self.names = ['a', 'b', 'c']
self.meta = OrderedDict([('a', 1), ('b', 2)])
def __astropy_table__(self, cls, copy, **kwargs):
a, b, c = self.columns
c.info.name = 'c'
cols = [table.Column(a, name='a'),
table.MaskedColumn(b, name='b'),
c]
names = [col.info.name for col in cols]
return cls(cols, names=names, copy=copy, meta=kwargs or self.meta)
def test_simple_1(self):
"""Make a SimpleTable and convert to Table, QTable with copy=False, True"""
for table_cls in (table.Table, table.QTable):
col_c_class = u.Quantity if table_cls is table.QTable else table.Column
for cpy in (False, True):
st = self.SimpleTable()
# Test putting in a non-native kwarg `extra_meta` to Table initializer
t = table_cls(st, copy=cpy, extra_meta='extra!')
assert t.colnames == ['a', 'b', 'c']
assert t.meta == {'extra_meta': 'extra!'}
assert np.all(t['a'] == st.columns[0])
assert np.all(t['b'] == st.columns[1])
vals = t['c'].value if table_cls is table.QTable else t['c']
assert np.all(st.columns[2].value == vals)
assert isinstance(t['a'], table.Column)
assert isinstance(t['b'], table.MaskedColumn)
assert isinstance(t['c'], col_c_class)
assert t['c'].unit is u.m
assert type(t) is table_cls
# Copy being respected?
t['a'][0] = 10
assert st.columns[0][0] == 1 if cpy else 10
def test_simple_2(self):
"""Test converting a SimpleTable and changing column names and types"""
st = self.SimpleTable()
dtypes = [np.int32, np.float32, np.float16]
names = ['a', 'b', 'c']
meta = OrderedDict([('c', 3)])
t = table.Table(st, dtype=dtypes, names=names, meta=meta)
assert t.colnames == names
assert all(col.dtype.type is dtype
for col, dtype in zip(t.columns.values(), dtypes))
# The supplied meta is overrides the existing meta. Changed in astropy 3.2.
assert t.meta != st.meta
assert t.meta == meta
def test_kwargs_exception(self):
"""If extra kwargs provided but without initializing with a table-like
object, exception is raised"""
with pytest.raises(TypeError) as err:
table.Table([[1]], extra_meta='extra!')
assert '__init__() got unexpected keyword argument' in str(err.value)
def test_table_meta_copy():
"""
Test no copy vs light (key) copy vs deep copy of table meta for different
situations. #8404.
"""
t = table.Table([[1]])
meta = {1: [1, 2]}
# Assigning meta directly implies using direct object reference
t.meta = meta
assert t.meta is meta
# Table slice implies key copy, so values are unchanged
t2 = t[:]
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the list same object
# Table init with copy=False implies key copy
t2 = table.Table(t, copy=False)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is t.meta[1] # Value IS the same list object
# Table init with copy=True implies deep copy
t2 = table.Table(t, copy=True)
assert t2.meta is not t.meta # NOT the same OrderedDict object but equal
assert t2.meta == t.meta
assert t2.meta[1] is not t.meta[1] # Value is NOT the same list object
def test_table_meta_copy_with_meta_arg():
"""
Test no copy vs light (key) copy vs deep copy of table meta when meta is
supplied as a table init argument. #8404.
"""
meta = {1: [1, 2]}
meta2 = {2: [3, 4]}
t = table.Table([[1]], meta=meta, copy=False)
assert t.meta is meta
t = table.Table([[1]], meta=meta) # default copy=True
assert t.meta is not meta
assert t.meta == meta
# Test initializing from existing table with meta with copy=False
t2 = table.Table(t, meta=meta2, copy=False)
assert t2.meta is meta2
assert t2.meta != t.meta # Change behavior in #8404
# Test initializing from existing table with meta with default copy=True
t2 = table.Table(t, meta=meta2)
assert t2.meta is not meta2
assert t2.meta != t.meta # Change behavior in #8404
# Table init with copy=True and empty dict meta gets that empty dict
t2 = table.Table(t, copy=True, meta={})
assert t2.meta == {}
# Table init with copy=True and kwarg meta=None gets the original table dict.
# This is a somewhat ambiguous case because it could be interpreted as the
# user wanting NO meta set on the output. This could be implemented by inspecting
# call args.
t2 = table.Table(t, copy=True, meta=None)
assert t2.meta == t.meta
# Test initializing empty table with meta with copy=False
t = table.Table(meta=meta, copy=False)
assert t.meta is meta
assert t.meta[1] is meta[1]
# Test initializing empty table with meta with default copy=True (deepcopy meta)
t = table.Table(meta=meta)
assert t.meta is not meta
assert t.meta == meta
assert t.meta[1] is not meta[1]
def test_replace_column_qtable():
"""Replace existing Quantity column with a new column in a QTable"""
a = [1, 2, 3] * u.m
b = [4, 5, 6]
t = table.QTable([a, b], names=['a', 'b'])
ta = t['a']
tb = t['b']
ta.info.meta = {'aa': [0, 1, 2, 3, 4]}
ta.info.format = '%f'
t.replace_column('a', a.to('cm'))
assert np.all(t['a'] == ta)
assert t['a'] is not ta # New a column
assert t['b'] is tb # Original b column unchanged
assert t.colnames == ['a', 'b']
assert t['a'].info.meta is None
assert t['a'].info.format is None
def test_replace_update_column_via_setitem():
"""
Test table update like ``t['a'] = value``. This leverages off the
already well-tested ``replace_column`` and in-place update
``t['a'][:] = value``, so this testing is fairly light.
"""
a = [1, 2] * u.m
b = [3, 4]
t = table.QTable([a, b], names=['a', 'b'])
assert isinstance(t['a'], u.Quantity)
# Inplace update
ta = t['a']
t['a'] = 5 * u.m
assert np.all(t['a'] == [5, 5] * u.m)
assert t['a'] is ta
# Replace
t['a'] = [5, 6]
assert np.all(t['a'] == [5, 6])
assert isinstance(t['a'], table.Column)
assert t['a'] is not ta
def test_replace_update_column_via_setitem_warnings_normal():
"""
Test warnings related to table replace change in #5556:
Normal warning-free replace
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
t['a'] = [10, 20, 30] # replace column
assert len(w) == 0
def test_replace_update_column_via_setitem_warnings_slice():
"""
Test warnings related to table replace change in #5556:
Replace a slice, one warning.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t2 = t[:2]
t2['a'] = 0 # in-place slice update
assert np.all(t['a'] == [0, 0, 3])
assert len(w) == 0
t2['a'] = [10, 20] # replace slice
assert len(w) == 1
assert "replaced column 'a' which looks like an array slice" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_attributes():
"""
Test warnings related to table replace change in #5556:
Lost attributes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and column attributes ['unit']" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_refcount():
"""
Test warnings related to table replace change in #5556:
Reference count changes.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a'] # Generate an extra reference to original column
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings',
['refcount', 'attributes', 'slice']):
t['a'] = [10, 20, 30]
assert len(w) == 1
assert "replaced column 'a' and the number of references" in str(w[0].message)
def test_replace_update_column_via_setitem_warnings_always():
"""
Test warnings related to table replace change in #5556:
Test 'always' setting that raises warning for any replace.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
with catch_warnings() as w:
with table.conf.set_temp('replace_warnings', ['always']):
t['a'] = 0 # in-place slice update
assert len(w) == 0
from inspect import currentframe, getframeinfo
frameinfo = getframeinfo(currentframe())
t['a'] = [10, 20, 30] # replace column
assert len(w) == 1
assert "replaced column 'a'" == str(w[0].message)
# Make sure the warning points back to the user code line
assert w[0].lineno == frameinfo.lineno + 1
assert w[0].category is table.TableReplaceWarning
assert 'test_table' in w[0].filename
def test_replace_update_column_via_setitem_replace_inplace():
"""
Test the replace_inplace config option related to #5556. In this
case no replace is done.
"""
t = table.Table([[1, 2, 3], [4, 5, 6]], names=['a', 'b'])
ta = t['a']
t['a'].unit = 'm'
with catch_warnings() as w:
with table.conf.set_temp('replace_inplace', True):
with table.conf.set_temp('replace_warnings',
['always', 'refcount', 'attributes', 'slice']):
t['a'] = 0 # in-place update
assert len(w) == 0
assert ta is t['a']
t['a'] = [10, 20, 30] # normally replaces column, but not now
assert len(w) == 0
assert ta is t['a']
assert np.all(t['a'] == [10, 20, 30])
def test_primary_key_is_inherited():
"""Test whether a new Table inherits the primary_key attribute from
its parent Table. Issue #4672"""
t = table.Table([(2, 3, 2, 1), (8, 7, 6, 5)], names=('a', 'b'))
t.add_index('a')
original_key = t.primary_key
# can't test if tuples are equal, so just check content
assert original_key[0] == 'a'
t2 = t[:]
t3 = t.copy()
t4 = table.Table(t)
# test whether the reference is the same in the following
assert original_key == t2.primary_key
assert original_key == t3.primary_key
assert original_key == t4.primary_key
# just test one element, assume rest are equal if assert passes
assert t.loc[1] == t2.loc[1]
assert t.loc[1] == t3.loc[1]
assert t.loc[1] == t4.loc[1]
def test_qtable_read_for_ipac_table_with_char_columns():
'''Test that a char column of a QTable is assigned no unit and not
a dimensionless unit, otherwise conversion of reader output to
QTable fails.'''
t1 = table.QTable([["A"]], names="B")
out = StringIO()
t1.write(out, format="ascii.ipac")
t2 = table.QTable.read(out.getvalue(), format="ascii.ipac", guess=False)
assert t2["B"].unit is None
def test_create_table_from_final_row():
"""Regression test for issue #8422: passing the last row of a table into
Table should return a new table containing that row."""
t1 = table.Table([(1, 2)], names=['col'])
row = t1[-1]
t2 = table.Table(row)['col']
assert t2[0] == 2
def test_key_values_in_as_array():
# Test for cheking column slicing using key_values in Table.as_array()
data_rows = [(1, 2.0, 'x'),
(4, 5.0, 'y'),
(5, 8.2, 'z')]
# Creating a table with three columns
t1 = table.Table(rows=data_rows, names=('a', 'b', 'c'),
meta={'name': 'first table'},
dtype=('i4', 'f8', 'S1'))
# Values of sliced column a,b is stored in a numpy array
a = np.array([(1, 2.), (4, 5.), (5, 8.2)],
dtype=[('a', '<i4'), ('b', '<f8')])
# Values fo sliced column c is stored in a numpy array
b = np.array([(b'x',), (b'y',), (b'z',)], dtype=[('c', 'S1')])
# Comparing initialised array with sliced array using Table.as_array()
assert np.array_equal(a, t1.as_array(names=['a', 'b']))
assert np.array_equal(b, t1.as_array(names=['c']))
def test_tolist():
t = table.Table([[1, 2, 3], [1.1, 2.2, 3.3], [b'foo', b'bar', b'hello']],
names=('a', 'b', 'c'))
assert t['a'].tolist() == [1, 2, 3]
assert_array_equal(t['b'].tolist(), [1.1, 2.2, 3.3])
assert t['c'].tolist() == ['foo', 'bar', 'hello']
assert isinstance(t['a'].tolist()[0], int)
assert isinstance(t['b'].tolist()[0], float)
assert isinstance(t['c'].tolist()[0], str)
t = table.Table([[[1, 2], [3, 4]],
[[b'foo', b'bar'], [b'hello', b'world']]],
names=('a', 'c'))
assert t['a'].tolist() == [[1, 2], [3, 4]]
assert t['c'].tolist() == [['foo', 'bar'], ['hello', 'world']]
assert isinstance(t['a'].tolist()[0][0], int)
assert isinstance(t['c'].tolist()[0][0], str)
def test_broadcasting_8933():
"""Explicitly check re-work of code related to broadcasting in #8933"""
t = table.Table([[1, 2]]) # Length=2 table
t['a'] = [[3, 4]] # Can broadcast if ndim > 1 and shape[0] == 1
t['b'] = 5
t['c'] = [1] # Treat as broadcastable scalar, not length=1 array (which would fail)
assert np.all(t['a'] == [[3, 4], [3, 4]])
assert np.all(t['b'] == [5, 5])
assert np.all(t['c'] == [1, 1])
# Test that broadcasted column is writeable
t['c'][1] = 10
assert np.all(t['c'] == [1, 10])
def test_custom_masked_column_in_nonmasked_table():
"""Test the refactor and change in column upgrades introduced
in 95902650f. This fixes a regression introduced by #8789
(Change behavior of Table regarding masked columns)."""
class MyMaskedColumn(table.MaskedColumn):
pass
class MySubMaskedColumn(MyMaskedColumn):
pass
class MyColumn(table.Column):
pass
class MySubColumn(MyColumn):
pass
class MyTable(table.Table):
Column = MyColumn
MaskedColumn = MyMaskedColumn
a = table.Column([1])
b = table.MaskedColumn([2], mask=[True])
c = MyMaskedColumn([3], mask=[True])
d = MySubColumn([4])
e = MySubMaskedColumn([5], mask=[True])
# Two different pathways for making table
t1 = MyTable([a, b, c, d, e], names=['a', 'b', 'c', 'd', 'e'])
t2 = MyTable()
t2['a'] = a
t2['b'] = b
t2['c'] = c
t2['d'] = d
t2['e'] = e
for t in (t1, t2):
assert type(t['a']) is MyColumn
assert type(t['b']) is MyMaskedColumn # upgrade
assert type(t['c']) is MyMaskedColumn
assert type(t['d']) is MySubColumn
assert type(t['e']) is MySubMaskedColumn # sub-class not downgraded
def test_sort_with_non_mutable():
"""Test sorting a table that has a non-mutable column such as SkyCoord"""
t = Table([[2, 1], SkyCoord([4, 3], [6, 5], unit='deg,deg')], names=['a', 'sc'])
meta = {'a': [1, 2]}
t['sc'].info.meta = meta
t.sort('a')
assert np.all(t['a'] == [1, 2])
assert np.allclose(t['sc'].ra.to_value(u.deg), [3, 4])
assert np.allclose(t['sc'].dec.to_value(u.deg), [5, 6])
# Got a deep copy of SkyCoord column
t['sc'].info.meta['a'][0] = 100
assert meta['a'][0] == 1
def test_init_with_list_of_masked_arrays():
"""Test the fix for #8977"""
m0 = np.ma.array([0, 1, 2], mask=[True, False, True])
m1 = np.ma.array([3, 4, 5], mask=[False, True, False])
mc = [m0, m1]
# Test _init_from_list
t = table.Table([mc], names=['a'])
# Test add_column
t['b'] = [m1, m0]
assert t['a'].shape == (2, 3)
assert np.all(t['a'][0] == m0)
assert np.all(t['a'][1] == m1)
assert np.all(t['a'][0].mask == m0.mask)
assert np.all(t['a'][1].mask == m1.mask)
assert t['b'].shape == (2, 3)
assert np.all(t['b'][0] == m1)
assert np.all(t['b'][1] == m0)
assert np.all(t['b'][0].mask == m1.mask)
assert np.all(t['b'][1].mask == m0.mask)
def test_data_to_col_convert_strategy():
"""Test the update to how data_to_col works (#8972), using the regression
example from #8971.
"""
t = table.Table([[0, 1]])
t['a'] = 1
t['b'] = np.int64(2) # Failed previously
assert np.all(t['a'] == [1, 1])
assert np.all(t['b'] == [2, 2])
def test_rows_with_mixins():
"""Test for #9165 to allow adding a list of mixin objects.
Also test for fix to #9357 where group_by() failed due to
mixin object not having info.indices set to [].
"""
tm = Time([1, 2], format='cxcsec')
q = [1, 2] * u.m
mixed1 = [1 * u.m, 2] # Mixed input, fails to convert to Quantity
mixed2 = [2, 1 * u.m] # Mixed input, not detected as potential mixin
rows = [(1, q[0], tm[0]),
(2, q[1], tm[1])]
t = table.QTable(rows=rows)
t['a'] = [q[0], q[1]]
t['b'] = [tm[0], tm[1]]
t['m1'] = mixed1
t['m2'] = mixed2
assert np.all(t['col1'] == q)
assert np.all(t['col2'] == tm)
assert np.all(t['a'] == q)
assert np.all(t['b'] == tm)
assert np.all(t['m1'][ii] == mixed1[ii] for ii in range(2))
assert np.all(t['m2'][ii] == mixed2[ii] for ii in range(2))
assert type(t['m1']) is table.Column
assert t['m1'].dtype is np.dtype(object)
assert type(t['m2']) is table.Column
assert t['m2'].dtype is np.dtype(object)
# Ensure group_by() runs without failing for sortable columns.
# The columns 'm1', and 'm2' are object dtype and not sortable.
for name in ['col0', 'col1', 'col2', 'a', 'b']:
t.group_by(name)
# For good measure include exactly the failure in #9357 in which the
# list of Time() objects is in the Table initializer.
mjds = [Time(58000, format="mjd")]
t = Table([mjds, ["gbt"]], names=("mjd", "obs"))
t.group_by("obs")
def test_iterrows():
dat = [(1, 2, 3),
(4, 5, 6),
(7, 8, 6)]
t = table.Table(rows=dat, names=('a', 'b', 'c'))
c_s = []
a_s = []
for c, a in t.iterrows('c', 'a'):
a_s.append(a)
c_s.append(c)
assert np.all(t['a'] == a_s)
assert np.all(t['c'] == c_s)
rows = [row for row in t.iterrows()]
assert rows == dat
with pytest.raises(ValueError, match='d is not a valid column name'):
t.iterrows('d')
| 35.572537
| 119
| 0.539594
|
acfdf2f73e42d2977838f7e88b4ad906723ddf52
| 3,666
|
py
|
Python
|
utils/server_utils/server.py
|
havesupper/DeepPavlov
|
142f9ff05d53e78bae77f6392613eccea0aa57f7
|
[
"Apache-2.0"
] | 1
|
2018-07-18T11:50:45.000Z
|
2018-07-18T11:50:45.000Z
|
utils/server_utils/server.py
|
havesupper/DeepPavlov
|
142f9ff05d53e78bae77f6392613eccea0aa57f7
|
[
"Apache-2.0"
] | null | null | null |
utils/server_utils/server.py
|
havesupper/DeepPavlov
|
142f9ff05d53e78bae77f6392613eccea0aa57f7
|
[
"Apache-2.0"
] | null | null | null |
import sys
from pathlib import Path
from flask import Flask, request, jsonify, redirect
from flasgger import Swagger
from flask_cors import CORS
from deeppavlov.core.common.file import read_json
from deeppavlov.core.commands.infer import build_model_from_config
from deeppavlov.core.data.utils import check_nested_dict_keys, jsonify_data
from deeppavlov.core.common.log import get_logger
SERVER_CONFIG_FILENAME = 'server_config.json'
log = get_logger(__name__)
app = Flask(__name__)
Swagger(app)
CORS(app)
def init_model(model_config_path):
model_config = read_json(model_config_path)
model = build_model_from_config(model_config)
return model
def get_server_params(server_config_path, model_config_path):
server_config = read_json(server_config_path)
model_config = read_json(model_config_path)
server_params = server_config['common_defaults']
if check_nested_dict_keys(model_config, ['metadata', 'labels', 'server_utils']):
model_tag = model_config['metadata']['labels']['server_utils']
if model_tag in server_config['model_defaults']:
model_defaults = server_config['model_defaults'][model_tag]
for param_name in model_defaults.keys():
if model_defaults[param_name]:
server_params[param_name] = model_defaults[param_name]
for param_name in server_params.keys():
if not server_params[param_name]:
log.error('"{}" parameter should be set either in common_defaults '
'or in model_defaults section of {}'.format(param_name, SERVER_CONFIG_FILENAME))
sys.exit(1)
return server_params
def interact(model, params_names):
if not request.is_json:
return jsonify({
"error": "request must contains json data"
}), 400
model_args = []
data = request.get_json()
for param_name in params_names:
param_value = data.get(param_name)
if param_value is None or (isinstance(param_value, list) and len(param_value) > 0):
model_args.append(param_value)
else:
return jsonify({'error': f"nonempty array expected but got '{param_name}'={repr(param_value)}"}), 400
lengths = {len(i) for i in model_args if i is not None}
if not lengths:
return jsonify({'error': 'got empty request'}), 400
elif len(lengths) > 1:
return jsonify({'error': 'got several different batch sizes'}), 400
if len(params_names) == 1:
model_args = model_args[0]
else:
batch_size = list(lengths)[0]
model_args = [arg or [None] * batch_size for arg in model_args]
model_args = list(zip(*model_args))
prediction = model(model_args)
result = jsonify_data(prediction)
return jsonify(result), 200
def start_model_server(model_config_path):
server_config_dir = Path(__file__).resolve().parent
server_config_path = Path(server_config_dir, SERVER_CONFIG_FILENAME).resolve()
model = init_model(model_config_path)
server_params = get_server_params(server_config_path, model_config_path)
host = server_params['host']
port = server_params['port']
model_endpoint = server_params['model_endpoint']
model_args_names = server_params['model_args_names']
@app.route('/')
def index():
return redirect('/apidocs/')
@app.route(model_endpoint, methods=['POST'])
def answer():
"""
Skill
---
parameters:
- name: data
in: body
required: true
type: json
"""
return interact(model, model_args_names)
app.run(host=host, port=port)
| 31.333333
| 113
| 0.681942
|
acfdf379d6091082f2857a16118c0d492b42b7a8
| 10,867
|
py
|
Python
|
models/ghostnet.py
|
JIABI/GhostShiftAddNet
|
870c38248fa1df23ec1262b6690e20c437d1d5d4
|
[
"MIT"
] | 2
|
2021-08-23T08:43:35.000Z
|
2021-11-28T17:22:29.000Z
|
models/ghostnet.py
|
selkerdawy/GhostShiftAddNet
|
870c38248fa1df23ec1262b6690e20c437d1d5d4
|
[
"MIT"
] | 1
|
2021-11-01T08:35:07.000Z
|
2021-11-01T08:35:07.000Z
|
models/ghostnet.py
|
selkerdawy/GhostShiftAddNet
|
870c38248fa1df23ec1262b6690e20c437d1d5d4
|
[
"MIT"
] | 3
|
2021-11-10T08:37:50.000Z
|
2022-02-08T13:28:16.000Z
|
# 2020.06.09-Changed for building GhostNet
# Huawei Technologies Co., Ltd. <foss@huawei.com>
"""
Creates a GhostNet Model as defined in:
GhostNet: More Features from Cheap Operations By Kai Han, Yunhe Wang, Qi Tian, Jianyuan Guo, Chunjing Xu, Chang Xu.
https://arxiv.org/abs/1911.11907
Modified from https://github.com/d-li14/mobilenetv3.pytorch and https://github.com/rwightman/pytorch-image-models
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from adder import adder
__all__ = ['ghostnet']
def conv_add(in_planes, out_planes, kernel_size, stride, padding, bias=False, quantize=False, weight_bits=8, quantize_v='sbm'):
" 3x3 convolution with padding "
add = adder.Adder2D(in_planes, out_planes, kernel_size=kernel_size, stride=stride, groups=in_planes, padding=padding, bias=bias, quantize=quantize, weight_bits=weight_bits, quantize_v=quantize_v)
#add = adder2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
return nn.Sequential(add)
def conv5x5(in_planes, out_planes, kernel_size, stride, padding, bias=False, groups=1, quantize=False, weight_bits=8, quantize_v='sbm'):
" 3x3 convolution with padding "
shift = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, padding=padding, stride=stride, groups=groups, bias=bias)
return nn.Sequential(shift)
def conv_1x1(in_planes, out_planes, kernel_size, stride, padding, bias=False, quantize=False, weight_bits=8, quantize_v='sbm'):
" 3x3 convolution with padding "
shift = nn.Conv2d(in_planes, out_planes, kernel_size=1, padding=1//2, stride=1, groups=1,
bias=bias)
add = adder.Adder2D(out_planes, out_planes, kernel_size=1, stride=1, groups=1, padding=0, bias=bias, quantize=quantize, weight_bits=weight_bits, quantize_v=quantize_v)
#add = adder2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias)
return nn.Sequential(shift, add)
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def hard_sigmoid(x, inplace: bool = False):
if inplace:
return x.add_(3.).clamp_(0., 6.).div_(6.)
else:
return F.relu6(x + 3.) / 6.
class SqueezeExcite(nn.Module):
def __init__(self, in_chs, se_ratio=0.25, reduced_base_chs=None,
act_layer=nn.ReLU, gate_fn=hard_sigmoid, divisor=4, **_):
super(SqueezeExcite, self).__init__()
self.gate_fn = gate_fn
reduced_chs = _make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=False)
self.act1 = act_layer(inplace=True)
self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=False)
def forward(self, x):
x_se = self.avg_pool(x)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
x = x * self.gate_fn(x_se)
return x
class ConvBnAct(nn.Module):
def __init__(self, in_chs, out_chs, kernel_size,
stride=1, act_layer=nn.ReLU):
super(ConvBnAct, self).__init__()
self.conv = nn.Conv2d(in_chs, out_chs, kernel_size, stride, kernel_size // 2, bias=False)
self.bn1 = nn.BatchNorm2d(out_chs)
self.act1 = act_layer(inplace=True)
def forward(self, x):
x = self.conv(x)
x = self.bn1(x)
x = self.act1(x)
return x
class GhostModule(nn.Module):
def __init__(self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, relu=True, dilation=1,):
super(GhostModule, self).__init__()
self.oup = oup
init_channels = math.ceil(oup / ratio)
new_channels = init_channels * (ratio - 1)
padding = (kernel_size - 1) // 2 * dilation
self.primary_conv = nn.Sequential(
conv5x5(inp, init_channels, dw_size, stride=1, padding=1, groups=1, bias=False),
nn.BatchNorm2d(init_channels),
#conv5x5(init_channels, init_channels, dw_size, stride=1, padding=1, groups=init_channels, bias=False),
#nn.BatchNorm2d(init_channels),
nn.Identity(),
)
self.cheap_operation = nn.Sequential(
conv5x5(init_channels, new_channels, dw_size, stride=1, padding=1, groups=init_channels, bias=False),
#nn.BatchNorm2d(new_channels),
#conv5x5(new_channels, new_channels, kernel_size, stride=1, padding=1, groups=new_channels,
# bias=False),
conv_add(new_channels, new_channels, kernel_size, stride=1, padding=0, bias=False),
# conv5x5(init_channels, new_channels, dw_size, 1, dw_size//2, groups=1, bias=False),
nn.BatchNorm2d(new_channels),
nn.Hardswish(inplace=True) if relu else nn.Sequential(),
)
def forward(self, x):
x1 = self.primary_conv(x)
x2 = self.cheap_operation(x1)
out = torch.cat([x1, x2], dim=1)
return out[:, :self.oup, :, :]
class GhostBottleneck(nn.Module):
    """Ghost bottleneck w/ optional SE.

    As modified here: ghost1 (expand) -> ghost2 (project) -> optional
    stride-2 MaxPool downsample -> optional squeeze-excite -> additive
    shortcut.  Unlike the reference GhostNet, the depth-wise stage runs
    *after* ghost2 and is a pooling layer rather than a conv.
    """
    def __init__(self, in_chs, mid_chs, out_chs, dw_kernel_size=3,
                 stride=1, act_layer=nn.ReLU, se_ratio=0.):
        super(GhostBottleneck, self).__init__()
        has_se = se_ratio is not None and se_ratio > 0.
        self.stride = stride
        # Point-wise expansion
        self.ghost1 = GhostModule(in_chs, mid_chs, stride=stride, relu=True)
        #self.ghost1 = conv_1x1(in_chs, mid_chs, kernel_size=1, stride=1, padding=0, bias=False)
        # Depth-wise convolution (only defined for stride > 1; forward()
        # guards on self.stride so the attribute is never read otherwise)
        if self.stride > 1:
            #self.conv_dw = nn.Conv2d(out_chs, out_chs, dw_kernel_size, stride=stride,
            #                         padding=(dw_kernel_size - 1) // 2,
            #                         groups=out_chs, bias=False)
            #self.bn_dw = nn.BatchNorm2d(out_chs)
            self.conv_dw = nn.MaxPool2d(dw_kernel_size, stride=2, padding=(dw_kernel_size - 1) // 2)
        # Squeeze-and-excitation (applied after ghost2, so out_chs is correct)
        if has_se:
            self.se = SqueezeExcite(out_chs, se_ratio=se_ratio)
        else:
            self.se = None
        # Point-wise linear projection
        self.ghost2 = GhostModule(mid_chs, out_chs, relu=False)
        #self.ghost2 = conv_1x1(mid_chs, out_chs, kernel_size=1, stride=1, padding=0, bias=False)
        # shortcut: identity when shape is unchanged, otherwise 1x1 conv
        # followed by a strided depth-wise conv to match channels and stride
        if (in_chs == out_chs and self.stride == 1):
            self.shortcut = nn.Sequential()
        else:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_chs),
                nn.Conv2d(out_chs, out_chs, dw_kernel_size, stride=stride,
                          padding=(dw_kernel_size - 1) // 2, groups=out_chs, bias=False),
                nn.BatchNorm2d(out_chs),
            )
    def forward(self, x):
        residual = x
        # 1st ghost bottleneck
        x = self.ghost1(x)
        # 2nd ghost bottleneck
        x = self.ghost2(x)
        # Depth-wise convolution (MaxPool downsample)
        if self.stride > 1:
            x = self.conv_dw(x)
            #x = self.bn_dw(x)
        # Squeeze-and-excitation
        if self.se is not None:
            x = self.se(x)
        # in-place residual add; shortcut path handles any shape change
        x += self.shortcut(residual)
        return x
class GhostNet(nn.Module):
    """GhostNet classifier: stem conv -> stacked GhostBottleneck stages ->
    1x1 ConvBnAct -> global pool -> 1x1 head conv -> dropout -> linear.

    ``cfgs`` is a list of stages; each stage is a list of
    [kernel, expansion_size, out_channels, se_ratio, stride] rows.
    """
    def __init__(self, cfgs, num_classes=10, width=0.5, dropout=0.2):
        super(GhostNet, self).__init__()
        # setting of inverted residual blocks
        self.cfgs = cfgs
        self.dropout = dropout
        # building first layer (stride-2 stem)
        output_channel = _make_divisible(16 * width, 4)
        self.conv_stem = nn.Conv2d(3, output_channel, 3, 2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(output_channel)
        self.act1 = nn.ReLU(inplace=True)
        input_channel = output_channel
        # building inverted residual blocks
        stages = []
        block = GhostBottleneck
        for cfg in self.cfgs:
            layers = []
            for k, exp_size, c, se_ratio, s in cfg:
                # channel counts are scaled by ``width`` and rounded to x4
                output_channel = _make_divisible(c * width, 4)
                hidden_channel = _make_divisible(exp_size * width, 4)
                layers.append(block(input_channel, hidden_channel, output_channel, k, s,
                                    se_ratio=se_ratio))
                input_channel = output_channel
            stages.append(nn.Sequential(*layers))
        # NOTE: ``exp_size`` deliberately leaks from the last loop iteration;
        # the final 1x1 stage expands to the last expansion size.
        output_channel = _make_divisible(exp_size * width, 4)
        stages.append(nn.Sequential(ConvBnAct(input_channel, output_channel, 1)))
        input_channel = output_channel
        self.blocks = nn.Sequential(*stages)
        # building last several layers (head)
        output_channel = 1280
        self.global_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.conv_head = nn.Conv2d(input_channel, output_channel, 1, 1, 0, bias=False)
        self.act2 = nn.ReLU(inplace=True)
        self.classifier = nn.Linear(output_channel, num_classes)
    def forward(self, x):
        x = self.conv_stem(x)
        x = self.bn1(x)
        x = self.act1(x)
        x = self.blocks(x)
        # pool to 1x1 before the head conv, then flatten for the classifier
        x = self.global_pool(x)
        x = self.conv_head(x)
        x = self.act2(x)
        x = x.view(x.size(0), -1)
        # dropout is only active in training mode
        if self.dropout > 0.:
            x = F.dropout(x, p=self.dropout, training=self.training)
        x = self.classifier(x)
        return x
def ghostnet(**kwargs):
    """
    Constructs a GhostNet model.

    Each inner list below is one stage; each row is one GhostBottleneck spec:
    [kernel k, expansion size t, output channels c, SE ratio, stride s].
    Commented-out rows are pruned layers from the reference configuration.
    Extra keyword arguments (num_classes, width, dropout) pass through to
    GhostNet.
    """
    cfgs = [
        # k, t, c, SE, s
        # stage1
        [[3, 16, 16, 0, 1]],
        # stage2
        [[3, 48, 24, 0, 2]],
        #[[3, 72, 24, 0, 1]],
        # stage3
        [[3, 72, 40, 0.25, 2]],
        #[[5, 120, 40, 0.25, 1]],
        # stage4
        [[3, 240, 80, 0, 2]],
        [
            # [3, 200, 80, 0, 1],
            # [3, 184, 80, 0, 1],
            # [3, 184, 80, 0, 1],
            [3, 480, 112, 0.25, 1],
            [3, 672, 112, 0.25, 1]
        ],
        # stage5
        [[3, 672, 160, 0.25, 2]],
        [[3, 960, 160, 0, 1],
         [3, 960, 160, 0.25, 1],
         # [5, 960, 160, 0, 1],
         # [5, 960, 160, 0.25, 1]
         ]
    ]
    return GhostNet(cfgs, **kwargs)
if __name__ == '__main__':
    # Smoke test: build the model and run a single forward pass.
    model = ghostnet()
    model.eval()
    print(model)
    # Renamed from ``input`` to avoid shadowing the built-in; inference is
    # wrapped in no_grad() so no autograd graph is built for this check.
    dummy_input = torch.randn(32, 3, 320, 256)
    with torch.no_grad():
        y = model(dummy_input)
    print(y.size())
| 37.864111
| 199
| 0.607343
|
acfdf3f0870a3bf0f85e4ffc4d4cbbb7100d6f87
| 13,881
|
py
|
Python
|
arweave/arweave_lib.py
|
xiaojay/arweave-python-client
|
1ac5afd9a56540012a8a22943ba562729d776912
|
[
"MIT"
] | null | null | null |
arweave/arweave_lib.py
|
xiaojay/arweave-python-client
|
1ac5afd9a56540012a8a22943ba562729d776912
|
[
"MIT"
] | null | null | null |
arweave/arweave_lib.py
|
xiaojay/arweave-python-client
|
1ac5afd9a56540012a8a22943ba562729d776912
|
[
"MIT"
] | null | null | null |
import json
import os
import io
import requests
import logging
import hashlib
import psutil
import arrow
import nacl.bindings
from jose import jwk
from jose.utils import base64url_encode, base64url_decode, base64
from jose.backends.cryptography_backend import CryptographyRSAKey
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_PSS
from Crypto.Hash import SHA256
from .utils import (
winston_to_ar,
ar_to_winston,
owner_to_address,
create_tag,
encode_tag,
decode_tag
)
from .deep_hash import deep_hash
from .merkle import compute_root_hash, generate_transaction_chunks
logger = logging.getLogger(__name__)
# Maximum inline transaction data size (bytes); larger payloads should use
# the chunked file uploader path.
TRANSACTION_DATA_LIMIT_IN_BYTES = 2000000
# Default Arweave gateway; Wallet/Transaction instances copy this at creation.
API_URL = "https://arweave.net"
class ArweaveTransactionException(Exception):
    """Raised when an Arweave node returns an error response or a
    transaction is constructed with invalid parameters."""
    pass
class Wallet(object):
    """An Arweave wallet backed by an RSA key in JWK format.

    Loads the JWK from disk, derives the wallet address from the public
    modulus, and exposes balance lookup, RSA-PSS/SHA-256 signing and
    last-transaction-anchor retrieval against the configured gateway.
    """
    HASH = 'sha256'
    def __init__(self, jwk_file='jwk_file.json'):
        with open(jwk_file, 'r') as j_file:
            self.jwk_data = json.loads(j_file.read())
            # NOTE(review): 'p2s' is blanked before jwk.construct —
            # presumably to neutralize an unsupported JWK field; confirm.
            self.jwk_data['p2s'] = ''
            self.jwk = jwk.construct(self.jwk_data, algorithm=jwk.ALGORITHMS.RS256)
            self.rsa = RSA.importKey(self.jwk.to_pem())
            # 'n' (public modulus, base64url) doubles as the tx "owner" field
            self.owner = self.jwk_data.get('n')
            self.address = owner_to_address(self.owner)
        self.api_url = API_URL
    @property
    def balance(self):
        """Current wallet balance in AR; raises on any non-200 response."""
        url = "{}/wallet/{}/balance".format(self.api_url, self.address)
        response = requests.get(url)
        if response.status_code == 200:
            balance = winston_to_ar(response.text)
        else:
            raise ArweaveTransactionException(response.text)
        return balance
    def sign(self, message):
        """Return an RSA-PSS signature over SHA256(message) as raw bytes."""
        h = SHA256.new(message)
        signed_data = PKCS1_PSS.new(self.rsa).sign(h)
        return signed_data
    def verify(self):
        # NOTE(review): signature verification is not implemented.
        pass
    def get_last_transaction_id(self):
        """Fetch a transaction anchor from /tx_anchor and cache it as last_tx."""
        url = "{}/tx_anchor".format(self.api_url)
        response = requests.get(url)
        if response.status_code == 200:
            self.last_tx = response.text
        else:
            raise ArweaveTransactionException(response.text)
        return self.last_tx
class Transaction(object):
    """An Arweave transaction (format 1 or 2).

    Wraps construction, fee lookup, signing (via the owning Wallet's
    RSA-PSS key), chunk preparation for large file uploads, and the HTTP
    calls to submit and query the transaction on the gateway.
    """
    def __init__(self, wallet, **kwargs):
        """Build a transaction for ``wallet``.

        Recognized kwargs: id, format (default 2), data (str or bytes),
        file_handler + file_path (enables the chunked uploader),
        transaction (JSON string to deserialize), target/to, quantity (AR),
        reward (winston).
        """
        self.jwk_data = wallet.jwk_data
        self.jwk = jwk.construct(self.jwk_data, algorithm="RS256")
        self.wallet = wallet
        self.id = kwargs.get('id', '')
        # fetching the anchor here performs a network round trip per Transaction
        self.last_tx = wallet.get_last_transaction_id()
        self.owner = self.jwk_data.get('n')
        self.tags = []
        self.format = kwargs.get('format', 2)
        self.api_url = API_URL
        self.chunks = None
        data = kwargs.get('data', '')
        self.data_size = len(data)
        # inline data is stored base64url-encoded
        if type(data) is bytes:
            self.data = base64url_encode(data)
        else:
            self.data = base64url_encode(data.encode('utf-8'))
        if self.data is None:
            self.data = ''
        self.file_handler = kwargs.get('file_handler', None)
        if self.file_handler:
            # large uploads: data stays on disk, size comes from the file
            self.uses_uploader = True
            self.data_size = os.stat(kwargs['file_path']).st_size
        else:
            self.uses_uploader = False
        if kwargs.get('transaction'):
            self.from_serialized_transaction(kwargs.get('transaction'))
        else:
            self.data_root = ""
            self.data_tree = []
            self.target = kwargs.get('target', '')
            self.to = kwargs.get('to', '')
            # 'to' is an alias accepted for 'target'
            if self.target == '' and self.to != '':
                self.target = self.to
            self.quantity = kwargs.get('quantity', '0')
            if float(self.quantity) > 0:
                if self.target == '':
                    raise ArweaveTransactionException(
                        "Unable to send {} AR without specifying a target address".format(self.quantity))
                # convert to winston
                self.quantity = ar_to_winston(float(self.quantity))
            reward = kwargs.get('reward', None)
            if reward is not None:
                self.reward = reward
            self.signature = ''
            self.status = None
    def from_serialized_transaction(self, transaction_json):
        """Populate this instance from a JSON string (see load_json)."""
        if type(transaction_json) == str:
            self.load_json(transaction_json)
        else:
            raise ArweaveTransactionException(
                "Please supply a string containing json to initialize a serialized transaction")
    def get_reward(self, data_size, target_address=None):
        """Query /price for the fee (winston) to store ``data_size`` bytes.

        Returns None on a non-200 response (no exception is raised).
        """
        url = "{}/price/{}".format(self.api_url, data_size)
        if target_address:
            url = "{}/price/{}/{}".format(self.api_url, data_size, target_address)
        response = requests.get(url)
        if response.status_code == 200:
            reward = response.text
            return reward
    def add_tag(self, name, value):
        """Append a name/value tag (base64url-encoded for format-2 txs)."""
        tag = create_tag(name, value, self.format == 2)
        self.tags.append(tag)
    def encode_tags(self):
        """Re-encode all tags in place for serialization."""
        tags = []
        for tag in self.tags:
            tags.append(encode_tag(tag))
        self.tags = tags
    def sign(self):
        """Sign the transaction; the tx id is SHA-256 of the raw signature."""
        data_to_sign = self.get_signature_data()
        raw_signature = self.wallet.sign(data_to_sign)
        self.signature = base64url_encode(raw_signature)
        self.id = base64url_encode(hashlib.sha256(raw_signature).digest())
        if type(self.id) == bytes:
            self.id = self.id.decode()
    def get_signature_data(self):
        """Assemble the byte string to be signed (format 1 concatenation or
        format 2 deep-hash); also fetches the reward and, if needed, the
        merkle data_root as side effects."""
        self.reward = self.get_reward(self.data_size, target_address=self.target if len(self.target) > 0 else None)
        # inline data (non-uploader) needs a merkle root computed once
        if int(self.data_size) > 0 and self.data_root == "" and not self.uses_uploader:
            if type(self.data) == str:
                root_hash = compute_root_hash(io.StringIO(self.data))
            if type(self.data) == bytes:
                root_hash = compute_root_hash(io.BytesIO(self.data))
            self.data_root = base64url_encode(root_hash)
        if self.format == 1:
            # legacy format: plain concatenation of decoded fields
            tag_str = ""
            for tag in self.tags:
                name, value = decode_tag(tag)
                tag_str += "{}{}".format(name.decode(), value.decode())
            owner = base64url_decode(self.jwk_data['n'].encode())
            target = base64url_decode(self.target)
            data = base64url_decode(self.data)
            quantity = self.quantity.encode()
            reward = self.reward.encode()
            last_tx = base64url_decode(self.last_tx.encode())
            signature_data = owner + target + data + quantity + reward + last_tx + tag_str.encode()
        if self.format == 2:
            if self.uses_uploader:
                self.prepare_chunks()
            tag_list = [[tag['name'].encode(), tag['value'].encode()] for tag in self.tags]
            # format 2 signs the deep-hash of this ordered field list
            signature_data_list = [
                "2".encode(),
                base64url_decode(self.jwk_data['n'].encode()),
                base64url_decode(self.target.encode()),
                str(self.quantity).encode(),
                self.reward.encode(),
                base64url_decode(self.last_tx.encode()),
                tag_list,
                str(self.data_size).encode(),
                base64url_decode(self.data_root)]
            signature_data = deep_hash(signature_data_list)
        return signature_data
    def send(self):
        """POST the signed transaction to /tx; returns the anchor (last_tx)."""
        url = "{}/tx".format(self.api_url)
        headers = {'Content-Type': 'application/json', 'Accept': 'text/plain'}
        json_data = self.json_data
        response = requests.post(url, data=json_data, headers=headers)
        # NOTE(review): every response (including success) is logged at error
        # level here, duplicating the else-branch below — likely leftover
        # debugging output.
        logger.error("{}\n\n{}".format(response.text, self.json_data))
        if response.status_code == 200:
            logger.debug("RESPONSE 200: {}".format(response.text))
        else:
            logger.error("{}\n\n{}".format(response.text, self.json_data))
        return self.last_tx
    def to_dict(self):
        """Serialize to the dict shape the /tx endpoint expects."""
        if self.data is None:
            self.data = ''
        data = {
            'data': self.data.decode() if type(self.data) == bytes else self.data,
            'id': self.id.decode() if type(self.id) == bytes else self.id,
            'last_tx': self.last_tx,
            'owner': self.owner,
            'quantity': self.quantity,
            'reward': self.reward,
            # NOTE(review): assumes sign() has run (signature is bytes);
            # .decode() raises AttributeError on an unsigned tx — confirm.
            'signature': self.signature.decode(),
            'tags': self.tags,
            'target': self.target
        }
        if self.format == 2:
            self.encode_tags()
            data['tags'] = self.tags
            data['format'] = 2
            if len(self.data_root) > 0:
                data['data_root'] = self.data_root.decode()
            else:
                data['data_root'] = ""
            data['data_size'] = str(self.data_size)
            data['data_tree'] = []
        return data
    @property
    def json_data(self):
        """JSON body for submission; all spaces are stripped.

        NOTE(review): stripping is safe because every field is base64url or
        numeric, but would corrupt any field containing literal spaces.
        """
        data = self.to_dict()
        json_str = json.dumps(data)
        logger.error(json_str)
        return json_str.replace(' ', '')
    def get_status(self):
        """Fetch /tx/<id>/status; falls back to "PENDING" on any error."""
        url = "{}/tx/{}/status".format(self.api_url, self.id)
        response = requests.get(url)
        if response.status_code == 200:
            self.status = json.loads(response.text)
        else:
            logger.error(response.text)
            self.status = "PENDING"
        return self.status
    def get_transaction(self):
        """Fetch /tx/<id> and populate this instance from the response.

        NOTE(review): ``tx`` is never reassigned, so this always returns
        None; callers should read the mutated instance instead.
        """
        url = "{}/tx/{}".format(self.api_url, self.id)
        response = requests.get(url)
        tx = None
        if response.status_code == 200:
            self.load_json(response.text)
        else:
            logger.error(response.text)
        return tx
    def get_price(self):
        """Return the storage price for data_size in AR (None on error)."""
        url = "{}/price/{}".format(self.api_url, self.data_size)
        response = requests.get(url)
        if response.status_code == 200:
            return winston_to_ar(response.text)
        else:
            logger.error(response.text)
    def get_data(self):
        """Download the raw transaction payload into self.data."""
        url = "{}/{}/".format(self.api_url, self.id)
        response = requests.get(url)
        if response.status_code == 200:
            self.data = response.content
        else:
            logger.error(response.text)
            raise ArweaveTransactionException(
                response.text
            )
    def load_json(self, json_str):
        """Populate all fields from a serialized transaction JSON string."""
        json_data = json.loads(json_str)
        self.data = json_data.get('data', '')
        self.last_tx = json_data.get('last_tx', '')
        self.owner = json_data.get('owner', '')
        self.quantity = json_data.get('quantity', '')
        self.reward = json_data.get('reward', '')
        self.signature = json_data.get('signature', '')
        self.tags = [decode_tag(tag) for tag in json_data.get('tags', [])]
        self.target = json_data.get('target', '')
        self.data_size = json_data.get('data_size', '0')
        self.data_root = json_data.get('data_root', '')
        self.data_tree = json_data.get('data_tree', [])
        logger.debug(json_data)
    def prepare_chunks(self):
        """Lazily compute merkle chunks/proofs for file-handler uploads."""
        if not self.chunks:
            self.chunks = generate_transaction_chunks(self.file_handler)
            self.data_root = base64url_encode(self.chunks.get('data_root'))
        # fallback for an empty/falsy chunk result
        if not self.chunks:
            self.chunks = {
                "chunks": [],
                "data_root": b'',
                "proof": []
            }
            self.data_root = ''
    def get_chunk(self, idx):
        """Return the idx-th chunk payload (dict ready for POSTing)."""
        if self.chunks is None:
            raise ArweaveTransactionException("Chunks have not been prepared")
        proof = self.chunks.get('proofs')[idx]
        chunk = self.chunks.get('chunks')[idx]
        # read the chunk bytes straight from the source file
        self.file_handler.seek(chunk.min_byte_range)
        chunk_data = self.file_handler.read(chunk.data_size)
        return {
            "data_root": self.data_root.decode(),
            "data_size": str(self.data_size),
            "data_path": base64url_encode(proof.proof),
            "offset": str(proof.offset),
            "chunk": base64url_encode(chunk_data)
        }
def arql(wallet, query):
    """Run an ARQL query against the gateway's /arql endpoint.

    Create your query like so:
        query = {
            "op": "and",
            "expr1": {"op": "equals", "expr1": "from",
                      "expr2": "hnRI7JoN2vpv__w90o4MC_ybE9fse6SUemwQeY8hFxM"},
            "expr2": {
                "op": "or",
                "expr1": {"op": "equals", "expr1": "type", "expr2": "post"},
                "expr2": {"op": "equals", "expr1": "type", "expr2": "comment"},
            },
        }

    :param wallet: Wallet instance (unused; kept for API symmetry)
    :param query: dict describing the ARQL expression tree
    :return: list of matching transaction ID strings, or None on error
    """
    headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
    response = requests.post("{}/arql".format(API_URL),
                             data=json.dumps(query), headers=headers)
    if response.status_code != 200:
        return None
    return json.loads(response.text)
def arql_with_transaction_data(wallet, query):
    """Run an ARQL query and hydrate each match into a Transaction.

    Create your query like so:
        query = {
            "op": "and",
            "expr1": {"op": "equals", "expr1": "from",
                      "expr2": "hnRI7JoN2vpv__w90o4MC_ybE9fse6SUemwQeY8hFxM"},
            "expr2": {
                "op": "or",
                "expr1": {"op": "equals", "expr1": "type", "expr2": "post"},
                "expr2": {"op": "equals", "expr1": "type", "expr2": "comment"},
            },
        }

    :param wallet: Wallet used to construct each Transaction
    :param query: dict describing the ARQL expression tree
    :return: list of Transaction instances with data loaded, or None if
        the query returned no matches / failed
    """
    transaction_ids = arql(wallet, query)
    if not transaction_ids:
        return None
    transactions = []
    for transaction_id in transaction_ids:
        tx = Transaction(wallet, id=transaction_id)
        tx.get_transaction()
        tx.get_data()
        transactions.append(tx)
    # Bug fix: the original built this list and then returned None,
    # discarding the results its docstring promised.
    return transactions
| 28.979123
| 115
| 0.561991
|
acfdf534c3fd74af524fcc9ec9dfcf3c68453118
| 587
|
py
|
Python
|
src/main_client_dotenv.py
|
ConnectionMaster/sp22-discord-bot
|
1d749ed6a2d59d8c668badc1a30e27d0a39bf483
|
[
"Apache-2.0"
] | null | null | null |
src/main_client_dotenv.py
|
ConnectionMaster/sp22-discord-bot
|
1d749ed6a2d59d8c668badc1a30e27d0a39bf483
|
[
"Apache-2.0"
] | null | null | null |
src/main_client_dotenv.py
|
ConnectionMaster/sp22-discord-bot
|
1d749ed6a2d59d8c668badc1a30e27d0a39bf483
|
[
"Apache-2.0"
] | 3
|
2022-01-25T02:09:50.000Z
|
2022-01-28T17:45:41.000Z
|
import os
import dotenv
import nextcord

# Declare which gateway intents this bot needs; default() covers the basics.
intents = nextcord.Intents.default()
# The privileged members intent lets the bot receive member information.
intents.members = True

# The Client object is the live connection to Discord.
client = nextcord.Client(intents=intents)

# Load key/value pairs from the local `.env` file into the environment.
dotenv.load_dotenv()

# The bot token lives in the 'token' environment variable loaded above.
client.run(os.getenv("token"))
| 29.35
| 94
| 0.785349
|
acfdf733a1e94bc8bbfb7781b9eea7705e0815a3
| 334
|
py
|
Python
|
client/fmcmds/app_list.py
|
AlexRogalskiy/caastle
|
bb832c6828c6e97ac18d58ac0f23d8f61ff7bec3
|
[
"Apache-2.0"
] | 19
|
2017-09-01T03:42:00.000Z
|
2018-01-25T09:53:59.000Z
|
client/fmcmds/app_list.py
|
mrhm-dev/caastle
|
bb832c6828c6e97ac18d58ac0f23d8f61ff7bec3
|
[
"Apache-2.0"
] | 34
|
2017-08-30T14:11:16.000Z
|
2017-12-16T01:52:44.000Z
|
client/fmcmds/app_list.py
|
AlexRogalskiy/caastle
|
bb832c6828c6e97ac18d58ac0f23d8f61ff7bec3
|
[
"Apache-2.0"
] | 4
|
2019-01-20T22:04:59.000Z
|
2022-01-09T02:25:35.000Z
|
from cliff.command import Command
import call_server as server
class AppList(Command):
    """Cliff command that fetches and prints the list of applications."""

    def get_parser(self, prog_name):
        # No extra CLI arguments beyond what Command already provides.
        return super(AppList, self).get_parser(prog_name)

    def take_action(self, parsed_args):
        print(server.TakeAction().get_app_list())
| 20.875
| 59
| 0.688623
|
acfdf7829cdbf2c5a191fd213ee9d036f4af29ff
| 4,927
|
py
|
Python
|
connvitals/collector.py
|
fossabot/connvitals
|
0a185ee34fe872bab7188bc4b201dd8b6a80fe4d
|
[
"Apache-2.0"
] | null | null | null |
connvitals/collector.py
|
fossabot/connvitals
|
0a185ee34fe872bab7188bc4b201dd8b6a80fe4d
|
[
"Apache-2.0"
] | 1
|
2018-08-21T18:11:26.000Z
|
2018-08-21T18:11:26.000Z
|
connvitals/collector.py
|
fossabot/connvitals
|
0a185ee34fe872bab7188bc4b201dd8b6a80fe4d
|
[
"Apache-2.0"
] | 1
|
2018-08-21T18:04:40.000Z
|
2018-08-21T18:04:40.000Z
|
# Copyright 2018 Comcast Cable Communications Management, LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module defines a single worker to collect stats from a single host"""
import multiprocessing
import math
from . import utils, config, ping, traceroute, ports
def dummy(_):
    """No-op placeholder callback: accepts and ignores a single argument."""
    return None
class Collector(multiprocessing.Process):
    """
    A threaded worker that collects stats (ping, traceroute, port scan)
    for a single host and reports them back over a Pipe.
    """
    # NOTE(review): these are *class-level* defaults; run()/ping() assign
    # into ``self.result[i]``, which mutates this shared list. That is safe
    # while each Collector runs in its own process, but would be unsafe for
    # multiple Collectors in one process — confirm intended.
    trace = None
    result = [utils.PingResult(-1, -1, -1, -1, 100.),
              utils.Trace([utils.TraceStep('*', -1)] * 10),
              utils.ScanResult(None, None, None)]
    def __init__(self, host:str, ID:int, conf:config.Config = config.CONFIG):
        """
        Initializes the Collector, and its worker pool
        """
        super(Collector, self).__init__()
        self.hostname = host
        self.conf = conf
        # conf.HOSTS maps hostname -> resolved host tuple (addr first)
        self.host = conf.HOSTS[host]
        self.name = host
        self.ID = ID
        # pipe[1] is written by the child process, pipe[0] read by the parent
        self.pipe = multiprocessing.Pipe()
    def run(self):
        """
        Called when the thread is run
        """
        with multiprocessing.pool.ThreadPool() as pool:
            pscan_result, trace_result, ping_result = None, None, None
            # kick off port scan and traceroute asynchronously first
            if self.conf.PORTSCAN:
                pscan_result = pool.apply_async(ports.portScan,
                                                (self.host, pool),
                                                error_callback=utils.error)
            if self.conf.TRACE:
                trace_result = pool.apply_async(traceroute.trace,
                                                (self.host, self.ID, self.conf),
                                                error_callback=utils.error)
            # ping runs synchronously in this process while the others work
            if not self.conf.NOPING:
                try:
                    self.ping(pool)
                except (multiprocessing.TimeoutError, ValueError):
                    # fall back to the class-level sentinel result
                    self.result[0] = type(self).result[0]
            else:
                self.result[0] = None
            if self.conf.TRACE:
                try:
                    # NOTE(review): conf.HOPS is passed as the .get() *timeout*
                    # in seconds, though the name suggests a hop count — confirm.
                    self.result[1] = trace_result.get(self.conf.HOPS)
                except multiprocessing.TimeoutError:
                    self.result[1] = type(self).result[1]
            else:
                self.result[1] = None
            if self.conf.PORTSCAN:
                try:
                    self.result[2] = pscan_result.get(0.5)
                except multiprocessing.TimeoutError:
                    self.result[2] = type(self).result[2]
            else:
                self.result[2] = None
            # ship the three-part result back to the parent process
            try:
                self.pipe[1].send(self.result)
            except OSError as e:
                utils.error(OSError("Error sending results: %s" % e))
    def ping(self, pool:multiprocessing.pool.ThreadPool, pinger:ping.Pinger = None):
        """
        Pings the host NUMPINGS times and stores min/avg/max/stddev/loss
        in self.result[0]. A Pinger created here is closed on exit; a
        caller-supplied one is left open.
        """
        destroyPinger = dummy
        if pinger is None:
            pinger = ping.Pinger(self.host, bytes(self.conf.PAYLOAD))
            destroyPinger = lambda x: x.sock.close()
        # Aggregates round-trip time for each packet in the sequence
        rtt, lost = [], 0
        # Sends, receives and parses all icmp packets asynchronously
        results = pool.map_async(pinger.ping,
                                 range(self.conf.NUMPINGS),
                                 error_callback=utils.error)
        # 8-second overall timeout for the whole ping batch
        pkts = results.get(8)
        for pkt in pkts:
            # non-positive / None round trips count as lost packets
            if pkt != None and pkt > 0:
                rtt.append(pkt*1000)
            else:
                lost += 1
        try:
            avg = sum(rtt) / len(rtt)
            # sample standard deviation (divides by n-1)
            std = 0.
            for item in rtt:
                std += (avg - item)**2
            std /= len(rtt) - 1
            std = math.sqrt(std)
        except ZeroDivisionError:
            # 0 or 1 replies: no meaningful deviation
            std = 0.
        finally:
            destroyPinger(pinger)
        if rtt:
            self.result[0] = utils.PingResult(min(rtt), avg, max(rtt), std, lost/self.conf.NUMPINGS *100.0)
        else:
            self.result[0] = type(self).result[0]
    def __str__(self) -> str:
        """
        Implements 'str(self)'
        Returns a plaintext output result
        """
        ret = []
        if self.host[0] == self.hostname:
            ret.append(self.hostname)
        else:
            ret.append("%s (%s)" % (self.hostname, self.host[0]))
        pings, trace, scans = self.result
        if pings:
            ret.append(str(pings))
        # only print the trace when it changed since the last report
        if trace and trace != self.trace:
            self.trace = trace
            # Dirty hack because I can't inherit with strong typing in Python 3.4
            ret.append(utils.traceToStr(trace))
        if scans:
            ret.append(str(scans))
        return "\n".join(ret)
    def __repr__(self) -> str:
        """
        Implements `repr(self)`
        Returns a JSON output result
        """
        ret = [r'{"addr":"%s"' % self.host[0]]
        ret.append(r'"name":"%s"' % self.hostname)
        if not self.conf.NOPING:
            ret.append(r'"ping":%s' % repr(self.result[0]))
        if self.conf.TRACE and self.trace != self.result[1]:
            self.trace = self.result[1]
            # Dirty hack because I can't inherit with strong typing in Python 3.4
            ret.append(r'"trace":%s' % utils.traceRepr(self.result[1]))
        if self.conf.PORTSCAN:
            ret.append(r'"scan":%s' % repr(self.result[2]))
        return ','.join(ret) + '}'
    def recv(self):
        """
        Returns a message from the Collector's Pipe (blocks until sent)
        """
        return self.pipe[0].recv()
| 26.777174
| 98
| 0.654759
|
acfdf7a4d5056c4f69b5bbef62c56f9374fafa52
| 1,330
|
py
|
Python
|
src/Placeable.py
|
benedicteb/outcast
|
0fe16e0bafdbf4bb02d63e93cae208c42d2d7824
|
[
"Apache-2.0"
] | null | null | null |
src/Placeable.py
|
benedicteb/outcast
|
0fe16e0bafdbf4bb02d63e93cae208c42d2d7824
|
[
"Apache-2.0"
] | 8
|
2015-10-25T16:02:17.000Z
|
2015-11-06T09:47:43.000Z
|
src/Placeable.py
|
benedicteb/outcast
|
0fe16e0bafdbf4bb02d63e93cae208c42d2d7824
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
Contains placeable classes.
Things that goes on top of the landscape.
"""
import numpy as np
import os
import pygame
import logging
import Game
class Placeable(object):
    """
    Base class for Persons and Items: an entity with a grid position, a
    facing direction and a loaded sprite surface.
    """
    def __init__(self, position, sprite):
        """
        Defaults to facing south. Facing codes:
            - 0: South
            - 1: East
            - 2: North
            - 3: West
        @param position arraylike [x, y]; replaced with [0, 0] if invalid
        @param sprite name of sprite-file, no need for path nor extension.
        """
        if not isinstance(position, (tuple, list, np.ndarray)):
            # invalid position is logged and reset rather than raising
            logging.error(
                "Position should be arraylike with [x, y]. Set it to [0, 0]."
            )
            position = [0, 0]
        self.position = np.array(position)
        self.facing = 0
        # resource_path resolves the sprite file inside the game's asset dir
        self._sprite = pygame.image.load(Game.resource_path(
            Game.Game.SPRITES_LOCATION, sprite + Game.Game.SPRITES_EXT
        )).convert_alpha()
    def get_sprite(self):
        # Rotate the sprite while keeping its center and size.
        # facing * 90 degrees (pygame rotates counter-clockwise).
        rot_image = pygame.transform.rotate(
            self._sprite, self.facing*90
        )
        # crop the (possibly enlarged) rotated surface back to original size
        rot_rect = self._sprite.get_rect().copy()
        rot_rect.center = rot_image.get_rect().center
        rot_image = rot_image.subsurface(rot_rect).copy()
        return rot_image
| 26.6
| 77
| 0.596992
|
acfdf8924b69a24c1654bbab889f076de1a50df1
| 1,229
|
py
|
Python
|
autoscalingsim/scaling/policiesbuilder/adjustmentplacement/desired_adjustment_calculator/scoring/score/score_impl/price_score.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | 6
|
2021-03-10T16:23:10.000Z
|
2022-01-14T04:57:46.000Z
|
autoscalingsim/scaling/policiesbuilder/adjustmentplacement/desired_adjustment_calculator/scoring/score/score_impl/price_score.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | null | null | null |
autoscalingsim/scaling/policiesbuilder/adjustmentplacement/desired_adjustment_calculator/scoring/score/score_impl/price_score.py
|
Remit/autoscaling-simulator
|
091943c0e9eedf9543e9305682a067ab60f56def
|
[
"MIT"
] | 1
|
2022-01-14T04:57:55.000Z
|
2022-01-14T04:57:55.000Z
|
import numbers
from autoscalingsim.utils.functions import InvertingFunction
from autoscalingsim.scaling.policiesbuilder.adjustmentplacement.desired_adjustment_calculator.scoring.score.score import Score
@Score.register('PriceScoreCalculator')
class PriceScore(Score):
    """Score that ranks cheaper options higher: score(price) = 1 / price.

    The default price of +inf yields the worst possible score (0); a price
    of 0 yields the best (inf, via the init score).
    """
    def __init__(self, price_in : float = float('Inf')):
        # price_in may be a plain number or a wrapper exposing ``.value``
        price = price_in if isinstance(price_in, numbers.Number) else price_in.value
        # forward and inverse mappings between price and score (both 1/x)
        super().__init__(InvertingFunction(lambda price: 1 / price),
                         InvertingFunction(lambda score: 1 / score))
        # NOTE(review): assumes Score.__init__ sets ``score_computer`` — confirm.
        self.score = self.score_computer(price)
    def __add__(self, other : 'PriceScore'):
        # NOTE(review): relies on Score exposing ``original_value`` (the
        # underlying price) — adding prices, then re-deriving the score.
        return self.__class__(self.original_value + other.original_value)
    def __mul__(self, other : numbers.Number):
        # scales the score directly (not the price)
        new_score = self.__class__()
        new_score.score = self.score * other
        return new_score
    def __truediv__(self, other : numbers.Number):
        # division by zero maps to an infinite (best) score rather than raising
        new_score = self.__class__()
        new_score.score = float('Inf') if other == 0 else self.score / other
        return new_score
    @classmethod
    def build_init_score(cls):
        # price 0 -> score 1/0 handled by InvertingFunction semantics
        return cls(0)
    @classmethod
    def build_worst_score(cls):
        # price inf -> score 0
        return cls(float('Inf'))
|
acfdf90a83b4c14be733966aded6f97c8d1da47e
| 830
|
py
|
Python
|
setup.py
|
Quantum56/AlphaZero-AI
|
504522feb4e67211d5fb592f4b14a2cb8271d015
|
[
"MIT"
] | 1
|
2019-11-12T01:55:36.000Z
|
2019-11-12T01:55:36.000Z
|
setup.py
|
Quantum56/AlphaZero-AI
|
504522feb4e67211d5fb592f4b14a2cb8271d015
|
[
"MIT"
] | 14
|
2019-11-12T00:09:26.000Z
|
2022-02-10T00:46:30.000Z
|
setup.py
|
Quantum56/AlphaZero-AI
|
504522feb4e67211d5fb592f4b14a2cb8271d015
|
[
"MIT"
] | null | null | null |
from setuptools import setup
# Packaging metadata for the AlphaZero-AI distribution.
setup(
    name='AlphaZero-AI',
    version='1.0',
    description='Modular AI using AlphaZero neural networking',
    author='Quantum56',
    packages=['AlphaZero-AI'], #same as name
    # NOTE(review): backslash path separator is Windows-only; setuptools
    # generally expects forward slashes here — confirm cross-platform builds.
    package_dir={'AlphaZero-AI': 'src\\DeepReinforcementLearning'},
    install_requires=['absl-py','appnope','astor','bleach','cycler','decorator','graphviz','grpcio','h5py','html5lib','ipython','ipython-genutils','jedi','jupyter-client','jupyter-core','Keras',
                      'kiwisolver','Markdown','matplotlib','numpy','parso','pexpect','pickleshare','prompt-toolkit','protobuf','ptyprocess','pydot','pydot-ng','Pygments','pyparsing','python-dateutil',
                      'pytz','PyYAML','pyzmq','scipy','simplegeneric','six','tensorboard','tensorflow','termcolor','tornado','traitlets','wcwidth','Werkzeug'], #external packages as dependencies
)
| 63.846154
| 193
| 0.712048
|
acfdf9cf9c72af7d9c92d35c8112cf2501f64e07
| 663
|
py
|
Python
|
exercises/ja/exc_03_11.py
|
YanaPalacheva/spacy-course
|
59975f7348a601532303be91474d75d02d0540ef
|
[
"MIT"
] | 1
|
2021-12-30T06:40:11.000Z
|
2021-12-30T06:40:11.000Z
|
exercises/ja/exc_03_11.py
|
YanaPalacheva/spacy-course
|
59975f7348a601532303be91474d75d02d0540ef
|
[
"MIT"
] | null | null | null |
exercises/ja/exc_03_11.py
|
YanaPalacheva/spacy-course
|
59975f7348a601532303be91474d75d02d0540ef
|
[
"MIT"
] | 1
|
2020-06-08T13:26:06.000Z
|
2020-06-08T13:26:06.000Z
|
# Exercise template: the ____ placeholders are meant to be filled in.
import spacy
from spacy.tokens import Span
nlp = spacy.load("en_core_web_sm")
def get_wikipedia_url(span):
    # If the span has one of these entity labels, return its Wikipedia URL
    if ____ in ("PERSON", "ORG", "GPE", "LOCATION"):
        entity_text = span.text.replace(" ", "_")
        return "https://en.wikipedia.org/w/index.php?search=" + entity_text
# Register get_wikipedia_url as the getter for the Span extension
# attribute "wikipedia_url"
____.____(____, ____=____)
doc = nlp(
    "In over fifty years from his very first recordings right through to his "
    "last album, David Bowie was at the vanguard of contemporary culture."
)
for ent in doc.ents:
    # Print the entity text and its Wikipedia URL
    print(____, ____)
| 27.625
| 78
| 0.717949
|
acfdf9fbc9fab805aa2a8c345b03d458e9e8d3f8
| 2,203
|
py
|
Python
|
std_number_validation/tests/validators/boolean_validator_test.py
|
lgrabowski/std-number-validation
|
b27a66ed3bd7c7ac25b64b99b462f1c3e3380f20
|
[
"MIT"
] | null | null | null |
std_number_validation/tests/validators/boolean_validator_test.py
|
lgrabowski/std-number-validation
|
b27a66ed3bd7c7ac25b64b99b462f1c3e3380f20
|
[
"MIT"
] | null | null | null |
std_number_validation/tests/validators/boolean_validator_test.py
|
lgrabowski/std-number-validation
|
b27a66ed3bd7c7ac25b64b99b462f1c3e3380f20
|
[
"MIT"
] | null | null | null |
import unittest
from std_number_validation import validators
from std_number_validation import algorithms
from std_number_validation import exceptions
class LuhnAlgorithmTestCase(unittest.TestCase):
    """Tests for BooleanValidator driven by the Luhn checksum algorithm.

    NOTE(review): the class name says "LuhnAlgorithm" but it actually
    exercises BooleanValidator — consider renaming for clarity.
    """
    BOOLEAN_VALIDATOR_CLASS = validators.BooleanValidator
    LUHN_ALGORITHM = algorithms.LuhnAlgorithm
    # 79927398713 is the canonical Luhn-valid example; ...11 fails the checksum
    CORRECT_NUMBER = 79927398713
    INCORRECT_NUMBER = 79927398711
    def setUp(self) -> None:
        pass
    def test_checks_if_bool_validator_accepts_valid_number(self):
        """
        BoolValidator should accept correct number
        """
        # given
        validator = self.BOOLEAN_VALIDATOR_CLASS(self.CORRECT_NUMBER, algorithm=self.LUHN_ALGORITHM)
        # when
        is_valid = validator.is_valid()
        # then
        self.assertTrue(is_valid)
    def test_checks_if_bool_validator_rejects_invalid_number(self):
        """
        BoolValidator should reject invalid number
        """
        # given
        validator = self.BOOLEAN_VALIDATOR_CLASS(self.INCORRECT_NUMBER, algorithm=self.LUHN_ALGORITHM)
        # when
        is_valid = validator.is_valid()
        # then
        self.assertFalse(is_valid)
    def test_checks_if_bool_validator_rejects_invalid_param(self):
        """
        BoolValidator should reject invalid param
        """
        # given
        invalid_param = "invalid_param_bazzzzingaaa" # validated at runtime despite type hints — this is no Java
        validator = self.BOOLEAN_VALIDATOR_CLASS(invalid_param, algorithm=self.LUHN_ALGORITHM)
        # when
        is_valid = validator.is_valid()
        # then
        self.assertFalse(is_valid)
    def test_checks_if_bool_validator_rejects_invalid_number_and_raises_exception(self):
        """
        BoolValidator should raise the configured exception for an invalid number
        """
        # given
        validator = self.BOOLEAN_VALIDATOR_CLASS(self.INCORRECT_NUMBER,
                                                 algorithm=self.LUHN_ALGORITHM,
                                                 exc_to_raise=exceptions.ValidationError)
        # when
        # then
        self.assertRaises(exceptions.ValidationError, validator.is_valid)
| 35.532258
| 109
| 0.650477
|
acfdfb843ca5ea34a47df42eb7b8d3623a4bf8a2
| 2,744
|
py
|
Python
|
pychron/spectrometer/tasks/mass_cal/mass_calibration_task.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/spectrometer/tasks/mass_cal/mass_calibration_task.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/spectrometer/tasks/mass_cal/mass_calibration_task.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from pyface.tasks.task_layout import TaskLayout, PaneItem, VSplitter
from traits.api import Any, Instance
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.database.isotope_database_manager import IsotopeDatabaseManager
from pychron.envisage.tasks.editor_task import BaseEditorTask
from pychron.spectrometer.mass_cal.mass_calibrator import MassCalibratorSweep
from pychron.spectrometer.tasks.mass_cal.editor import MassCalibrationEditor
from pychron.spectrometer.tasks.mass_cal.panes import (
MassCalibrationTablePane,
MassCalibrationsPane,
MassCalibrationControlPane,
)
class MassCalibrationTask(BaseEditorTask):
name = "Mass Calibration"
spectrometer_manager = Any
scanner = Instance(MassCalibratorSweep)
def _active_editor_changed(self):
if self.active_editor:
self.scanner.graph = self.active_editor.graph
self.scanner.setup_graph()
def _scanner_default(self):
spec = self.spectrometer_manager.spectrometer
s = MassCalibratorSweep(spectrometer=spec, db=IsotopeDatabaseManager())
if spec.simulation:
s.integration_time = 0.065536
s.verbose = True
return s
def activated(self):
editor = MassCalibrationEditor()
self._open_editor(editor)
def create_dock_panes(self):
return [
MassCalibrationTablePane(model=self),
MassCalibrationsPane(model=self),
MassCalibrationControlPane(model=self),
]
def _default_layout_default(self):
return TaskLayout(
left=VSplitter(
PaneItem("pychron.mass_calibration.cal_points"),
PaneItem("pychron.mass_calibration.controls"),
)
)
# ============= EOF =============================================
| 35.636364
| 81
| 0.636297
|
acfdfb90494e87a07425bc0995669f55d5c52aa4
| 3,072
|
py
|
Python
|
utils/stitch/stitch.py
|
VladPerish/keras_segmentation
|
851771ff2b8d02879574f37e86a3acba12a4d299
|
[
"MIT"
] | 1
|
2020-09-09T12:42:39.000Z
|
2020-09-09T12:42:39.000Z
|
utils/stitch/stitch.py
|
nayemabs/keras_segmentation
|
851771ff2b8d02879574f37e86a3acba12a4d299
|
[
"MIT"
] | null | null | null |
utils/stitch/stitch.py
|
nayemabs/keras_segmentation
|
851771ff2b8d02879574f37e86a3acba12a4d299
|
[
"MIT"
] | null | null | null |
import numpy as np
from PIL import Image
from PIL import ImageFilter
import os
from shutil import rmtree
import matplotlib.pyplot as plt
import pandas as pd
from scipy.misc import toimage
# to remove zeropadding
from zeropad_remove import zeropad_remove
# 2014-12-05_0000718813_1.png
def stitch(input_dir, output_dir, total_grid):
images = []
unique_name = []
for img in os.listdir(input_dir):
images.append(img)
date, name, ext = img.split('_', 3)[:3]
num, format = ext.split('.')
# Take the unique instances of all occurance matches only on date_name
if date + '_' + name in img:
if date + '_' + name not in unique_name:
unique_name.append(date + '_' + name)
# print(date + '_' + name + '_' + num + '.' + format)
# Sorting image list using 2nd occurance of _ to .png
images.sort(key=lambda x: int(x[x.find('_', 15) + len('_'): x.rfind('.png')]))
# Iterates through number of main images
for kk in range(len(unique_name)):
# Iterate through number of total grid images
list_image = []
for ii in range(len(images)):
# Iterates through number of grid images for each image
for jj in range(total_grid+1):
date, name, ext = (unique_name[kk] + '_' + str(jj) + '.' + format).split('_', 3)[:3]
num, format = ext.split('.')
if images[ii] == date + '_' + name + '_' + str(jj) + '.' + format:
print('Image: {}'.format(images[ii]))
list_image.append(os.path.join(input_dir, images[ii]))
comb_width = int(224 * 32)
comb_height = int(224 * 30)
new_im = Image.new('RGB', (comb_width, comb_height))
x_offset = 0
y_offset = 0
for img in list_image:
image = Image.open(img)
# image = zeropad_remove(np.array(image))
image = toimage(image)
new_im.paste(image, (x_offset, y_offset))
x_offset += image.size[0]
if x_offset == comb_width:
x_offset = 0
y_offset += image.size[0]
new_im.save(output_dir + '/' + unique_name[kk] + '.png')
stitch('/home/akmmrahman/ss-master/workingFCN/output/indexed_fcn8/','/home/akmmrahman/ss-master/workingFCN/output/stitched_fcn8/',960)
stitch('/home/akmmrahman/ss-master/workingFCN/output/indexed_fcn8_bal/','/home/akmmrahman/ss-master/workingFCN/output/stitched_fcn8_bal/',960)
stitch('/home/akmmrahman/ss-master/workingFCN/output/indexed_unet/','/home/akmmrahman/ss-master/workingFCN/output/stitched_unet/',960)
# stitch('/home/akmmrahman/ss-master/workingFCN/output/indexed_unet_bal/','/home/akmmrahman/ss-master/workingFCN/output/stitched_unet_bal/',960)
# stitch('/home/akmmrahman/ss-master/workingFCN/data/dataset/test/org_grid/','/home/akmmrahman/ss-master/workingFCN/output/input/',960)
# stitch('/home/akmmrahman/ss-master/workingFCN/data/dataset/test/gt_indx/','/home/akmmrahman/ss-master/workingFCN/output/gt/',960)
| 42.082192
| 144
| 0.630859
|
acfdfba658cfa6bcadd34b708928f3e00f5b2dd7
| 8,788
|
py
|
Python
|
models/frustum_pointnets_v1.py
|
huy-ha/frustum-pointnets
|
0c5b8040707e4497ee2fe7bc3445462cf31ac9e0
|
[
"Apache-2.0"
] | null | null | null |
models/frustum_pointnets_v1.py
|
huy-ha/frustum-pointnets
|
0c5b8040707e4497ee2fe7bc3445462cf31ac9e0
|
[
"Apache-2.0"
] | null | null | null |
models/frustum_pointnets_v1.py
|
huy-ha/frustum-pointnets
|
0c5b8040707e4497ee2fe7bc3445462cf31ac9e0
|
[
"Apache-2.0"
] | null | null | null |
''' Frsutum PointNets v1 Model.
'''
from __future__ import print_function
import sys
import os
import tensorflow.compat.v1 as tf
import numpy as np
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import tf_util
from model_util import NUM_HEADING_BIN, NUM_SIZE_CLUSTER, NUM_OBJECT_POINT
from model_util import point_cloud_masking, get_center_regression_net
from model_util import placeholder_inputs, parse_output_to_tensors, get_loss
def get_instance_seg_v1_net(point_cloud, one_hot_vec,
is_training, bn_decay, end_points):
''' 3D instance segmentation PointNet v1 network.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_training: TF boolean scalar
bn_decay: TF float scalar
end_points: dict
Output:
logits: TF tensor in shape (B,N,2), scores for bkg/clutter and object
end_points: dict
'''
batch_size = point_cloud.get_shape()[0].value
num_point = point_cloud.get_shape()[1].value
net = tf.expand_dims(point_cloud, 2)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv2', bn_decay=bn_decay)
point_feat = tf_util.conv2d(net, 64, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv3', bn_decay=bn_decay)
net = tf_util.conv2d(point_feat, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv4', bn_decay=bn_decay)
net = tf_util.conv2d(net, 1024, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv5', bn_decay=bn_decay)
global_feat = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool')
global_feat = tf.concat([global_feat, tf.expand_dims(tf.expand_dims(one_hot_vec, 1), 1)], axis=3)
global_feat_expand = tf.tile(global_feat, [1, num_point, 1, 1])
concat_feat = tf.concat(axis=3, values=[point_feat, global_feat_expand])
net = tf_util.conv2d(concat_feat, 512, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv6', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv7', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv8', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv9', bn_decay=bn_decay)
net = tf_util.dropout(net, is_training, 'dp1', keep_prob=0.5)
logits = tf_util.conv2d(net, 2, [1,1],
padding='VALID', stride=[1,1], activation_fn=None,
scope='conv10')
logits = tf.squeeze(logits, [2]) # BxNxC
return logits, end_points
def get_3d_box_estimation_v1_net(object_point_cloud, one_hot_vec,
is_training, bn_decay, end_points):
''' 3D Box Estimation PointNet v1 network.
Input:
object_point_cloud: TF tensor in shape (B,M,C)
point clouds in object coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
Output:
output: TF tensor in shape (B,3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4)
including box centers, heading bin class scores and residuals,
and size cluster scores and residuals
'''
num_point = object_point_cloud.get_shape()[1].value
net = tf.expand_dims(object_point_cloud, 2)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg1', bn_decay=bn_decay)
net = tf_util.conv2d(net, 128, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg2', bn_decay=bn_decay)
net = tf_util.conv2d(net, 256, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg3', bn_decay=bn_decay)
net = tf_util.conv2d(net, 512, [1,1],
padding='VALID', stride=[1,1],
bn=True, is_training=is_training,
scope='conv-reg4', bn_decay=bn_decay)
net = tf_util.max_pool2d(net, [num_point,1],
padding='VALID', scope='maxpool2')
net = tf.squeeze(net, axis=[1,2])
net = tf.concat([net, one_hot_vec], axis=1)
net = tf_util.fully_connected(net, 512, scope='fc1', bn=True,
is_training=is_training, bn_decay=bn_decay)
net = tf_util.fully_connected(net, 256, scope='fc2', bn=True,
is_training=is_training, bn_decay=bn_decay)
# The first 3 numbers: box center coordinates (cx,cy,cz),
# the next NUM_HEADING_BIN*2: heading bin class scores and bin residuals
# next NUM_SIZE_CLUSTER*4: box cluster scores and residuals
output = tf_util.fully_connected(net,
3+NUM_HEADING_BIN*2+NUM_SIZE_CLUSTER*4, activation_fn=None, scope='fc3')
return output, end_points
def get_model(point_cloud, one_hot_vec, is_training, bn_decay=None):
''' Frustum PointNets model. The model predict 3D object masks and
amodel bounding boxes for objects in frustum point clouds.
Input:
point_cloud: TF tensor in shape (B,N,4)
frustum point clouds with XYZ and intensity in point channels
XYZs are in frustum coordinate
one_hot_vec: TF tensor in shape (B,3)
length-3 vectors indicating predicted object type
is_training: TF boolean scalar
bn_decay: TF float scalar
Output:
end_points: dict (map from name strings to TF tensors)
'''
end_points = {}
# 3D Instance Segmentation PointNet
logits, end_points = get_instance_seg_v1_net(\
point_cloud, one_hot_vec,
is_training, bn_decay, end_points)
end_points['mask_logits'] = logits
# Masking
# select masked points and translate to masked points' centroid
object_point_cloud_xyz, mask_xyz_mean, end_points = \
point_cloud_masking(point_cloud, logits, end_points)
# T-Net and coordinate translation
center_delta, end_points = get_center_regression_net(\
object_point_cloud_xyz, one_hot_vec,
is_training, bn_decay, end_points)
stage1_center = center_delta + mask_xyz_mean # Bx3
end_points['stage1_center'] = stage1_center
# Get object point cloud in object coordinate
object_point_cloud_xyz_new = \
object_point_cloud_xyz - tf.expand_dims(center_delta, 1)
# Amodel Box Estimation PointNet
output, end_points = get_3d_box_estimation_v1_net(\
object_point_cloud_xyz_new, one_hot_vec,
is_training, bn_decay, end_points)
# Parse output to 3D box parameters
end_points = parse_output_to_tensors(output, end_points)
end_points['center'] = end_points['center_boxnet'] + stage1_center # Bx3
return end_points
if __name__=='__main__':
with tf.Graph().as_default():
inputs = tf.zeros((32,1024,4))
outputs = get_model(inputs, tf.ones((32,3)), tf.constant(True))
for key in outputs:
print((key, outputs[key]))
loss = get_loss(tf.zeros((32,1024),dtype=tf.int32),
tf.zeros((32,3)), tf.zeros((32,),dtype=tf.int32),
tf.zeros((32,)), tf.zeros((32,),dtype=tf.int32),
tf.zeros((32,3)), outputs)
print(loss)
| 44.160804
| 101
| 0.607988
|
acfdfd1d347dcbaa5a8afb2211058267b4f00614
| 951
|
py
|
Python
|
main.py
|
GuyBarros/python-webp-convert
|
5f39b63b6afd8d259dfaf15edd8a02e7db35a9c7
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
GuyBarros/python-webp-convert
|
5f39b63b6afd8d259dfaf15edd8a02e7db35a9c7
|
[
"Apache-2.0"
] | 4
|
2021-06-08T21:51:50.000Z
|
2022-03-12T00:36:55.000Z
|
main.py
|
GuyBarros/python-webp-convert
|
5f39b63b6afd8d259dfaf15edd8a02e7db35a9c7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#this application
import json
import os
import sys
import pprint
import argparse
from PIL import Image
pp = pprint.PrettyPrinter()
# VAULT_ADDR = os.environ.get('VAULT_ADDR', 'http://localhost:8200')
def crawlDirectories(inputPath):
for dirpath, dirnames, files in os.walk(inputPath):
pp.pprint(f'Found directory: {dirpath}')
for file_name in files:
file, ext = os.path.splitext(dirpath+os.sep+file_name)
pp.pprint(ext)
if ext in [".png",".jpg",".jpeg"]:
convertImage(dirpath+os.sep+file_name)
def convertImage(infile):
file, ext = os.path.splitext(infile)
pp.pprint(file)
im = Image.open(infile).convert("RGB")
im.save(file + ".webp", "WEBP", quality=70 )
os.remove(infile)
def main():
pp.pprint("Starting")
crawlDirectories("/Users/guy/Downloads/Personal/Pictures")
if __name__ == '__main__':
main()
| 26.416667
| 69
| 0.644585
|
acfdfe5a435e779dc8ed17a72661204e365bf634
| 7,051
|
py
|
Python
|
arbitrage/private_markets/mtgox.py
|
samrocketman/bitcoin-arbitrage
|
a740d445c19e0c2acff2fb83fdec00caa74999e6
|
[
"Unlicense"
] | 2
|
2017-09-14T21:48:16.000Z
|
2017-10-05T07:23:15.000Z
|
arbitrage/private_markets/mtgox.py
|
ascjones/bitcoin-arbitrage
|
a740d445c19e0c2acff2fb83fdec00caa74999e6
|
[
"Unlicense"
] | null | null | null |
arbitrage/private_markets/mtgox.py
|
ascjones/bitcoin-arbitrage
|
a740d445c19e0c2acff2fb83fdec00caa74999e6
|
[
"Unlicense"
] | 18
|
2017-01-12T11:20:57.000Z
|
2019-04-19T10:14:34.000Z
|
# Copyright (C) 2013, Maxime Biais <maxime@biais.org>
from .market import Market
import time
import base64
import hmac
import urllib.request
import urllib.parse
import urllib.error
import urllib.request
import urllib.error
import urllib.parse
import hashlib
import sys
import json
import re
import logging
import config
class PrivateMtGox(Market):
def __init__(self):
super().__init__()
self.order_url = {"method": "POST", "url":
"https://mtgox.com/api/1/generic/private/order/result"}
self.open_orders_url = {"method": "POST", "url":
"https://mtgox.com/api/1/generic/private/orders"}
self.info_url = {"method": "POST", "url":
"https://mtgox.com/api/1/generic/private/info"}
self.withdraw_url = {"method": "POST", "url":
"https://mtgox.com/api/1/generic/bitcoin/send_simple"}
self.deposit_url = {"method": "POST", "url":
"https://mtgox.com/api/1/generic/bitcoin/address"}
self.key = config.mtgox_key
self.secret = config.mtgox_secret
self.get_info()
def _create_nonce(self):
return int(time.time() * 1000000)
def _change_currency_url(self, url, currency):
return re.sub(r'BTC\w{3}', r'BTC' + currency, url)
def _to_int_price(self, price, currency):
ret_price = None
if currency in ["USD", "EUR", "GBP", "PLN", "CAD", "AUD", "CHF", "CNY",
"NZD", "RUB", "DKK", "HKD", "SGD", "THB"]:
ret_price = price
ret_price = int(price * 100000)
elif currency in ["JPY", "SEK"]:
ret_price = price
ret_price = int(price * 1000)
return ret_price
def _to_int_amount(self, amount):
amount = amount
return int(amount * 100000000)
def _from_int_amount(self, amount):
return amount / 100000000.
def _from_int_price(self, amount):
# FIXME: should take JPY and SEK into account
return amount / 100000.
def _send_request(self, url, params, extra_headers=None):
urlparams = bytes(urllib.parse.urlencode(params), "UTF-8")
secret_from_b64 = base64.b64decode(bytes(self.secret, "UTF-8"))
hmac_secret = hmac.new(secret_from_b64, urlparams, hashlib.sha512)
headers = {
'Rest-Key': self.key,
'Rest-Sign': base64.b64encode(hmac_secret.digest()),
'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
}
if extra_headers is not None:
for k, v in extra_headers.items():
headers[k] = v
try:
req = urllib.request.Request(url['url'],
bytes(urllib.parse.urlencode(params),
"UTF-8"), headers)
response = urllib.request.urlopen(req)
if response.getcode() == 200:
jsonstr = response.read()
return json.loads(str(jsonstr, "UTF-8"))
except Exception as err:
logging.error('Can\'t request MTGox, %s' % err)
return None
def trade(self, amount, ttype, price=None):
if price:
price = self._to_int_price(price, self.currency)
amount = self._to_int_amount(amount)
self.buy_url["url"] = self._change_currency_url(
self.buy_url["url"], self.currency)
params = [("nonce", self._create_nonce()),
("amount_int", str(amount)),
("type", ttype)]
if price:
params.append(("price_int", str(price)))
response = self._send_request(self.buy_url, params)
if response and "result" in response and \
response["result"] == "success":
return response["return"]
return None
def _buy(self, amount, price):
return self.trade(amount, "bid", price)
def _sell(self, amount, price):
return self.trade(amount, "ask", price)
def withdraw(self, amount, address):
params = [("nonce", self._create_nonce()),
("amount_int", str(self._to_int_amount(amount))),
("address", address)]
response = self._send_request(self.withdraw_url, params)
if response and "result" in response and \
response["result"] == "success":
return response["return"]
return None
def deposit(self):
params = [("nonce", self._create_nonce())]
response = self._send_request(self.deposit_url, params)
if response and "result" in response and \
response["result"] == "success":
return response["return"]
return None
class PrivateMtGoxEUR(PrivateMtGox):
def __init__(self):
super().__init__()
self.ticker_url = {"method": "GET", "url":
"https://mtgox.com/api/1/BTCEUR/public/ticker"}
self.buy_url = {"method": "POST", "url":
"https://mtgox.com/api/1/BTCEUR/private/order/add"}
self.sell_url = {"method": "POST", "url":
"https://mtgox.com/api/1/BTCEUR/private/order/add"}
self.currency = "EUR"
def get_info(self):
params = [("nonce", self._create_nonce())]
response = self._send_request(self.info_url, params)
if response and "result" in response and response["result"] == "success":
self.btc_balance = self._from_int_amount(int(
response["return"]["Wallets"]["BTC"]["Balance"]["value_int"]))
self.eur_balance = self._from_int_price(int(
response["return"]["Wallets"]["EUR"]["Balance"]["value_int"]))
self.usd_balance = self.fc.convert(self.eur_balance, "EUR", "USD")
return 1
return None
class PrivateMtGoxUSD(PrivateMtGox):
def __init__(self):
super().__init__()
self.ticker_url = {"method": "GET", "url":
"https://mtgox.com/api/1/BTCUSD/public/ticker"}
self.buy_url = {"method": "POST", "url":
"https://mtgox.com/api/1/BTCUSD/private/order/add"}
self.sell_url = {"method": "POST", "url":
"https://mtgox.com/api/1/BTCUSD/private/order/add"}
self.currency = "USD"
def get_info(self):
params = [("nonce", self._create_nonce())]
response = self._send_request(self.info_url, params)
if response and "result" in response and response["result"] == "success":
self.btc_balance = self._from_int_amount(int(
response["return"]["Wallets"]["BTC"]["Balance"]["value_int"]))
self.usd_balance = self._from_int_price(int(
response["return"]["Wallets"]["USD"]["Balance"]["value_int"]))
return 1
return None
| 38.741758
| 81
| 0.565735
|
acfdff453180cb952e42c91e1a0077d5b6da2afc
| 9,654
|
py
|
Python
|
Graph/Graph.py
|
RickyL-2000/cs225sp20_env
|
d38c48b72580ba7fa172f0cc7e34b3157c13a515
|
[
"MIT"
] | 9
|
2020-04-26T06:49:06.000Z
|
2020-06-03T09:01:10.000Z
|
Graph/Graph.py
|
Xiwei-Wang/cs225sp20_env
|
d38c48b72580ba7fa172f0cc7e34b3157c13a515
|
[
"MIT"
] | null | null | null |
Graph/Graph.py
|
Xiwei-Wang/cs225sp20_env
|
d38c48b72580ba7fa172f0cc7e34b3157c13a515
|
[
"MIT"
] | 3
|
2020-04-26T07:21:22.000Z
|
2020-08-04T03:37:50.000Z
|
'''
MIT License
Name cs225sp20_env Python Package
URL https://github.com/Xiwei-Wang/cs225sp20_env
Version 1.0
Creation Date 26 April 2020
Copyright(c) 2020 Instructors, TAs and Some Students of UIUC CS 225 SP20 ZJUI Course
Instructorts: Prof. Dr. Klaus-Dieter Schewe
TAs: Tingou Liang, Run Zhang, Enyi Jiang, Xiang Li
Group 1 Students: Shen Zheng, Haozhe Chen, Ruiqi Li, Xiwei Wang
Other Students: Zhongbo Zhu
Above all, due to academic integrity, students who will take UIUC CS 225 ZJUI Course
taught with Python later than Spring 2020 semester are NOT authorized with the access
to this package.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files(the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
---------
File cs225sp20_env/Graph/Graph.py
Version 1.0
'''
# %%
# for VS Code users
if __name__ != "cs225sp20_env.Graph.Graph":
import sys
sys.path.append(__file__[:-len("cs225sp20_env/Graph/Graph.py")])
# %%
# for PyCharm users
if __name__ != "cs225sp20_env.Graph.Graph":
import sys
import os
sys.path.append(os.getcwd())
# %%
from cs225sp20_env.Graph.VertexList import VertexList
from cs225sp20_env.Graph.EdgeList import EdgeList
from cs225sp20_env.List.PyList import PyList
from cs225sp20_env.List.Fifo import Fifo
# %%
class Graph:
def __init__(self,edges=[]):
self.vertexList = VertexList(edges)
for e in edges:
self.addEdge(e)
self.addEdge((e[1],e[0]))
def addEdge(self,edge):
vertex = self.vertexList.locate(edge[0])
edgelist = vertex.edges
if edgelist != None:
edgelist.add(edge[1])
else:
edgelist = EdgeList(edge[1])
vertex.setEdges(edgelist)
def __iter__(self):
vertices = self.vertexList
for v in vertices:
x = vertices.locate(v)
y = x.edges
if y != None:
for z in y:
yield (v,z)
def insertVertex(self,item):
if not (item in self.vertexList):
self.vertexList.append(item)
def deleteVertex(self,item):
return self.vertexList.remove(item)
def insertEdge(self,edge):
self.vertexList.addVertex(edge)
self.addEdge(edge)
self.addEdge((edge[1],edge[0]))
def deleteEdge(self,edge):
self.__deleteEdge(edge)
self.__deleteEdge((edge[1],edge[0]))
def __deleteEdge(self,edge):
if not (edge[0] in self.vertexList):
print("There is no edge", edge)
return False
vertexlocation = self.vertexList.locate(edge[0])
edgelist = vertexlocation.getEdges()
if edgelist == None:
print("There is no edge", edge)
return False
res = edgelist.remove(edge[1])
if res == False:
print("There is no edge", edge)
return res
def outgoingEdges(self,item):
vertex = self.vertexList.locate(item)
if vertex == None:
print("There is no vertex", item)
return []
edgelist = vertex.getEdges()
if edgelist == None:
return []
res = []
for v in edgelist:
res.append((item,v))
return res
# yield (item,v) # If we replace the above two lines with this line, then this methods works as an iterator.
def bfs_KD(self,vertex):
if not (vertex in self.vertexList):
print("There is no vertex", vertex)
return None
length = self.vertexList.getlength()
distance = [None] * length
parent = [None] * length
index = self.vertexList.index(vertex)
distance[index] = 0
parent[index] = vertex
currentlayer = Fifo(length)
currentlayer.pushback(vertex)
nextlayer = Fifo(length)
for l in range(length):
for u in currentlayer:
# print(u)
loc = self.vertexList.locate(u)
edgelist = loc.getEdges()
if edgelist != None:
for v in edgelist:
idx = self.vertexList.index(v)
if parent[idx] == None:
nextlayer.pushback(v)
distance[idx] = l + 1
parent[idx] = u
currentlayer = nextlayer
nextlayer = Fifo(length)
return (distance,parent)
def bfs(self,vertex,index):
if not (vertex in self.vertexList):
print("There is no vertex", vertex)
return None
length = self.vertexList.getlength()
self.distance[index] = 0
self.parent[index] = vertex
queue = []
queue.append(vertex)
head = 0 # head index of queue
while head < len(queue):
u = queue[head]
index = self.vertexList.index(u)
cur_distance = self.distance[index]
loc = self.vertexList.locate(u)
edgelist = loc.getEdges()
if edgelist != None:
for v in edgelist:
idx = self.vertexList.index(v)
if self.parent[idx] == None:
queue.append(v)
self.distance[idx] = cur_distance + 1
self.parent[idx] = u
else:
# TODO leave space to handle if meet other vertex in the same subset
pass
head += 1
def allBFS(self):
numVertices = self.vertexList.getlength()
self.distance = [None] * numVertices
self.parent = [None] * numVertices
for s in self.vertexList:
idx = self.vertexList.index(s)
if self.distance[idx] == None:
self.bfs(s,idx)
return (self.distance,self.parent)
#DFS traverse using recursion
def allDFS(self):
numVertices = self.vertexList.getlength()
initlist = [None]* numVertices
self.tree = PyList(initlist,numVertices)
for i in range(numVertices):
newgraph = Graph([])
self.tree[i] = newgraph
self.mark = [None] * numVertices
self.dfsPos = 1
self.dfsNum = [1] * numVertices
self.finishingTime = 1
self.finishTime = [1] * numVertices
for s in self.vertexList:
idx = self.vertexList.index(s)
if self.mark[idx] == None:
self.mark[idx] = s
self.dfsNum[idx] = self.dfsPos
self.dfsPos += 1
self.dfs(s,idx)
def dfs(self,vertex,index):
for e in self.outgoingEdges(vertex):
idx = self.vertexList.index(e[1])
if self.mark[idx] == None:
self.tree[index].insertEdge(e)
self.__traverseTreeEdge(e)
self.mark[idx] = e[1]
self.dfs(e[1],index)
self.backtrack(vertex)
def __traverseTreeEdge(self,e):
idx = self.vertexList.index(e[1])
self.dfsNum[idx] = self.dfsPos
self.dfsPos += 1
def backtrack(self,vertex):
idx = self.vertexList.index(vertex)
self.finishTime[idx] = self.finishingTime
self.finishingTime += 1
# %%
if __name__ == "__main__":
edges = [(1,2),(2,4),(3,5),(2,5),(1,5),(3,4),(3,1),(6,2),(6,3)]
g = Graph(edges)
print(g.outgoingEdges(1))
print([v for v in g.vertexList])
g.insertVertex(7)
g.insertVertex(8)
print([v for v in g.vertexList])
g.deleteVertex(1)
g.deleteVertex(7)
print([v for v in g.vertexList])
print([e for e in g])
g.insertEdge((1,7))
print([e for e in g])
g.deleteEdge((1,2))
print([e for e in g])
edges = [(1, 5), (1, 3), (1, 7), (5, 2), (5, 3), (3, 4), (3, 6), (2, 4), (2, 6)]
# you can install this package on your own environment to help understand
import networkx as nx
import matplotlib.pyplot as plt
# visualization
G = nx.Graph()
G.add_edges_from(edges)
print("Print all vertices:{}".format(G.nodes()))
print("Print all edges:{}".format(G.edges()))
print("Print the number of edges:{}".format(G.number_of_edges()))
nx.draw_networkx(G)
plt.show()
graph = Graph(edges)
graph.allDFS()
for s in graph.vertexList:
idx = graph.vertexList.index(s)
print(s,':',[e for e in graph.tree[idx]])
graph = Graph([ (1,2),(2,4),(3,5),(2,5),(1,5),(3,4),(3,1),(6,2),(6,3),
(61, 65), (63, 64), (63, 66), (62, 64), (62, 66)])
distance,parent = graph.bfs_KD(1)
print("distance: \t%s\nparent: \t%s" %(distance,parent))
distance,parent = graph.allBFS()
print("distance: \t%s\nparent: \t%s" %(distance,parent))
| 34.851986
| 120
| 0.587321
|
acfdff7ed75ae8f1836d623cb0d32794531d167a
| 4,551
|
py
|
Python
|
graphing/32_fftPitchShifting.py
|
jaakjensen/PythonDSP
|
d4f5850a5379c14d531e6f9c6d43e03f53fb888d
|
[
"MIT"
] | 1
|
2022-01-19T10:40:41.000Z
|
2022-01-19T10:40:41.000Z
|
graphing/32_fftPitchShifting.py
|
jaakjensen/PythonDSP
|
d4f5850a5379c14d531e6f9c6d43e03f53fb888d
|
[
"MIT"
] | null | null | null |
graphing/32_fftPitchShifting.py
|
jaakjensen/PythonDSP
|
d4f5850a5379c14d531e6f9c6d43e03f53fb888d
|
[
"MIT"
] | null | null | null |
# Use this code to pitch shift an audio signal
# using the STFT.
from scipy.fft import fft, ifft, fftfreq
import numpy as np
from scipy import signal
import matplotlib.pyplot as plt
from scipy.signal import hann
from scipy.signal import sawtooth
import sys
#suppress the sometimes-annoying sci-notation
np.set_printoptions(suppress=True,threshold=np.inf)
# Number of sample points
N = 1024
# How much to pitch shift? (2 = 2x, aka an octave)
pitchShiftRatio = 2
# HopSize
hopSize = int(N/8)
# How many overlaps should we calculate?
frames = 15
# sample spacing
Fs = 48000
T = 1.0 / Fs
#X axis - start, stop, # sample points
#don't grab the end point
x1 = np.linspace(0.0, (N+frames*hopSize)*T, N+frames*hopSize, endpoint=False)
#Signal
oscFrequency = 1968.75
#Window
w1 = hann(N)
# Used to calculate bins (non-angular frequency)
# We only grab the first half of the bins (Fs/2)
# because we don't need the negative frequencies
fftBins = np.arange(0,Fs/2,Fs/N)
#Grab X Vals (aka real bins)
xvals = fftBins[0:int(N/2)]
#function used for calculating FFT
def FFTPitchShift():
#signals - cos
y1 = np.cos(2.0*np.pi*x1*oscFrequency)
# For merging all frames at the end
outSignalsMerged = np.zeros(frames*hopSize+N,dtype=complex)
#For holding input and output phases between frames
lastInputPhase = np.zeros(int(N/2))
lastOutputPhase = np.zeros(int(N/2))
for hop in range(0,frames):
#FFT of signal with window
ywf = fft(y1[hop*hopSize:hop*hopSize+N]*w1)
#grab Y values for FFTs
y_vals = np.abs(ywf[0:int(N/2)])
#grab phase values from both FFTs
currentPhase = np.arctan2(ywf[0:int(N/2)].imag, ywf[0:int(N/2)].real)
#take the difference -> phase[n] - phase[n-1]
phaseDifference = currentPhase - lastInputPhase
#save current phase for next frame
lastInputPhase = currentPhase
#calculate phase remainder by subtracting the phase shift
#we'd expect from the center frequency
aPhaseRemainder = phaseDifference - 2*np.pi*fftBins*hopSize/Fs
#re-wrap the phase to -pi to pi
#NOTE: this is not a great method for re-wrapping the phase, but it works
# It may be ok with LUT based approach for sin and cos
aPhaseRemainder = np.arctan2(np.sin(aPhaseRemainder), np.cos(aPhaseRemainder))
# Calculate fractional bin number -> fftBins*N/Fs is the bin
aFractionalBin = ((aPhaseRemainder*N)/(2*np.pi*hopSize)) + (fftBins*N/Fs)
#Calculate new bins
newBins = np.floor(pitchShiftRatio*fftBins*N/Fs + 0.5)
synthesisAmp = np.zeros(int(N/2))
synthesisFreqs = np.zeros(int(N/2))
for i in range(0,int(N/2)):
if(newBins[i] < N/2):
synthesisAmp[int(newBins[i])] += y_vals[int(i)]
synthesisFreqs[int(newBins[i])] = (aFractionalBin[int(i)] * pitchShiftRatio)
outFFT = np.zeros(N,dtype=complex)
for i in range(0,int(N/2)):
amplitude = synthesisAmp[i]
binDeviation = synthesisFreqs[i] - i
phaseDiff = binDeviation * 2.0 * np.pi * hopSize / N
phaseDiff += 2.0 * np.pi * i * hopSize / N
#Wrap phases
outPhase = np.arctan2(np.sin(phaseDiff+lastOutputPhase[i]), np.cos(phaseDiff+lastOutputPhase[i]))
lastOutputPhase[i] = outPhase
outFFT[i] = amplitude*(np.cos(outPhase) + 1j*np.sin(outPhase))
if(i>0 and i<(N/2)):
outFFT[N-i] = amplitude*(np.cos(outPhase) - 1j*np.sin(outPhase))
#Take inverse FFT
outSignal = ifft(outFFT)
#Apply Output Window
outSignal = outSignal*w1
#plot data of FFT
ax[0].plot(xvals, np.abs(ywf[0:int(N/2)]), marker="o")
ax[0].plot(xvals, np.abs(outFFT[0:int(N/2)]), marker="o")
#plot time domain signals
#resynthesised output
ax[1].plot(range(hop*hopSize,hop*hopSize+N), outSignal[0:N].real, marker="o",linewidth=2)
#original signal
ax[2].plot(range(hop*hopSize,hop*hopSize+N), y1[hop*hopSize:hop*hopSize+N], marker="o",linewidth=2)
for g in range(hop*hopSize,hop*hopSize+N):
outSignalsMerged[g] += 0.5*outSignal[g-hop*hopSize]
#At the very end, plot time domain signal
#resynthesised output
ax[1].plot(range(0,frames*hopSize+N), outSignalsMerged[0:frames*hopSize+N].real, marker="o",linewidth=2)
#init plots
# NOTE(review): ax/fig appear to be read as globals by FFTPitchShift — confirm.
fig, ax = plt.subplots(3)
FFTPitchShift();
# Lay out the three stacked axes and display the figure.
plt.tight_layout()
plt.show()
| 33.463235
| 109
| 0.642936
|
acfe0031fa599ce1064b559473ab9e0cbad86ef5
| 13,598
|
py
|
Python
|
api/assignment_utilities.py
|
janelia-flyem/assignment-manager
|
3b303a1da7d5db6fcab4f91a7d99beabe9710ee3
|
[
"BSD-3-Clause"
] | null | null | null |
api/assignment_utilities.py
|
janelia-flyem/assignment-manager
|
3b303a1da7d5db6fcab4f91a7d99beabe9710ee3
|
[
"BSD-3-Clause"
] | null | null | null |
api/assignment_utilities.py
|
janelia-flyem/assignment-manager
|
3b303a1da7d5db6fcab4f91a7d99beabe9710ee3
|
[
"BSD-3-Clause"
] | null | null | null |
''' assignment_utilities.py
Assignment manager utilities
'''
import datetime
import json
import random
import re
import string
import time
from urllib.parse import parse_qs
from flask import g
import requests
import holidays as pyholidays
import pandas as pd
from business_duration import businessDuration
BEARER = ''
CONFIG = {'config': {"url": "http://config.int.janelia.org/"}}
KEY_TYPE_IDS = dict()
# *****************************************************************************
# * Classes *
# *****************************************************************************
class InvalidUsage(Exception):
    ''' Error carrying an HTTP status code and an optional payload
        that is merged into the JSON error response.
    '''
    status_code = 400

    def __init__(self, message, status_code=None, payload=None):
        super().__init__()
        self.message = message
        # Fall back to the class-level default when no code is given.
        self.status_code = self.status_code if status_code is None else status_code
        self.payload = payload

    def to_dict(self):
        ''' Build the error response dictionary. '''
        body = dict(self.payload or ())
        body['rest'] = {'error': self.message}
        return body
# *****************************************************************************
# * Functions *
# *****************************************************************************
def add_key_value_pair(key, val, separator, sql, bind):
    ''' Append one key/value condition to the WHERE clause of a SQL statement.
        Keyword arguments:
          key: column name, optionally suffixed with !, > or < as a
               comparison marker
          val: value list (only the first element is used; * acts as a
               wildcard and switches the condition to LIKE)
          separator: logical separator (AND, OR, WHERE)
          sql: SQL statement built so far
          bind: tuple of bind values built so far
        Returns:
          (sql, bind) with the new condition and bind value appended
    '''
    if not isinstance(key, str):
        key = key.decode('utf-8')
    # Peel a trailing comparison marker off the column name.
    marker = re.search(r'[!><]$', key)
    eprefix = marker.group(0) if marker else ''
    if marker:
        key = key[:-1]
    if not isinstance(val[0], str):
        val[0] = val[0].decode('utf-8')
    if '*' in val[0]:
        # Wildcard value: translate * to SQL % and use (NOT) LIKE.
        val[0] = val[0].replace('*', '%')
        negate = ' NOT' if eprefix == '!' else ''
        sql = sql + separator + ' ' + key + negate + ' LIKE %s'
    else:
        sql = sql + separator + ' ' + key + eprefix + '=%s'
    return sql, bind + (val,)
def call_responder(server, endpoint, payload=''):
    ''' Call a REST responder: a plain GET by default, or an authorized
        JSON POST when a payload is supplied.
        Keyword arguments:
          server: key into the CONFIG dictionary
          endpoint: REST endpoint
          payload: payload for POST requests
        Returns:
          decoded JSON response
        Raises:
          InvalidUsage: on a non-200 response
    '''
    if server not in CONFIG:
        raise Exception("Configuration key %s is not defined" % (server))
    url = CONFIG[server]['url'] + endpoint
    try:
        if payload:
            req = requests.post(url, json=payload,
                                headers={"Content-Type": "application/json",
                                         "Authorization": "Bearer " + BEARER})
        else:
            req = requests.get(url)
    except requests.exceptions.RequestException as err:
        print(err)
        raise err
    if req.status_code != 200:
        print("Could not get response from %s: %s" % (url, req.text))
        raise InvalidUsage(req.text, req.status_code)
    return req.json()
def check_permission(user, permission=None):
    ''' Validate that a user has a specified permission.
        Keyword arguments:
          user: user name
          permission: single permission or list of permissions
        Returns:
          with no permission argument, the list of the user's permissions;
          otherwise 1 if the user has any of the requested permissions,
          else 0.
        Raises:
          InvalidUsage: on SQL error
    '''
    if not permission:
        # No specific permission requested: return everything the user has.
        stmt = "SELECT * FROM user_permission_vw WHERE name=%s"
        try:
            g.c.execute(stmt, (user,))
            rows = g.c.fetchall()
        except Exception as err:
            raise InvalidUsage(sql_error(err), 500)
        return [row['permission'] for row in rows]
    # Use isinstance rather than comparing type names; also accept a
    # bare string as a one-element permission list.
    if isinstance(permission, str):
        permission = [permission]
    stmt = "SELECT * FROM user_permission_vw WHERE name=%s AND permission=%s"
    for per in permission:
        try:
            g.c.execute(stmt, (user, per))
            row = g.c.fetchone()
        except Exception as err:
            raise InvalidUsage(sql_error(err), 500)
        if row:
            return 1
    return 0
def check_project(project, ipd):
    ''' Ensure that a project exists and is active.
        Keyword arguments:
          project: project record (falsy if not found)
          ipd: request payload (supplies the project name for errors)
        Raises:
          InvalidUsage: 404 if the project is missing, 400 if inactive
    '''
    if not project:
        msg = "Project %s does not exist" % ipd['project_name']
        raise InvalidUsage(msg, 404)
    if not project['active']:
        msg = "Project %s is not active" % project['name']
        raise InvalidUsage(msg)
def generate_sql(request, result, sql, query=False):
    ''' Generate a SQL statement and tuple of associated bind variables
        from an API request's query string.
        Keyword arguments:
          request: API request (request.query_string is parsed)
          result: result dictionary (receives rest.sql_statement)
          sql: base SQL statement
          query: optional ID; when set, acts as if the query string
                 were "id=<query>"
        Returns:
          (sql, bind, idcolumn) where idcolumn is 1 if the "id" column
          was requested via _columns
    '''
    bind = ()
    # pylint: disable=W0603
    idcolumn = 0
    query_string = 'id='+str(query) if query else request.query_string
    order = ''
    if query_string:
        if not isinstance(query_string, str):
            query_string = query_string.decode('utf-8')
        ipd = parse_qs(query_string)
        # If the base SQL already has a WHERE clause, AND conditions on.
        separator = ' AND' if ' WHERE ' in sql else ' WHERE'
        for key, val in ipd.items():
            # _sort, _columns and _distinct are control parameters;
            # anything else becomes a WHERE condition.
            if key == '_sort':
                order = ' ORDER BY ' + val[0]
            elif key == '_columns':
                sql = sql.replace('*', val[0])
                varr = val[0].split(',')
                if 'id' in varr:
                    idcolumn = 1
            elif key == '_distinct':
                if 'DISTINCT' not in sql:
                    sql = sql.replace('SELECT', 'SELECT DISTINCT')
            else:
                sql, bind = add_key_value_pair(key, val, separator, sql, bind)
                separator = ' AND'
        sql += order
    # Record the (interpolated) statement for diagnostics.
    if bind:
        result['rest']['sql_statement'] = sql % bind
    else:
        result['rest']['sql_statement'] = sql
    return sql, bind, idcolumn
def get_assignment_by_name_or_id(aid):
    ''' Look up an assignment by name or numeric ID.
        Keyword arguments:
          aid: assignment name or ID
        Returns:
          assignment record (or None if not found)
        Raises:
          InvalidUsage: on SQL error
    '''
    aid = str(aid)
    # A purely numeric identifier is treated as a database ID.
    stmt = "SELECT * FROM assignment_vw WHERE id=%s" if aid.isdigit() \
           else "SELECT * FROM assignment_vw WHERE name=%s"
    try:
        # Pass a true one-element tuple; (aid) is just a parenthesized string.
        g.c.execute(stmt, (aid,))
        assignment = g.c.fetchone()
    except Exception as err:
        raise InvalidUsage(sql_error(err), 500)
    return assignment
def get_key_type_id(key_type):
    ''' Return the CV term ID for a key type, lazily caching the whole
        lookup table in module-level KEY_TYPE_IDS.
        Keyword arguments:
          key_type: key type
        Raises:
          InvalidUsage: on SQL error
    '''
    if key_type not in KEY_TYPE_IDS:
        try:
            g.c.execute("SELECT id,cv_term FROM cv_term_vw WHERE cv='key'")
            rows = g.c.fetchall()
        except Exception as err:
            raise InvalidUsage(sql_error(err), 500)
        KEY_TYPE_IDS.update({row['cv_term']: row['id'] for row in rows})
    return KEY_TYPE_IDS[key_type]
def get_project_by_name_or_id(proj):
    ''' Look up a project by name or numeric ID.
        Keyword arguments:
          proj: project name or ID
        Returns:
          project record (or None if not found)
        Raises:
          InvalidUsage: on SQL error
    '''
    proj = str(proj)
    # A purely numeric identifier is treated as a database ID.
    stmt = "SELECT * FROM project_vw WHERE id=%s" if proj.isdigit() \
           else "SELECT * FROM project_vw WHERE name=%s"
    try:
        # Pass a true one-element tuple; (proj) is just a parenthesized string.
        g.c.execute(stmt, (proj,))
        project = g.c.fetchone()
    except Exception as err:
        raise InvalidUsage(sql_error(err), 500)
    return project
def get_tasks_by_assignment_id(aid):
    ''' Fetch all tasks for an assignment.
        Keyword arguments:
          aid: assignment ID
        Returns:
          list of task records
        Raises:
          InvalidUsage: on SQL error
    '''
    try:
        # Pass a true one-element tuple; (aid) is just a parenthesized value.
        g.c.execute("SELECT * FROM task_vw WHERE assignment_id=%s", (aid,))
        tasks = g.c.fetchall()
    except Exception as err:
        raise InvalidUsage(sql_error(err), 500)
    return tasks
def get_task_by_id(tid):
    ''' Fetch a single task by ID.
        Keyword arguments:
          tid: task ID
        Returns:
          task record (or None if not found)
        Raises:
          InvalidUsage: on SQL error
    '''
    try:
        # Pass a true one-element tuple; (tid) is just a parenthesized value.
        g.c.execute("SELECT * FROM task_vw WHERE id=%s", (tid,))
        task = g.c.fetchone()
    except Exception as err:
        raise InvalidUsage(sql_error(err), 500)
    return task
def get_user_by_name(uname):
    ''' Given a user name, return the user record.
        Keyword arguments:
          uname: user name
        Returns:
          user record (or None if not found)
        Raises:
          InvalidUsage: on SQL error
    '''
    try:
        # Use a parameterized query; the previous code interpolated uname
        # directly into the statement, which allowed SQL injection.
        g.c.execute("SELECT * FROM user_vw WHERE name=%s", (uname,))
        row = g.c.fetchone()
    except Exception as err:
        raise InvalidUsage(sql_error(err), 500)
    return row
def get_workday(janelia_id):
    ''' Given a Janelia ID, return the Workday record.
        Keyword arguments:
          janelia_id: Janelia ID
        Returns:
          Workday record
        Raises:
          InvalidUsage: if the user is not found in Workday
    '''
    data = call_responder('config', 'config/workday/' + janelia_id)
    if not data:
        raise InvalidUsage('User %s not found in Workday' % (janelia_id))
    return data['config']
def neuprint_custom_query(payload):
    ''' Execute a custom NeuPrint Cypher query.
        Keyword arguments:
          payload: Cypher query string
        Returns:
          NeuPrint response
    '''
    # The previous try/except only re-raised the exception unchanged,
    # so call the responder directly.
    return call_responder('neuprint', 'custom/custom', {"cypher": payload})
def random_string(strlen=8):
    ''' Generate a random string of ASCII letters and digits.
        Keyword arguments:
          strlen: length of generated string
        Returns:
          random string
    '''
    alphabet = string.ascii_letters + string.digits
    return ''.join(random.choice(alphabet) for _ in range(strlen))
def return_tasks_json(assignment, result):
    ''' Given an assignment name, populate result['task list'] with the
        assignment's tasks, one dict per task with its properties folded in.
        Keyword arguments:
          assignment: assignment name
          result: result dictionary (receives task list and row_count)
        Returns:
          None on success, or an error-message string on SQL error
    '''
    # pylint: disable=W0703
    result['task list'] = list()
    sql = 'SELECT t.id AS task_id,type,value,key_type,key_text FROM task_vw t ' \
          + 'LEFT OUTER JOIN task_property_vw tp ON (t.id=tp.task_id) WHERE ' \
          + 't.assignment=%s'
    try:
        g.c.execute(sql, (assignment,))
        taskprops = g.c.fetchall()
    except Exception as err:
        return sql_error(err)
    # Rows arrive one per task property; group consecutive rows that share
    # a task_id into a single task dict (relies on the join returning a
    # task's property rows contiguously).
    this_task = ''
    task = {}
    task_count = 0
    for tps in taskprops:
        if this_task != tps['task_id']:
            # New task: flush the previous one and start a fresh dict.
            if this_task:
                result['task list'].append(task)
            this_task = tps['task_id']
            task = {"assignment_manager_task_id": this_task,
                    tps['key_type']: tps['key_text']}
            task_count += 1
        if tps['type']:
            # Coerce known property types: IDs to int, points to JSON.
            if tps['type'] in ['body ID A', 'body ID B', 'supervoxel ID 1', 'supervoxel ID 2']:
                task[tps['type']] = int(tps['value'])
            elif tps['type'] in ['supervoxel point 1', 'supervoxel point 2',
                                 'body point 1', 'body point 2']:
                task[tps['type']] = json.loads(tps['value'])
            else:
                task[tps['type']] = tps['value']
    # Flush the final task.
    if this_task:
        result['task list'].append(task)
    result['rest']['row_count'] = task_count
    return None
def sql_error(err):
    ''' Turn a MySQL exception into a printable error message.
        Keyword arguments:
          err: MySQL error
        Returns:
          formatted error message
    '''
    try:
        # MySQL errors carry (errno, message) in args.
        msg = "MySQL error [%d]: %s" % (err.args[0], err.args[1])
    except IndexError:
        msg = "Error: %s" % err
    if msg:
        print(msg)
    return msg
def update_property(pid, table, name, value):
    ''' Insert or update a property row for a parent record.
        Keyword arguments:
          pid: parent ID
          table: parent table (prefix of the <table>_property table)
          name: CV term
          value: value
        Raises:
          InvalidUsage: on SQL error
    '''
    # The table name must be interpolated with %s first, so the real bind
    # placeholders are temporarily spelled "!s" to survive that pass and
    # are converted back to "%s" afterwards.
    stmt = "INSERT INTO %s_property (%s_id,type_id,value) VALUES " \
           + "(!s,getCvTermId(!s,!s,NULL),!s) ON DUPLICATE KEY UPDATE value=!s"
    stmt = stmt % (table, table)
    stmt = stmt.replace('!s', '%s')
    bind = (pid, table, name, value, value)
    try:
        g.c.execute(stmt, bind)
    except Exception as err:
        raise InvalidUsage(sql_error(err), 500)
def validate_user(user):
    ''' Validate that a user exists, matching by name or Janelia ID.
        Keyword arguments:
          user: user name or Janelia ID
        Returns:
          (user name, Janelia ID) tuple
        Raises:
          InvalidUsage: if the user is unknown (400) or on SQL error (500)
    '''
    try:
        g.c.execute("SELECT * FROM user_vw WHERE name=%s OR janelia_id=%s",
                    (user, user))
        record = g.c.fetchone()
    except Exception as err:
        raise InvalidUsage(sql_error(err), 500)
    if not record:
        raise InvalidUsage("User %s does not exist" % (user), 400)
    return record['name'], record['janelia_id']
def working_duration(start_unix, end_unix):
    ''' Determine working duration (business hours only) in seconds.
        Working time is 06:00-18:00 local time, excluding US holidays.
        Keyword arguments:
          start_unix: start time (epoch seconds)
          end_unix: end time (epoch seconds)
        Returns:
          duration in seconds
    '''
    open_time = datetime.time(6, 0, 0)
    close_time = datetime.time(18, 0, 0)
    holidaylist = pyholidays.US()
    # Convert epoch seconds to local-time strings, then to pandas datetimes.
    startstring = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(start_unix))
    endstring = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(end_unix))
    startdate = pd.to_datetime(startstring)
    enddate = pd.to_datetime(endstring)
    # businessDuration returns hours; convert to seconds.
    work_duration = businessDuration(startdate, enddate, open_time, close_time,
                                     holidaylist=holidaylist, unit='hour') * 3600
    try:
        work_duration = int(work_duration)
    except ValueError as err:
        # NOTE(review): int() raises ValueError when businessDuration yields
        # NaN (presumably an interval entirely outside business hours) —
        # fall back to wall-clock duration; confirm against the library docs.
        print(str(err) + ' for ' + startstring + ', ' + endstring)
        work_duration = end_unix - start_unix
    return work_duration
| 31.845433
| 95
| 0.564348
|
acfe00449b6241b0865e5dddbdeabb904f3ec947
| 776
|
py
|
Python
|
Examples/put_worksheet_custom_filter.py
|
aspose-cells-cloud/aspose-cells-cloud-python
|
0189236d38053dc67f7edc754b5101f17262cee8
|
[
"MIT"
] | 3
|
2018-05-23T03:16:26.000Z
|
2020-11-07T11:42:41.000Z
|
Examples/put_worksheet_custom_filter.py
|
aspose-cells-cloud/aspose-cells-cloud-python
|
0189236d38053dc67f7edc754b5101f17262cee8
|
[
"MIT"
] | null | null | null |
Examples/put_worksheet_custom_filter.py
|
aspose-cells-cloud/aspose-cells-cloud-python
|
0189236d38053dc67f7edc754b5101f17262cee8
|
[
"MIT"
] | 4
|
2018-08-29T18:45:05.000Z
|
2021-03-25T07:59:56.000Z
|
"""
Test case for cells_auto_filter_put_worksheet_custom_filter
Filters a list with a custom criteria.
"""
name ='Book1.xlsx'
sheet_name ='Sheet1'
range ='A1:C10'
fieldIndex = 0
operatorType1 = "LessOrEqual"
criteria1 = "test"
isAnd = True
operatorType2 = "LessOrEqual"
criteria2 = "test"
matchBlanks = True
refresh = True
folder = "Temp"
AuthUtil.Ready(name, folder)
result = self.api.cells_auto_filter_put_worksheet_custom_filter(name, sheet_name,range ,fieldIndex, operatorType1 , criteria1,is_and=isAnd, operator_type2=operatorType2 , criteria2=criteria2,match_blanks=matchBlanks, refresh=refresh, folder=folder)
| 40.842105
| 256
| 0.635309
|
acfe0075fe46598be309515cbf2a85c8a207bc34
| 13,172
|
py
|
Python
|
originstamp_client/rest.py
|
OriginStampTimestamping/originstamp-python-client
|
a13c3d51eac6dd3a920b7b74e079531fe7ab17a2
|
[
"MIT"
] | 9
|
2018-11-06T06:43:46.000Z
|
2020-09-26T03:29:41.000Z
|
originstamp_client/rest.py
|
OriginStampTimestamping/originstamp-python-client
|
a13c3d51eac6dd3a920b7b74e079531fe7ab17a2
|
[
"MIT"
] | 1
|
2019-05-06T10:49:23.000Z
|
2019-05-13T09:30:01.000Z
|
originstamp_client/rest.py
|
OriginStampTimestamping/originstamp-python-client
|
a13c3d51eac6dd3a920b7b74e079531fe7ab17a2
|
[
"MIT"
] | 1
|
2020-10-02T17:31:47.000Z
|
2020-10-02T17:31:47.000Z
|
# coding: utf-8
"""
OriginStamp Client
OpenAPI spec version: 3.0
OriginStamp Documentation: https://docs.originstamp.com
Contact: mail@originstamp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import io
import json
import logging
import re
import ssl
import certifi
# python 2 and python 3 compatibility library
import six
from six.moves.urllib.parse import urlencode
try:
import urllib3
except ImportError:
raise ImportError('Swagger python client requires urllib3.')
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
    '''File-like wrapper around an HTTP response.

    Caches status, reason, and body so they remain available after the
    underlying connection is released. `resp` is presumably a
    urllib3.HTTPResponse (it exposes .status/.reason/.data/getheaders).
    '''
    def __init__(self, resp):
        self.urllib3_response = resp
        self.status = resp.status
        self.reason = resp.reason
        self.data = resp.data
    def getheaders(self):
        """Returns a dictionary of the response headers."""
        return self.urllib3_response.getheaders()
    def getheader(self, name, default=None):
        """Returns a given response header."""
        return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
    '''HTTP client wrapping a urllib3 pool manager, configured for SSL
    verification, client certificates, and optional proxying.
    '''
    def __init__(self, configuration, pools_size=4, maxsize=None):
        # urllib3.PoolManager will pass all kw parameters to connectionpool
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75  # noqa: E501
        # https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680  # noqa: E501
        # maxsize is the number of requests to host that are allowed in parallel  # noqa: E501
        # Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html  # noqa: E501
        # cert_reqs
        if configuration.verify_ssl:
            cert_reqs = ssl.CERT_REQUIRED
        else:
            cert_reqs = ssl.CERT_NONE
        # ca_certs
        if configuration.ssl_ca_cert:
            ca_certs = configuration.ssl_ca_cert
        else:
            # if not set certificate file, use Mozilla's root certificates.
            ca_certs = certifi.where()
        addition_pool_args = {}
        if configuration.assert_hostname is not None:
            addition_pool_args['assert_hostname'] = configuration.assert_hostname  # noqa: E501
        if maxsize is None:
            if configuration.connection_pool_maxsize is not None:
                maxsize = configuration.connection_pool_maxsize
            else:
                maxsize = 4
        # https pool manager
        if configuration.proxy:
            self.pool_manager = urllib3.ProxyManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=ca_certs,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                proxy_url=configuration.proxy,
                **addition_pool_args
            )
        else:
            self.pool_manager = urllib3.PoolManager(
                num_pools=pools_size,
                maxsize=maxsize,
                cert_reqs=cert_reqs,
                ca_certs=ca_certs,
                cert_file=configuration.cert_file,
                key_file=configuration.key_file,
                **addition_pool_args
            )
    def request(self, method, url, query_params=None, headers=None,
                body=None, post_params=None, _preload_content=True,
                _request_timeout=None):
        """Perform requests.

        :param method: http request method
        :param url: http request url
        :param query_params: query parameters in the url
        :param headers: http request headers
        :param body: request json body, for `application/json`
        :param post_params: request post parameters,
                            `application/x-www-form-urlencoded`
                            and `multipart/form-data`
        :param _preload_content: if False, the urllib3.HTTPResponse object will
                                 be returned without reading/decoding response
                                 data. Default is True.
        :param _request_timeout: timeout setting for this request. If one
                                 number provided, it will be total request
                                 timeout. It can also be a pair (tuple) of
                                 (connection, read) timeouts.
        """
        method = method.upper()
        assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
                          'PATCH', 'OPTIONS']
        # body and post_params are mutually exclusive encodings.
        if post_params and body:
            raise ValueError(
                "body parameter cannot be used with post_params parameter."
            )
        post_params = post_params or {}
        headers = headers or {}
        timeout = None
        if _request_timeout:
            if isinstance(_request_timeout, (int, ) if six.PY3 else (int, long)):  # noqa: E501,F821
                timeout = urllib3.Timeout(total=_request_timeout)
            elif (isinstance(_request_timeout, tuple) and
                    len(_request_timeout) == 2):
                timeout = urllib3.Timeout(
                    connect=_request_timeout[0], read=_request_timeout[1])
        if 'Content-Type' not in headers:
            headers['Content-Type'] = 'application/json'
        try:
            # For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
            # the request body encoding is chosen from Content-Type.
            if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
                if query_params:
                    url += '?' + urlencode(query_params)
                if re.search('json', headers['Content-Type'], re.IGNORECASE):
                    request_body = '{}'
                    if body is not None:
                        request_body = json.dumps(body)
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'application/x-www-form-urlencoded':  # noqa: E501
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=False,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type which generated by urllib3 will be
                    # overwritten.
                    del headers['Content-Type']
                    r = self.pool_manager.request(
                        method, url,
                        fields=post_params,
                        encode_multipart=True,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                # Pass a `string` parameter directly in the body to support
                # other content types than Json when `body` argument is
                # provided in serialized form
                elif isinstance(body, str):
                    request_body = body
                    r = self.pool_manager.request(
                        method, url,
                        body=request_body,
                        preload_content=_preload_content,
                        timeout=timeout,
                        headers=headers)
                else:
                    # Cannot generate the request from given parameters
                    msg = """Cannot prepare a request message for provided
                             arguments. Please check that your arguments match
                             declared content type."""
                    raise ApiException(status=0, reason=msg)
            # For `GET`, `HEAD`
            else:
                r = self.pool_manager.request(method, url,
                                              fields=query_params,
                                              preload_content=_preload_content,
                                              timeout=timeout,
                                              headers=headers)
        except urllib3.exceptions.SSLError as e:
            msg = "{0}\n{1}".format(type(e).__name__, str(e))
            raise ApiException(status=0, reason=msg)
        if _preload_content:
            r = RESTResponse(r)
            # In the python 3, the response.data is bytes.
            # we need to decode it to string.
            if six.PY3:
                r.data = r.data.decode('utf8')
        # log response body
        logger.debug("response body: %s", r.data)
        # Any non-2xx status is surfaced to the caller as ApiException.
        if not 200 <= r.status <= 299:
            raise ApiException(http_resp=r)
        return r
    def GET(self, url, headers=None, query_params=None, _preload_content=True,
            _request_timeout=None):
        """Perform a GET request."""
        return self.request("GET", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)
    def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
             _request_timeout=None):
        """Perform a HEAD request."""
        return self.request("HEAD", url,
                            headers=headers,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            query_params=query_params)
    def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
                body=None, _preload_content=True, _request_timeout=None):
        """Perform an OPTIONS request."""
        return self.request("OPTIONS", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def DELETE(self, url, headers=None, query_params=None, body=None,
               _preload_content=True, _request_timeout=None):
        """Perform a DELETE request."""
        return self.request("DELETE", url,
                            headers=headers,
                            query_params=query_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def POST(self, url, headers=None, query_params=None, post_params=None,
             body=None, _preload_content=True, _request_timeout=None):
        """Perform a POST request."""
        return self.request("POST", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def PUT(self, url, headers=None, query_params=None, post_params=None,
            body=None, _preload_content=True, _request_timeout=None):
        """Perform a PUT request."""
        return self.request("PUT", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
    def PATCH(self, url, headers=None, query_params=None, post_params=None,
              body=None, _preload_content=True, _request_timeout=None):
        """Perform a PATCH request."""
        return self.request("PATCH", url,
                            headers=headers,
                            query_params=query_params,
                            post_params=post_params,
                            _preload_content=_preload_content,
                            _request_timeout=_request_timeout,
                            body=body)
class ApiException(Exception):
    '''Exception raised for failed API calls.

    Can be built either from an HTTP response object (http_resp) or from
    an explicit status/reason pair.
    '''

    def __init__(self, status=None, reason=None, http_resp=None):
        if http_resp:
            self.status = http_resp.status
            self.reason = http_resp.reason
            self.body = http_resp.data
            self.headers = http_resp.getheaders()
        else:
            self.status = status
            self.reason = reason
            self.body = None
            self.headers = None

    def __str__(self):
        """Custom error messages for exception"""
        message = "({0})\n".format(self.status)
        message += "Reason: {0}\n".format(self.reason)
        if self.headers:
            message += "HTTP response headers: {0}\n".format(self.headers)
        if self.body:
            message += "HTTP response body: {0}\n".format(self.body)
        return message
| 40.780186
| 134
| 0.541527
|
acfe00bf5e0cd91bb4dfdd85d44188f173336958
| 465
|
py
|
Python
|
fabfile/text.py
|
nprapps/play-quiz
|
a3a09d473fc7a420bb6f4e0931dc3b5c54712d08
|
[
"FSFAP"
] | 1
|
2015-10-14T13:00:08.000Z
|
2015-10-14T13:00:08.000Z
|
fabfile/text.py
|
nprapps/play-quiz
|
a3a09d473fc7a420bb6f4e0931dc3b5c54712d08
|
[
"FSFAP"
] | null | null | null |
fabfile/text.py
|
nprapps/play-quiz
|
a3a09d473fc7a420bb6f4e0931dc3b5c54712d08
|
[
"FSFAP"
] | 1
|
2021-02-18T11:31:47.000Z
|
2021-02-18T11:31:47.000Z
|
#!/usr/bin/env python
"""
Commands related to syncing copytext from Google Docs.
"""
from fabric.api import task
import app_config
from etc.gdocs import GoogleDoc
@task(default=True)
def update():
    """
    Downloads a Google Doc as an Excel file.
    """
    # Pull the document key out of the configured URL ("...key=<KEY>&...").
    url = app_config.COPY_GOOGLE_DOC_URL
    key = url.split('key=')[1].split('&')[0]
    document = GoogleDoc(key=key)
    document.get_auth()
    document.get_document()
| 17.222222
| 54
| 0.630108
|
acfe0128d085819ac084de31cd0d2aff0de03f0a
| 1,437
|
py
|
Python
|
utils/extra/htmhtm.py
|
luis-guilherme/mitra
|
18bd935b11dc8fcf594255a96809c05abc324e87
|
[
"MIT"
] | 864
|
2020-09-22T18:52:27.000Z
|
2022-03-28T19:57:25.000Z
|
utils/extra/htmhtm.py
|
luis-guilherme/mitra
|
18bd935b11dc8fcf594255a96809c05abc324e87
|
[
"MIT"
] | 13
|
2020-09-24T10:42:21.000Z
|
2021-12-20T14:44:36.000Z
|
utils/extra/htmhtm.py
|
luis-guilherme/mitra
|
18bd935b11dc8fcf594255a96809c05abc324e87
|
[
"MIT"
] | 55
|
2020-09-22T19:01:19.000Z
|
2022-03-20T09:15:45.000Z
|
# an ambiguous HTML generator
# (will not work as polyglot without encryption)
# Ange Albertini 2021
# To avoid garbage characters in the 1st payload
# (due from encryption of the first '<!--' characters)
# break out of content in the html page via a script like:
# <div id='mypage'>
# [your code here]
# </div>
# <script language=javascript type="text/javascript">
# document.documentElement.innerHTML = document.getElementById('mypage').innerHTML;
# </script>
import argparse
import hashlib
parser = argparse.ArgumentParser(description="Generate binary polyglots.")
parser.add_argument('topfile', help="first 'top' HTML file.")
parser.add_argument('bottomfile', help="second 'bottom' input file.")
args = parser.parse_args()
with open(args.topfile, "rb") as f1:
    data1 = f1.read()
with open(args.bottomfile, "rb") as f2:
    data2 = f2.read()
# Layout of the generated file:
# <!--[cut 1]-->
# [page1]
# <!--[cut 2]-->
# [page2]
# <!--
# [padding]
template = b"<!---->%s<!---->%s<!--" % (data1, data2)
# Offsets of the two switch points inside the HTML comment markers.
cut1 = len("<!--")
cut2 = len("<!---->") + len(data1) + len("<!--")
# Pad to a 16-byte boundary, plus one extra 16-byte block
# (presumably a cipher-block size for later encryption — confirm).
template += (16 - len(template) % 16) * b"\0"
template += 16 * b"\0"
tagblock = len(template) // 16 - 1
hash_ = hashlib.sha256(template).hexdigest()[:8].lower()
# mitra tools naming convention
filename = "(%x-%x)%i.%s.htm.htm" % (cut1, cut2, tagblock, hash_)
print("Creating '%s'" % filename)
print(" %i bytes" % len(template))
| 24.775862
| 85
| 0.653445
|
acfe013dafbe4f8ce8e0e0b16130c72e06db9f25
| 2,371
|
py
|
Python
|
vehicle/OVMS.V3/components/wolfssl/wrapper/python/wolfssl/src/wolfssl/build_ffi.py
|
qtwre/Open-Vehicle-Monitoring-System-3
|
0ebd21bdff06190c0909c29b215ab63f5792e7d6
|
[
"MIT"
] | 322
|
2017-06-12T16:56:49.000Z
|
2022-03-27T15:46:38.000Z
|
vehicle/OVMS.V3/components/wolfssl/wrapper/python/wolfssl/src/wolfssl/build_ffi.py
|
qtwre/Open-Vehicle-Monitoring-System-3
|
0ebd21bdff06190c0909c29b215ab63f5792e7d6
|
[
"MIT"
] | 426
|
2017-08-30T04:47:34.000Z
|
2022-03-25T21:01:11.000Z
|
vehicle/OVMS.V3/components/wolfssl/wrapper/python/wolfssl/src/wolfssl/build_ffi.py
|
qtwre/Open-Vehicle-Monitoring-System-3
|
0ebd21bdff06190c0909c29b215ab63f5792e7d6
|
[
"MIT"
] | 194
|
2017-07-03T23:34:08.000Z
|
2022-03-16T09:09:22.000Z
|
# -*- coding: utf-8 -*-
#
# build_ffi.py
#
# Copyright (C) 2006-2020 wolfSSL Inc.
#
# This file is part of wolfSSL.
#
# wolfSSL is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# wolfSSL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
#/
# pylint: disable=missing-docstring, invalid-name
from cffi import FFI
ffi = FFI()
# C source compiled into the wolfssl._ffi extension module; headers and
# the wolfSSL library are expected under /usr/local.
ffi.set_source(
    "wolfssl._ffi",
    """
    #include <wolfssl/options.h>
    #include <wolfssl/ssl.h>

    void wolfSSL_Free(void *ptr, void* heap, int type);
    """,
    include_dirs=["/usr/local/include"],
    library_dirs=["/usr/local/lib"],
    libraries=["wolfssl"],
)
# Declarations of the wolfSSL API surface exposed to Python via cffi.
ffi.cdef(
    """
    typedef unsigned char byte;
    typedef unsigned int word32;

    void wolfSSL_Free(void*, void*, int);

    void* wolfSSLv23_server_method(void);
    void* wolfSSLv23_client_method(void);
    void* wolfTLSv1_2_server_method(void);
    void* wolfTLSv1_2_client_method(void);

    void* wolfSSL_CTX_new(void*);
    void wolfSSL_CTX_free(void*);

    void wolfSSL_CTX_set_verify(void*, int, void*);
    int wolfSSL_CTX_set_cipher_list(void*, const char*);
    int wolfSSL_CTX_use_PrivateKey_file(void*, const char*, int);
    int wolfSSL_CTX_load_verify_locations(void*, const char*, const char*);
    int wolfSSL_CTX_load_verify_buffer(void*, const unsigned char*, long, int);
    int wolfSSL_CTX_use_certificate_chain_file(void*, const char *);
    int wolfSSL_CTX_UseSupportedCurve(void*, short);

    void* wolfSSL_new(void*);
    void wolfSSL_free(void*);

    int wolfSSL_set_fd(void*, int);
    int wolfSSL_get_error(void*, int);
    int wolfSSL_negotiate(void*);
    int wolfSSL_write(void*, const void*, int);
    int wolfSSL_read(void*, void*, int);
    int wolfSSL_shutdown(void*);
    """
)
if __name__ == "__main__":
    ffi.compile(verbose=1)
| 29.271605
| 80
| 0.704766
|
acfe0149119dc15a4e0b0e5a3f9e704f4f9c3c92
| 7,872
|
py
|
Python
|
discor_algo/discor/algorithm/sac.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | 27
|
2020-06-09T06:33:14.000Z
|
2022-03-27T05:36:27.000Z
|
discor_algo/discor/algorithm/sac.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | 6
|
2021-02-02T23:00:02.000Z
|
2022-01-13T03:13:51.000Z
|
discor_algo/discor/algorithm/sac.py
|
fgitmichael/SelfSupevisedSkillDiscovery
|
60eee11cfd67046190dd2784bf40e97bdbed9d40
|
[
"MIT"
] | 3
|
2020-06-15T15:17:36.000Z
|
2021-03-25T11:52:07.000Z
|
import os
import torch
from torch.optim import Adam
from .base import Algorithm
from discor.network import TwinnedStateActionFunction, GaussianPolicy
from discor.utils import disable_gradients, soft_update, update_params, \
assert_action
class SAC(Algorithm):
def __init__(self, state_dim, action_dim, device, gamma=0.99, nstep=1,
policy_lr=0.0003, q_lr=0.0003, entropy_lr=0.0003,
policy_hidden_units=[256, 256], q_hidden_units=[256, 256],
target_update_coef=0.005, log_interval=10, seed=0):
super().__init__(
state_dim, action_dim, device, gamma, nstep, log_interval, seed)
# Build networks.
self._policy_net = GaussianPolicy(
state_dim=self._state_dim,
action_dim=self._action_dim,
hidden_units=policy_hidden_units
).to(self._device)
self._online_q_net = TwinnedStateActionFunction(
state_dim=self._state_dim,
action_dim=self._action_dim,
hidden_units=q_hidden_units
).to(self._device)
self._target_q_net = TwinnedStateActionFunction(
state_dim=self._state_dim,
action_dim=self._action_dim,
hidden_units=q_hidden_units
).to(self._device).eval()
# Copy parameters of the learning network to the target network.
self._target_q_net.load_state_dict(self._online_q_net.state_dict())
# Disable gradient calculations of the target network.
disable_gradients(self._target_q_net)
# Optimizers.
self._policy_optim = Adam(self._policy_net.parameters(), lr=policy_lr)
self._q_optim = Adam(self._online_q_net.parameters(), lr=q_lr)
# Target entropy is -|A|.
self._target_entropy = -float(self._action_dim)
# We optimize log(alpha), instead of alpha.
self._log_alpha = torch.zeros(
1, device=self._device, requires_grad=True)
self._alpha = self._log_alpha.detach().exp()
self._alpha_optim = Adam([self._log_alpha], lr=entropy_lr)
self._target_update_coef = target_update_coef
def explore(self, state):
state = torch.tensor(
state[None, ...].copy(), dtype=torch.float, device=self._device)
with torch.no_grad():
action, _, _ = self._policy_net(state)
action = action.cpu().numpy()[0]
assert_action(action)
return action
def exploit(self, state):
state = torch.tensor(
state[None, ...].copy(), dtype=torch.float, device=self._device)
with torch.no_grad():
_, _, action = self._policy_net(state)
action = action.cpu().numpy()[0]
assert_action(action)
return action
    def update_target_networks(self):
        ''' Polyak-average the online critic weights into the target critic. '''
        soft_update(
            self._target_q_net, self._online_q_net, self._target_update_coef)
    def update_online_networks(self, batch, writer):
        ''' Run one SAC gradient step on a sampled batch:
            actor + temperature first, then the critics.
        '''
        self._learning_steps += 1
        self.update_policy_and_entropy(batch, writer)
        self.update_q_functions(batch, writer)
def update_policy_and_entropy(self, batch, writer):
    """One gradient step on the policy and on the entropy temperature."""
    states, _, _, _, _ = batch
    # Policy step.
    policy_loss, entropies = self.calc_policy_loss(states)
    update_params(self._policy_optim, policy_loss)
    # Temperature step; alpha is re-derived from log(alpha) afterwards.
    entropy_loss = self.calc_entropy_loss(entropies)
    update_params(self._alpha_optim, entropy_loss)
    self._alpha = self._log_alpha.detach().exp()
    if self._learning_steps % self._log_interval == 0:
        scalars = (
            ('loss/policy', policy_loss.detach().item()),
            ('loss/entropy', entropy_loss.detach().item()),
            ('stats/alpha', self._alpha.item()),
            ('stats/entropy', entropies.detach().mean().item()),
        )
        for tag, value in scalars:
            writer.add_scalar(tag, value, self._learning_steps)
def calc_policy_loss(self, states):
    """Policy objective: maximize E[min(Q1, Q2) + alpha * entropy].

    Actions are re-sampled from the current policy; the clipped double-Q
    minimum guards against overestimation.
    Returns (loss, detached entropies).
    """
    sampled_actions, entropies, _ = self._policy_net(states)
    q_head1, q_head2 = self._online_q_net(states, sampled_actions)
    min_qs = torch.min(q_head1, q_head2)
    assert min_qs.shape == entropies.shape
    # Negate because optimizers minimize.
    policy_loss = torch.mean(-min_qs - self._alpha * entropies)
    return policy_loss, entropies.detach_()
def calc_entropy_loss(self, entropies):
    """Loss for the temperature coefficient alpha (optimized as log-alpha).

    Intuitively, alpha grows when the policy entropy drops below the
    target entropy, and shrinks when it exceeds it.
    """
    assert not entropies.requires_grad
    entropy_gap = self._target_entropy - entropies
    return -(self._log_alpha * entropy_gap).mean()
def update_q_functions(self, batch, writer, imp_ws1=None, imp_ws2=None):
    """Update both online Q functions on one batch.

    imp_ws1/imp_ws2 are optional per-sample importance weights (used by
    DisCor); when omitted the loss is the plain mean squared TD error.
    Returns (curr_qs1, curr_qs2, target_qs) with gradients detached.
    """
    states, actions, rewards, next_states, dones = batch
    # Calculate current and target Q values.
    curr_qs1, curr_qs2 = self.calc_current_qs(states, actions)
    target_qs = self.calc_target_qs(rewards, next_states, dones)
    # Update Q functions.
    q_loss, mean_q1, mean_q2 = \
        self.calc_q_loss(curr_qs1, curr_qs2, target_qs, imp_ws1, imp_ws2)
    update_params(self._q_optim, q_loss)
    if self._learning_steps % self._log_interval == 0:
        writer.add_scalar(
            'loss/Q', q_loss.detach().item(),
            self._learning_steps)
        writer.add_scalar(
            'stats/mean_Q1', mean_q1, self._learning_steps)
        writer.add_scalar(
            'stats/mean_Q2', mean_q2, self._learning_steps)
    # Return these values for the DisCor algorithm.
    return curr_qs1.detach(), curr_qs2.detach(), target_qs
def calc_current_qs(self, states, actions):
    """Evaluate both online Q heads at the given (state, action) pairs."""
    return self._online_q_net(states, actions)
def calc_target_qs(self, rewards, next_states, dones):
    """Soft Bellman target: r + gamma * (1 - done) * (min Q' + alpha * H).

    Next actions come from the current policy; target Q nets plus the
    clipped double-Q minimum stabilize the bootstrap value.
    """
    with torch.no_grad():
        next_acts, next_ents, _ = self._policy_net(next_states)
        next_q1, next_q2 = self._target_q_net(next_states, next_acts)
        soft_next_qs = torch.min(next_q1, next_q2) + self._alpha * next_ents
        assert rewards.shape == soft_next_qs.shape
        return rewards + (1.0 - dones) * self._discount * soft_next_qs
def calc_q_loss(self, curr_qs1, curr_qs2, target_qs, imp_ws1=None,
                imp_ws2=None):
    """Squared TD-error loss for both Q heads, optionally weighted.

    Returns (combined loss, mean Q1, mean Q2); the means are plain
    floats for logging.
    """
    assert imp_ws1 is None or imp_ws1.shape == curr_qs1.shape
    assert imp_ws2 is None or imp_ws2.shape == curr_qs2.shape
    assert not target_qs.requires_grad
    assert curr_qs1.shape == target_qs.shape
    td1 = curr_qs1 - target_qs
    td2 = curr_qs2 - target_qs
    if imp_ws1 is None:
        # Plain mean squared TD errors.
        q1_loss = td1.pow(2).mean()
        q2_loss = td2.pow(2).mean()
    else:
        # Importance-weighted sums (used by the DisCor algorithm).
        q1_loss = (td1.pow(2) * imp_ws1).sum()
        q2_loss = (td2.pow(2) * imp_ws2).sum()
    mean_q1 = curr_qs1.detach().mean().item()
    mean_q2 = curr_qs2.detach().mean().item()
    return q1_loss + q2_loss, mean_q1, mean_q2
def save_models(self, save_dir):
    """Persist the policy and both Q networks under *save_dir* as .pth files."""
    super().save_models(save_dir)
    self._policy_net.save(os.path.join(save_dir, 'policy_net.pth'))
    self._online_q_net.save(os.path.join(save_dir, 'online_q_net.pth'))
    self._target_q_net.save(os.path.join(save_dir, 'target_q_net.pth'))
| 39.164179
| 78
| 0.639355
|
acfe0164bb952545d9ea85adcad69f14c3bff58a
| 41,641
|
py
|
Python
|
jenkins_jobs/modules/parameters.py
|
wsoula/jenkins-job-builder
|
2bff652b03bfcf0ab272e5cd0b5092472c201f7e
|
[
"Apache-2.0"
] | null | null | null |
jenkins_jobs/modules/parameters.py
|
wsoula/jenkins-job-builder
|
2bff652b03bfcf0ab272e5cd0b5092472c201f7e
|
[
"Apache-2.0"
] | null | null | null |
jenkins_jobs/modules/parameters.py
|
wsoula/jenkins-job-builder
|
2bff652b03bfcf0ab272e5cd0b5092472c201f7e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Parameters module allows you to specify build parameters for a job.
**Component**: parameters
:Macro: parameter
:Entry Point: jenkins_jobs.parameters
Example::
job:
name: test_job
parameters:
- string:
name: FOO
default: bar
description: "A parameter named FOO, defaults to 'bar'."
"""
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import JenkinsJobsException
from jenkins_jobs.errors import MissingAttributeError
from jenkins_jobs.errors import InvalidAttributeError
import jenkins_jobs.modules.base
import jenkins_jobs.modules.helpers as helpers
def base_param(registry, xml_parent, data, do_default, ptype):
    """Create the XML skeleton shared by all parameter types.

    Emits the parameter-definition element *ptype* with its name,
    description and (when *do_default* is true) a defaultValue child,
    then returns it so callers can append type-specific children.
    """
    pdef = XML.SubElement(xml_parent, ptype)
    XML.SubElement(pdef, 'name').text = data['name']
    XML.SubElement(pdef, 'description').text = data.get('description', '')
    if do_default:
        default = data.get('default')
        if default is None:
            # Keep the tag present even without a value.
            XML.SubElement(pdef, 'defaultValue')
        else:
            XML.SubElement(pdef, 'defaultValue').text = str(default)
    return pdef
def string_param(registry, xml_parent, data):
    """yaml: string
    A string parameter.
    :arg str name: the name of the parameter
    :arg str default: the default value of the parameter (optional)
    :arg str description: a description of the parameter (optional)
    Example::
        parameters:
          - string:
              name: FOO
              default: bar
              description: "A parameter named FOO, defaults to 'bar'."
    """
    ptype = 'hudson.model.StringParameterDefinition'
    base_param(registry, xml_parent, data, True, ptype)
def promoted_param(registry, xml_parent, data):
    """yaml: promoted build
    A promoted build parameter.
    Requires the Jenkins :jenkins-wiki:`Promoted Builds Plugin
    <Promoted+Builds+Plugin>`.
    :arg str name: the name of the parameter (required)
    :arg str project-name: the job from which the user can pick runs (required)
    :arg str promotion-name: promotion process to choose from (optional)
    :arg str description: a description of the parameter (optional)
    Example:
    .. literalinclude::
       /../../tests/parameters/fixtures/promoted-build-param001.yaml
       :language: yaml
    """
    pdef = base_param(registry, xml_parent, data, False,
                      'hudson.plugins.promoted__builds.parameters.'
                      'PromotedBuildParameterDefinition')
    # project-name is mandatory; reject before touching the tree.
    if 'project-name' not in data:
        raise MissingAttributeError('project-name')
    XML.SubElement(pdef, 'projectName').text = data['project-name']
    XML.SubElement(pdef, 'promotionProcessName').text = data.get(
        'promotion-name')
def password_param(registry, xml_parent, data):
    """yaml: password
    A password parameter.
    :arg str name: the name of the parameter
    :arg str default: the default value of the parameter (optional)
    :arg str description: a description of the parameter (optional)
    Example::
        parameters:
          - password:
              name: FOO
              default: 1HSC0Ts6E161FysGf+e1xasgsHkgleLh09JUTYnipPvw=
              description: "A parameter named FOO."
    """
    ptype = 'hudson.model.PasswordParameterDefinition'
    base_param(registry, xml_parent, data, True, ptype)
def bool_param(registry, xml_parent, data):
    """yaml: bool
    A boolean parameter.
    :arg str name: the name of the parameter
    :arg str default: the default value of the parameter (optional)
    :arg str description: a description of the parameter (optional)
    Example::
        parameters:
          - bool:
              name: FOO
              default: false
              description: "A parameter named FOO, defaults to 'false'."
    """
    # Normalize the default to the lowercase 'true'/'false' Jenkins expects,
    # on a shallow copy so the caller's dict is not mutated in place.
    data = dict(data, default=str(data.get('default', False)).lower())
    base_param(registry, xml_parent, data, True,
               'hudson.model.BooleanParameterDefinition')
def file_param(registry, xml_parent, data):
    """yaml: file
    A file parameter.
    :arg str name: the target location for the file upload
    :arg str description: a description of the parameter (optional)
    Example::
        parameters:
          - file:
              name: test.txt
              description: "Upload test.txt."
    """
    # File parameters carry no default value.
    ptype = 'hudson.model.FileParameterDefinition'
    base_param(registry, xml_parent, data, False, ptype)
def text_param(registry, xml_parent, data):
    """yaml: text
    A text parameter.
    :arg str name: the name of the parameter
    :arg str default: the default value of the parameter (optional)
    :arg str description: a description of the parameter (optional)
    Example::
        parameters:
          - text:
              name: FOO
              default: bar
              description: "A parameter named FOO, defaults to 'bar'."
    """
    ptype = 'hudson.model.TextParameterDefinition'
    base_param(registry, xml_parent, data, True, ptype)
def label_param(registry, xml_parent, data):
    """yaml: label
    A node label parameter.
    :arg str name: the name of the parameter
    :arg str default: the default value of the parameter (optional)
    :arg str description: a description of the parameter (optional)
    :arg bool all-nodes: to run job on all nodes matching label
        in parallel (default: false)
    :arg str matching-label: to run all nodes matching label
        'success', 'unstable' or 'allCases' (optional)
    :arg str node-eligibility: all nodes, ignore temporary nodes or
        ignore temporary offline nodes (optional, default all nodes)
    Example:
    .. literalinclude:: /../../tests/parameters/fixtures/node-label001.yaml
       :language: yaml
    """
    pdef = base_param(registry, xml_parent, data, True,
                      'org.jvnet.jenkins.plugins.nodelabelparameter.'
                      'LabelParameterDefinition')
    valid_types = ['allCases', 'success', 'unstable']
    mapping = [
        ('all-nodes', 'allNodesMatchingLabel', False),
        # 4-tuple form: the final element restricts the accepted values.
        ('matching-label', 'triggerIfResult', 'allCases', valid_types),
    ]
    helpers.convert_mapping_to_xml(pdef, data, mapping, fail_required=True)
    eligibility_label = data.get('node-eligibility', 'all').lower()
    eligibility_label_dict = {
        'all': 'org.jvnet.jenkins.plugins.'
               'nodelabelparameter.node.'
               'AllNodeEligibility',
        'ignore-offline': 'org.jvnet.jenkins.plugins.'
                          'nodelabelparameter.node.'
                          'IgnoreOfflineNodeEligibility',
        'ignore-temp-offline': 'org.jvnet.jenkins.plugins.'
                               'nodelabelparameter.node.'
                               'IgnoreTempOfflineNodeEligibility',
    }
    if eligibility_label not in eligibility_label_dict:
        # Fix: report the attribute *name* first (the original passed the
        # bad value twice), so the error identifies which setting failed.
        raise InvalidAttributeError('node-eligibility', eligibility_label,
                                    eligibility_label_dict.keys())
    XML.SubElement(pdef, 'nodeEligibility').set(
        "class", eligibility_label_dict[eligibility_label])
def node_param(registry, xml_parent, data):
    """yaml: node
    Defines a list of nodes where this job could potentially be executed on.
    Restrict where this project can be run, If your using a node or label
    parameter to run your job on a particular node, you should not use the
    option "Restrict where this project can be run" in the job configuration
    - it will not have any effect to the selection of your node anymore!
    :arg str name: the name of the parameter
    :arg str description: a description of the parameter (optional)
    :arg list default-slaves: The nodes used when job gets triggered
        by anything else other than manually
    :arg list allowed-slaves: The nodes available for selection
        when job gets triggered manually. Empty means 'All'.
    :arg bool ignore-offline-nodes: Ignore nodes not online or not having
        executors (default false)
    :arg bool allowed-multiselect: Allow multi node selection for concurrent
        builds - this option only makes sense (and must be selected!) in
        case the job is configured with: "Execute concurrent builds if
        necessary". With this configuration the build will be executed on all
        the selected nodes in parallel. (default false)
    Example:
    .. literalinclude:: /../../tests/parameters/fixtures/node-param001.yaml
       :language: yaml
    """
    pdef = base_param(registry, xml_parent, data, False,
                      'org.jvnet.jenkins.plugins.nodelabelparameter.'
                      'NodeParameterDefinition')
    # Both slave lists share the same <string> child structure.
    for tag, key in (('defaultSlaves', 'default-slaves'),
                     ('allowedSlaves', 'allowed-slaves')):
        holder = XML.SubElement(pdef, tag)
        for slave in data.get(key, []):
            XML.SubElement(holder, 'string').text = slave
    XML.SubElement(pdef, 'ignoreOfflineNodes').text = str(
        data.get('ignore-offline-nodes', False)).lower()
    multiselect = data.get('allowed-multiselect', False)
    XML.SubElement(pdef, 'triggerIfResult').text = (
        'allowMultiSelectionForConcurrentBuilds' if multiselect
        else 'multiSelectionDisallowed')
    XML.SubElement(pdef, 'allowMultiNodeSelection').text = str(
        multiselect).lower()
    XML.SubElement(pdef, 'triggerConcurrentBuilds').text = str(
        multiselect).lower()
def choice_param(registry, xml_parent, data):
    """yaml: choice
    A single selection parameter.
    :arg str name: the name of the parameter
    :arg list choices: the available choices, first one is the default one.
    :arg str description: a description of the parameter (optional)
    Example::
        parameters:
          - choice:
              name: project
              choices:
                - nova
                - glance
              description: "On which project to run?"
    """
    pdef = base_param(registry, xml_parent, data, False,
                      'hudson.model.ChoiceParameterDefinition')
    # Surface a missing 'choices' list as a proper JJB error instead of a
    # bare KeyError, consistent with e.g. promoted_param's 'project-name'.
    try:
        choice_list = data['choices']
    except KeyError:
        raise MissingAttributeError('choices')
    choices = XML.SubElement(pdef, 'choices',
                             {'class': 'java.util.Arrays$ArrayList'})
    a = XML.SubElement(choices, 'a', {'class': 'string-array'})
    for choice in choice_list:
        XML.SubElement(a, 'string').text = choice
def credentials_param(registry, xml_parent, data):
    """yaml: credentials
    A credentials selection parameter. Requires the Jenkins
    :jenkins-wiki:`Credentials Plugin
    <Credentials+Plugin>`.
    :arg str name: the name of the parameter
    :arg str type: credential type (optional, default 'any')
        :Allowed Values: * **any** Any credential type (default)
                         * **usernamepassword** Username with password
                         * **sshkey** SSH Username with private key
                         * **secretfile** Secret file
                         * **secrettext** Secret text
                         * **certificate** Certificate
    :arg bool required: whether this parameter is required (optional, default
        false)
    :arg string default: default credentials ID (optional)
    :arg str description: a description of the parameter (optional)
    Example:
    .. literalinclude:: \
    /../../tests/parameters/fixtures/credentials-param001.yaml
       :language: yaml
    """
    # Map each user-facing type keyword to the Jenkins credentials
    # implementation class recorded in the XML.
    cred_impl_types = {
        'any': 'com.cloudbees.plugins.credentials.common.StandardCredentials',
        'usernamepassword': 'com.cloudbees.plugins.credentials.impl.' +
                            'UsernamePasswordCredentialsImpl',
        'sshkey': 'com.cloudbees.jenkins.plugins.sshcredentials.impl.' +
                  'BasicSSHUserPrivateKey',
        'secretfile': 'org.jenkinsci.plugins.plaincredentials.impl.' +
                      'FileCredentialsImpl',
        'secrettext': 'org.jenkinsci.plugins.plaincredentials.impl.' +
                      'StringCredentialsImpl',
        'certificate': 'com.cloudbees.plugins.credentials.impl.' +
                       'CertificateCredentialsImpl'
    }
    # Validate the requested type before emitting any XML.
    cred_type = data.get('type', 'any').lower()
    if cred_type not in cred_impl_types:
        raise InvalidAttributeError('type', cred_type, cred_impl_types.keys())
    pdef = base_param(registry, xml_parent, data, False,
                      'com.cloudbees.plugins.credentials.' +
                      'CredentialsParameterDefinition')
    XML.SubElement(pdef, 'defaultValue').text = data.get('default', '')
    XML.SubElement(pdef, 'credentialType').text = cred_impl_types[cred_type]
    # Jenkins expects lowercase 'true'/'false' in the XML.
    XML.SubElement(pdef, 'required').text = str(data.get('required',
                                                         False)).lower()
def run_param(registry, xml_parent, data):
    """yaml: run
    A run parameter.
    :arg str name: the name of the parameter
    :arg str project-name: the name of job from which the user can pick runs
    :arg str description: a description of the parameter (optional)
    Example:
    .. literalinclude:: /../../tests/parameters/fixtures/run-param001.yaml
       :language: yaml
    """
    pdef = base_param(registry, xml_parent, data, False,
                      'hudson.model.RunParameterDefinition')
    # project-name is required (None default => fail_required raises).
    helpers.convert_mapping_to_xml(
        pdef, data, [('project-name', 'projectName', None)],
        fail_required=True)
def extended_choice_param(registry, xml_parent, data):
    """yaml: extended-choice
    Creates an extended choice parameter where values can be read from a file
    Requires the Jenkins :jenkins-wiki:`Extended Choice Parameter Plugin
    <Extended+Choice+Parameter+plugin>`.
    :arg str name: name of the parameter
    :arg str description: description of the parameter
        (optional, default '')
    :arg str property-file: location of property file to read from
        (optional, default '')
    :arg str property-key: key for the property-file (optional, default '')
    :arg bool quote-value: whether to put quotes around the property
        when passing to Jenkins (optional, default false)
    :arg str visible-items: number of items to show in the list
        (optional, default 5)
    :arg str type: type of select, can be single-select, multi-select,
        radio, checkbox or textbox (optional, default single-select)
    :arg str value: comma separated list of values for the single select
        or multi-select box (optional, default '')
    :arg str default-value: used to set the initial selection of the
        single-select or multi-select box (optional, default '')
    :arg str value-description: comma separated list of value descriptions
        for the single select or multi-select box (optional, default '')
    :arg str default-property-file: location of property file when default
        value needs to come from a property file (optional, default '')
    :arg str default-property-key: key for the default property file
        (optional, default '')
    :arg str description-property-file: location of property file when value
        description needs to come from a property file (optional, default '')
    :arg str description-property-key: key for the value description
        property file (optional, default '')
    :arg str multi-select-delimiter: value between selections when the
        parameter is a multi-select (optional, default ',')
    :arg str groovy-script: the groovy script contents (optional, default '')
    :arg str groovy-script-file: location of groovy script file to generate
        parameters (optional, default '')
    :arg str bindings: variable bindings for the groovy script
        (optional, default '')
    :arg str classpath: the classpath for the groovy script
        (optional, default '')
    :arg str default-groovy-script: the default groovy
        script contents (optional, default '')
    :arg str default-groovy-classpath: the default classpath for the
        groovy script (optional, default '')
    :arg str description-groovy-script: location of groovy script when value
        description needs to come from a groovy script (optional, default '')
    :arg str description-groovy-classpath: classpath for the value description
        groovy script (optional, default '')
    Minimal Example:
    .. literalinclude:: \
    /../../tests/parameters/fixtures/extended-choice-param-minimal.yaml
       :language: yaml
    Full Example:
    .. literalinclude:: \
    /../../tests/parameters/fixtures/extended-choice-param-full.yaml
       :language: yaml
    """
    pdef = base_param(registry, xml_parent, data, False,
                      'com.cwctravel.hudson.plugins.'
                      'extended__choice__parameter.'
                      'ExtendedChoiceParameterDefinition')
    # Accept both the friendly names and the plugin's raw PT_* constants.
    choicedict = {'single-select': 'PT_SINGLE_SELECT',
                  'multi-select': 'PT_MULTI_SELECT',
                  'radio': 'PT_RADIO',
                  'checkbox': 'PT_CHECKBOX',
                  'textbox': 'PT_TEXTBOX',
                  'PT_SINGLE_SELECT': 'PT_SINGLE_SELECT',
                  'PT_MULTI_SELECT': 'PT_MULTI_SELECT',
                  'PT_RADIO': 'PT_RADIO',
                  'PT_CHECKBOX': 'PT_CHECKBOX',
                  'PT_TEXTBOX': 'PT_TEXTBOX'}
    # NOTE: the tuple order below fixes the XML element order, which the
    # test fixtures compare against — do not reorder.
    mapping = [
        ('value', 'value', ''),
        ('visible-items', 'visibleItemCount', 5),
        ('multi-select-delimiter', 'multiSelectDelimiter', ','),
        ('quote-value', 'quoteValue', False),
        ('default-value', 'defaultValue', ''),
        ('value-description', 'descriptionPropertyValue', ''),
        ('type', 'type', 'single-select', choicedict),
        ('property-file', 'propertyFile', ''),
        ('property-key', 'propertyKey', ''),
        ('default-property-file', 'defaultPropertyFile', ''),
        ('default-property-key', 'defaultPropertyKey', ''),
        ('description-property-file', 'descriptionPropertyFile', ''),
        ('description-property-key', 'descriptionPropertyKey', ''),
        ('bindings', 'bindings', ''),
        ('groovy-script', 'groovyScript', ''),
        ('groovy-script-file', 'groovyScriptFile', ''),
        ('classpath', 'groovyClasspath', ''),
        ('default-groovy-script', 'defaultGroovyScript', ''),
        ('default-groovy-classpath', 'defaultGroovyClasspath', ''),
        ('description-groovy-script', 'descriptionGroovyScript', ''),
        ('description-groovy-classpath', 'descriptionGroovyClasspath', ''),
    ]
    helpers.convert_mapping_to_xml(pdef, data, mapping, fail_required=True)
def validating_string_param(registry, xml_parent, data):
    """yaml: validating-string
    A validating string parameter
    Requires the Jenkins :jenkins-wiki:`Validating String Plugin
    <Validating+String+Parameter+Plugin>`.
    :arg str name: the name of the parameter
    :arg str default: the default value of the parameter (optional)
    :arg str description: a description of the parameter (optional)
    :arg str regex: a regular expression to validate the string
    :arg str msg: a message to display upon failed validation
    Example::
        parameters:
          - validating-string:
              name: FOO
              default: bar
              description: "A parameter named FOO, defaults to 'bar'."
              regex: [A-Za-z]*
              msg: Your entered value failed validation
    """
    pdef = base_param(registry, xml_parent, data, True,
                      'hudson.plugins.validating__string__parameter.'
                      'ValidatingStringParameterDefinition')
    # Both attributes are required (None default => fail_required raises).
    helpers.convert_mapping_to_xml(
        pdef, data,
        [('regex', 'regex', None),
         ('msg', 'failedValidationMessage', None)],
        fail_required=True)
def svn_tags_param(registry, xml_parent, data):
    """yaml: svn-tags
    A svn tag parameter
    Requires the Jenkins :jenkins-wiki:`Parameterized Trigger Plugin
    <Parameterized+Trigger+Plugin>`.
    :arg str name: the name of the parameter
    :arg str url: the url to list tags from
    :arg str credentials-id: Credentials ID to use for authentication
        (default '')
    :arg str filter: the regular expression to filter tags (default '')
    :arg str default: the default value of the parameter (default '')
    :arg str description: a description of the parameter (default '')
    :arg int max-tags: the number of tags to display (default '100')
    :arg bool sort-newest-first: sort tags from newest to oldest (default true)
    :arg bool sort-z-to-a: sort tags in reverse alphabetical order
        (default false)
    Example::
        parameters:
          - svn-tags:
              name: BRANCH_NAME
              default: release
              description: A parameter named BRANCH_NAME default is release
              url: http://svn.example.com/repo
              filter: [A-za-z0-9]*
    """
    pdef = base_param(registry, xml_parent, data, True,
                      'hudson.scm.listtagsparameter.'
                      'ListSubversionTagsParameterDefinition')
    mapping = [
        ('url', 'tagsDir', None),
        ('credentials-id', 'credentialsId', ''),
        ('filter', 'tagsFilter', ''),
        ('max-tags', 'maxTags', '100'),
        ('sort-newest-first', 'reverseByDate', True),
        ('sort-z-to-a', 'reverseByName', False),
        # Empty data key: no user-facing option, always emit the fixed
        # default so the <uuid> element is present in the XML.
        ('', 'uuid', "1-1-1-1-1"),
    ]
    helpers.convert_mapping_to_xml(pdef, data, mapping, fail_required=True)
def dynamic_choice_param(registry, xml_parent, data):
    """yaml: dynamic-choice
    Dynamic Choice Parameter
    Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
    <Dynamic+Parameter+Plug-in>`.
    :arg str name: the name of the parameter
    :arg str description: a description of the parameter (optional)
    :arg str script: Groovy expression which generates the potential choices.
    :arg bool remote: the script will be executed on the slave where the build
        is started (default false)
    :arg str classpath: class path for script (optional)
    :arg bool read-only: user can't modify parameter once populated
        (default false)
    Example::
        parameters:
          - dynamic-choice:
              name: OPTIONS
              description: "Available options"
              script: "['optionA', 'optionB']"
              remote: false
              read-only: false
    """
    ptype = 'ChoiceParameterDefinition'
    dynamic_param_common(registry, xml_parent, data, ptype)
def dynamic_string_param(registry, xml_parent, data):
    """yaml: dynamic-string
    Dynamic Parameter
    Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
    <Dynamic+Parameter+Plug-in>`.
    :arg str name: the name of the parameter
    :arg str description: a description of the parameter (optional)
    :arg str script: Groovy expression which generates the potential choices
    :arg bool remote: the script will be executed on the slave where the build
        is started (default false)
    :arg str classpath: class path for script (optional)
    :arg bool read-only: user can't modify parameter once populated
        (default false)
    Example::
        parameters:
          - dynamic-string:
              name: FOO
              description: "A parameter named FOO, defaults to 'bar'."
              script: "bar"
              remote: false
              read-only: false
    """
    ptype = 'StringParameterDefinition'
    dynamic_param_common(registry, xml_parent, data, ptype)
def dynamic_choice_scriptler_param(registry, xml_parent, data):
    """yaml: dynamic-choice-scriptler
    Dynamic Choice Parameter (Scriptler)
    Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
    <Dynamic+Parameter+Plug-in>`.
    :arg str name: the name of the parameter
    :arg str description: a description of the parameter (optional)
    :arg str script-id: Groovy script which generates the default value
    :arg list parameters: parameters to corresponding script
        :Parameter: * **name** (`str`) Parameter name
                    * **value** (`str`) Parameter value
    :arg bool remote: the script will be executed on the slave where the build
        is started (default false)
    :arg bool read-only: user can't modify parameter once populated
        (default false)
    Example::
        parameters:
          - dynamic-choice-scriptler:
              name: OPTIONS
              description: "Available options"
              script-id: "scriptid.groovy"
              parameters:
                - name: param1
                  value: value1
                - name: param2
                  value: value2
              remote: false
              read-only: false
    """
    ptype = 'ScriptlerChoiceParameterDefinition'
    dynamic_scriptler_param_common(registry, xml_parent, data, ptype)
def dynamic_string_scriptler_param(registry, xml_parent, data):
    """yaml: dynamic-string-scriptler
    Dynamic Parameter (Scriptler)
    Requires the Jenkins :jenkins-wiki:`Jenkins Dynamic Parameter Plug-in
    <Dynamic+Parameter+Plug-in>`.
    :arg str name: the name of the parameter
    :arg str description: a description of the parameter (optional)
    :arg str script-id: Groovy script which generates the default value
    :arg list parameters: parameters to corresponding script
        :Parameter: * **name** (`str`) Parameter name
                    * **value** (`str`) Parameter value
    :arg bool remote: the script will be executed on the slave where the build
        is started (default false)
    :arg bool read-only: user can't modify parameter once populated
        (default false)
    Example::
        parameters:
          - dynamic-string-scriptler:
              name: FOO
              description: "A parameter named FOO, defaults to 'bar'."
              script-id: "scriptid.groovy"
              parameters:
                - name: param1
                  value: value1
                - name: param2
                  value: value2
              remote: false
              read-only: false
    """
    ptype = 'ScriptlerStringParameterDefinition'
    dynamic_scriptler_param_common(registry, xml_parent, data, ptype)
def dynamic_param_common(registry, xml_parent, data, ptype):
    """Shared XML generation for the Dynamic Parameter plugin types."""
    pdef = base_param(registry, xml_parent, data, False,
                      'com.seitenbau.jenkins.plugins.dynamicparameter.' +
                      ptype)
    XML.SubElement(pdef, '__remote').text = str(
        data.get('remote', False)).lower()
    XML.SubElement(pdef, '__script').text = data.get('script', None)
    # Fixed serialized hudson.FilePath structure the plugin expects for the
    # local classpath base directory.
    localBaseDir = XML.SubElement(pdef, '__localBaseDirectory',
                                  {'serialization': 'custom'})
    filePath = XML.SubElement(localBaseDir, 'hudson.FilePath')
    default = XML.SubElement(filePath, 'default')
    XML.SubElement(filePath, 'boolean').text = "true"
    XML.SubElement(default, 'remote').text = \
        "/var/lib/jenkins/dynamic_parameter/classpath"
    XML.SubElement(pdef, '__remoteBaseDirectory').text = \
        "dynamic_parameter_classpath"
    XML.SubElement(pdef, '__classPath').text = data.get('classpath', None)
    XML.SubElement(pdef, 'readonlyInputField').text = str(
        data.get('read-only', False)).lower()
def dynamic_scriptler_param_common(registry, xml_parent, data, ptype):
    """Shared XML generation for the Scriptler-backed dynamic parameters."""
    pdef = base_param(registry, xml_parent, data, False,
                      'com.seitenbau.jenkins.plugins.dynamicparameter.'
                      'scriptler.' + ptype)
    parametersXML = XML.SubElement(pdef, '__parameters')
    parameters = data.get('parameters', [])
    if parameters:
        # Each script parameter becomes a name/value pair; both required.
        mapping = [
            ('name', 'name', None),
            ('value', 'value', None),
        ]
        for parameter in parameters:
            parameterXML = XML.SubElement(parametersXML,
                                          'com.seitenbau.jenkins.plugins.'
                                          'dynamicparameter.scriptler.'
                                          'ScriptlerParameterDefinition_'
                                          '-ScriptParameter')
            helpers.convert_mapping_to_xml(
                parameterXML, parameter, mapping, fail_required=True)
    mapping = [
        ('script-id', '__scriptlerScriptId', None),
        ('remote', '__remote', False),
        ('read-only', 'readonlyInputField', False),
    ]
    helpers.convert_mapping_to_xml(pdef, data, mapping, fail_required=True)
def matrix_combinations_param(registry, xml_parent, data):
    """yaml: matrix-combinations
    Matrix combinations parameter
    Requires the Jenkins :jenkins-wiki:`Matrix Combinations Plugin
    <Matrix+Combinations+Plugin>`.
    :arg str name: the name of the parameter
    :arg str description: a description of the parameter (optional)
    :arg str filter: Groovy expression to use filter the combination by
        default (optional)
    Example:
    .. literalinclude:: \
    /../../tests/parameters/fixtures/matrix-combinations-param001.yaml
       :language: yaml
    """
    # This type does not go through base_param; it builds its own element.
    pdef = XML.SubElement(
        xml_parent,
        'hudson.plugins.matrix__configuration__parameter.'
        'MatrixCombinationsParameterDefinition')
    mapping = [
        ('name', 'name', None),
        ('description', 'description', ''),
        ('filter', 'defaultCombinationFilter', ''),
    ]
    helpers.convert_mapping_to_xml(pdef, data, mapping, fail_required=True)
    return pdef
def copyartifact_build_selector_param(registry, xml_parent, data):
    """yaml: copyartifact-build-selector
    Control via a build parameter, which build the copyartifact plugin should
    copy when it is configured to use 'build-param'. Requires the Jenkins
    :jenkins-wiki:`Copy Artifact plugin <Copy+Artifact+Plugin>`.
    :arg str name: name of the build parameter to store the selection in
    :arg str description: a description of the parameter (optional)
    :arg str which-build: which to provide as the default value in the UI. See
        ``which-build`` param of :py:mod:`~builders.copyartifact` from the
        builders module for the available values as well as options available
        that control additional behaviour for the selected value.
    Example:
    .. literalinclude::
       /../../tests/parameters/fixtures/copyartifact-build-selector001.yaml
       :language: yaml
    """
    pdef = XML.SubElement(xml_parent,
                          'hudson.plugins.copyartifact.'
                          'BuildSelectorParameter')
    mapping = [
        ('name', 'name', None),
        ('description', 'description', ''),
    ]
    helpers.convert_mapping_to_xml(pdef, data, mapping, fail_required=True)
    # Delegate the <defaultSelector> child to the shared copyartifact helper.
    helpers.copyartifact_build_selector(pdef, data, 'defaultSelector')
def maven_metadata_param(registry, xml_parent, data):
    """yaml: maven-metadata
    This parameter allows the resolution of maven artifact versions
    by contacting the repository and reading the maven-metadata.xml.
    Requires the Jenkins :jenkins-wiki:`Maven Metadata Plugin
    <Maven+Metadata+Plugin>`.

    :arg str name: Name of the parameter
    :arg str description: Description of the parameter (optional)
    :arg str repository-base-url: URL from where you retrieve your artifacts
        (default '')
    :arg str repository-username: Repository's username if authentication is
        required. (default '')
    :arg str repository-password: Repository's password if authentication is
        required. (default '')
    :arg str artifact-group-id: Unique project identifier (default '')
    :arg str artifact-id: Name of the artifact without version (default '')
    :arg str packaging: Artifact packaging option. Could be something such as
        jar, zip, pom.... (default '')
    :arg str versions-filter: Specify a regular expression which will be used
        to filter the versions which are actually displayed when triggering a
        new build. (default '')
    :arg str default-value: For features such as SVN polling a default value
        is required. If job will only be started manually, this field is not
        necessary. (default '')
    :arg str maximum-versions-to-display: The maximum number of versions to
        display in the drop-down. Any non-number value as well as 0 or negative
        values will default to all. (default 10)
    :arg str sorting-order: ascending or descending
        (default descending)

    Example:

    .. literalinclude::
        /../../tests/parameters/fixtures/maven-metadata-param001.yaml
       :language: yaml
    """
    pdef = base_param(registry, xml_parent, data, False,
                      'eu.markov.jenkins.plugin.mvnmeta.'
                      'MavenMetadataParameterDefinition')
    # Plain string attributes that map 1:1 onto the plugin's XML fields.
    mapping = [
        ('repository-base-url', 'repoBaseUrl', ''),
        ('artifact-group-id', 'groupId', ''),
        ('artifact-id', 'artifactId', ''),
        ('packaging', 'packaging', ''),
        ('default-value', 'defaultValue', ''),
        ('versions-filter', 'versionFilter', ''),
    ]
    helpers.convert_mapping_to_xml(pdef, data, mapping, fail_required=True)
    sort_order = data.get('sorting-order', 'descending').lower()
    sort_dict = {'descending': 'DESC',
                 'ascending': 'ASC'}
    if sort_order not in sort_dict:
        # Fix: name the offending attribute ('sorting-order') instead of
        # repeating its value, so the raised error message identifies which
        # yaml key was invalid.
        raise InvalidAttributeError('sorting-order', sort_order,
                                    sort_dict.keys())
    XML.SubElement(pdef, 'sortOrder').text = sort_dict[sort_order]
    mapping = [
        ('maximum-versions-to-display', 'maxVersions', 10),
        ('repository-username', 'username', ''),
        ('repository-password', 'password', ''),
    ]
    helpers.convert_mapping_to_xml(pdef, data, mapping, fail_required=True)
def hidden_param(parser, xml_parent, data):
    """yaml: hidden
    Allows you to use parameters hidden from the build with parameter page.
    Requires the Jenkins :jenkins-wiki:`Hidden Parameter Plugin
    <Hidden+Parameter+Plugin>`.

    :arg str name: the name of the parameter
    :arg str default: the default value of the parameter (optional)
    :arg str description: a description of the parameter (optional)

    Example:

    .. literalinclude::
        /../../tests/parameters/fixtures/hidden-param001.yaml
       :language: yaml
    """
    plugin_class = 'com.wangyin.parameter.WHideParameterDefinition'
    # Hidden parameters support a default value, hence supports_default=True.
    base_param(parser, xml_parent, data, True, plugin_class)
def random_string_param(registry, xml_parent, data):
    """yaml: random-string
    This parameter generates a random string and passes it to the
    build, preventing Jenkins from combining queued builds.
    Requires the Jenkins :jenkins-wiki:`Random String Parameter Plugin
    <Random+String+Parameter+Plugin>`.

    :arg str name: Name of the parameter
    :arg str description: Description of the parameter (default '')
    :arg str failed-validation-message: Failure message to display for invalid
        input (default '')

    Example:

    .. literalinclude::
        /../../tests/parameters/fixtures/random-string-param001.yaml
       :language: yaml
    """
    pdef = XML.SubElement(
        xml_parent,
        'hudson.plugins.random__string__parameter.'
        'RandomStringParameterDefinition')
    # Explicit check keeps the historical JenkinsJobsException for a missing
    # name (convert_mapping_to_xml would raise a different error type).
    if 'name' not in data:
        raise JenkinsJobsException('random-string must have a name parameter.')
    helpers.convert_mapping_to_xml(
        pdef, data,
        [('name', 'name', None),
         ('description', 'description', ''),
         ('failed-validation-message', 'failedValidationMessage', '')],
        fail_required=True)
def git_parameter_param(registry, xml_parent, data):
    """yaml: git-parameter
    This parameter allows you to select a git tag, branch or revision number as
    parameter in Parametrized builds.
    Requires the Jenkins :jenkins-wiki:`Git Parameter Plugin
    <Git+Parameter+Plugin>`.

    :arg str name: Name of the parameter
    :arg str description: Description of the parameter (default '')
    :arg str type: The type of the list of parameters (default 'PT_TAG')

        :Allowed Values: * **PT_TAG** list of all commit tags in repository -
                           returns Tag Name
                         * **PT_BRANCH** list of all branches in repository -
                           returns Branch Name
                         * **PT_BRANCH_TAG** list of all commit tags and all
                           branches in repository - returns Tag Name or Branch
                           Name
                         * **PT_REVISION** list of all revision sha1 in repository
                           followed by its author and date - returns Tag SHA1
                         * **PT_PULL_REQUEST**
    :arg str branch: Name of branch to look in. Used only if listing
        revisions. (default '')
    :arg str branchFilter: Regex used to filter displayed branches. If blank,
        the filter will default to ".*". Remote branches will be listed with
        the remote name first. E.g., "origin/master" (default '.*')
    :arg str tagFilter: Regex used to filter displayed branches. If blank, the
        filter will default to ".*". Remote branches will be listed with the
        remote name first. E.g., "origin/master" (default '*')
    :arg str sortMode: Mode of sorting. (default 'NONE')

        :Allowed Values: * **NONE**
                         * **DESCENDING**
                         * **ASCENDING**
                         * **ASCENDING_SMART**
                         * **DESCENDING_SMART**
    :arg str defaultValue: This value is returned when list is empty. (default
        '')
    :arg str selectedValue: Which value is selected, after loaded parameters.
        If you choose 'default', but default value is not present on the list,
        nothing is selected. (default 'NONE')

        :Allowed Values: * **NONE**
                         * **TOP**
                         * **DEFAULT**
    :arg str useRepository: If in the task is defined multiple repositories
        parameter specifies which the repository is taken into account. If the
        parameter is not defined, is taken first defined repository. The
        parameter is a regular expression which is compared with a URL
        repository. (default '')
    :arg bool quickFilterEnabled: When this option is enabled will show a text
        field. Parameter is filtered on the fly. (default false)

    Minimal Example:

    .. literalinclude::
        /../../tests/parameters/fixtures/git-parameter-param-minimal.yaml
       :language: yaml

    Full Example:

    .. literalinclude::
        /../../tests/parameters/fixtures/git-parameter-param-full.yaml
       :language: yaml
    """
    pdef = XML.SubElement(
        xml_parent,
        'net.uaznia.lukanus.hudson.plugins.gitparameter.'
        'GitParameterDefinition')
    # Closed value sets accepted by the plugin; convert_mapping_to_xml
    # validates user input against the 4th tuple element.
    types = ['PT_TAG', 'PT_BRANCH', 'PT_BRANCH_TAG', 'PT_REVISION',
             'PT_PULL_REQUEST']
    sort_modes = ['NONE', 'ASCENDING', 'ASCENDING_SMART', 'DESCENDING',
                  'DESCENDING_SMART']
    selected_values = ['NONE', 'TOP', 'DEFAULT']
    helpers.convert_mapping_to_xml(
        pdef, data,
        [('name', 'name', None),
         ('description', 'description', ''),
         ('type', 'type', 'PT_TAG', types),
         ('branch', 'branch', ''),
         ('tagFilter', 'tagFilter', '*'),
         ('branchFilter', 'branchFilter', '.*'),
         ('sortMode', 'sortMode', 'NONE', sort_modes),
         ('defaultValue', 'defaultValue', ''),
         ('selectedValue', 'selectedValue', 'NONE', selected_values),
         ('useRepository', 'useRepository', ''),
         ('quickFilterEnabled', 'quickFilterEnabled', False)],
        fail_required=True)
class Parameters(jenkins_jobs.modules.base.Base):
    """Job module that renders the ``parameters`` list into the job's
    ``<properties>/<hudson.model.ParametersDefinitionProperty>`` XML."""

    sequence = 21

    component_type = 'parameter'
    component_list_type = 'parameters'

    def gen_xml(self, xml_parent, data):
        # A <properties> container is created whether or not any parameters
        # are defined (other modules rely on it existing).
        properties = xml_parent.find('properties')
        if properties is None:
            properties = XML.SubElement(xml_parent, 'properties')
        parameters = data.get('parameters', [])
        if not parameters:
            return
        hmodel = 'hudson.model.'
        # The extended_choice parameter is also (deprecatedly) definable in
        # the properties module, which may have created these elements
        # before us; only add the ones that are still missing.
        pdefp = properties.find(hmodel + 'ParametersDefinitionProperty')
        if pdefp is None:
            pdefp = XML.SubElement(properties,
                                   hmodel + 'ParametersDefinitionProperty')
        pdefs = pdefp.find('parameterDefinitions')
        if pdefs is None:
            pdefs = XML.SubElement(pdefp, 'parameterDefinitions')
        for param in parameters:
            self.registry.dispatch('parameter', pdefs, param)
| 38.771881
| 79
| 0.639898
|
acfe017ee84fb34e1e4e8b92c62ee6faa6323232
| 199
|
py
|
Python
|
raspy/devices/picamera/__init__.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
raspy/devices/picamera/__init__.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
raspy/devices/picamera/__init__.py
|
cyrusbuilt/RasPy
|
1e34840cc90ea7f19317e881162209d3d819eb09
|
[
"MIT"
] | null | null | null |
"""This package contains modules for working with the PiCamera device."""
__all__ = (
"capture_utils",
"events",
"image_encoding",
"picamera_device",
"still_capture_settings"
)
| 18.090909
| 73
| 0.673367
|
acfe01f4de19cb857a33b7c855a79b35390114fb
| 2,938
|
py
|
Python
|
aligner/helper.py
|
zhouyangnk/Montreal-Forced-Aligner
|
4f8733409e79a50744616921a04fccf115e8af6f
|
[
"MIT"
] | null | null | null |
aligner/helper.py
|
zhouyangnk/Montreal-Forced-Aligner
|
4f8733409e79a50744616921a04fccf115e8af6f
|
[
"MIT"
] | null | null | null |
aligner/helper.py
|
zhouyangnk/Montreal-Forced-Aligner
|
4f8733409e79a50744616921a04fccf115e8af6f
|
[
"MIT"
] | null | null | null |
import os
import shutil
def thirdparty_binary(binary_name):
    """Resolve *binary_name* against the PATH; returns None when absent."""
    located = shutil.which(binary_name)
    return located
def make_path_safe(path):
    """Wrap *path* in double quotes so shells treat it as one token."""
    return '"%s"' % (path,)
def load_text(path):
    """Read a UTF-8 text file and return its stripped, lowercased contents."""
    with open(path, 'r', encoding='utf8') as handle:
        return handle.read().strip().lower()
def make_safe(element):
    """Render *element* as a string; lists are space-joined recursively."""
    if isinstance(element, list):
        return ' '.join(make_safe(item) for item in element)
    return str(element)
def output_mapping(mapping, path):
    """Write *mapping* to *path* as '<key> <value>' lines, sorted by key;
    list values are space-joined."""
    with open(path, 'w', encoding='utf8') as out:
        for key in sorted(mapping.keys()):
            value = mapping[key]
            if isinstance(value, list):
                value = ' '.join(value)
            out.write('{} {}\n'.format(key, value))
def save_scp(scp, path, sort=True, multiline=False):
    """Write *scp* entries to *path*, one per line (or two lines each when
    *multiline*), optionally sorting the entries first."""
    with open(path, 'w', encoding='utf8') as out:
        entries = sorted(scp) if sort else scp
        for entry in entries:
            if multiline:
                out.write('{}\n{}\n'.format(make_safe(entry[0]),
                                            make_safe(entry[1])))
            else:
                out.write('{}\n'.format(
                    ' '.join(make_safe(field) for field in entry)))
def save_groups(groups, seg_dir, pattern, multiline=False):
    """Write each group to ``seg_dir/pattern.format(i)`` via save_scp."""
    for index, group in enumerate(groups):
        target = os.path.join(seg_dir, pattern.format(index))
        save_scp(group, target, multiline=multiline)
def load_scp(path):
    '''
    Load a Kaldi script file (.scp)

    See http://kaldi-asr.org/doc/io.html#io_sec_scp_details for more information

    Parameters
    ----------
    path : str
        Path to Kaldi script file

    Returns
    -------
    dict
        Dictionary where the keys are the first couple and the values are all
        other columns in the script file
    '''
    scp = {}
    with open(path, 'r', encoding='utf8') as handle:
        for raw_line in handle:
            # Skip blank lines; split the rest on whitespace.
            tokens = raw_line.strip().split()
            if not tokens:
                continue
            key, rest = tokens[0], tokens[1:]
            # A single remaining column is stored as a scalar string,
            # multiple columns as a list.
            scp[key] = rest[0] if len(rest) == 1 else rest
    return scp
def filter_scp(uttlist, scp, exclude=False):
    """Keep the scp lines whose first column (utterance id) is in *uttlist*
    (or NOT in it when *exclude* is set).

    Modelled after
    https://github.com/kaldi-asr/kaldi/blob/master/egs/wsj/s5/utils/filter_scp.pl
    and used in the DNN recipes.  *scp* may be either a path to an scp file
    or an already-loaded list of lines.
    """
    if not isinstance(scp, list) and os.path.exists(scp):
        # Path provided: read the raw lines.
        with open(scp, 'r') as handle:
            candidate_lines = handle.readlines()
    else:
        # Already a list of lines.
        candidate_lines = scp
    wanted = set(uttlist)
    kept = []
    for entry in candidate_lines:
        utt_id = entry.split()[0]
        if exclude:
            if utt_id not in wanted:
                kept.append(entry)
        else:
            if utt_id in wanted:
                kept.append(entry)
    return kept
| 26
| 98
| 0.564329
|
acfe020e336689278c5c9ab7e248ffd87360c8d8
| 6,688
|
py
|
Python
|
tests/objects/test_link.py
|
malached/caldera
|
b622b0b8d0a04bcd0328040cbf53a01b93505afc
|
[
"Apache-2.0"
] | 3,385
|
2017-11-29T02:08:31.000Z
|
2022-03-31T13:38:11.000Z
|
tests/objects/test_link.py
|
malached/caldera
|
b622b0b8d0a04bcd0328040cbf53a01b93505afc
|
[
"Apache-2.0"
] | 1,283
|
2017-11-29T16:45:31.000Z
|
2022-03-31T20:10:04.000Z
|
tests/objects/test_link.py
|
malached/caldera
|
b622b0b8d0a04bcd0328040cbf53a01b93505afc
|
[
"Apache-2.0"
] | 800
|
2017-11-29T17:48:43.000Z
|
2022-03-30T22:39:40.000Z
|
from unittest import mock
import pytest
from app.objects.secondclass.c_link import Link
from app.objects.secondclass.c_fact import Fact
from app.objects.secondclass.c_relationship import Relationship
from app.service.interfaces.i_event_svc import EventServiceInterface
from app.utility.base_service import BaseService
@pytest.fixture
def fake_event_svc(loop):
    # In-memory stand-in for the app's event service: instead of publishing,
    # it records each fired event's kwargs keyed by (exchange, queue) so
    # tests can assert on them.  Registered as 'event_svc' for the fixture's
    # lifetime and deregistered on teardown.
    class FakeEventService(BaseService, EventServiceInterface):
        def __init__(self):
            self.fired = {}

        def reset(self):
            # Clear recorded events between assertions.
            self.fired = {}

        async def observe_event(self, callback, exchange=None, queue=None):
            # Subscriptions are irrelevant for these tests.
            pass

        async def fire_event(self, exchange=None, queue=None, timestamp=True, **callback_kwargs):
            # Record the event payload instead of dispatching it.
            self.fired[exchange, queue] = callback_kwargs

    service = FakeEventService()
    service.add_service('event_svc', service)
    yield service
    service.remove_service('event_svc')
class TestLink:
    # Behavioural tests for Link: equality semantics, status-change
    # eventing, agent_reported_time serialization round-trips, and
    # knowledge-service synchronization.

    def test_link_eq(self, ability, executor):
        # Links with the same paw/ability/used facts compare equal even
        # though their ids and commands differ.
        test_executor = executor(name='psh', platform='windows')
        test_ability = ability(ability_id='123', executors=[test_executor])
        fact = Fact(trait='remote.host.fqdn', value='dc')
        test_link = Link(command='sc.exe \\dc create sandsvc binpath= "s4ndc4t.exe -originLinkID 111111"',
                         paw='123456', ability=test_ability, id=111111, executor=test_executor)
        test_link.used = [fact]
        test_link2 = Link(command='sc.exe \\dc create sandsvc binpath= "s4ndc4t.exe -originLinkID 222222"',
                          paw='123456', ability=test_ability, id=222222, executor=test_executor)
        test_link2.used = [fact]
        assert test_link == test_link2

    def test_link_neq(self, ability, executor):
        # Different used facts (and commands) make links unequal.
        test_executor = executor(name='psh', platform='windows')
        test_ability = ability(ability_id='123', executors=[test_executor])
        fact_a = Fact(trait='host.user.name', value='a')
        fact_b = Fact(trait='host.user.name', value='b')
        test_link_a = Link(command='net user a', paw='123456', ability=test_ability, id=111111, executor=test_executor)
        test_link_a.used = [fact_a]
        test_link_b = Link(command='net user b', paw='123456', ability=test_ability, id=222222, executor=test_executor)
        test_link_b.used = [fact_b]
        assert test_link_a != test_link_b

    @mock.patch.object(Link, '_emit_status_change_event')
    def test_no_status_change_event_on_instantiation(self, mock_emit_status_change_method, ability, executor):
        # Constructing a Link must not fire a status-change event.
        executor = executor('psh', 'windows')
        ability = ability(executor=executor)
        Link(command='net user a', paw='123456', ability=ability, executor=executor)
        mock_emit_status_change_method.assert_not_called()

    @mock.patch.object(Link, '_emit_status_change_event')
    def test_status_change_event_fired_on_status_change(self, mock_emit_status_change_method, ability, executor):
        # Assigning link.status fires the event with old and new values.
        executor = executor('psh', 'windows')
        ability = ability(executor=executor)
        link = Link(command='net user a', paw='123456', ability=ability, executor=executor, status=-3)
        link.status = -5
        mock_emit_status_change_method.assert_called_with(from_status=-3, to_status=-5)

    def test_emit_status_change_event(self, loop, fake_event_svc, ability, executor):
        # Drive _emit_status_change_event directly and check the payload
        # recorded by the fake event service.
        executor = executor('psh', 'windows')
        ability = ability(executor=executor)
        link = Link(command='net user a', paw='123456', ability=ability, executor=executor, status=-3)
        fake_event_svc.reset()
        loop.run_until_complete(
            link._emit_status_change_event(
                from_status=-3,
                to_status=-5
            )
        )
        expected_key = (Link.EVENT_EXCHANGE, Link.EVENT_QUEUE_STATUS_CHANGED)
        assert expected_key in fake_event_svc.fired
        event_kwargs = fake_event_svc.fired[expected_key]
        assert event_kwargs['link'] == link.id
        assert event_kwargs['from_status'] == -3
        assert event_kwargs['to_status'] == -5

    def test_link_agent_reported_time_not_present_when_none_roundtrip(self, ability, executor):
        # An unset agent_reported_time is omitted from the serialized form
        # and round-trips back as None.
        test_executor = executor(name='psh', platform='windows')
        test_ability = ability(ability_id='123')
        test_link = Link(command='sc.exe \\dc create sandsvc binpath= "s4ndc4t.exe -originLinkID 111111"',
                         paw='123456', ability=test_ability, executor=test_executor, id=111111)
        serialized_link = test_link.display
        loaded_link = Link.load(serialized_link)
        assert 'agent_reported_time' not in serialized_link
        assert loaded_link.agent_reported_time is None

    def test_link_agent_reported_time_present_when_set_roundtrip(self, ability, executor):
        # A set agent_reported_time serializes to its string form and
        # round-trips back to the same timestamp.
        test_executor = executor(name='psh', platform='windows')
        test_ability = ability(ability_id='123')
        test_link = Link(command='sc.exe \\dc create sandsvc binpath= "s4ndc4t.exe -originLinkID 111111"',
                         paw='123456', ability=test_ability, executor=test_executor, id=111111,
                         agent_reported_time=BaseService.get_timestamp_from_string('2021-02-23 11:50:16'))
        serialized_link = test_link.display
        loaded_link = Link.load(serialized_link)
        assert serialized_link['agent_reported_time'] == '2021-02-23 11:50:16'
        assert loaded_link.agent_reported_time == BaseService.get_timestamp_from_string('2021-02-23 11:50:16')

    def test_link_knowledge_svc_synchronization(self, loop, executor, ability, knowledge_svc):
        # _create_relationships must record facts on the link itself and
        # mirror both facts and the relationship into the knowledge service.
        test_executor = executor(name='psh', platform='windows')
        test_ability = ability(ability_id='123', executors=[test_executor])
        fact = Fact(trait='remote.host.fqdn', value='dc')
        fact2 = Fact(trait='domain.user.name', value='Bob')
        relationship = Relationship(source=fact, edge='has_admin', target=fact2)
        test_link = Link(command='echo "this was a triumph"',
                         paw='123456', ability=test_ability, id=111111, executor=test_executor)
        loop.run_until_complete(test_link._create_relationships([relationship], None))
        checkable = [(x.trait, x.value) for x in test_link.facts]
        assert (fact.trait, fact.value) in checkable
        assert (fact2.trait, fact2.value) in checkable
        knowledge_base_f = loop.run_until_complete(knowledge_svc.get_facts(dict(source=test_link.id)))
        assert len(knowledge_base_f) == 2
        assert test_link.id in knowledge_base_f[0].links
        knowledge_base_r = loop.run_until_complete(knowledge_svc.get_relationships(dict(edge='has_admin')))
        assert len(knowledge_base_r) == 1
| 48.817518
| 119
| 0.691388
|
acfe026b11f2efb7d1e652ec6a4ed43697ff8678
| 3,788
|
py
|
Python
|
suites/API/DatabaseApi/BlocksTransactions/GetBlockTxNumber.py
|
echoprotocol/pytests
|
5dce698558c2ba703aea03aab79906af1437da5d
|
[
"MIT"
] | 1
|
2021-03-12T05:17:02.000Z
|
2021-03-12T05:17:02.000Z
|
suites/API/DatabaseApi/BlocksTransactions/GetBlockTxNumber.py
|
echoprotocol/pytests
|
5dce698558c2ba703aea03aab79906af1437da5d
|
[
"MIT"
] | 1
|
2019-11-19T12:10:59.000Z
|
2019-11-19T12:10:59.000Z
|
suites/API/DatabaseApi/BlocksTransactions/GetBlockTxNumber.py
|
echoprotocol/pytests
|
5dce698558c2ba703aea03aab79906af1437da5d
|
[
"MIT"
] | 2
|
2019-04-29T10:46:48.000Z
|
2019-10-29T10:01:03.000Z
|
# -*- coding: utf-8 -*-
from common.base_test import BaseTest
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to
# Suite metadata consumed by lemoncheesecake for test reporting.
SUITE = {
    "description": "Method 'get_block_tx_number'"
}
@lcc.prop("main", "type")
@lcc.prop("positive", "type")
@lcc.tags("api", "database_api", "database_api_blocks_transactions", "get_block_tx_number")
@lcc.suite("Check work of method 'get_block_tx_number'", rank=1)
class GetBlockTxNumber(BaseTest):
def __init__(self):
super().__init__()
self.__database_api_identifier = None
self.__registration_api_identifier = None
self.__network_broadcast_identifier = None
self.echo_acc0 = None
self.echo_acc1 = None
def setup_suite(self):
super().setup_suite()
self._connect_to_echopy_lib()
lcc.set_step("Setup for {}".format(self.__class__.__name__))
self.__database_api_identifier = self.get_identifier("database")
self.__registration_api_identifier = self.get_identifier("registration")
self.__network_broadcast_identifier = self.get_identifier("network_broadcast")
lcc.log_info(
"API identifiers are: database='{}', registration='{}', network_broadcast='{}'".format(
self.__database_api_identifier, self.__registration_api_identifier, self.__network_broadcast_identifier
)
)
self.echo_acc0 = self.get_account_id(
self.accounts[0], self.__database_api_identifier, self.__registration_api_identifier
)
self.echo_acc1 = self.get_account_id(
self.accounts[1], self.__database_api_identifier, self.__registration_api_identifier
)
lcc.log_info("Echo accounts are: #1='{}', #2='{}'".format(self.echo_acc0, self.echo_acc1))
def get_head_block_num(self):
return self.echo.api.database.get_dynamic_global_properties()["head_block_number"]
def setup_test(self, test):
lcc.set_step("Setup for '{}'".format(str(test).split(".")[-1]))
self.utils.cancel_all_subscriptions(self, self.__database_api_identifier)
lcc.log_info("Canceled all subscriptions successfully")
def teardown_test(self, test, status):
lcc.set_step("Teardown for '{}'".format(str(test).split(".")[-1]))
self.utils.cancel_all_subscriptions(self, self.__database_api_identifier)
lcc.log_info("Canceled all subscriptions successfully")
lcc.log_info("Test {}".format(status))
def teardown_suite(self):
self._disconnect_to_echopy_lib()
super().teardown_suite()
@lcc.test("Simple work of method 'get_block_tx_number'")
def method_main_check(self):
operation_count = 1
lcc.set_step("Perform transfer operation")
self.utils.perform_transfer_operations(
self,
self.echo_acc0,
self.echo_acc1,
self.__database_api_identifier,
operation_count=operation_count,
log_broadcast=False
)
lcc.log_info("Transaction was broadcasted")
lcc.set_step("Get block id and check that all {} transactions added successfully".format(operation_count))
response_id = self.send_request(
self.get_request("get_dynamic_global_properties"), self.__database_api_identifier
)
dynamic_global_property_object = self.get_response(response_id)["result"]
head_block_id = dynamic_global_property_object['head_block_id']
response_id = self.send_request(
self.get_request("get_block_tx_number", [head_block_id]), self.__database_api_identifier
)
tx_number = self.get_response(response_id)["result"]
check_that("block transaction number", tx_number, equal_to(operation_count))
| 43.045455
| 119
| 0.689282
|
acfe028f783142e500f7907affdd6584b7df8dee
| 2,743
|
py
|
Python
|
scripts/retimestamp_rosbag.py
|
KopanevPavel/Kimera-VIO-ROS
|
774ab62fac78f9a4c5aa08b76ed8f9d0dafa64d8
|
[
"BSD-2-Clause"
] | 1
|
2022-01-05T06:42:02.000Z
|
2022-01-05T06:42:02.000Z
|
scripts/retimestamp_rosbag.py
|
KopanevPavel/Kimera-VIO-ROS
|
774ab62fac78f9a4c5aa08b76ed8f9d0dafa64d8
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/retimestamp_rosbag.py
|
KopanevPavel/Kimera-VIO-ROS
|
774ab62fac78f9a4c5aa08b76ed8f9d0dafa64d8
|
[
"BSD-2-Clause"
] | 1
|
2020-08-05T15:41:30.000Z
|
2020-08-05T15:41:30.000Z
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------
# Function : restamp ros bagfile (using header timestamps)
# Project : IJRR MAV Datasets
# Author : www.asl.ethz.ch
# Version : V01 21JAN2016 Initial version.
# Comment :
# Status : under review
#
# Usage : python restamp_bag.py -i inbag.bag -o outbag.bag
#
#
# This file has been modified to fit the needs of the SparkVIO project.
# All original credit for this work goes to ETHZ.
# ------------------------------------------------------------------------------
import roslib
import rosbag
import rospy
import sys
import getopt
from std_msgs.msg import String
def main(argv):
    # Re-stamp every message in a rosbag so its recorded (bag) time matches
    # the message header timestamp; /clock uses msg.clock, while /tf and
    # /tf_static keep their original receipt time.
    # NOTE: this script uses Python 2 print statements and must be run
    # under Python 2.
    inputfile = ''
    outputfile = ''
    # parse arguments
    try:
        opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
    except getopt.GetoptError:
        print 'usage: restamp_bag.py -i <inputfile> -o <outputfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'usage: python restamp_bag.py -i <inputfile> -o <outputfile>'
            sys.exit()
        elif opt in ("-i", "--ifile"):
            inputfile = arg
        elif opt in ("-o", "--ofile"):
            outputfile = arg

    # print console header
    print ""
    print "restamp_bag"
    print ""
    print 'input file:  ', inputfile
    print 'output file: ', outputfile
    print ""
    print "starting restamping (may take a while)"
    print ""

    outbag = rosbag.Bag(outputfile, 'w')
    messageCounter = 0
    # Progress dot printed once per this many messages.
    kPrintDotReductionFactor = 1000

    try:
        for topic, msg, t in rosbag.Bag(inputfile).read_messages():
            if topic == "/clock":
                # Clock messages carry their time in msg.clock, not a header.
                outbag.write(topic, msg, msg.clock)
            elif topic == "/tf":
                outbag.write(topic, msg, t) # TODO(marcus): decide on this?
            elif topic == "/tf_static":
                outbag.write(topic, msg, t) # TODO(marcus): decide on this?
            else:
                try:
                    # Write message in output bag with input message header stamp
                    outbag.write(topic, msg, msg.header.stamp)
                except:
                    # Message type without a header: fall through silently
                    # after reporting (message is dropped from the output).
                    print "a message has no header here. Coming from topic: ", topic

            if (messageCounter % kPrintDotReductionFactor) == 0:
                #print '.',
                sys.stdout.write('.')
                sys.stdout.flush()
            messageCounter = messageCounter + 1

    # print console footer
    finally:
        print ""
        print ""
        print "finished iterating through input bag"
        print "output bag written"
        print ""
        outbag.close()


if __name__ == "__main__":
   main(sys.argv[1:])
| 29.180851
| 84
| 0.533358
|
acfe03f79a54a7786e6aaeaaa8251345b5de8dc8
| 3,424
|
py
|
Python
|
observable/manage.py
|
lycantropos/admin
|
906b206cdecad65aff55a67114350f8332837947
|
[
"MIT"
] | null | null | null |
observable/manage.py
|
lycantropos/admin
|
906b206cdecad65aff55a67114350f8332837947
|
[
"MIT"
] | null | null | null |
observable/manage.py
|
lycantropos/admin
|
906b206cdecad65aff55a67114350f8332837947
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3.6
import logging.config
import logging.handlers
import os
import sys
from asyncio import (get_event_loop,
ensure_future)
import click
from aiohttp import ClientSession
from aiohttp.web import run_app
from observable.app import create_app
from observable.config import PACKAGE_NAME
from observable.services import scanner
@click.group()
@click.option('--verbose', '-v',
              is_flag=True,
              help='Set logging level to DEBUG.')
@click.pass_context
def main(ctx: click.Context,
         verbose: bool) -> None:
    # CLI entry group: configure logging from the instance name, then stash
    # host/port/name on the click context for subcommands.
    # (Comment, not a docstring, so click's --help output is unchanged.)
    instance_name = os.environ['Observable.Name']
    set_logging(instance_name=instance_name,
                verbose=verbose)
    ctx.obj = {
        'host': os.environ['Observable.Host'],
        'port': int(os.environ['Observable.Port']),
        'name': instance_name,
    }
def set_logging(
        *,
        instance_name: str,
        package_name: str = PACKAGE_NAME,
        log_file_extension: str = 'log',
        verbose: bool) -> None:
    """Configure logging to a per-instance file; quieten the package logger
    to INFO unless *verbose* is set."""
    log_file_name = instance_name + os.extsep + log_file_extension
    dict_configurator(log_file_name).configure()
    if not verbose:
        logging.getLogger(package_name).setLevel(logging.INFO)
def dict_configurator(logs_file_name: str,
                      version: int = 1) -> logging.config.DictConfigurator:
    """Build a DictConfigurator that sends DEBUG+ records both to stdout
    (short format) and to *logs_file_name* (timestamped format)."""
    formatters = {
        'console': {'format': '[%(levelname)-8s %(name)s] %(msg)s'},
        'file': {'format': '[%(levelname)-8s %(asctime)s - %(name)s] '
                           '%(message)s'},
    }
    handlers = {
        'console': {'class': 'logging.StreamHandler',
                    'level': logging.DEBUG,
                    'formatter': 'console',
                    'stream': sys.stdout},
        'file': {'class': 'logging.FileHandler',
                 'level': logging.DEBUG,
                 'formatter': 'file',
                 'filename': logs_file_name},
    }
    loggers = {None: {'level': logging.DEBUG,
                      'handlers': ('console', 'file'),
                      'qualname': PACKAGE_NAME}}
    return logging.config.DictConfigurator(dict(formatters=formatters,
                                                handlers=handlers,
                                                loggers=loggers,
                                                version=version))
@main.command()
@click.pass_context
def run(ctx: click.Context) -> None:
    # Start the aiohttp application together with the periodic scanner task.
    loop = get_event_loop()
    subscriptions = dict()
    session = ClientSession(loop=loop)
    app = create_app(loop,
                     subscriptions=subscriptions,
                     session=session)
    scan_coroutine = scanner.run_periodically(subscriptions,
                                              delay=2,
                                              name=ctx.obj['name'],
                                              session=session,
                                              loop=loop)
    # Schedule the scanner before handing the loop to aiohttp.
    ensure_future(scan_coroutine, loop=loop)
    run_app(app,
            host=ctx.obj['host'],
            port=ctx.obj['port'],
            print=logging.info,
            loop=loop)
# Allow running this module directly as the CLI entry point.
if __name__ == '__main__':
    main()
| 32
| 79
| 0.557827
|
acfe04621f039fe643ece6b17d9356e48ba5e091
| 153
|
py
|
Python
|
sample/WRS2018/SP-DoubleArmV7S-ROS.py
|
jun0/choreonoid
|
37167e52bfa054088272e1924d2062604104ac08
|
[
"MIT"
] | null | null | null |
sample/WRS2018/SP-DoubleArmV7S-ROS.py
|
jun0/choreonoid
|
37167e52bfa054088272e1924d2062604104ac08
|
[
"MIT"
] | null | null | null |
sample/WRS2018/SP-DoubleArmV7S-ROS.py
|
jun0/choreonoid
|
37167e52bfa054088272e1924d2062604104ac08
|
[
"MIT"
] | null | null | null |
import WRSUtil
# Load the WRS2018 sample project: single scene view, "SP" task set, the
# AISTSimulator engine with the DoubleArmV7S robot model, vision simulation
# enabled, and remote communication via ROS.
WRSUtil.loadProject(
    "SingleSceneView", "SP", "AISTSimulator", "DoubleArmV7S",
    enableVisionSimulation = True, remoteType = "ROS")
| 30.6
| 61
| 0.732026
|
acfe04bcd3f30a63f37abe76e0596609c11fc930
| 5,481
|
py
|
Python
|
compressed_communication/aggregators/comparison_methods/qsgd.py
|
lamylio/federated
|
3f79e71344016ae5e5ec550557af25e5c169a934
|
[
"Apache-2.0"
] | 1
|
2022-03-16T02:13:39.000Z
|
2022-03-16T02:13:39.000Z
|
compressed_communication/aggregators/comparison_methods/qsgd.py
|
notminusone/federated
|
6a709f5598450232b918c046cfeba849f479d5cb
|
[
"Apache-2.0"
] | null | null | null |
compressed_communication/aggregators/comparison_methods/qsgd.py
|
notminusone/federated
|
6a709f5598450232b918c046cfeba849f479d5cb
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2022, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tff.aggregator for implementing QSGD."""
import collections
import tensorflow as tf
import tensorflow_compression as tfc
import tensorflow_federated as tff
from compressed_communication.aggregators.utils import quantize_utils
# TFF type spec for a length-2 int64 seed vector.
# NOTE(review): not referenced in the visible code of this module — confirm
# external callers use it before removing.
_SEED_TYPE = tff.TensorType(tf.int64, [2])
@tff.tf_computation
def get_bitstring_length(value):
  """Return the size in bits of an encoded (bitstring, norm) pair."""
  bitstring, _ = value
  byte_count = tf.strings.length(bitstring, unit="BYTE")
  # 32 bits for the float32 norm plus 8 bits per encoded byte.
  return 32. + 8. * tf.cast(byte_count, dtype=tf.float64)
class QSGDFactory(tff.aggregators.UnweightedAggregationFactory):
  """Aggregator that implements QSGD.

  Expects `value_type` to be a `TensorType`.

  Paper: https://arxiv.org/abs/1610.02132
  """

  def __init__(self, num_steps):
    """Initializer for QSGDFactory.

    Defines the initial quantization step size, as well as what type of
    quantization should be applied and what normalization (if any) should be
    used to scale client updates.

    Args:
      num_steps: Float that parametrizes the quantization levels,
        equal to the number of steps.
    """
    self._num_steps = num_steps

  def create(self, value_type):
    # Only flat float tensors are supported.
    if not tff.types.is_structure_of_floats(
        value_type) or not value_type.is_tensor():
      raise ValueError("Expect value_type to be a float tensor, "
                       f"found {value_type}.")

    @tff.tf_computation(value_type)
    def quantize_encode(value):
      # Per-client: stochastically quantize the update with a step size of
      # norm/num_steps, run-length+gamma encode it, and report distortion
      # and sparsity metrics alongside the encoding.
      # NOTE(review): both seed components come from separate tf.timestamp()
      # calls and are typically (near-)identical — confirm this seeding
      # scheme is intended.
      seed = tf.cast(tf.stack([tf.timestamp() * 1e6, tf.timestamp() * 1e6]),
                     dtype=tf.int64)
      norm = tf.norm(value, ord=2)
      q_step_size = norm / tf.cast(self._num_steps, tf.float32)
      quantized_value = quantize_utils.stochastic_quantize(
          value, q_step_size, seed)
      dequantized_value = quantize_utils.uniform_dequantize(
          quantized_value, q_step_size, None)
      value_size = tf.size(quantized_value, out_type=tf.float32)
      # Mean squared error introduced by quantization.
      distortion = tf.reduce_sum(
          tf.square(value - dequantized_value)) / value_size
      value_nonzero_ct = tf.math.count_nonzero(
          quantized_value, dtype=tf.float32)
      # Fraction of elements quantized to exactly zero.
      sparsity = (value_size - value_nonzero_ct) / value_size
      # The norm travels with the bitstring so the server can dequantize.
      encoded_value = (tfc.run_length_gamma_encode(data=quantized_value), norm)
      return encoded_value, distortion, sparsity

    def dequantize(value, norm):
      # Invert quantization using the client-reported norm.
      q_step_size = norm / tf.cast(self._num_steps, tf.float32)
      return quantize_utils.uniform_dequantize(value, q_step_size, None)

    def sum_encoded_value(value):
      # Decode each client's bitstring and sum the dequantized tensors via
      # tff.federated_aggregate.
      @tff.tf_computation
      def get_accumulator():
        return tf.zeros(shape=value_type.shape, dtype=tf.float32)

      @tff.tf_computation
      def decode_accumulate_values(accumulator, encoded_value):
        bitstring, norm = encoded_value
        decoded_value = tfc.run_length_gamma_decode(code=bitstring,
                                                    shape=value_type.shape)
        dequantized_value = dequantize(decoded_value, norm)
        return accumulator + dequantized_value

      @tff.tf_computation
      def merge_decoded_values(decoded_value_1, decoded_value_2):
        return decoded_value_1 + decoded_value_2

      @tff.tf_computation
      def report_decoded_summation(summed_decoded_values):
        return summed_decoded_values

      return tff.federated_aggregate(
          value,
          zero=get_accumulator(),
          accumulate=decode_accumulate_values,
          merge=merge_decoded_values,
          report=report_decoded_summation)

    @tff.federated_computation()
    def init_fn():
      # This aggregator is stateless.
      return tff.federated_value((), tff.SERVER)

    @tff.federated_computation(init_fn.type_signature.result,
                               tff.type_at_clients(value_type))
    def next_fn(state, value):
      # One round: encode on clients, average the metrics, and sum the
      # decoded updates on the server.
      encoded_value, distortion, sparsity = tff.federated_map(
          quantize_encode, value)
      avg_distortion = tff.federated_mean(distortion)
      avg_sparsity = tff.federated_mean(sparsity)
      bitstring_lengths = tff.federated_map(get_bitstring_length, encoded_value)
      avg_bitstring_length = tff.federated_mean(bitstring_lengths)
      num_elements = tff.federated_mean(tff.federated_map(
          tff.tf_computation(lambda x: tf.size(x, out_type=tf.float64)), value))
      # Average bits per element; divide_no_nan guards empty tensors.
      avg_bitrate = tff.federated_map(
          tff.tf_computation(
              lambda x, y: tf.math.divide_no_nan(x, y, name="tff_divide")),
          (avg_bitstring_length, num_elements))
      decoded_value = sum_encoded_value(encoded_value)
      return tff.templates.MeasuredProcessOutput(
          state=state,
          result=decoded_value,
          measurements=tff.federated_zip(
              collections.OrderedDict(avg_bitrate=avg_bitrate,
                                      avg_distortion=avg_distortion,
                                      avg_sparsity=avg_sparsity)))

    return tff.templates.AggregationProcess(init_fn, next_fn)
| 37.285714
| 80
| 0.691662
|
acfe0534070ce796af9f7526601c908942f05d46
| 5,115
|
py
|
Python
|
bindings/python/src/cloudsmith_api/models/packages_validateupload_luarocks.py
|
cloudsmith-io/cloudsmith-api
|
bc747fa6ee1d86485e334b08f65687630b3fd87c
|
[
"Apache-2.0"
] | 9
|
2018-07-02T15:21:40.000Z
|
2021-11-24T03:44:39.000Z
|
bindings/python/src/cloudsmith_api/models/packages_validateupload_luarocks.py
|
cloudsmith-io/cloudsmith-api
|
bc747fa6ee1d86485e334b08f65687630b3fd87c
|
[
"Apache-2.0"
] | 8
|
2019-01-08T22:06:12.000Z
|
2022-03-16T15:02:37.000Z
|
bindings/python/src/cloudsmith_api/models/packages_validateupload_luarocks.py
|
cloudsmith-io/cloudsmith-api
|
bc747fa6ee1d86485e334b08f65687630b3fd87c
|
[
"Apache-2.0"
] | 1
|
2021-12-06T19:08:05.000Z
|
2021-12-06T19:08:05.000Z
|
# coding: utf-8
"""
Cloudsmith API
The API to the Cloudsmith Service
OpenAPI spec version: v1
Contact: support@cloudsmith.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class PackagesValidateuploadLuarocks(object):
    """Swagger model for validating a LuaRocks package upload.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    # Attribute name -> declared swagger type.
    swagger_types = {
        'package_file': 'str',
        'republish': 'bool',
        'tags': 'str'
    }

    # Attribute name -> JSON key in the API definition.
    attribute_map = {
        'package_file': 'package_file',
        'republish': 'republish',
        'tags': 'tags'
    }

    def __init__(self, package_file=None, republish=None, tags=None):
        """Build a PackagesValidateuploadLuarocks model.

        `package_file` is required and validated by its setter; `republish`
        and `tags` are optional and left unset when not supplied.
        """
        self._package_file = None
        self._republish = None
        self._tags = None
        self.package_file = package_file
        if republish is not None:
            self.republish = republish
        if tags is not None:
            self.tags = tags

    @property
    def package_file(self):
        """str: The primary file for the package."""
        return self._package_file

    @package_file.setter
    def package_file(self, package_file):
        """Set the primary file for the package; rejects None."""
        if package_file is None:
            raise ValueError("Invalid value for `package_file`, must not be `None`")
        self._package_file = package_file

    @property
    def republish(self):
        """bool: Whether the upload overwrites packages with the same attributes."""
        return self._republish

    @republish.setter
    def republish(self, republish):
        """Set the republish flag (no validation required)."""
        self._republish = republish

    @property
    def tags(self):
        """str: Comma-separated list of tags to add to the package."""
        return self._tags

    @tags.setter
    def tags(self, tags):
        """Set the comma-separated tags string."""
        self._tags = tags

    def to_dict(self):
        """Return the model properties as a dict, recursing into sub-models."""
        def _convert(val):
            # Sub-models expose to_dict; lists/dicts are converted element-wise.
            if isinstance(val, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in val]
            if hasattr(val, "to_dict"):
                return val.to_dict()
            if isinstance(val, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in val.items()}
            return val

        return {name: _convert(getattr(self, name)) for name in self.swagger_types}

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when their attribute dicts match."""
        return (isinstance(other, PackagesValidateuploadLuarocks)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 27.95082
| 155
| 0.587488
|
acfe05977e8ddf58d422bc9b999988a715e540c9
| 2,169
|
py
|
Python
|
murano/packages/versions/mpl_v1.py
|
OndrejVojta/murano
|
cf995586e0d11233694ce097bd9754a60149d9cd
|
[
"Apache-2.0"
] | 1
|
2015-02-14T16:21:07.000Z
|
2015-02-14T16:21:07.000Z
|
murano/packages/versions/mpl_v1.py
|
OndrejVojta/murano
|
cf995586e0d11233694ce097bd9754a60149d9cd
|
[
"Apache-2.0"
] | null | null | null |
murano/packages/versions/mpl_v1.py
|
OndrejVojta/murano
|
cf995586e0d11233694ce097bd9754a60149d9cd
|
[
"Apache-2.0"
] | 1
|
2016-04-30T07:27:52.000Z
|
2016-04-30T07:27:52.000Z
|
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import murano.packages.application_package
import murano.packages.exceptions as e
import murano.packages.mpl_package
# noinspection PyProtectedMember
def load(package, yaml_content):
    """Populate *package* metadata attributes from a parsed MuranoPL manifest.

    Args:
        package: package object whose private metadata fields are filled in.
        yaml_content: dict parsed from the package's manifest YAML.

    Raises:
        e.PackageFormatError: when FullName is missing or invalid, or when
            Type is not one of the supported package types.
    """
    package._full_name = yaml_content.get('FullName')
    if not package._full_name:
        # Use the module alias `e` consistently (it aliases
        # murano.packages.exceptions, see the imports above).
        raise e.PackageFormatError('FullName not specified')
    _check_full_name(package._full_name)
    package._package_type = yaml_content.get('Type')
    if not package._package_type or package._package_type not in \
            murano.packages.application_package.PackageTypes.ALL:
        raise e.PackageFormatError('Invalid Package Type')
    # Optional fields: fall back to sensible defaults where the manifest
    # omits them (display name defaults to the full name, UI to ui.yaml).
    package._display_name = yaml_content.get('Name', package._full_name)
    package._description = yaml_content.get('Description')
    package._author = yaml_content.get('Author')
    package._supplier = yaml_content.get('Supplier') or {}
    package._classes = yaml_content.get('Classes')
    package._ui = yaml_content.get('UI', 'ui.yaml')
    package._logo = yaml_content.get('Logo')
    package._tags = yaml_content.get('Tags')
def create(source_directory, content, loader):
    """Factory hook: build a MuranoPlPackage for this manifest version."""
    return murano.packages.mpl_package.MuranoPlPackage(
        source_directory, content, loader)
def _check_full_name(full_name):
error = murano.packages.exceptions.PackageFormatError(
'Invalid FullName')
if re.match(r'^[\w\.]+$', full_name):
if full_name.startswith('.') or full_name.endswith('.'):
raise error
if '..' in full_name:
raise error
else:
raise error
| 35.557377
| 72
| 0.723375
|
acfe076376b28ccc42dbb11d91f8003131f269ce
| 8,759
|
py
|
Python
|
django-rgd/rgd/migrations/0001_initial.py
|
ResonantGeoData/ResonantGeoData
|
72b3d4085cc5700d0ad5556f31b7eb96ed2d3b8a
|
[
"Apache-2.0"
] | 40
|
2020-05-07T17:15:26.000Z
|
2022-02-27T14:45:04.000Z
|
django-rgd/rgd/migrations/0001_initial.py
|
ResonantGeoData/ResonantGeoData
|
72b3d4085cc5700d0ad5556f31b7eb96ed2d3b8a
|
[
"Apache-2.0"
] | 408
|
2020-05-07T15:10:35.000Z
|
2022-03-30T03:08:47.000Z
|
django-rgd/rgd/migrations/0001_initial.py
|
ResonantGeoData/ResonantGeoData
|
72b3d4085cc5700d0ad5556f31b7eb96ed2d3b8a
|
[
"Apache-2.0"
] | 3
|
2021-04-12T20:16:22.000Z
|
2021-06-22T14:03:46.000Z
|
# Generated by Django 3.2.4 on 2021-06-24 22:52
from django.conf import settings
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import rgd.models.mixins
import rgd.utility
import s3_file_field.fields
class Migration(migrations.Migration):
    """Initial schema for the rgd app.

    Auto-generated by Django 3.2.4; do not edit by hand — create a new
    migration instead. Creates Collection, SpatialEntry, WhitelistedEmail,
    CollectionPermission, ChecksumFile, and SpatialAsset, plus two
    constraints.
    """

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # Named grouping of files used for access control.
        migrations.CreateModel(
            name='Collection',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('name', models.CharField(max_length=127)),
            ],
            options={
                'default_related_name': 'collections',
            },
            bases=(models.Model, rgd.models.mixins.PermissionPathMixin),
        ),
        # Base table for anything with a geospatial footprint/outline.
        migrations.CreateModel(
            name='SpatialEntry',
            fields=[
                ('spatial_id', models.AutoField(primary_key=True, serialize=False)),
                ('acquisition_date', models.DateTimeField(blank=True, default=None, null=True)),
                ('footprint', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
                ('outline', django.contrib.gis.db.models.fields.GeometryField(srid=4326)),
                (
                    'instrumentation',
                    models.CharField(
                        blank=True,
                        help_text='The instrumentation used to acquire these data.',
                        max_length=100,
                        null=True,
                    ),
                ),
            ],
        ),
        migrations.CreateModel(
            name='WhitelistedEmail',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('email', models.EmailField(max_length=254)),
            ],
        ),
        # Per-user role (reader/owner) on a Collection.
        migrations.CreateModel(
            name='CollectionPermission',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                (
                    'role',
                    models.SmallIntegerField(
                        choices=[(1, 'Reader'), (2, 'Owner')],
                        db_index=True,
                        default=1,
                        help_text='A "reader" can view assets in this collection. An "owner" can additionally add/remove other users, set their permissions, delete the collection, and add/remove other files.',
                    ),
                ),
                (
                    'collection',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='collection_permissions',
                        to='rgd.collection',
                    ),
                ),
                (
                    'user',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='collection_permissions',
                        to=settings.AUTH_USER_MODEL,
                    ),
                ),
            ],
            options={
                'default_related_name': 'collection_permissions',
            },
        ),
        # A tracked file, stored either as an uploaded S3 file or a URL.
        migrations.CreateModel(
            name='ChecksumFile',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True, primary_key=True, serialize=False, verbose_name='ID'
                    ),
                ),
                ('failure_reason', models.TextField(null=True)),
                (
                    'status',
                    models.CharField(
                        choices=[
                            ('created', 'Created but not queued'),
                            ('queued', 'Queued for processing'),
                            ('running', 'Processing'),
                            ('failed', 'Failed'),
                            ('success', 'Succeeded'),
                        ],
                        default='created',
                        max_length=20,
                    ),
                ),
                (
                    'created',
                    django_extensions.db.fields.CreationDateTimeField(
                        auto_now_add=True, verbose_name='created'
                    ),
                ),
                (
                    'modified',
                    django_extensions.db.fields.ModificationDateTimeField(
                        auto_now=True, verbose_name='modified'
                    ),
                ),
                ('name', models.CharField(blank=True, max_length=1000)),
                ('description', models.TextField(blank=True, null=True)),
                ('checksum', models.CharField(max_length=128)),
                ('validate_checksum', models.BooleanField(default=False)),
                ('last_validation', models.BooleanField(default=True)),
                ('type', models.IntegerField(choices=[(1, 'FileField'), (2, 'URL')], default=1)),
                (
                    'file',
                    s3_file_field.fields.S3FileField(
                        blank=True, null=True, upload_to=rgd.utility.uuid_prefix_filename
                    ),
                ),
                ('url', models.TextField(blank=True, null=True)),
                (
                    'collection',
                    models.ForeignKey(
                        blank=True,
                        null=True,
                        on_delete=django.db.models.deletion.SET_NULL,
                        related_name='checksumfiles',
                        related_query_name='checksumfiles',
                        to='rgd.collection',
                    ),
                ),
            ],
            bases=(models.Model, rgd.models.mixins.PermissionPathMixin),
        ),
        # SpatialEntry subclass tying a footprint to a set of files.
        migrations.CreateModel(
            name='SpatialAsset',
            fields=[
                (
                    'spatialentry_ptr',
                    models.OneToOneField(
                        auto_created=True,
                        on_delete=django.db.models.deletion.CASCADE,
                        parent_link=True,
                        primary_key=True,
                        serialize=False,
                        to='rgd.spatialentry',
                    ),
                ),
                (
                    'created',
                    django_extensions.db.fields.CreationDateTimeField(
                        auto_now_add=True, verbose_name='created'
                    ),
                ),
                (
                    'modified',
                    django_extensions.db.fields.ModificationDateTimeField(
                        auto_now=True, verbose_name='modified'
                    ),
                ),
                ('files', models.ManyToManyField(to='rgd.ChecksumFile')),
            ],
            options={
                'get_latest_by': 'modified',
                'abstract': False,
            },
            bases=('rgd.spatialentry', models.Model, rgd.models.mixins.PermissionPathMixin),
        ),
        # One permission row per (collection, user) pair.
        migrations.AddConstraint(
            model_name='collectionpermission',
            constraint=models.UniqueConstraint(fields=('collection', 'user'), name='unique_user'),
        ),
        # Enforce that exactly one of file/url is populated, matching `type`.
        migrations.AddConstraint(
            model_name='checksumfile',
            constraint=models.CheckConstraint(
                check=models.Q(
                    models.Q(
                        ('file__regex', '.+'),
                        ('type', 1),
                        models.Q(('url__in', ['', None]), ('url__isnull', True), _connector='OR'),
                    ),
                    models.Q(
                        ('type', 2),
                        models.Q(('url__isnull', False), ('url__regex', '.+')),
                        models.Q(('file__in', ['', None]), ('file__isnull', True), _connector='OR'),
                    ),
                    _connector='OR',
                ),
                name='rgd_checksumfile_file_source_value_matches_type',
            ),
        ),
    ]
| 38.416667
| 209
| 0.433611
|
acfe0849b6b17c38d5299279d774630236c02911
| 186
|
py
|
Python
|
third_party/django_summernote/__init__.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 39
|
2015-06-10T23:18:07.000Z
|
2021-10-21T04:29:06.000Z
|
third_party/django_summernote/__init__.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 2
|
2016-08-22T12:38:10.000Z
|
2017-01-26T18:37:33.000Z
|
third_party/django_summernote/__init__.py
|
asysc2020/contentbox
|
5c155976e0ce7ea308d62293ab89624d97b21d09
|
[
"Apache-2.0"
] | 26
|
2015-06-10T22:09:15.000Z
|
2021-06-27T15:45:15.000Z
|
# Package version as a tuple for programmatic comparison.
version_info = (0, 5, 8)
# Dotted version string derived from the tuple above, e.g. "0.5.8".
__version__ = version = ".".join(str(part) for part in version_info)
__project__ = PROJECT = 'django-summernote'
__author__ = AUTHOR = "Park Hyunwoo <ez.amiryo@gmail.com>"
| 31
| 58
| 0.709677
|
acfe08ddec13c5b69fe8ea9d4d277a5db9ae8609
| 1,098
|
py
|
Python
|
src/openpersonen/api/data_classes/persoon.py
|
maykinmedia/open-personen
|
ddcf083ccd4eb864c5305bcd8bc75c6c64108272
|
[
"RSA-MD"
] | 2
|
2020-08-26T11:24:43.000Z
|
2021-07-28T09:46:40.000Z
|
src/openpersonen/api/data_classes/persoon.py
|
maykinmedia/open-personen
|
ddcf083ccd4eb864c5305bcd8bc75c6c64108272
|
[
"RSA-MD"
] | 153
|
2020-08-26T10:45:35.000Z
|
2021-12-10T17:33:16.000Z
|
src/openpersonen/api/data_classes/persoon.py
|
maykinmedia/open-personen
|
ddcf083ccd4eb864c5305bcd8bc75c6c64108272
|
[
"RSA-MD"
] | null | null | null |
from dataclasses import dataclass
from openpersonen.backends import backend
from .geboorte import Geboorte
from .naam import Naam
@dataclass
class Persoon:
    """Person record whose lookups are delegated to a configured backend.

    Subclasses set ``backend_function_name`` to the name of the backend
    function that returns dicts of constructor keyword arguments.
    """

    burgerservicenummer: str
    geheimhoudingPersoonsgegevens: bool
    naam: Naam
    geboorte: Geboorte

    # Name of the backend function used to fetch instances; set by subclasses.
    backend_function_name = None

    @classmethod
    def _backend_function(cls):
        """Resolve the backend callable or raise ValueError.

        Bug fix: the original ``getattr(backend, name)`` had no default, so a
        missing function raised AttributeError and the intended ValueError
        branch was unreachable.
        """
        func = getattr(backend, cls.backend_function_name, None)
        if not func:
            raise ValueError(f"No function found with name {cls.backend_function_name}")
        return func

    @classmethod
    def list(cls, bsn):
        """Return all instances the backend reports for *bsn*."""
        func = cls._backend_function()
        return [cls(**instance_dict) for instance_dict in func(bsn)]

    @classmethod
    def retrieve(cls, bsn, id):
        """Return the single instance identified by *id* for *bsn*.

        Raises:
            ValueError: when the backend returns no matching instances.
        """
        func = cls._backend_function()
        instance_dicts = func(bsn, id=id)
        if not instance_dicts:
            raise ValueError("No instances found")
        return cls(**instance_dicts[0])
| 24.954545
| 88
| 0.676685
|
acfe0945d0deb294e4aafa9ec020e776d5bcefc1
| 413
|
py
|
Python
|
appmap/test/data/trial/test/test_deferred.py
|
calvinsomething/appmap-python
|
7234f7cdb240eadfa74a1e6021bc8695ceb60179
|
[
"MIT"
] | 34
|
2020-12-08T20:57:11.000Z
|
2022-01-31T09:45:03.000Z
|
appmap/test/data/trial/test/test_deferred.py
|
calvinsomething/appmap-python
|
7234f7cdb240eadfa74a1e6021bc8695ceb60179
|
[
"MIT"
] | 105
|
2020-12-02T14:29:43.000Z
|
2022-02-02T10:00:04.000Z
|
appmap/test/data/trial/test/test_deferred.py
|
calvinsomething/appmap-python
|
7234f7cdb240eadfa74a1e6021bc8695ceb60179
|
[
"MIT"
] | 5
|
2020-11-30T01:18:17.000Z
|
2021-08-04T10:30:36.000Z
|
import time
from twisted.internet import defer
from twisted.internet import reactor
from twisted.trial import unittest
class TestDeferred(unittest.TestCase):
    """Trial test exercising a Deferred that intentionally fails."""
    def test_hello_world(self):
        # The Deferred fires after 1 second via the reactor; its callback
        # asserts False, but the `todo` marker below makes trial report the
        # failure as expected rather than as an error.
        d = defer.Deferred()
        def cb(_):
            self.assertTrue(False)
        d.addCallback(cb)
        reactor.callLater(1, d.callback, None)
        return d
    # Trial-specific marker: expected failure, not a real defect.
    test_hello_world.todo = "don't fix me"
| 19.666667
| 46
| 0.670702
|
acfe0959d1b4c8d8b2dfedcc1082ac091e6c75f4
| 1,249
|
py
|
Python
|
app.py
|
ikii123/ikii
|
9be4c076af83b0d7213852753656818847e09a07
|
[
"BSD-3-Clause"
] | null | null | null |
app.py
|
ikii123/ikii
|
9be4c076af83b0d7213852753656818847e09a07
|
[
"BSD-3-Clause"
] | null | null | null |
app.py
|
ikii123/ikii
|
9be4c076af83b0d7213852753656818847e09a07
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, TextSendMessage,
)
app = Flask(__name__)

# SECURITY: the LINE credentials below were hard-coded in source. Prefer the
# LINE_CHANNEL_ACCESS_TOKEN / LINE_CHANNEL_SECRET environment variables; the
# original literals remain as fallbacks so existing deployments keep working.
# These secrets are committed to version control and should be rotated.
line_bot_api = LineBotApi(os.environ.get(
    'LINE_CHANNEL_ACCESS_TOKEN',
    'MTn2latTZ4NmBnuah67007iRDPdliDVKkpxR1yb5IGpzTARdjzAqSnLmhkvew0EqfNs3wDSQuTc8j/DUfKCoPFpV3ECtur1KUxyiRd1jZjeS9JA7yJXlkuK6l6/WkCJEKDybBDiRMdFbYxtFlRYOmQdB04t89/1O/w1cDnyilFU='))
handler = WebhookHandler(os.environ.get(
    'LINE_CHANNEL_SECRET', 'adbb3952c8bc75b90664aa5ededbbbec'))
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
abort(400)
return 'OK'
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    """Echo handler: reply to each text message with the same text."""
    line_bot_api.reply_message(
        event.reply_token,
        TextSendMessage(text=event.message.text))
if __name__ == "__main__":
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
| 26.020833
| 201
| 0.738991
|
acfe09902c56f709286e8141ebb48598e49e7599
| 484
|
py
|
Python
|
extractor/fulltext/launch_single.py
|
arXiv/arxiv-fulltext
|
36008457022cde245d78b3ad91e0a95aa21bc420
|
[
"MIT"
] | 18
|
2019-03-01T02:51:45.000Z
|
2021-11-05T12:26:12.000Z
|
extractor/fulltext/launch_single.py
|
arXiv/arxiv-fulltext
|
36008457022cde245d78b3ad91e0a95aa21bc420
|
[
"MIT"
] | 6
|
2019-05-06T15:25:16.000Z
|
2019-07-31T20:11:36.000Z
|
extractor/fulltext/launch_single.py
|
arXiv/arxiv-fulltext
|
36008457022cde245d78b3ad91e0a95aa21bc420
|
[
"MIT"
] | 8
|
2019-01-10T22:01:58.000Z
|
2021-11-05T12:26:01.000Z
|
import os
import sys
# sys.path.append(".")
import logging
from fulltext import convert
log = logging.getLogger('fulltext')
if __name__ == '__main__':
    # CLI entry point: expects the document path as the sole argument and
    # writes the resulting fulltext path to stdout.
    if len(sys.argv) < 2:
        sys.exit('No file path specified')
    path = sys.argv[1].strip()
    try:
        log.info('Path: %s\n' % path)
        log.info('Path exists: %s\n' % str(os.path.exists(path)))
        textpath = convert(path)
    except Exception as err:
        # Surface any conversion failure as the process exit message.
        sys.exit(str(err))
    else:
        sys.stdout.write(textpath)
| 23.047619
| 65
| 0.613636
|
acfe0af524df69ba956e8f867c1600c3a6c7932a
| 1,929
|
py
|
Python
|
qiskit/providers/ibmq/job/circuitjob.py
|
Sahar2/qiskit-ibmq-provider
|
a7fa886f5b34123bf7bb903840e32b1bf4cc30b5
|
[
"Apache-2.0"
] | 1
|
2020-07-14T20:09:52.000Z
|
2020-07-14T20:09:52.000Z
|
qiskit/providers/ibmq/job/circuitjob.py
|
Sahar2/qiskit-ibmq-provider
|
a7fa886f5b34123bf7bb903840e32b1bf4cc30b5
|
[
"Apache-2.0"
] | null | null | null |
qiskit/providers/ibmq/job/circuitjob.py
|
Sahar2/qiskit-ibmq-provider
|
a7fa886f5b34123bf7bb903840e32b1bf4cc30b5
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Job specific for Circuits."""
from qiskit.providers import JobError
from qiskit.providers.jobstatus import JOB_FINAL_STATES
from .ibmqjob import IBMQJob
class CircuitJob(IBMQJob):
    """Job specific for use with Circuits.
    Note: this class is experimental, and currently only supports the
    customizations needed for using it with the manager (which implies
    initializing with a job_id:
    * _wait_for_completion()
    * status()
    * result()
    In general, the changes involve using a different `self._api.foo()` method
    for adjusting to the Circuits particularities.
    """
    def status(self):
        """Query the API for the current job status, cache and return it.

        Raises:
            JobError: if submission previously failed or the API query errors.
        """
        # Implies self._job_id is None
        if self._future_captured_exception is not None:
            raise JobError(str(self._future_captured_exception))
        # Final states never change; skip the API round-trip.
        if self._job_id is None or self._status in JOB_FINAL_STATES:
            return self._status
        try:
            # TODO: See result values
            api_response = self._api.circuit_job_status(self._job_id)
            self._update_status(api_response)
        # pylint: disable=broad-except
        except Exception as err:
            raise JobError(str(err))
        return self._status
    def _get_job(self):
        """Fetch the raw job payload via the Circuits-specific API endpoint."""
        if self._cancelled:
            raise JobError(
                'Job result impossible to retrieve. The job was cancelled.')
        return self._api.circuit_job_get(self._job_id)
| 31.112903
| 78
| 0.685329
|
acfe0b20400133a3ddd26cd4b58228709709f9ad
| 1,353
|
py
|
Python
|
postprocessing.py
|
philippbeer/m4_clustering
|
18cf1b9111f4236f0be152d2419c470840645acb
|
[
"MIT"
] | null | null | null |
postprocessing.py
|
philippbeer/m4_clustering
|
18cf1b9111f4236f0be152d2419c470840645acb
|
[
"MIT"
] | null | null | null |
postprocessing.py
|
philippbeer/m4_clustering
|
18cf1b9111f4236f0be152d2419c470840645acb
|
[
"MIT"
] | null | null | null |
"""
This module provides the methods for the processing of y_hat
"""
from typing import Dict
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
import config as cnf
def postprocess(y_test: np.ndarray,
                y_hat: np.ndarray,
                standardizers: Dict[int, MinMaxScaler],
                ts_order: pd.Series) -> pd.DataFrame:
    """
    Denormalize y_test and y_hat back to their original scales.

    Params:
    -------
    y_test : array with the test set (one row per series/window)
    y_hat : array with predicted y values (same layout as y_test)
    standardizers : dictionary of fitted scaler objects keyed by series name
    ts_order : series containing the order of the time series in y_hat/y_test

    Returns:
    --------
    df : dataframe with columns V1 (series name), step, y, y_hat
    """
    step = 1
    # Collect per-series frames and concatenate once at the end:
    # DataFrame.append in a loop copies the accumulated frame each iteration
    # (O(n^2)) and is removed in pandas >= 2.0.
    frames = []
    for i in range(ts_order.shape[0]):
        ts_name = ts_order[i]  # getting name of time series
        scaler = standardizers[ts_name]  # look up scaler for time series
        y_test_rescaled = scaler.inverse_transform(y_test[i].reshape(-1, 1))
        y_hat_rescaled = scaler.inverse_transform(y_hat[i].reshape(-1, 1))
        d = {'V1': ts_name,
             'step': step,
             'y': y_test_rescaled.reshape(y_test_rescaled.shape[0]),
             'y_hat': y_hat_rescaled.reshape(y_hat_rescaled.shape[0])}
        frames.append(pd.DataFrame(d, index=range(y_test_rescaled.shape[0])))
        # updating forecasting steps (wraps back to 1 after STEPS_AHEAD)
        if step % cnf.STEPS_AHEAD == 0:
            step = 1
        else:
            step += 1
    # pd.concat of an empty list raises, so preserve the original empty-input
    # behaviour of returning an empty DataFrame.
    return pd.concat(frames) if frames else pd.DataFrame()
| 24.160714
| 71
| 0.71323
|
acfe0bb1abbefc09b121c2544adc3aeee1e5e93e
| 7,691
|
py
|
Python
|
myems-api/core/menu.py
|
18600575648/myems
|
38ab7d509b5ab275a4df0333e6256c586abdfbf9
|
[
"MIT"
] | null | null | null |
myems-api/core/menu.py
|
18600575648/myems
|
38ab7d509b5ab275a4df0333e6256c586abdfbf9
|
[
"MIT"
] | null | null | null |
myems-api/core/menu.py
|
18600575648/myems
|
38ab7d509b5ab275a4df0333e6256c586abdfbf9
|
[
"MIT"
] | null | null | null |
import falcon
import simplejson as json
import mysql.connector
import config
from core.useractivity import user_logger, access_control
class MenuCollection:
    """Falcon resource listing every menu entry (GET on the menus collection)."""
    @staticmethod
    def __init__():
        """"Initializes MenuCollection"""
        pass

    @staticmethod
    def on_options(req, resp):
        # CORS pre-flight support.
        resp.status = falcon.HTTP_200

    @staticmethod
    def on_get(req, resp):
        """Return all menus, ordered by id, as a JSON list of dicts."""
        cnx = mysql.connector.connect(**config.myems_system_db)
        cursor = cnx.cursor()
        try:
            query = (" SELECT id, name, route, parent_menu_id, is_hidden "
                     " FROM tbl_menus "
                     " ORDER BY id ")
            cursor.execute(query)
            rows_menus = cursor.fetchall()
        finally:
            # Close DB resources even when the query raises, so connections
            # do not leak on error paths (the original leaked here).
            cursor.close()
            cnx.close()
        result = list()
        if rows_menus is not None and len(rows_menus) > 0:
            for row in rows_menus:
                temp = {"id": row[0],
                        "name": row[1],
                        "route": row[2],
                        "parent_menu_id": row[3],
                        "is_hidden": bool(row[4])}
                result.append(temp)
        resp.text = json.dumps(result)
class MenuItem:
    """Falcon resource for a single menu entry (GET detail, PUT visibility)."""
    @staticmethod
    def __init__():
        """"Initializes MenuItem"""
        pass
    @staticmethod
    def on_options(req, resp, id_):
        # CORS pre-flight support.
        resp.status = falcon.HTTP_200
    @staticmethod
    def on_get(req, resp, id_):
        """Return one menu row as JSON, or null when not found."""
        # id_ arrives as a URL path segment (string); must be a positive int.
        if not id_.isdigit() or int(id_) <= 0:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_MENU_ID')
        cnx = mysql.connector.connect(**config.myems_system_db)
        cursor = cnx.cursor()
        query = (" SELECT id, name, route, parent_menu_id, is_hidden "
                 " FROM tbl_menus "
                 " WHERE id=%s ")
        cursor.execute(query, (id_,))
        rows_menu = cursor.fetchone()
        # result stays None (serialized as JSON null) when no row matches.
        result = None
        if rows_menu is not None and len(rows_menu) > 0:
            result = {"id": rows_menu[0],
                      "name": rows_menu[1],
                      "route": rows_menu[2],
                      "parent_menu_id": rows_menu[3],
                      "is_hidden": bool(rows_menu[4])}
        cursor.close()
        cnx.close()
        resp.text = json.dumps(result)
    @staticmethod
    @user_logger
    def on_put(req, resp, id_):
        """Handles PUT requests"""
        # Only the is_hidden flag is updatable; requires an authorized caller.
        access_control(req)
        try:
            raw_json = req.stream.read().decode('utf-8')
        except Exception as ex:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.EXCEPTION', description=ex)
        if not id_.isdigit() or int(id_) <= 0:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_MENU_ID')
        # NOTE(review): json.loads / missing 'data' key raise unhandled here,
        # surfacing as a 500 rather than a 400 — confirm intended behavior.
        new_values = json.loads(raw_json)
        if 'is_hidden' not in new_values['data'].keys() or \
                not isinstance(new_values['data']['is_hidden'], bool):
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_IS_HIDDEN')
        is_hidden = new_values['data']['is_hidden']
        cnx = mysql.connector.connect(**config.myems_system_db)
        cursor = cnx.cursor()
        update_row = (" UPDATE tbl_menus "
                      " SET is_hidden = %s "
                      " WHERE id = %s ")
        cursor.execute(update_row, (is_hidden,
                                    id_))
        cnx.commit()
        cursor.close()
        cnx.close()
        resp.status = falcon.HTTP_200
class MenuChildrenCollection:
    """Falcon resource returning one menu plus its direct children."""
    @staticmethod
    def __init__():
        """"Initializes MenuChildrenCollection"""
        pass
    @staticmethod
    def on_options(req, resp, id_):
        # CORS pre-flight support.
        resp.status = falcon.HTTP_200
    @staticmethod
    def on_get(req, resp, id_):
        """Return {'current': <menu>, 'children': [<menus>]} for menu id_."""
        if not id_.isdigit() or int(id_) <= 0:
            raise falcon.HTTPError(falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_MENU_ID')
        cnx = mysql.connector.connect(**config.myems_system_db)
        cursor = cnx.cursor()
        # Fetch the requested menu; 404 when it does not exist.
        query = (" SELECT id, name, route, parent_menu_id, is_hidden "
                 " FROM tbl_menus "
                 " WHERE id = %s ")
        cursor.execute(query, (id_,))
        row_current_menu = cursor.fetchone()
        if row_current_menu is None:
            cursor.close()
            cnx.close()
            raise falcon.HTTPError(falcon.HTTP_404, title='API.NOT_FOUND',
                                   description='API.MENU_NOT_FOUND')
        # Build an id -> {id, name} lookup over all menus, used to resolve
        # parent references below.
        query = (" SELECT id, name "
                 " FROM tbl_menus "
                 " ORDER BY id ")
        cursor.execute(query)
        rows_menus = cursor.fetchall()
        menu_dict = dict()
        if rows_menus is not None and len(rows_menus) > 0:
            for row in rows_menus:
                menu_dict[row[0]] = {"id": row[0],
                                     "name": row[1]}
        result = dict()
        result['current'] = dict()
        result['current']['id'] = row_current_menu[0]
        result['current']['name'] = row_current_menu[1]
        result['current']['parent_menu'] = menu_dict.get(row_current_menu[3], None)
        result['current']['is_hidden'] = bool(row_current_menu[4])
        result['children'] = list()
        # Fetch the direct children of the requested menu.
        query = (" SELECT id, name, route, parent_menu_id, is_hidden "
                 " FROM tbl_menus "
                 " WHERE parent_menu_id = %s "
                 " ORDER BY id ")
        cursor.execute(query, (id_, ))
        rows_menus = cursor.fetchall()
        if rows_menus is not None and len(rows_menus) > 0:
            for row in rows_menus:
                parent_menu = menu_dict.get(row[3], None)
                meta_result = {"id": row[0],
                               "name": row[1],
                               "parent_menu": parent_menu,
                               "is_hidden": bool(row[4])}
                result['children'].append(meta_result)
        cursor.close()
        cnx.close()
        resp.text = json.dumps(result)
class MenuWebCollection:
    """Falcon resource returning the visible (non-hidden) menu tree for the web UI."""
    @staticmethod
    def __init__():
        """"Initializes MenuWebCollection"""
        pass

    @staticmethod
    def on_options(req, resp):
        # CORS pre-flight support.
        resp.status = falcon.HTTP_200

    @staticmethod
    def on_get(req, resp):
        """Return {first_level_route: [child_route, ...]} for all visible menus."""
        cnx = mysql.connector.connect(**config.myems_system_db)
        cursor = cnx.cursor()
        try:
            # Top-level menus: no parent and not hidden.
            query = (" SELECT id, route, parent_menu_id "
                     " FROM tbl_menus "
                     " WHERE parent_menu_id IS NULL AND is_hidden = 0 ")
            cursor.execute(query)
            rows_menus = cursor.fetchall()
            first_level_routes = {}
            if rows_menus is not None and len(rows_menus) > 0:
                for row in rows_menus:
                    first_level_routes[row[0]] = {
                        'route': row[1],
                        'children': []
                    }
            # Second-level menus: attach each visible child to its parent;
            # children of hidden parents are dropped by the membership check.
            query = (" SELECT id, route, parent_menu_id "
                     " FROM tbl_menus "
                     " WHERE parent_menu_id IS NOT NULL AND is_hidden = 0 ")
            cursor.execute(query)
            rows_menus = cursor.fetchall()
            if rows_menus is not None and len(rows_menus) > 0:
                for row in rows_menus:
                    if row[2] in first_level_routes.keys():
                        first_level_routes[row[2]]['children'].append(row[1])
        finally:
            # Close DB resources even when a query raises, so connections
            # do not leak on error paths (the original leaked here).
            cursor.close()
            cnx.close()
        result = dict()
        for _id, item in first_level_routes.items():
            result[item['route']] = item['children']
        resp.text = json.dumps(result)
| 32.315126
| 90
| 0.532961
|
acfe0bbb77df52198318856e16a75e7d93262f9d
| 2,881
|
py
|
Python
|
paragen/generators/abstract_generator.py
|
godweiyang/ParaGen
|
9665d1244ea38a41fc06b4e0a7f6411985e2221f
|
[
"Apache-2.0"
] | 50
|
2022-01-18T07:25:46.000Z
|
2022-03-14T13:06:18.000Z
|
paragen/generators/abstract_generator.py
|
JiangtaoFeng/ParaGen
|
509334bf16e3674e009bb9dc37ecc38ae3b5c977
|
[
"Apache-2.0"
] | 2
|
2022-01-19T09:36:42.000Z
|
2022-02-23T07:16:02.000Z
|
paragen/generators/abstract_generator.py
|
JiangtaoFeng/ParaGen
|
509334bf16e3674e009bb9dc37ecc38ae3b5c977
|
[
"Apache-2.0"
] | 6
|
2022-01-19T09:28:53.000Z
|
2022-03-10T10:20:08.000Z
|
import logging
logger = logging.getLogger(__name__)
import torch
import torch.nn as nn
from paragen.utils.ops import inspect_fn
from paragen.utils.runtime import Environment
from paragen.utils.io import UniIO, mkdir
class AbstractGenerator(nn.Module):
    """
    AbstractGenerator wrap a model with inference algorithms.
    It can be directly exported and used for inference or serving.
    Args:
        path: path to restore traced model
    """
    def __init__(self, path):
        super().__init__()
        self._path = path
        # Lazily populated: exactly one of _traced_model / _model is used,
        # depending on whether a serialized TorchScript model is loaded.
        self._traced_model = None
        self._model = None
        self._mode = 'infer'
    def build(self, *args, **kwargs):
        """
        Build or load a generator

        Loads a traced model when `path` was given, otherwise delegates to
        `build_from_model`; then moves the module to the configured device.
        """
        if self._path is not None:
            self.load()
        else:
            self.build_from_model(*args, **kwargs)
        self._env = Environment()
        if self._env.device.startswith('cuda'):
            logger.info('move model to {}'.format(self._env.device))
            self.cuda(self._env.device)
    def build_from_model(self, *args, **kwargs):
        """
        Build generator from model

        Subclasses must implement this to wrap their concrete model.
        """
        raise NotImplementedError
    def forward(self, *args, **kwargs):
        """
        Infer a sample in evaluation mode.
        We auto detect whether the inference model is traced, and use appropriate model to perform inference.
        """
        if self._traced_model is not None:
            return self._traced_model(*args, **kwargs)
        else:
            return self._forward(*args, **kwargs)
    def _forward(self, *args, **kwargs):
        """
        Infer a sample in evaluation mode with torch model.
        """
        raise NotImplementedError
    def export(self, path, net_input, **kwargs):
        """
        Export self to `path` by export model directly
        Args:
            path: path to store serialized model
            net_input: fake net_input for tracing the model
        """
        self.eval()
        # Trace with no_grad so the exported graph carries no autograd state.
        with torch.no_grad():
            logger.info('trace model {}'.format(self._model.__class__.__name__))
            model = torch.jit.trace_module(self._model, {'forward': net_input})
        mkdir(path)
        logger.info('save model to {}/model'.format(path))
        with UniIO('{}/model'.format(path), 'wb') as fout:
            torch.jit.save(model, fout)
    def load(self):
        """
        Load a serialized model from path
        """
        logger.info('load model from {}'.format(self._path))
        with UniIO(self._path, 'rb') as fin:
            self._traced_model = torch.jit.load(fin)
    def reset(self, *args, **kwargs):
        """
        Reset generator states.
        """
        # No-op by default; stateful subclasses override this.
        pass
    @property
    def input_slots(self):
        """
        Generator input slots that is auto-detected
        """
        return inspect_fn(self._forward)
| 28.245098
| 109
| 0.58799
|
acfe0c0ae868dab08e3ae88fe9c10b19b55dbc01
| 2,640
|
py
|
Python
|
web scraping/flipcartCrawling/flipcartCrawling/spiders/flipcartClothing.py
|
NirmalSilwal/Python-
|
6d23112db8366360f0b79bdbf21252575e8eab3e
|
[
"MIT"
] | 32
|
2020-04-05T08:29:40.000Z
|
2022-01-08T03:10:00.000Z
|
web scraping/flipcartCrawling/flipcartCrawling/spiders/flipcartClothing.py
|
NirmalSilwal/Python-
|
6d23112db8366360f0b79bdbf21252575e8eab3e
|
[
"MIT"
] | 3
|
2021-06-02T04:09:11.000Z
|
2022-03-02T14:55:03.000Z
|
web scraping/flipcartCrawling/flipcartCrawling/spiders/flipcartClothing.py
|
NirmalSilwal/Python-
|
6d23112db8366360f0b79bdbf21252575e8eab3e
|
[
"MIT"
] | 3
|
2020-07-13T05:44:04.000Z
|
2021-03-03T07:07:58.000Z
|
import scrapy
from ..items import FlipcartcrawlingItem
class FlipcartclothingSpider(scrapy.Spider):
    """Crawl Flipkart listing pages and yield one item per product card."""
    name = 'flipcartClothing'
    page_number = 2
    # TODO handle for multiple url at same time
    start_urls = [
        # 'https://www.flipkart.com/clothing-and-accessories/topwear/pr?sid=clo%2Cash&otracker=categorytree&p%5B%5D=facets.ideal_for%255B%255D%3DMen&page=1'
        'https://www.flipkart.com/womens-footwear/pr?sid=osp,iko&otracker=nmenu_sub_Women_0_Footwear&page=1'
    ]

    def parse(self, response):
        """Extract product fields from one listing page, then follow pagination.

        Bug fix: a fresh FlipcartcrawlingItem is now created for each product
        card. The original built a single item before the loop and yielded
        the same mutable object repeatedly, so every yielded reference ended
        up carrying the last card's data once pipelines processed them.
        """
        all_prod_categories = ['mens topwear', 'womens footwear']
        all_responses = response.css('._373qXS')
        for myresponse in all_responses:
            items = FlipcartcrawlingItem()  # one item instance per card
            name = myresponse.css('.IRpwTa::text').extract()
            brand = myresponse.css('._2WkVRV::text').extract()
            original_price = myresponse.css('._3I9_wc::text')[1::2].extract()
            original_price = [float(i.replace(',', '')) for i in original_price]
            sale_price = myresponse.css('._30jeq3::text').extract()
            # Strip the leading currency symbol before converting.
            sale_price = [float(i[1:].replace(',', '')) for i in sale_price]
            # TODO, resolve it as it is giving blank url
            image_url = myresponse.css('._2r_T1I::attr(src)').extract()
            # image_url = myresponse.css('._2r_T1I')
            # image_url = myresponse.xpath('//img[contains(@class,"._2r_T1I")]/@src').extract()[0]
            # response.selector.xpath('//img/@src').extract()
            product_page_url = response.url
            # for men
            # product_category = all_prod_categories[0]
            # for women
            product_category = all_prod_categories[1]
            items['name'] = name
            items['brand'] = brand
            items['original_price'] = original_price
            items['sale_price'] = sale_price
            items['image_url'] = image_url
            items['product_page_url'] = product_page_url
            items['product_category'] = product_category
            yield items
        # for mens
        # next_page = "https://www.flipkart.com/clothing-and-accessories/topwear/pr?sid=clo%2Cash&otracker=categorytree&p%5B%5D=facets.ideal_for%255B%255D%3DMen&page=" + str(FlipcartclothingSpider.page_number)
        # for womens
        next_page = 'https://www.flipkart.com/womens-footwear/pr?sid=osp%2Ciko&otracker=nmenu_sub_Women_0_Footwear&page=' + str(FlipcartclothingSpider.page_number)
        if FlipcartclothingSpider.page_number <= 25:
            FlipcartclothingSpider.page_number += 1
            yield response.follow(next_page, callback=self.parse)
| 38.26087
| 209
| 0.637879
|
acfe0eab2145a7a8e989e3f1d3e03dba482388e3
| 3,761
|
py
|
Python
|
tests/core/test_acceptor.py
|
fisabiliyusri/proxy
|
29934503251b704813ef3e7ed8c2a5ae69448c8a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/core/test_acceptor.py
|
fisabiliyusri/proxy
|
29934503251b704813ef3e7ed8c2a5ae69448c8a
|
[
"BSD-3-Clause"
] | 8
|
2022-01-23T10:51:59.000Z
|
2022-03-29T22:11:57.000Z
|
tests/core/test_acceptor.py
|
fisabiliyusri/proxy
|
29934503251b704813ef3e7ed8c2a5ae69448c8a
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
proxy.py
~~~~~~~~
⚡⚡⚡ Fast, Lightweight, Pluggable, TLS interception capable proxy server focused on
Network monitoring, controls & Application development, testing, debugging.
:copyright: (c) 2013-present by Abhinav Singh and contributors.
:license: BSD, see LICENSE for more details.
"""
import socket
import selectors
import multiprocessing
import unittest
from unittest import mock
from proxy.common.flag import FlagParser
from proxy.core.acceptor import Acceptor
class TestAcceptor(unittest.TestCase):
    """Tests for Acceptor.run() with fully mocked sockets, selectors and pipes."""

    def setUp(self) -> None:
        """Build an Acceptor in threaded mode wired to mock queues and locks."""
        self.acceptor_id = 1
        self.pipe = mock.MagicMock()
        self.work_klass = mock.MagicMock()
        self.flags = FlagParser.initialize(
            threaded=True,
            work_klass=self.work_klass,
            local_executor=0,
        )
        self.acceptor = Acceptor(
            idd=self.acceptor_id,
            fd_queue=self.pipe[1],
            flags=self.flags,
            lock=multiprocessing.Lock(),
            executor_queues=[],
            executor_pids=[],
            executor_locks=[],
        )

    # NOTE: @mock.patch decorators are applied bottom-up, so the innermost
    # patch (recv_handle) arrives as the first mock argument.
    @mock.patch('selectors.DefaultSelector')
    @mock.patch('socket.fromfd')
    @mock.patch('proxy.core.acceptor.acceptor.recv_handle')
    def test_continues_when_no_events(
        self,
        mock_recv_handle: mock.Mock,
        mock_fromfd: mock.Mock,
        mock_selector: mock.Mock,
    ) -> None:
        """run() must not accept or spawn work when select() yields no events."""
        fileno = 10
        conn = mock.MagicMock()
        addr = mock.MagicMock()
        sock = mock_fromfd.return_value
        mock_fromfd.return_value.accept.return_value = (conn, addr)
        mock_recv_handle.return_value = fileno
        selector = mock_selector.return_value
        # First select() returns no events; the second raises to exit run().
        selector.select.side_effect = [[], KeyboardInterrupt()]
        self.acceptor.run()
        sock.accept.assert_not_called()
        self.flags.work_klass.assert_not_called()

    @mock.patch('threading.Thread')
    @mock.patch('selectors.DefaultSelector')
    @mock.patch('socket.fromfd')
    @mock.patch('proxy.core.acceptor.acceptor.recv_handle')
    def test_accepts_client_from_server_socket(
        self,
        mock_recv_handle: mock.Mock,
        mock_fromfd: mock.Mock,
        mock_selector: mock.Mock,
        mock_thread: mock.Mock,
    ) -> None:
        """run() must accept a connection, start work in a thread, then clean up."""
        fileno = 10
        conn = mock.MagicMock()
        addr = mock.MagicMock()
        sock = mock_fromfd.return_value
        mock_fromfd.return_value.accept.return_value = (conn, addr)
        mock_recv_handle.return_value = fileno
        self.pipe[1].recv.return_value = 1
        # Raising from Thread.start() terminates run() after one accept cycle.
        mock_thread.return_value.start.side_effect = KeyboardInterrupt()
        mock_key = mock.MagicMock()
        type(mock_key).data = mock.PropertyMock(return_value=fileno)
        selector = mock_selector.return_value
        selector.select.return_value = [(mock_key, selectors.EVENT_READ)]
        self.acceptor.run()
        self.pipe[1].recv.assert_called_once()
        selector.register.assert_called_with(
            fileno, selectors.EVENT_READ, fileno,
        )
        selector.unregister.assert_called_with(fileno)
        mock_recv_handle.assert_called_with(self.pipe[1])
        mock_fromfd.assert_called_with(
            fileno,
            family=socket.AF_INET,
            type=socket.SOCK_STREAM,
        )
        self.flags.work_klass.assert_called_with(
            self.work_klass.create.return_value,
            flags=self.flags,
            event_queue=None,
            upstream_conn_pool=None,
        )
        mock_thread.assert_called_with(
            target=self.flags.work_klass.return_value.run,
        )
        mock_thread.return_value.start.assert_called()
        sock.close.assert_called()
| 31.605042
| 86
| 0.634938
|
acfe0ff812b5b3dde0750f5a6653707ecd724916
| 1,747
|
py
|
Python
|
peering_manager/urls.py
|
amtypaldos/peering-manager
|
a5a90f108849874e9acaa6827552535fa250a60e
|
[
"Apache-2.0"
] | null | null | null |
peering_manager/urls.py
|
amtypaldos/peering-manager
|
a5a90f108849874e9acaa6827552535fa250a60e
|
[
"Apache-2.0"
] | null | null | null |
peering_manager/urls.py
|
amtypaldos/peering-manager
|
a5a90f108849874e9acaa6827552535fa250a60e
|
[
"Apache-2.0"
] | null | null | null |
"""peering_manager URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from . import views
# Custom view used by Django to render server errors (HTTP 500).
handler500 = views.handle_500
# All application routes; mounted below under settings.BASE_PATH.
__patterns = [
    # Include the peering app
    url(r'', include('peering.urls')),
    # Include the peeringdb app
    url(r'', include('peeringdb.urls')),
    # Users login/logout
    url(r'^login/$', views.LoginView.as_view(), name='login'),
    url(r'^logout/$', views.LogoutView.as_view(), name='logout'),
    # User profile, password, activity
    url(r'^profile/$', views.ProfileView.as_view(), name='user_profile'),
    url(r'^password/$', views.ChangePasswordView.as_view(),
        name='user_change_password'),
    url(r'^activity/$', views.RecentActivityView.as_view(), name='user_activity'),
    # Home
    url(r'^$', views.Home.as_view(), name='home'),
    # Admin
    url(r'^admin/', admin.site.urls),
    # Error triggering
    url(r'^error500/$', views.trigger_500),
]
# Prepend BASE_PATH
# (allows serving the whole site from a sub-path behind a reverse proxy)
urlpatterns = [
    url(r'^{}'.format(settings.BASE_PATH), include(__patterns))
]
| 30.12069
| 82
| 0.683457
|
acfe1060807119a56dd48c29ca3dbdf40dc2890e
| 3,120
|
py
|
Python
|
app/app/settings.py
|
chemscobra/recipe-app-api
|
4bbec7b12d345783c6a3222971b6743281e27198
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
chemscobra/recipe-app-api
|
4bbec7b12d345783c6a3222971b6743281e27198
|
[
"MIT"
] | null | null | null |
app/app/settings.py
|
chemscobra/recipe-app-api
|
4bbec7b12d345783c6a3222971b6743281e27198
|
[
"MIT"
] | null | null | null |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hardcoded here; load it from an environment
# variable before any production deployment.
SECRET_KEY = '+y(z9$3tnc5u_mvch3iiac@m*llqi(55w&^_8vzx2=di)ntzrn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project app providing the custom user model (see AUTH_USER_MODEL below).
    'core'
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'app.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# Use the custom user model from the 'core' app instead of auth.User.
AUTH_USER_MODEL = 'core.User'
| 25.365854
| 91
| 0.695192
|
acfe11af5503fd9893d7f5f457c67bdc837283e0
| 40,160
|
py
|
Python
|
plnn_bounds/model.py
|
oval-group/decomposition-plnn-bounds
|
1f2548bf422a5c6ac235cfde2b6f467f850f65a1
|
[
"MIT"
] | 2
|
2021-02-15T13:59:40.000Z
|
2022-03-10T21:18:17.000Z
|
plnn_bounds/model.py
|
oval-group/decomposition-plnn-bounds
|
1f2548bf422a5c6ac235cfde2b6f467f850f65a1
|
[
"MIT"
] | null | null | null |
plnn_bounds/model.py
|
oval-group/decomposition-plnn-bounds
|
1f2548bf422a5c6ac235cfde2b6f467f850f65a1
|
[
"MIT"
] | 1
|
2021-03-22T01:20:31.000Z
|
2021-03-22T01:20:31.000Z
|
import math
import scipy.io
import torch
from collections import Counter, defaultdict
from plnn_bounds.modules import View, Flatten
from plnn_bounds.naive_approximation import NaiveNetwork
from torch import nn
def no_grad(mod):
    """Freeze *mod* in place: disable gradient tracking on all its parameters."""
    for tensor in mod.parameters():
        tensor.requires_grad_(False)
def cifar_model_large():
    """Build the 'large' CIFAR-10 convolutional network.

    Bug fix: the original function returned before its weight-initialization
    loop, leaving that loop (and a second `return model`) unreachable dead
    code. The He-style initialization now runs, matching `cifar_model` below.
    (Harmless for callers that load a state_dict afterwards, since loading
    overwrites the initialization.)
    """
    model = nn.Sequential(
        nn.Conv2d(3, 32, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(32, 32, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(32, 64, 3, stride=1, padding=1),
        nn.ReLU(),
        nn.Conv2d(64, 64, 4, stride=2, padding=1),
        nn.ReLU(),
        Flatten(),
        nn.Linear(64*8*8,512),
        nn.ReLU(),
        nn.Linear(512,512),
        nn.ReLU(),
        nn.Linear(512,10)
    )
    # He (Kaiming) initialization for conv weights; zero biases.
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
            m.bias.data.zero_()
    return model
def cifar_model():
    """Build the small CIFAR-10 conv net with He-initialized conv weights."""
    modules = [
        nn.Conv2d(3, 16, 4, stride=2, padding=1),
        nn.ReLU(),
        nn.Conv2d(16, 32, 4, stride=2, padding=1),
        nn.ReLU(),
        Flatten(),
        nn.Linear(32*8*8,100),
        nn.ReLU(),
        nn.Linear(100, 10),
    ]
    model = nn.Sequential(*modules)
    # He (Kaiming) initialization for conv layers; biases start at zero.
    for module in model.modules():
        if not isinstance(module, nn.Conv2d):
            continue
        fan = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
        module.weight.data.normal_(0, math.sqrt(2. / fan))
        module.bias.data.zero_()
    return model
def load_adversarial_problem(filename, cls):
    """Load an MNIST classifier checkpoint and build a verification problem.

    Picks the architecture from the checkpoint filename suffix, wraps the
    network so that minimizing its single output over `domain` decides
    whether an adversarial example exists for the first test sample.

    Returns:
        (cls(layers), domain): the wrapped network and a per-pixel
        [lower, upper] input domain stacked on the last dimension.
    """
    if filename.endswith('mini.pth'):
        model = nn.Sequential(
            nn.Conv2d(1, 4, 2, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(4, 8, 2, stride=2),
            nn.ReLU(),
            Flatten(),
            nn.Linear(8*4*4,50),
            nn.ReLU(),
            nn.Linear(50,10),
        )
        # Checkpoint stores a list of state dicts; index 0 is the model's.
        model.load_state_dict(torch.load(filename)['state_dict'][0])
        no_grad(model)
        dataset = torch.load('./data/mini_mnist_test.pt')
    elif filename.endswith('small.pth'):
        model = nn.Sequential(
            nn.Conv2d(1, 16, 4, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(16, 32, 4, stride=2, padding=1),
            nn.ReLU(),
            Flatten(),
            nn.Linear(32*7*7,100),
            nn.ReLU(),
            nn.Linear(100, 10)
        )
        model.load_state_dict(torch.load(filename)['state_dict'][0])
        no_grad(model)
        # from torchvision import datasets, transforms
        # ds = datasets.MNIST('./data', train=True, download=True)
        # train_ds = {'data': ds.train_data.unsqueeze(1).float()/255.0,
        #             'labels': ds.train_labels}
        # torch.save(train_ds, './data/mnist_train.pt')
        # ds = datasets.MNIST('./data', train=False, download=True)
        # test_ds = {'data': ds.test_data.unsqueeze(1).float() / 255.0,
        #            'labels': ds.test_labels}
        # torch.save(test_ds, './data/mnist_test.pt')
        dataset = torch.load('./data/mnist_test.pt')
    else:
        raise NotImplementedError
    data = dataset['data']
    labels = dataset['labels']
    # Cast the first sample to the current default tensor dtype.
    sample = data[0].type(torch.Tensor().type())
    label = int(labels[0])
    # Pick any adversarial target distinct from the true label.
    adv_label = 0
    if label == adv_label:
        adv_label += 1
    # L_inf perturbation radius, clipped to the valid pixel range [0, 1].
    eps = 0.1
    # Create the input domain to the verification
    domain = torch.stack([torch.clamp(sample - eps, 0, None),
                          torch.clamp(sample + eps, None, 1.0)], -1)
    # Adjust the convolutional bound so as to make it mono-objective, just for
    # the target label.
    layers = [lay for lay in model]
    assert isinstance(layers[-1], nn.Linear)
    old_last = layers[-1]
    # New head computes logit[label] - logit[adv_label]; negative values
    # would indicate a misclassification toward adv_label.
    new_last = nn.Linear(old_last.in_features, 1)
    no_grad(new_last)
    new_last.weight.copy_(old_last.weight[label] - old_last.weight[adv_label])
    new_last.bias.copy_(old_last.bias[label] - old_last.bias[adv_label])
    layers[-1] = new_last
    return cls(layers), domain
class AcasNetwork:
    """Parser/serializer for ACAS networks in the Reluplex ``.nnet`` format.

    ``__init__`` reads the textual format sequentially (dimensions, layer
    sizes, symmetry flag, input normalization stats, then per-layer weights
    and biases); ``write_rlv_file`` re-emits the network as a ``.rlv`` file
    with the normalization folded in as explicit Linear nodes.
    """
    def __init__(self, rpx_infile):
        """Parse an open ``.nnet``-style text file into Python lists."""
        readline = lambda: rpx_infile.readline().strip()
        line = readline()
        # Ignore the comments
        while line.startswith('//'):
            line = readline()
        # Parse the dimensions
        all_dims = [int(dim) for dim in line.split(',')
                    if dim != '']
        self.nb_layers, self.input_size, \
            self.output_size, self.max_lay_size = all_dims
        # Get the layers size
        line = readline()
        self.nodes_in_layer = [int(l_size_str) for l_size_str in line.split(',')
                               if l_size_str != '']
        assert(self.input_size == self.nodes_in_layer[0])
        assert(self.output_size == self.nodes_in_layer[-1])
        # Load the symmetric parameter
        line = readline()
        is_symmetric = int(line.split(',')[0]) != 0
        # if symmetric == 1, enforce that psi (input[2]) is positive
        # if to do so, it needs to be flipped, input[1] is also adjusted
        # In practice, all the networks released with Reluxplex 1.0 have it as 0
        # so we will just ignore it.
        # Load Min/Max/Mean/Range values of inputs
        line = readline()
        self.inp_mins = [float(min_str) for min_str in line.split(',')
                         if min_str != '']
        line = readline()
        self.inp_maxs = [float(max_str) for max_str in line.split(',')
                         if max_str != '']
        line = readline()
        self.inpout_means = [float(mean_str) for mean_str in line.split(',')
                             if mean_str != '']
        line = readline()
        self.inpout_ranges = [float(range_str) for range_str in line.split(',')
                              if range_str != '']
        assert(len(self.inp_mins) == len(self.inp_maxs))
        assert(len(self.inpout_means) == len(self.inpout_ranges))
        # means/ranges cover the inputs plus one entry for the output.
        assert(len(self.inpout_means) == (len(self.inp_mins) + 1))
        # Load the weights
        self.parameters = []
        for layer_idx in range(self.nb_layers):
            # Gather weight matrix
            weights = []
            biases = []
            # One weight row per target neuron, then one bias line each.
            for tgt_neuron in range(self.nodes_in_layer[layer_idx+1]):
                line = readline()
                to_neuron_weights = [float(wgt_str) for wgt_str in line.split(',')
                                     if wgt_str != '']
                assert(len(to_neuron_weights) == self.nodes_in_layer[layer_idx])
                weights.append(to_neuron_weights)
            for tgt_neuron in range(self.nodes_in_layer[layer_idx+1]):
                line = readline()
                neuron_biases = [float(bias_str) for bias_str in line.split(',')
                                 if bias_str != '']
                assert(len(neuron_biases) == 1)
                biases.append(neuron_biases[0])
            assert(len(weights) == len(biases))
            self.parameters.append((weights, biases))

    def write_rlv_file(self, rlv_outfile):
        """Write the parsed network to an open file in ``.rlv`` format.

        Input normalization and output denormalization become explicit
        Linear nodes; the known input min/max bounds become Assert lines.
        """
        write_line = lambda x: rlv_outfile.write(x + '\n')
        layers_var_name = []
        # Write down all the inputs
        inp_layer = []
        for inp_idx in range(self.input_size):
            new_var_name = f"in_{inp_idx}"
            inp_layer.append(new_var_name)
            write_line(f"Input {new_var_name}")
        layers_var_name.append(inp_layer)
        # Write down the rescaled version of the inputs
        resc_inp_layer = []
        for inp_idx in range(self.input_size):
            new_var_name = f"resc_inX{inp_idx}"
            resc_inp_layer.append(new_var_name)
            # resc = (in - mean) / range, expressed as scale * in + bias
            scale = 1.0 / self.inpout_ranges[inp_idx]
            bias = - scale * self.inpout_means[inp_idx]
            prev_var = layers_var_name[-1][inp_idx]
            write_line(f"Linear {new_var_name} {bias} {scale} {prev_var}")
        layers_var_name.append(resc_inp_layer)
        # Write down the linear/ReLU layers
        for layer_idx in range(self.nb_layers):
            lin_weights, bias = self.parameters[layer_idx]
            # Last layer stays linear; hidden layers get ReLU activations.
            layer_type = "Linear" if (layer_idx == self.nb_layers-1) else "ReLU"
            name_prefix = "outnormed" if (layer_idx == self.nb_layers-1) else "relu"
            prev_lay_vars = layers_var_name[-1]
            nb_nodes_from = self.nodes_in_layer[layer_idx]
            nb_nodes_to_write = self.nodes_in_layer[layer_idx+1]
            assert(len(lin_weights) == nb_nodes_to_write)
            assert(len(bias) == nb_nodes_to_write)
            for node_weight in lin_weights:
                assert(len(node_weight) == nb_nodes_from)
                assert(len(node_weight) == len(prev_lay_vars))
            relu_layer = []
            for neur_idx in range(nb_nodes_to_write):
                new_var_name = f"{name_prefix}_{layer_idx}X{neur_idx}"
                node_line = f"{layer_type} {new_var_name}"
                node_bias = bias[neur_idx]
                node_line += f" {node_bias}"
                for edge_weight, prev_var in zip(lin_weights[neur_idx],
                                                 prev_lay_vars):
                    node_line += f" {edge_weight} {prev_var}"
                relu_layer.append(new_var_name)
                write_line(node_line)
            layers_var_name.append(relu_layer)
        # Write down the output rescaling
        unscaled_outvar = layers_var_name[-1]
        assert(len(unscaled_outvar) == self.output_size)
        # The means/ranges are given as:
        # in0 in1 ... inLast out ??? ???
        # There is a bunch of random variables at the end that are useless
        output_bias = self.inpout_means[self.input_size]
        output_scale = self.inpout_ranges[self.input_size]
        out_vars = []
        for out_idx in range(self.output_size):
            new_var_name = f"out_{out_idx}"
            prev_var = unscaled_outvar[out_idx]
            out_vars.append(new_var_name)
            write_line(f"Linear {new_var_name} {output_bias} {output_scale} {prev_var}")
        layers_var_name.append(out_vars)
        # Write down the constraints that we know
        inp_vars = layers_var_name[0]
        for inp_idx in range(self.input_size):
            var_name = inp_vars[inp_idx]
            # Min-constraint
            min_val = self.inp_mins[inp_idx]
            min_constr = f"Assert <= {min_val} 1.0 {var_name}"
            write_line(min_constr)
            # Max-constraint
            max_val = self.inp_maxs[inp_idx]
            max_constr = f"Assert >= {max_val} 1.0 {var_name}"
            write_line(max_constr)
# Canonical inequality symbols used by the .rlv parser's Assert lines.
GE='>='
LE='<='
COMPS = [GE, LE]
def load_rlv(rlv_infile):
    """Parse an open ``.rlv`` file into a PyTorch verification problem.

    Returns a triple ``(net_layers, input_domain, prop_layers)``:
    the network's layers, a tensor of per-input [lower, upper] bounds, and
    extra layers that turn the property check into a minimization problem
    (min <= 0 means a counterexample exists).
    """
    # This parser only makes really sense in the case where the network is a
    # feedforward network, organised in layers. It's most certainly wrong in
    # all the other situations.
    # What we will return:
    # -> The layers of a network in pytorch, corresponding to the network
    #    described in the .rlv
    # -> An input domain on which the property should be proved
    # -> A set of layers to stack on top of the network so as to transform
    #    the proof problem into a minimization problem.
    readline = lambda: rlv_infile.readline().strip().split(' ')
    all_layers = []
    layer_type = []
    nb_neuron_in_layer = Counter()
    neuron_depth = {}
    neuron_idx_in_layer = {}
    # weight_from_neuron[parent][child] -> edge weight
    weight_from_neuron = defaultdict(dict)
    pool_parents = {}
    bias_on_neuron = {}
    network_depth = 0
    input_domain = []
    to_prove = []
    while True:
        line = readline()
        if line[0] == '':
            break
        if line[0] == "Input":
            n_name = line[1]
            n_depth = 0
            neuron_depth[n_name] = n_depth
            if n_depth >= len(all_layers):
                all_layers.append([])
                layer_type.append("Input")
            all_layers[n_depth].append(n_name)
            neuron_idx_in_layer[n_name] = nb_neuron_in_layer[n_depth]
            nb_neuron_in_layer[n_depth] += 1
            # Bounds are unknown until Assert lines tighten them.
            input_domain.append((-float('inf'), float('inf')))
        elif line[0] in ["Linear", "ReLU"]:
            n_name = line[1]
            n_bias = line[2]
            # Remaining tokens alternate (weight, parent_name).
            parents = [(line[i], line[i+1]) for i in range(3, len(line), 2)]
            deduced_depth = [neuron_depth[parent_name] + 1
                             for (_, parent_name) in parents]
            # Check that all the deduced depth are the same. This wouldn't be
            # the case for a ResNet type network but let's say we don't support
            # it for now :)
            for d in deduced_depth:
                assert d == deduced_depth[0], "Non Supported architecture"
            # If we are here, the deduced depth is probably correct
            n_depth = deduced_depth[0]
            neuron_depth[n_name] = n_depth
            if n_depth >= len(all_layers):
                # This is the first Neuron that we see of this layer
                all_layers.append([])
                layer_type.append(line[0])
                network_depth = n_depth
            else:
                # This is not the first neuron of this layer, let's make sure
                # the layer type is consistent
                assert line[0] == layer_type[n_depth]
            all_layers[n_depth].append(n_name)
            neuron_idx_in_layer[n_name] = nb_neuron_in_layer[n_depth]
            nb_neuron_in_layer[n_depth] += 1
            for weight_from_parent, parent_name in parents:
                weight_from_neuron[parent_name][n_name] = float(weight_from_parent)
            bias_on_neuron[n_name] = float(n_bias)
        elif line[0] == "Assert":
            # Ignore for now that there is some assert,
            # I'll figure out later how to deal with them
            ineq_symb = line[1]
            assert ineq_symb in COMPS
            off = float(line[2])
            parents = [(float(line[i]), line[i+1])
                       for i in range(3, len(line), 2)]
            if len(parents) == 1:
                # This is a constraint on a single variable, probably a simple bound.
                p_name = parents[0][1]
                depth = neuron_depth[p_name]
                pos_in_layer = neuron_idx_in_layer[p_name]
                weight = parents[0][0]
                # Normalise things a bit: force weight to +1, adjusting
                # the offset and flipping the inequality as needed.
                if weight < 0:
                    off = -off
                    weight = -weight
                    ineq_symb = LE if ineq_symb == GE else GE
                if weight != 1:
                    off = off / weight
                    weight = 1
                if depth == 0:
                    # This is a limiting bound on the input, let's update the
                    # domain
                    known_bounds = input_domain[pos_in_layer]
                    if ineq_symb == GE:
                        # The offset needs to be greater or equal than the
                        # value, this is an upper bound
                        new_bounds = (known_bounds[0], min(off, known_bounds[1]))
                    else:
                        # The offset needs to be less or equal than the value
                        # so this is a lower bound
                        new_bounds = (max(off, known_bounds[0]), known_bounds[1])
                    input_domain[pos_in_layer] = new_bounds
                elif depth == network_depth:
                    # If this is not on the input layer, this should be on the
                    # output layer. Imposing constraints on inner-hidden units
                    # is not supported for now.
                    to_prove.append(([(1.0, pos_in_layer)], off, ineq_symb))
                else:
                    raise Exception(f"Can't handle this line: {line}")
            else:
                parents_depth = [neuron_depth[parent_name] for _, parent_name in parents]
                assert all(network_depth == pdepth for pdepth in parents_depth), \
                    "Only linear constraints on the output have been implemented."
                art_weights = [(weight, neuron_idx_in_layer[parent_name])
                               for (weight, parent_name) in parents]
                to_prove.append((art_weights, off, ineq_symb))
        elif line[0] == "MaxPool":
            n_name = line[1]
            parents = line[2:]
            deduced_depth = [neuron_depth[parent_name] + 1
                             for parent_name in parents]
            # Check that all the deduced depth are the same. This wouldn't be
            # the case for a ResNet type network but let's say we don't support
            # it for now :)
            for d in deduced_depth:
                assert d == deduced_depth[0], "Non Supported architecture"
            # If we are here, the deduced depth is probably correct
            n_depth = deduced_depth[0]
            if n_depth >= len(all_layers):
                # This is the first Neuron that we see of this layer
                all_layers.append([])
                layer_type.append(line[0])
            else:
                # This is not the first neuron of this layer, let's make sure
                # the layer type is consistent
                assert line[0] == layer_type[n_depth]
            all_layers[n_depth].append(n_name)
            neuron_idx_in_layer[n_name] = nb_neuron_in_layer[n_depth]
            nb_neuron_in_layer[n_depth] += 1
            neuron_depth[n_name] = n_depth
            pool_parents[n_name] = parents
        else:
            print("Unknown start of line.")
            raise NotImplementedError
    # Check that we have a properly defined input domain
    for var_bounds in input_domain:
        assert not math.isinf(var_bounds[0]), "No lower bound for one of the variable"
        assert not math.isinf(var_bounds[1]), "No upper bound for one of the variable"
        assert var_bounds[1] >= var_bounds[0], "No feasible value for one variable"
    # TODO maybe: If we have a constraint that is an equality exactly, it might
    # be worth it to deal with this better than just representing it by two
    # inequality constraints. A solution might be to just modify the network so
    # that it takes one less input, and to fold the contribution into the bias.
    # Note that property 4 of Reluplex is such a property.
    # Construct the network layers
    net_layers = []
    nb_layers = len(all_layers) - 1
    for from_lay_idx in range(nb_layers):
        to_lay_idx = from_lay_idx + 1
        l_type = layer_type[to_lay_idx]
        nb_from = len(all_layers[from_lay_idx])
        nb_to = len(all_layers[to_lay_idx])
        if l_type in ["Linear", "ReLU"]:
            # If it's linear or ReLU, we're going to get a nn.Linear to
            # represent the Linear part, and eventually a nn.ReLU if necessary
            new_layer = torch.nn.Linear(nb_from, nb_to, bias=True)
            lin_weight = new_layer.weight.data
            # nb_to x nb_from
            bias = new_layer.bias.data
            # nb_to
            lin_weight.zero_()
            bias.zero_()
            for from_idx, from_name in enumerate(all_layers[from_lay_idx]):
                weight_from = weight_from_neuron[from_name]
                for to_name, weight_value in weight_from.items():
                    to_idx = neuron_idx_in_layer[to_name]
                    lin_weight[to_idx, from_idx] = weight_value
            for to_idx, to_name in enumerate(all_layers[to_lay_idx]):
                bias_value = bias_on_neuron[to_name]
                bias[to_idx] = bias_value
            net_layers.append(new_layer)
            if l_type == "ReLU":
                net_layers.append(torch.nn.ReLU())
        elif l_type == "MaxPool":
            # We need to identify what kind of MaxPooling we are
            # considering.
            # Not sure how robust this really is though :/
            pool_dims_estimated = []
            first_index = []
            nb_parents = []
            for to_idx, to_name in enumerate(all_layers[to_lay_idx]):
                parents = pool_parents[to_name]
                parents_idx = [neuron_idx_in_layer[p_name]
                               for p_name in parents]
                # Let's try to identify the pattern for the max_pooling
                off_with_prev = [parents_idx[i+1] - parents_idx[i]
                                 for i in range(len(parents_idx)-1)]
                diff_offsets = set(off_with_prev)
                # The number of differents offset should mostly correspond to
                # the number of dimensions of the pooling operation, maybe???
                pool_dims_estimated.append(len(diff_offsets))
                nb_parents.append(len(parents_idx))
                first_index.append(parents_idx[0])
            assert all(pde == pool_dims_estimated[0]
                       for pde in pool_dims_estimated), "Can't identify pooling dim"
            assert all(p_nb == nb_parents[0]
                       for p_nb in nb_parents), "Can't identify the kernel size"
            # Can we identify a constant stride?
            stride_candidates = [first_index[i+1] - first_index[i]
                                 for i in range(len(first_index)-1)]
            assert all(sc == stride_candidates[0]
                       for sc in stride_candidates), "Can't identify stride."
            pool_dim = pool_dims_estimated[0]
            stride = stride_candidates[0]
            kernel_size = nb_parents[0]
            if pool_dim == 1:
                # MaxPool1d needs a (batch, length) view around it.
                net_layers.append(View((1, nb_neuron_in_layer[from_lay_idx])))
                net_layers.append(torch.nn.MaxPool1d(kernel_size,
                                                     stride=stride))
                net_layers.append(View((nb_neuron_in_layer[to_lay_idx],)))
            else:
                raise Exception("Not implemented yet")
        else:
            raise Exception("Not implemented")
    # The .rlv files contains the specifications that we need to satisfy for
    # obtaining a counterexample
    # We will add extra layers on top that will makes it so that finding the
    # minimum of the resulting network is equivalent to performing the proof.
    # The way we do it:
    # -> For each constraint, we transform it into a canonical representation
    #    `offset GreaterOrEqual linear_fun`
    # -> Create a new neuron with a value of `linear_fun - offset`
    # -> If this neuron is negative, this constraint is satisfied
    # -> We add a Max over all of these constraint outputs.
    #    If the output of the max is negative, that means that all of the
    #    constraints have been satisfied and therefore we have a counterexample
    # So, when we minimize this network,
    # * if we obtain a negative minimum,
    #   -> We have a counterexample
    # * if we obtain a positive minimum,
    #   -> There is no input which gives a negative value, and therefore no
    #      counterexamples
    prop_layers = []
    ## Add the linear to compute the value of each constraint
    nb_final = len(all_layers[network_depth])
    nb_constr = len(to_prove)
    constr_val_layer = torch.nn.Linear(nb_final, nb_constr, bias=True)
    constr_weight = constr_val_layer.weight.data
    # nb_to x nb_from
    constr_bias = constr_val_layer.bias.data
    # nb_to
    constr_weight.zero_()
    constr_bias.zero_()
    for constr_idx, out_constr in enumerate(to_prove):
        art_weights, off, ineq_symb = out_constr
        if ineq_symb == LE:
            # Flip all the weights and the offset, and flip the LE to a GE
            art_weights = [(-weight, idx) for weight, idx in art_weights]
            off = - off
            ineq_symb = GE
        constr_bias[constr_idx] = -off
        for w, parent_idx in art_weights:
            constr_weight[constr_idx, parent_idx] = w
    prop_layers.append(constr_val_layer)
    ## Add a Maxpooling layer
    # We take a max over all the element
    nb_elt = nb_constr
    kernel_size = nb_constr
    prop_layers.append(View((1, nb_elt)))
    prop_layers.append(torch.nn.MaxPool1d(kernel_size))
    prop_layers.append(View((1,)))
    # Make input_domain into a Tensor
    input_domain = torch.Tensor(input_domain)
    return net_layers, input_domain, prop_layers
def simplify_network(all_layers):
    '''
    Given a sequence of Pytorch nn.Module `all_layers`,
    representing a feed-forward neural network,
    merge the layers when two successive modules are nn.Linear
    and can therefore be equivalently computed as a single nn.Linear.

    Also drops no-op MaxPool1d layers (kernel and stride of 1) and
    redundant consecutive View layers.  Returns a new list of layers;
    the input list is not modified.
    '''
    new_all_layers = [all_layers[0]]
    for layer in all_layers[1:]:
        if (type(layer) is nn.Linear) and (type(new_all_layers[-1]) is nn.Linear):
            # We can fold together those two layers:
            # W2 @ (W1 @ x + b1) + b2  ==  (W2 @ W1) @ x + (W2 @ b1 + b2)
            prev_layer = new_all_layers.pop()
            joint_weight = torch.mm(layer.weight.data, prev_layer.weight.data)
            if prev_layer.bias is not None:
                joint_bias = layer.bias.data + torch.mv(layer.weight.data, prev_layer.bias.data)
            else:
                joint_bias = layer.bias.data
            joint_out_features = layer.out_features
            joint_in_features = prev_layer.in_features
            joint_layer = nn.Linear(joint_in_features, joint_out_features)
            joint_layer.bias.data.copy_(joint_bias)
            joint_layer.weight.data.copy_(joint_weight)
            new_all_layers.append(joint_layer)
        elif (type(layer) is nn.MaxPool1d) and (layer.kernel_size == 1) and (layer.stride == 1):
            # This is just a spurious Maxpooling because the kernel_size is 1
            # We will do nothing
            pass
        elif (type(layer) is View) and (type(new_all_layers[-1]) is View):
            # No point in viewing twice in a row
            del new_all_layers[-1]

            # Figure out what was the last thing that imposed a shape
            # and if this shape was the proper one.
            prev_layer_idx = -1
            lay_nb_dim_inp = 0
            while True:
                parent_lay = new_all_layers[prev_layer_idx]
                prev_layer_idx -= 1
                if type(parent_lay) is nn.ReLU:
                    # Can't say anything, ReLU is flexible in dimension
                    continue
                elif type(parent_lay) is nn.Linear:
                    # Linear outputs a flat (1-D) feature vector
                    lay_nb_dim_inp = 1
                    break
                elif type(parent_lay) is nn.MaxPool1d:
                    # MaxPool1d outputs a (channels, length) 2-D shape
                    lay_nb_dim_inp = 2
                    break
                else:
                    raise NotImplementedError
            if len(layer.out_shape) != lay_nb_dim_inp:
                # If the View is actually necessary, add the change
                new_all_layers.append(layer)
            # Otherwise do nothing
        else:
            # Any other layer (ReLU, real MaxPool, first View, ...) is kept as-is
            new_all_layers.append(layer)
    return new_all_layers
def load_and_simplify(rlv_file, net_cls):
    '''
    Take as argument a .rlv file `rlv_file`,
    loads the corresponding network and its property,
    simplify it and instantiate it as an object with the `net_cls` class.

    Returns the `net_cls` object and the domain of the proof.
    '''
    layers, domain, property_layers = load_rlv(rlv_file)
    # Append the property-encoding layers to the network itself, then fold
    # consecutive Linear layers / spurious layers into a smaller network.
    simplified_layers = simplify_network(layers + property_layers)
    return net_cls(simplified_layers), domain
def load_mat_network(mat_file):
    '''
    Take as argument the path to a matlab file `mat_file`,
    loads the network stored in it and returns its layers as a list of
    PyTorch modules: Linear layers interleaved with ReLUs (no trailing ReLU).

    The .mat file is expected to contain, for each layer, a pair of entries
    whose keys contain 'weight' (shape: in_features x out_features) and
    'bias' (out_features values).
    '''
    weights = scipy.io.loadmat(mat_file)
    # NOTE(review): keys are paired by plain lexicographic sort, so with more
    # than 9 layers 'layer10_weight' would sort before 'layer2_weight' —
    # presumably the files use zero-padded / single-digit names; confirm.
    all_weight_keys = sorted(key for key in weights.keys() if 'weight' in key)
    all_bias_keys = sorted(key for key in weights.keys() if 'bias' in key)

    all_layers = []
    for w_key, b_key in zip(all_weight_keys, all_bias_keys):
        linear_weight = weights[w_key]
        linear_bias = weights[b_key]

        feat_from, feat_to = linear_weight.shape
        new_linear = nn.Linear(feat_from, feat_to, bias=True)
        # MATLAB stores the weight as (in, out); PyTorch wants (out, in)
        new_linear.weight.data.copy_(torch.FloatTensor(linear_weight.T))
        # loadmat always returns at-least-2-D arrays, so the bias comes back
        # as a (1, out) matrix; flatten it so it matches the 1-D bias tensor
        # (a 2-D source is not broadcastable into a 1-D target for copy_).
        new_linear.bias.data.copy_(torch.FloatTensor(linear_bias).view(-1))

        all_layers.append(new_linear)
        all_layers.append(nn.ReLU())
    # Remove the extra ReLU at the end
    del all_layers[-1]
    return all_layers
def reluify_maxpool(layers, domain):
    '''
    Remove all the Maxpool units of a feedforward network represented by
    `layers` and replace them by an equivalent combination of ReLU + Linear.

    This is only valid over the domain `domain` because we use some knowledge
    about upper and lower bounds of certain neurons: the rewrite relies on
    max(x, y) == max(x - y, 0) + max(y - y_lb, 0) + y_lb, which holds as long
    as y >= y_lb on the domain.

    Returns a new list of layers; `layers` itself is not mutated (a copy is
    taken before the in-place decomposition steps).
    '''
    # Interval analysis provides the per-neuron lower bounds (lbs) needed
    # for the ReLU encoding of each max.
    naive_net = NaiveNetwork(layers)
    naive_net.do_interval_analysis(domain)
    lbs = naive_net.lower_bounds

    layers = layers[:]
    new_all_layers = []

    # `idx_of_inp_lbs` tracks which entry of `lbs` corresponds to the input
    # of the layer currently being processed.
    idx_of_inp_lbs = 0
    layer_idx = 0
    while layer_idx < len(layers):
        layer = layers[layer_idx]
        if type(layer) is nn.MaxPool1d:
            # We need to decompose this MaxPool until it only has a size of 2
            assert layer.padding == 0
            assert layer.dilation == 1
            if layer.kernel_size > 2:
                assert layer.kernel_size % 2 == 0, "Not supported yet"
                assert layer.stride % 2 == 0, "Not supported yet"
                # We're going to decompose this maxpooling into two maxpooling
                # max( in_1, in_2 , in_3, in_4)
                # will become
                # max( max(in_1, in_2), max(in_3, in_4))
                first_mp = nn.MaxPool1d(2, stride=2)
                second_mp = nn.MaxPool1d(layer.kernel_size // 2,
                                         stride=layer.stride // 2)
                # We will replace the Maxpooling that was originally there with
                # those two layers
                # We need to add a corresponding layer of lower bounds:
                # the lb of max(a, b) is max(lb_a, lb_b).
                first_lbs = lbs[idx_of_inp_lbs]
                intermediate_lbs = []
                for pair_idx in range(len(first_lbs) // 2):
                    intermediate_lbs.append(max(first_lbs[2*pair_idx],
                                                first_lbs[2*pair_idx+1]))
                # Do the replacement
                del layers[layer_idx]
                layers.insert(layer_idx, first_mp)
                layers.insert(layer_idx+1, second_mp)
                lbs.insert(idx_of_inp_lbs+1, intermediate_lbs)

                # Now continue so that we re-go through the loop with the now
                # simplified maxpool
                continue
            elif layer.kernel_size == 2:
                # Each pair need two in the intermediate layers that is going
                # to be Relu-ified
                pre_nb_inp_lin = len(lbs[idx_of_inp_lbs])
                # How many starting position can we fit in?
                # 1 + how many stride we can fit before we're too late in the array to fit a kernel_size
                pre_nb_out_lin = (1 + ((pre_nb_inp_lin - layer.kernel_size) // layer.stride)) * 2
                pre_relu_lin = nn.Linear(pre_nb_inp_lin, pre_nb_out_lin, bias=True)
                pre_relu_weight = pre_relu_lin.weight.data
                pre_relu_bias = pre_relu_lin.bias.data
                pre_relu_weight.zero_()
                pre_relu_bias.zero_()
                # For each of (x, y) that needs to be transformed to max(x, y)
                # We create (x-y, y-y_lb)
                first_in_index = 0
                first_out_index = 0
                while first_in_index + 1 < pre_nb_inp_lin:
                    pre_relu_weight[first_out_index, first_in_index] = 1
                    pre_relu_weight[first_out_index, first_in_index+1] = -1
                    pre_relu_weight[first_out_index+1, first_in_index+1] = 1
                    pre_relu_bias[first_out_index+1] = -lbs[idx_of_inp_lbs][first_in_index + 1]
                    # Now shift
                    first_in_index += layer.stride
                    first_out_index += 2
                new_all_layers.append(pre_relu_lin)
                new_all_layers.append(nn.ReLU())

                # We now need to create the second layer
                # It will sum [max(x-y, 0)], [max(y - y_lb, 0)] and y_lb
                post_nb_inp_lin = pre_nb_out_lin
                post_nb_out_lin = post_nb_inp_lin // 2
                post_relu_lin = nn.Linear(post_nb_inp_lin, post_nb_out_lin)
                post_relu_weight = post_relu_lin.weight.data
                post_relu_bias = post_relu_lin.bias.data
                post_relu_weight.zero_()
                post_relu_bias.zero_()
                first_in_index = 0
                out_index = 0
                while first_in_index + 1 < post_nb_inp_lin:
                    post_relu_weight[out_index, first_in_index] = 1
                    post_relu_weight[out_index, first_in_index+1] = 1
                    post_relu_bias[out_index] = lbs[idx_of_inp_lbs][layer.stride*out_index+1]
                    first_in_index += 2
                    out_index += 1
                new_all_layers.append(post_relu_lin)
                idx_of_inp_lbs += 1
            else:
                # This should have been cleaned up in one of the simplify passes
                raise NotImplementedError
        elif type(layer) is nn.Linear:
            new_all_layers.append(layer)
            idx_of_inp_lbs += 1
        elif type(layer) is nn.ReLU:
            new_all_layers.append(layer)
        elif type(layer) is View:
            # We shouldn't add the view as we are getting rid of them
            pass
        layer_idx += 1
    return new_all_layers
def dump_rlv(rlv_outfile, layers, domain, transform_maxpool=False):
    '''
    Dump the networks represented by the series of `layers`
    into the `rlv_outfile` file (an already-open, writable file object).
    If `transform_maxpool` is set to True, replace the Maxpool layer
    by a combination of ReLUs (and check the rewrite empirically).
    '''
    writeline = lambda x: rlv_outfile.write(x + '\n')

    if transform_maxpool:
        new_layers = simplify_network(layers)
        new_layers = reluify_maxpool(new_layers, domain)
        new_layers = simplify_network(new_layers)
        # Sanity check: the ReLU-only rewrite must match the original network
        max_net = nn.Sequential(*layers)
        relu_net = nn.Sequential(*new_layers)
        assert_network_equivalence(max_net, relu_net, domain)
        layers = new_layers

    # `var_names` holds, per written layer, the list of rlv variable names,
    # so each layer can reference the previous layer's outputs by name.
    var_names = []
    # Define the input
    inp_layer_var_names = []
    for inp_idx, (inp_lb, inp_ub) in enumerate(domain):
        var_name = f"inX{inp_idx}"
        writeline(f"Input {var_name}")
        # NOTE(review): rlv 'Assert <= off w v' constraints — the lb/ub
        # placement here follows the existing rlv convention; confirm against
        # the parser in load_rlv.
        writeline(f"Assert <= {inp_lb} 1.0 {var_name}")
        writeline(f"Assert >= {inp_ub} 1.0 {var_name}")
        inp_layer_var_names.append(var_name)
    var_names.append(inp_layer_var_names)

    layer_idx = 0
    out_layer_idx = 1
    while layer_idx < len(layers):
        layer = layers[layer_idx]
        new_layer_var_names = []
        if type(layer) is nn.Linear:
            # Should we write it as a Linear or as a ReLU?
            is_relu = False
            # If the next layer is a ReLU, write it as ReLU
            # Otherwise, as Linear
            if (layer_idx + 1 < len(layers)) and (type(layers[layer_idx+1]) is nn.ReLU):
                is_relu = True
            line_header = "ReLU" if is_relu else "Linear"
            var_pattern = "relu" if is_relu else "linear"
            prev_var_names = var_names[-1]
            for out_n_idx in range(layer.out_features):
                var_name = f"{var_pattern}_{out_layer_idx}-{out_n_idx}"
                bias = layer.bias.data[out_n_idx]
                weight_str = " ".join([f"{w} {pre_var}" for w, pre_var
                                       in zip(layer.weight.data[out_n_idx, :],
                                              prev_var_names)])
                writeline(f"{line_header} {var_name} {bias} {weight_str}")
                new_layer_var_names.append(var_name)
            out_layer_idx += 1
            var_names.append(new_layer_var_names)
        elif type(layer) is nn.ReLU:
            # The ReLU is already folded into the previous Linear's header
            assert layer_idx > 0, "A ReLU is the first layer, that's weird"
            assert type(layers[layer_idx-1]) is nn.Linear, "There was no linear before this ReLU, this script might be wrong in this case"
        elif type(layer) is View:
            # Views are purely shape bookkeeping; nothing to write
            pass
        elif type(layer) is nn.MaxPool1d:
            # MaxPool can only remain if we were not asked to transform it
            assert not transform_maxpool
        else:
            raise NotImplementedError
        layer_idx += 1

    # Given that we have standardized the property to amount to
    # Prove that the output is less than zero,
    writeline(f"Assert >= 0.0 1.0 {var_names[-1][0]}")
def dump_nnet(nnet_outfile, layers, domain):
    '''
    Dump the network represented by the series of `layers`
    into the `nnet_outfile` file (an already-open, writable file object),
    using the .nnet text format (implicit ReLU after every Linear layer).

    This is a valid dump only on the domain `domain`, because
    we use some knowledge about bounds on the value of some neurons
    (via `reluify_maxpool`) to guarantee that we are passing the ReLU.
    '''
    writeline = lambda x: nnet_outfile.write(x + '\n')
    make_comma_separated_line = lambda tab: ",".join(map(str, tab)) + ","

    # Replace MaxPool units by an equivalent ReLU construction, then check
    # (empirically) that the rewritten network matches the original.
    new_layers = simplify_network(layers)
    new_layers = reluify_maxpool(new_layers, domain)
    new_layers = simplify_network(new_layers)
    max_net = nn.Sequential(*layers)
    relu_net = nn.Sequential(*new_layers)
    assert_network_equivalence(max_net, relu_net, domain)
    layers = new_layers

    # Global parameters of the networks
    nb_layers = 0
    max_lay_size = 0
    for layer in layers:
        if type(layer) is nn.Linear:
            nb_layers += 1
            max_lay_size = max(max_lay_size, layer.out_features)
    nb_input = layers[0].in_features
    output_size = 1
    writeline(f"{nb_layers},{nb_input},{output_size},{max_lay_size},")

    # Layer sizes
    layer_sizes = [nb_input]
    for layer in layers:
        if type(layer) is nn.Linear:
            layer_sizes.append(layer.out_features)
    writeline(make_comma_separated_line(layer_sizes))

    # Symmetric parameter
    writeline("0")

    # Write down the mins of the input of the network
    inp_lbs = domain[:, 0]
    writeline(make_comma_separated_line(inp_lbs))
    # Write down the maxes of the input of the network
    inp_ubs = domain[:, 1]
    writeline(make_comma_separated_line(inp_ubs))

    # Write down the mean of the input of the network.
    # We're not going to do any conditioning
    # Note that there is one additional that is for the output
    writeline(make_comma_separated_line([0] * (nb_input + 1)))
    # Write down the ranges of the input of the network
    # We're not going to do any conditioning
    # Note that there is one additional that is for the output
    writeline(make_comma_separated_line([1] * (nb_input + 1)))

    for layer in layers:
        if type(layer) is not nn.Linear:
            # The ReLUs are implicit in the .nnet format: only the Linear
            # layers' parameters get written out.
            continue
        # Write the weight coming to each neuron
        for neuron_out_idx in range(layer.out_features):
            to_neuron_weight = layer.weight.data[neuron_out_idx, :]
            writeline(make_comma_separated_line(to_neuron_weight))
        # Write the bias for each neuron
        for neuron_out_idx in range(layer.out_features):
            neuron_bias = layer.bias.data[neuron_out_idx]
            writeline(f"{neuron_bias},")
def assert_network_equivalence(net1, net2, domain):
    '''
    Empirically check that `net1` and `net2` compute the same function over
    `domain` (a (nb_inp, 2) tensor of per-input [lower, upper] bounds) by
    evaluating both networks on a large batch of uniform random samples.

    Raises AssertionError if any pair of outputs differs by more than 1e-8.
    '''
    nb_samples = 1024 * 1024
    nb_inp = domain.size(0)
    # Draw samples uniformly in [0, 1) ...
    unit_samples = torch.Tensor(nb_samples, nb_inp)
    unit_samples.uniform_(0, 1)
    # ... then rescale them into the domain box: lb + (ub - lb) * u
    lower = domain.select(1, 0).contiguous()
    upper = domain.select(1, 1).contiguous()
    box_width = upper - lower
    lower = lower.view(1, nb_inp).expand(nb_samples, nb_inp)
    box_width = box_width.view(1, nb_inp).expand(nb_samples, nb_inp)
    inps = lower + box_width * unit_samples

    with torch.no_grad():
        out1 = net1(inps)
        out2 = net2(inps)
    max_diff = torch.abs(out1 - out2).max()
    assert max_diff <= 1e-8, "The network rewrite is incorrect"
| 40.483871
| 138
| 0.588894
|
acfe1285b62dc33d6791a737787462bddadb481e
| 4,154
|
py
|
Python
|
private/templates/default/menus.py
|
andygimma/eden
|
716d5e11ec0030493b582fa67d6f1c35de0af50d
|
[
"MIT"
] | 1
|
2019-08-20T16:32:33.000Z
|
2019-08-20T16:32:33.000Z
|
private/templates/default/menus.py
|
andygimma/eden
|
716d5e11ec0030493b582fa67d6f1c35de0af50d
|
[
"MIT"
] | null | null | null |
private/templates/default/menus.py
|
andygimma/eden
|
716d5e11ec0030493b582fa67d6f1c35de0af50d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from gluon import *
from s3 import *
from eden.layouts import *
try:
from .layouts import *
except ImportError:
pass
import eden.menus as default
# Below is an example which you can base your own template's menus.py on
# - there are also other examples in the other templates folders
# =============================================================================
#class S3MainMenu(default.S3MainMenu):
#"""
#Custom Application Main Menu:
#The main menu consists of several sub-menus, each of which can
#be customized separately as a method of this class. The overall
#composition of the menu is defined in the menu() method, which can
#be customized as well:
#Function Sub-Menu Access to (standard)
#menu_modules() the modules menu the Eden modules
#menu_gis() the GIS menu GIS configurations
#menu_admin() the Admin menu System/User Administration
#menu_lang() the Language menu Selection of the GUI locale
#menu_auth() the User menu Login, Logout, User Profile
#menu_help() the Help menu Contact page, About page
#The standard uses the MM layout class for main menu items - but you
#can of course use a custom layout class which you define in layouts.py.
#Additional sub-menus can simply be defined as additional functions in
#this class, and then be included in the menu() method.
#Each sub-menu function returns a list of menu items, only the menu()
#function must return a layout class instance.
#"""
# -------------------------------------------------------------------------
#@classmethod
#def menu(cls):
#""" Compose Menu """
#main_menu = MM()(
## Modules-menu, align-left
#cls.menu_modules(),
## Service menus, align-right
## Note: always define right-hand items in reverse order!
#cls.menu_help(right=True),
#cls.menu_auth(right=True),
#cls.menu_lang(right=True),
#cls.menu_admin(right=True),
#cls.menu_gis(right=True)
#)
#return main_menu
# -------------------------------------------------------------------------
#@classmethod
#def menu_modules(cls):
#""" Custom Modules Menu """
#return [
#homepage(),
#homepage("gis"),
#homepage("pr")(
#MM("Persons", f="person"),
#MM("Groups", f="group")
#),
#MM("more", link=False)(
#homepage("dvi"),
#homepage("irs")
#),
#]
# =============================================================================
#class S3OptionsMenu(default.S3OptionsMenu):
#"""
#Custom Controller Menus
#The options menu (left-hand options menu) is individual for each
#controller, so each controller has its own options menu function
#in this class.
#Each of these option menu functions can be customized separately,
#by simply overriding (re-defining) the default function. The
#options menu function must return an instance of the item layout.
#The standard menu uses the M item layout class, but you can of
#course also use any other layout class which you define in
#layouts.py (can also be mixed).
#Make sure additional helper functions in this class don't match
#any current or future controller prefix (e.g. by using an
#underscore prefix).
#"""
#def cr(self):
#""" CR / Shelter Registry """
#return M(c="cr")(
#M("Camp", f="shelter")(
#M("New", m="create"),
#M("List All"),
#M("Map", m="map"),
#M("Import", m="import"),
#)
#)
# END =========================================================================
| 35.504274
| 80
| 0.508907
|
acfe13d9a581b54b7a8c619bc799efc1defa3f05
| 12,996
|
py
|
Python
|
tests/unit/states/test_saltmod.py
|
yutiansut/salt
|
e96c0fa13a3d977f6bfa9ccb56b7e45534f78666
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/states/test_saltmod.py
|
yutiansut/salt
|
e96c0fa13a3d977f6bfa9ccb56b7e45534f78666
|
[
"Apache-2.0"
] | 1
|
2021-08-16T13:42:35.000Z
|
2021-08-16T13:42:35.000Z
|
tests/unit/states/test_saltmod.py
|
yutiansut/salt
|
e96c0fa13a3d977f6bfa9ccb56b7e45534f78666
|
[
"Apache-2.0"
] | 2
|
2021-05-21T06:31:03.000Z
|
2021-05-24T04:14:59.000Z
|
# -*- coding: utf-8 -*-
'''
:codeauthor: Jayesh Kariya <jayeshk@saltstack.com>
'''
# Import Python libs
from __future__ import absolute_import, unicode_literals, print_function
import os
import time
import tempfile
# Import Salt Testing Libs
from tests.support.runtests import RUNTIME_VARS
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import skipIf, TestCase
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
# Import Salt Libs
import salt.config
import salt.loader
import salt.utils.jid
import salt.utils.event
import salt.states.saltmod as saltmod
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SaltmodTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.states.saltmod
    '''
    def setup_loader_modules(self):
        # Build the minimal loader context (__opts__, __salt__, ...) that
        # salt.states.saltmod expects when running under the test loader.
        utils = salt.loader.utils(
            salt.config.DEFAULT_MINION_OPTS.copy(),
            whitelist=['state']
        )
        return {
            saltmod: {
                '__env__': 'base',
                '__opts__': {
                    '__role': 'master',
                    'file_client': 'remote',
                    'sock_dir': tempfile.mkdtemp(dir=RUNTIME_VARS.TMP),
                    'transport': 'tcp'
                },
                '__salt__': {'saltutil.cmd': MagicMock()},
                '__orchestration_jid__': salt.utils.jid.gen_jid({}),
                '__utils__': utils,
            }
        }

    # 'state' function tests: 1

    def test_state(self):
        '''
        Test to invoke a state run on a given target
        '''
        name = 'state'
        tgt = 'minion1'

        comt = ('Passed invalid value for \'allow_fail\', must be an int')

        # Expected result dict for the failure cases; `comment` is mutated
        # between assertions as the tested error changes.
        ret = {'name': name,
               'changes': {},
               'result': False,
               'comment': comt}

        test_ret = {'name': name,
                    'changes': {},
                    'result': True,
                    'comment': 'States ran successfully.'
                    }

        # Simulated saltutil.cmd return for a 3-minion batched highstate run
        # (all minions succeed with no changes).
        test_batch_return = {
            'minion1': {
                'ret': {
                    'test_|-notify_me_|-this is a name_|-show_notification': {
                        'comment': 'Notify me',
                        'name': 'this is a name',
                        'start_time': '10:43:41.487565',
                        'result': True,
                        'duration': 0.35,
                        '__run_num__': 0,
                        '__sls__': 'demo',
                        'changes': {},
                        '__id__': 'notify_me'
                    },
                    'retcode': 0
                },
                'out': 'highstate'
            },
            'minion2': {
                'ret': {
                    'test_|-notify_me_|-this is a name_|-show_notification': {
                        'comment': 'Notify me',
                        'name': 'this is a name',
                        'start_time': '10:43:41.487565',
                        'result': True,
                        'duration': 0.35,
                        '__run_num__': 0,
                        '__sls__': 'demo',
                        'changes': {},
                        '__id__': 'notify_me'
                    },
                    'retcode': 0
                },
                'out': 'highstate'
            },
            'minion3': {
                'ret': {
                    'test_|-notify_me_|-this is a name_|-show_notification': {
                        'comment': 'Notify me',
                        'name': 'this is a name',
                        'start_time': '10:43:41.487565',
                        'result': True,
                        'duration': 0.35,
                        '__run_num__': 0,
                        '__sls__': 'demo',
                        'changes': {},
                        '__id__': 'notify_me'
                    },
                    'retcode': 0
                },
                'out': 'highstate'
            }
        }

        # Invalid allow_fail value is rejected
        self.assertDictEqual(saltmod.state(name, tgt, allow_fail='a'), ret)

        # Neither highstate nor sls given -> nothing to execute
        comt = ('No highstate or sls specified, no execution made')
        ret.update({'comment': comt})
        self.assertDictEqual(saltmod.state(name, tgt), ret)

        # Non-boolean 'concurrent' value is rejected
        comt = ("Must pass in boolean for value of 'concurrent'")
        ret.update({'comment': comt})
        self.assertDictEqual(saltmod.state(name, tgt, highstate=True,
                                           concurrent='a'), ret)

        # With test=True, the run reports success without executing
        ret.update({'comment': comt, 'result': None})
        with patch.dict(saltmod.__opts__, {'test': True}):
            self.assertDictEqual(saltmod.state(name, tgt, highstate=True), test_ret)

        # Single-minion successful run: jid is propagated into the result
        ret.update({'comment': 'States ran successfully. No changes made to silver.', 'result': True, '__jid__': '20170406104341210934'})
        with patch.dict(saltmod.__opts__, {'test': False}):
            mock = MagicMock(return_value={'silver': {'jid': '20170406104341210934', 'retcode': 0, 'ret': {'test_|-notify_me_|-this is a name_|-show_notification': {'comment': 'Notify me', 'name': 'this is a name', 'start_time': '10:43:41.487565', 'result': True, 'duration': 0.35, '__run_num__': 0, '__sls__': 'demo', 'changes': {}, '__id__': 'notify_me'}}, 'out': 'highstate'}})
            with patch.dict(saltmod.__salt__, {'saltutil.cmd': mock}):
                self.assertDictEqual(saltmod.state(name, tgt, highstate=True), ret)

        # Batched multi-minion run: no jid in the result dict
        ret.update({'comment': 'States ran successfully. No changes made to minion1, minion3, minion2.'})
        del ret['__jid__']
        with patch.dict(saltmod.__opts__, {'test': False}):
            with patch.dict(saltmod.__salt__, {'saltutil.cmd': MagicMock(return_value=test_batch_return)}):
                state_run = saltmod.state(name, tgt, highstate=True)

                # Test return without checking the comment contents. Comments are tested later.
                comment = state_run.pop('comment')
                ret.pop('comment')
                self.assertDictEqual(state_run, ret)

                # Check the comment contents in a non-order specific way (ordering fails sometimes on PY3)
                self.assertIn('States ran successfully. No changes made to', comment)
                for minion in ['minion1', 'minion2', 'minion3']:
                    self.assertIn(minion, comment)

    # 'function' function tests: 1

    def test_function(self):
        '''
        Test to execute a single module function on a remote
        minion via salt or salt-ssh
        '''
        name = 'state'
        tgt = 'larry'

        # With test=True, the function is only announced, not executed
        ret = {'name': name,
               'changes': {},
               'result': None,
               'comment': 'Function state would be executed '
                          'on target {0}'.format(tgt)}

        with patch.dict(saltmod.__opts__, {'test': True}):
            self.assertDictEqual(saltmod.function(name, tgt), ret)

        # With test=False and a successful saltutil.cmd, changes are reported
        ret.update({'result': True,
                    'changes': {'out': 'highstate', 'ret': {tgt: ''}},
                    'comment': 'Function ran successfully.'
                               ' Function state ran on {0}.'.format(tgt)})
        with patch.dict(saltmod.__opts__, {'test': False}):
            mock_ret = {'larry': {'ret': '', 'retcode': 0, 'failed': False}}
            mock_cmd = MagicMock(return_value=mock_ret)
            with patch.dict(saltmod.__salt__, {'saltutil.cmd': mock_cmd}):
                self.assertDictEqual(saltmod.function(name, tgt), ret)

    # 'wait_for_event' function tests: 1

    def test_wait_for_event(self):
        '''
        Test to watch Salt's event bus and block until a condition is met
        '''
        name = 'state'
        tgt = 'minion1'

        comt = ('Timeout value reached.')

        ret = {'name': name,
               'changes': {},
               'result': False,
               'comment': comt}

        class Mockevent(object):
            '''
            Mock event class
            '''
            # Class-level flag: when truthy, get_event starts returning events
            flag = None

            def __init__(self):
                self.full = None

            def get_event(self, full):
                '''
                Mock get_event method
                '''
                self.full = full
                if self.flag:
                    return {'tag': name, 'data': {}}
                return None

        with patch.object(salt.utils.event, 'get_event',
                          MagicMock(return_value=Mockevent())):
            with patch.dict(saltmod.__opts__, {'sock_dir': True,
                                               'transport': True}):
                # time.time is pinned so the timeout math is deterministic
                with patch.object(time, 'time', MagicMock(return_value=1.0)):
                    # No event arrives and timeout is already exceeded
                    self.assertDictEqual(saltmod.wait_for_event(name, 'salt',
                                                                timeout=-1.0),
                                         ret)

                    # Events now arrive immediately: success
                    Mockevent.flag = True
                    ret.update({'comment': 'All events seen in 0.0 seconds.',
                                'result': True})
                    self.assertDictEqual(saltmod.wait_for_event(name, ''), ret)

                    # Non-empty id_list with expired timeout: failure again
                    ret.update({'comment': 'Timeout value reached.',
                                'result': False})
                    self.assertDictEqual(saltmod.wait_for_event(name, tgt,
                                                                timeout=-1.0),
                                         ret)

    # 'runner' function tests: 1

    def test_runner(self):
        '''
        Test to execute a runner module on the master
        '''
        name = 'state'

        ret = {'changes': {'return': True}, 'name': 'state', 'result': True,
               'comment': 'Runner function \'state\' executed.',
               '__orchestration__': True}
        runner_mock = MagicMock(return_value={'return': True})

        with patch.dict(saltmod.__salt__, {'saltutil.runner': runner_mock}):
            self.assertDictEqual(saltmod.runner(name), ret)

    # 'wheel' function tests: 1

    def test_wheel(self):
        '''
        Test to execute a wheel module on the master
        '''
        name = 'state'

        ret = {'changes': {'return': True}, 'name': 'state', 'result': True,
               'comment': 'Wheel function \'state\' executed.',
               '__orchestration__': True}
        wheel_mock = MagicMock(return_value={'return': True})

        with patch.dict(saltmod.__salt__, {'saltutil.wheel': wheel_mock}):
            self.assertDictEqual(saltmod.wheel(name), ret)

    def test_state_ssh(self):
        '''
        Test saltmod passes roster to saltutil.cmd
        '''
        origcmd = saltmod.__salt__['saltutil.cmd']
        cmd_kwargs = {}
        cmd_args = []

        # Wrapper that records the args/kwargs before delegating to the
        # (mocked) original saltutil.cmd.
        def cmd_mock(*args, **kwargs):
            cmd_args.extend(args)
            cmd_kwargs.update(kwargs)
            return origcmd(*args, **kwargs)

        with patch.dict(saltmod.__salt__, {'saltutil.cmd': cmd_mock}):
            ret = saltmod.state('state.sls', tgt='*', ssh=True, highstate=True, roster='my_roster')
        assert 'roster' in cmd_kwargs
        assert cmd_kwargs['roster'] == 'my_roster'
@skipIf(NO_MOCK, NO_MOCK_REASON)
class StatemodTests(TestCase, LoaderModuleMockMixin):
    '''
    Additional loader-context smoke tests for salt.states.saltmod
    '''
    def setup_loader_modules(self):
        # Minimal loader context: note there is deliberately no 'test' key
        # in __opts__, to exercise the defaults handling in saltmod.state().
        self.tmp_cachedir = tempfile.mkdtemp(dir=RUNTIME_VARS.TMP)
        return {
            saltmod: {
                '__env__': 'base',
                '__opts__': {
                    'id': 'webserver2',
                    'argv': [],
                    '__role': 'master',
                    'cachedir': self.tmp_cachedir,
                    'extension_modules': os.path.join(self.tmp_cachedir, 'extmods'),
                },
                '__salt__': {'saltutil.cmd': MagicMock()},
                '__orchestration_jid__': salt.utils.jid.gen_jid({})
            }
        }

    def test_statemod_state(self):
        ''' Smoke test for salt.states.statemod.state(). Ensures that we
        don't take an exception if optional parameters are not specified in
        __opts__ or __env__.
        '''
        args = ('webserver_setup', 'webserver2')
        # Explicitly pass every optional parameter with its default-ish value
        kwargs = {
            'tgt_type': 'glob',
            'fail_minions': None,
            'pillar': None,
            'top': None,
            'batch': None,
            'orchestration_jid': None,
            'sls': 'vroom',
            'queue': False,
            'concurrent': False,
            'highstate': None,
            'expr_form': None,
            'ret': '',
            'ssh': False,
            'timeout': None, 'test': False,
            'allow_fail': 0,
            'saltenv': None,
            'expect_minions': False
        }
        ret = saltmod.state(*args, **kwargs)
        expected = {
            'comment': 'States ran successfully.',
            'changes': {},
            'name': 'webserver_setup',
            'result': True
        }
        self.assertEqual(ret, expected)
| 36.711864
| 380
| 0.490074
|
acfe1441faccdf3dfc1b9e9ff9813d9742f8b18d
| 1,261
|
py
|
Python
|
net/text_detector.py
|
lithium0003/Image2UTF8-Transformer
|
2620af2a8bdaf332e25b39ce05d610e21e6492fc
|
[
"MIT"
] | null | null | null |
net/text_detector.py
|
lithium0003/Image2UTF8-Transformer
|
2620af2a8bdaf332e25b39ce05d610e21e6492fc
|
[
"MIT"
] | null | null | null |
net/text_detector.py
|
lithium0003/Image2UTF8-Transformer
|
2620af2a8bdaf332e25b39ce05d610e21e6492fc
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
# Input image dimensions (pixels) expected by FeatureBlock's Input layer.
width = 128
height = 128
def hard_swish(x):
    """Hard-swish activation: x * ReLU6(x + 3) / 6 (a cheap swish surrogate)."""
    gate = tf.nn.relu6(x + 3)
    return x * gate / 6
# Register hard_swish with Keras' custom-object registry so models that use
# it can be saved/loaded by name.
from keras.utils.generic_utils import get_custom_objects
get_custom_objects().update({'hard_swish': hard_swish})
def FeatureBlock():
    """Build the image encoder: an EfficientNetB2 backbone followed by a
    depthwise-conv head that projects the features to a 256-d vector.

    Returns a tf.keras.Model whose input is a dict {'image': (h, w, 3)}.
    """
    image_input = tf.keras.Input(shape=(height, width, 3))
    inputs = {
        'image': image_input,
    }
    # NOTE(review): `activation` is forwarded through EfficientNetB2's
    # **kwargs to the EfficientNet builder — confirm against the installed
    # keras.applications version.
    backbone = tf.keras.applications.EfficientNetB2(include_top=False, weights='imagenet', activation=hard_swish)
    features = backbone(image_input)
    features = tf.keras.layers.DepthwiseConv2D(4)(features)
    features = tf.keras.layers.BatchNormalization()(features)
    features = tf.keras.layers.Activation(hard_swish)(features)
    features = tf.keras.layers.Flatten()(features)
    outputs = tf.keras.layers.Dense(256)(features)
    return tf.keras.Model(inputs, outputs, name='FeatureBlock')
def SimpleDecoderBlock():
    """Decoder head: map a 256-d embedding to two logit vectors
    ('id1' with 104 classes, 'id2' with 100 classes)."""
    embedding = tf.keras.Input(shape=(256,))
    id1_logits = tf.keras.layers.Dense(104)(embedding)
    id2_logits = tf.keras.layers.Dense(100)(embedding)
    outputs = {
        'id1': id1_logits,
        'id2': id2_logits,
    }
    return tf.keras.Model(embedding, outputs, name='SimpleDecoderBlock')
if __name__ == '__main__':
    # Smoke test: build both sub-networks and print their architectures.
    for build_model in (FeatureBlock, SimpleDecoderBlock):
        build_model().summary()
| 26.270833
| 115
| 0.663759
|
acfe1484486a990cdab1be34ec009bf4f96731ba
| 25,442
|
py
|
Python
|
3.7.0/lldb-3.7.0.src/test/tools/lldb-mi/stack/TestMiStack.py
|
androm3da/clang_sles
|
2ba6d0711546ad681883c42dfb8661b842806695
|
[
"MIT"
] | 3
|
2016-02-10T14:18:40.000Z
|
2018-02-05T03:15:56.000Z
|
3.7.0/lldb-3.7.0.src/test/tools/lldb-mi/stack/TestMiStack.py
|
androm3da/clang_sles
|
2ba6d0711546ad681883c42dfb8661b842806695
|
[
"MIT"
] | 1
|
2016-02-10T15:40:03.000Z
|
2016-02-10T15:40:03.000Z
|
3.7.0/lldb-3.7.0.src/test/tools/lldb-mi/stack/TestMiStack.py
|
androm3da/clang_sles
|
2ba6d0711546ad681883c42dfb8661b842806695
|
[
"MIT"
] | null | null | null |
"""
Test lldb-mi -stack-xxx commands.
"""
import lldbmi_testcase
from lldbtest import *
import unittest2
class MiStackTestCase(lldbmi_testcase.MiTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
    @lldbmi_test
    @expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
    @skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
    def test_lldbmi_stack_list_arguments(self):
        """Test that 'lldb-mi --interpreter' can show arguments."""

        self.spawnLldbMi(args = None)

        # Load executable
        self.runCmd("-file-exec-and-symbols %s" % self.myexe)
        self.expect("\^done")

        # Run to main
        self.runCmd("-break-insert -f main")
        self.expect("\^done,bkpt={number=\"1\"")
        self.runCmd("-exec-run")
        self.expect("\^running")
        self.expect("\*stopped,reason=\"breakpoint-hit\"")

        # Test that -stack-list-arguments lists empty stack arguments if range is empty
        self.runCmd("-stack-list-arguments 0 1 0")
        self.expect("\^done,stack-args=\[\]")

        # Test that -stack-list-arguments lists stack arguments without values
        # (and that low-frame and high-frame are optional)
        self.runCmd("-stack-list-arguments 0")
        self.expect("\^done,stack-args=\[frame={level=\"0\",args=\[name=\"argc\",name=\"argv\"\]}")
        self.runCmd("-stack-list-arguments --no-values")
        self.expect("\^done,stack-args=\[frame={level=\"0\",args=\[name=\"argc\",name=\"argv\"\]}")

        # Test that -stack-list-arguments lists stack arguments with all values
        self.runCmd("-stack-list-arguments 1 0 0")
        self.expect("\^done,stack-args=\[frame={level=\"0\",args=\[{name=\"argc\",value=\"1\"},{name=\"argv\",value=\".*\"}\]}\]")
        self.runCmd("-stack-list-arguments --all-values 0 0")
        self.expect("\^done,stack-args=\[frame={level=\"0\",args=\[{name=\"argc\",value=\"1\"},{name=\"argv\",value=\".*\"}\]}\]")

        # Test that -stack-list-arguments lists stack arguments with simple values
        self.runCmd("-stack-list-arguments 2 0 1")
        self.expect("\^done,stack-args=\[frame={level=\"0\",args=\[{name=\"argc\",type=\"int\",value=\"1\"},{name=\"argv\",type=\"const char \*\*\",value=\".*\"}\]}")
        self.runCmd("-stack-list-arguments --simple-values 0 1")
        self.expect("\^done,stack-args=\[frame={level=\"0\",args=\[{name=\"argc\",type=\"int\",value=\"1\"},{name=\"argv\",type=\"const char \*\*\",value=\".*\"}\]}")

        # Test that an invalid low-frame is handled
        # FIXME: -1 is treated as unsigned int
        self.runCmd("-stack-list-arguments 0 -1 0")
        #self.expect("\^error")
        self.runCmd("-stack-list-arguments 0 0")
        self.expect("\^error,msg=\"Command 'stack-list-arguments'\. Thread frame range invalid\"")

        # Test that an invalid high-frame is handled
        # FIXME: -1 is treated as unsigned int
        self.runCmd("-stack-list-arguments 0 0 -1")
        #self.expect("\^error")

        # Test that a missing low-frame or high-frame is handled
        self.runCmd("-stack-list-arguments 0 0")
        self.expect("\^error,msg=\"Command 'stack-list-arguments'\. Thread frame range invalid\"")

        # Test that an invalid low-frame is handled
        self.runCmd("-stack-list-arguments 0 0")
        self.expect("\^error,msg=\"Command 'stack-list-arguments'\. Thread frame range invalid\"")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_stack_list_locals(self):
    """Test that 'lldb-mi --interpreter' can show local variables.

    Exercises -stack-list-locals in each print-values mode (0/--no-values,
    1/--all-values, 2/--simple-values) against int, struct, array and
    pointer locals, stopping at dedicated BP_* markers in main.cpp.
    """
    self.spawnLldbMi(args = None)
    # Load executable
    self.runCmd("-file-exec-and-symbols %s" % self.myexe)
    self.expect("\^done")
    # Run to main
    self.runCmd("-break-insert -f main")
    self.expect("\^done,bkpt={number=\"1\"")
    self.runCmd("-exec-run")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test int local variables:
    # Run to BP_local_int_test
    line = line_number('main.cpp', '// BP_local_int_test')
    self.runCmd("-break-insert --file main.cpp:%d" % line)
    self.expect("\^done,bkpt={number=\"2\"")
    self.runCmd("-exec-continue")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test -stack-list-locals: use 0 or --no-values
    self.runCmd("-stack-list-locals 0")
    self.expect("\^done,locals=\[name=\"a\",name=\"b\"\]")
    self.runCmd("-stack-list-locals --no-values")
    self.expect("\^done,locals=\[name=\"a\",name=\"b\"\]")
    # Test -stack-list-locals: use 1 or --all-values
    self.runCmd("-stack-list-locals 1")
    self.expect("\^done,locals=\[{name=\"a\",value=\"10\"},{name=\"b\",value=\"20\"}\]")
    self.runCmd("-stack-list-locals --all-values")
    self.expect("\^done,locals=\[{name=\"a\",value=\"10\"},{name=\"b\",value=\"20\"}\]")
    # Test -stack-list-locals: use 2 or --simple-values
    self.runCmd("-stack-list-locals 2")
    self.expect("\^done,locals=\[{name=\"a\",type=\"int\",value=\"10\"},{name=\"b\",type=\"int\",value=\"20\"}\]")
    self.runCmd("-stack-list-locals --simple-values")
    self.expect("\^done,locals=\[{name=\"a\",type=\"int\",value=\"10\"},{name=\"b\",type=\"int\",value=\"20\"}\]")
    # Test struct local variable:
    # Run to BP_local_struct_test
    line = line_number('main.cpp', '// BP_local_struct_test')
    self.runCmd("-break-insert --file main.cpp:%d" % line)
    self.expect("\^done,bkpt={number=\"3\"")
    self.runCmd("-exec-continue")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test -stack-list-locals: use 0 or --no-values
    self.runCmd("-stack-list-locals 0")
    self.expect("\^done,locals=\[name=\"var_c\"\]")
    self.runCmd("-stack-list-locals --no-values")
    self.expect("\^done,locals=\[name=\"var_c\"\]")
    # Test -stack-list-locals: use 1 or --all-values
    self.runCmd("-stack-list-locals 1")
    self.expect("\^done,locals=\[{name=\"var_c\",value=\"{var_a = 10, var_b = 97 'a', inner_ = {var_d = 30}}\"}\]")
    self.runCmd("-stack-list-locals --all-values")
    self.expect("\^done,locals=\[{name=\"var_c\",value=\"{var_a = 10, var_b = 97 'a', inner_ = {var_d = 30}}\"}\]")
    # Test -stack-list-locals: use 2 or --simple-values
    # (simple-values prints only the type for aggregates, no value)
    self.runCmd("-stack-list-locals 2")
    self.expect("\^done,locals=\[{name=\"var_c\",type=\"my_type\"}\]")
    self.runCmd("-stack-list-locals --simple-values")
    self.expect("\^done,locals=\[{name=\"var_c\",type=\"my_type\"}\]")
    # Test array local variable:
    # Run to BP_local_array_test
    line = line_number('main.cpp', '// BP_local_array_test')
    self.runCmd("-break-insert --file main.cpp:%d" % line)
    self.expect("\^done,bkpt={number=\"4\"")
    self.runCmd("-exec-continue")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test -stack-list-locals: use 0 or --no-values
    self.runCmd("-stack-list-locals 0")
    self.expect("\^done,locals=\[name=\"array\"\]")
    self.runCmd("-stack-list-locals --no-values")
    self.expect("\^done,locals=\[name=\"array\"\]")
    # Test -stack-list-locals: use 1 or --all-values
    self.runCmd("-stack-list-locals 1")
    self.expect("\^done,locals=\[{name=\"array\",value=\"{\[0\] = 100, \[1\] = 200, \[2\] = 300}\"}\]")
    self.runCmd("-stack-list-locals --all-values")
    self.expect("\^done,locals=\[{name=\"array\",value=\"{\[0\] = 100, \[1\] = 200, \[2\] = 300}\"}\]")
    # Test -stack-list-locals: use 2 or --simple-values
    self.runCmd("-stack-list-locals 2")
    self.expect("\^done,locals=\[{name=\"array\",type=\"int \[3\]\"}\]")
    self.runCmd("-stack-list-locals --simple-values")
    self.expect("\^done,locals=\[{name=\"array\",type=\"int \[3\]\"}\]")
    # Test pointers as local variable:
    # Run to BP_local_pointer_test
    line = line_number('main.cpp', '// BP_local_pointer_test')
    self.runCmd("-break-insert --file main.cpp:%d" % line)
    self.expect("\^done,bkpt={number=\"5\"")
    self.runCmd("-exec-continue")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test -stack-list-locals: use 0 or --no-values
    self.runCmd("-stack-list-locals 0")
    self.expect("\^done,locals=\[name=\"test_str\",name=\"var_e\",name=\"ptr\"\]")
    self.runCmd("-stack-list-locals --no-values")
    self.expect("\^done,locals=\[name=\"test_str\",name=\"var_e\",name=\"ptr\"\]")
    # Test -stack-list-locals: use 1 or --all-values
    # (pointer values are addresses, so match them loosely with .*?)
    self.runCmd("-stack-list-locals 1")
    self.expect("\^done,locals=\[{name=\"test_str\",value=\".*?Rakaposhi.*?\"},{name=\"var_e\",value=\"24\"},{name=\"ptr\",value=\".*?\"}\]")
    self.runCmd("-stack-list-locals --all-values")
    self.expect("\^done,locals=\[{name=\"test_str\",value=\".*?Rakaposhi.*?\"},{name=\"var_e\",value=\"24\"},{name=\"ptr\",value=\".*?\"}\]")
    # Test -stack-list-locals: use 2 or --simple-values
    self.runCmd("-stack-list-locals 2")
    self.expect("\^done,locals=\[{name=\"test_str\",type=\"const char \*\",value=\".*?Rakaposhi.*?\"},{name=\"var_e\",type=\"int\",value=\"24\"},{name=\"ptr\",type=\"int \*\",value=\".*?\"}\]")
    self.runCmd("-stack-list-locals --simple-values")
    self.expect("\^done,locals=\[{name=\"test_str\",type=\"const char \*\",value=\".*?Rakaposhi.*?\"},{name=\"var_e\",type=\"int\",value=\"24\"},{name=\"ptr\",type=\"int \*\",value=\".*?\"}\]")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_stack_list_variables(self):
    """Test that 'lldb-mi --interpreter' can show local variables and arguments.

    Like test_lldbmi_stack_list_locals, but uses -stack-list-variables,
    which reports both function arguments (tagged arg="1") and locals,
    in each of the three print-values modes.
    """
    self.spawnLldbMi(args = None)
    # Load executable
    self.runCmd("-file-exec-and-symbols %s" % self.myexe)
    self.expect("\^done")
    # Run to main
    self.runCmd("-break-insert -f main")
    self.expect("\^done,bkpt={number=\"1\"")
    self.runCmd("-exec-run")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test int local variables:
    # Run to BP_local_int_test
    line = line_number('main.cpp', '// BP_local_int_test_with_args')
    self.runCmd("-break-insert --file main.cpp:%d" % line)
    self.expect("\^done,bkpt={number=\"2\"")
    self.runCmd("-exec-continue")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test -stack-list-variables: use 0 or --no-values
    self.runCmd("-stack-list-variables 0")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"c\"},{arg=\"1\",name=\"d\"},{name=\"a\"},{name=\"b\"}\]")
    self.runCmd("-stack-list-variables --no-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"c\"},{arg=\"1\",name=\"d\"},{name=\"a\"},{name=\"b\"}\]")
    # Test -stack-list-variables: use 1 or --all-values
    self.runCmd("-stack-list-variables 1")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"c\",value=\"30\"},{arg=\"1\",name=\"d\",value=\"40\"},{name=\"a\",value=\"10\"},{name=\"b\",value=\"20\"}\]")
    self.runCmd("-stack-list-variables --all-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"c\",value=\"30\"},{arg=\"1\",name=\"d\",value=\"40\"},{name=\"a\",value=\"10\"},{name=\"b\",value=\"20\"}\]")
    # Test -stack-list-variables: use 2 or --simple-values
    self.runCmd("-stack-list-variables 2")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"c\",type=\"int\",value=\"30\"},{arg=\"1\",name=\"d\",type=\"int\",value=\"40\"},{name=\"a\",type=\"int\",value=\"10\"},{name=\"b\",type=\"int\",value=\"20\"}\]")
    self.runCmd("-stack-list-variables --simple-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"c\",type=\"int\",value=\"30\"},{arg=\"1\",name=\"d\",type=\"int\",value=\"40\"},{name=\"a\",type=\"int\",value=\"10\"},{name=\"b\",type=\"int\",value=\"20\"}\]")
    # Test struct local variable:
    # Run to BP_local_struct_test
    line = line_number('main.cpp', '// BP_local_struct_test_with_args')
    self.runCmd("-break-insert --file main.cpp:%d" % line)
    self.expect("\^done,bkpt={number=\"3\"")
    self.runCmd("-exec-continue")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test -stack-list-variables: use 0 or --no-values
    self.runCmd("-stack-list-variables 0")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"var_e\"},{name=\"var_c\"}\]")
    self.runCmd("-stack-list-variables --no-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"var_e\"},{name=\"var_c\"}\]")
    # Test -stack-list-variables: use 1 or --all-values
    self.runCmd("-stack-list-variables 1")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"var_e\",value=\"{var_a = 20, var_b = 98 'b', inner_ = {var_d = 40}}\"},{name=\"var_c\",value=\"{var_a = 10, var_b = 97 'a', inner_ = {var_d = 30}}\"}\]")
    self.runCmd("-stack-list-variables --all-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"var_e\",value=\"{var_a = 20, var_b = 98 'b', inner_ = {var_d = 40}}\"},{name=\"var_c\",value=\"{var_a = 10, var_b = 97 'a', inner_ = {var_d = 30}}\"}\]")
    # Test -stack-list-variables: use 2 or --simple-values
    # (simple-values prints only the type for aggregates, no value)
    self.runCmd("-stack-list-variables 2")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"var_e\",type=\"my_type\"},{name=\"var_c\",type=\"my_type\"}\]")
    self.runCmd("-stack-list-variables --simple-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"var_e\",type=\"my_type\"},{name=\"var_c\",type=\"my_type\"}\]")
    # Test array local variable:
    # Run to BP_local_array_test
    line = line_number('main.cpp', '// BP_local_array_test_with_args')
    self.runCmd("-break-insert --file main.cpp:%d" % line)
    self.expect("\^done,bkpt={number=\"4\"")
    self.runCmd("-exec-continue")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test -stack-list-variables: use 0 or --no-values
    self.runCmd("-stack-list-variables 0")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"other_array\"},{name=\"array\"}\]")
    self.runCmd("-stack-list-variables --no-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"other_array\"},{name=\"array\"}\]")
    # Test -stack-list-variables: use 1 or --all-values
    # (the arg decays to a pointer, so its value is an address: match loosely)
    self.runCmd("-stack-list-variables 1")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"other_array\",value=\".*?\"},{name=\"array\",value=\"{\[0\] = 100, \[1\] = 200, \[2\] = 300}\"}\]")
    self.runCmd("-stack-list-variables --all-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"other_array\",value=\".*?\"},{name=\"array\",value=\"{\[0\] = 100, \[1\] = 200, \[2\] = 300}\"}\]")
    # Test -stack-list-variables: use 2 or --simple-values
    self.runCmd("-stack-list-variables 2")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"other_array\",type=\"int \*\",value=\".*?\"},{name=\"array\",type=\"int \[3\]\"}\]")
    self.runCmd("-stack-list-variables --simple-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"other_array\",type=\"int \*\",value=\".*?\"},{name=\"array\",type=\"int \[3\]\"}\]")
    # Test pointers as local variable:
    # Run to BP_local_pointer_test
    line = line_number('main.cpp', '// BP_local_pointer_test_with_args')
    self.runCmd("-break-insert --file main.cpp:%d" % line)
    self.expect("\^done,bkpt={number=\"5\"")
    self.runCmd("-exec-continue")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test -stack-list-variables: use 0 or --no-values
    self.runCmd("-stack-list-variables 0")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"arg_str\"},{arg=\"1\",name=\"arg_ptr\"},{name=\"test_str\"},{name=\"var_e\"},{name=\"ptr\"}\]")
    self.runCmd("-stack-list-variables --no-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"arg_str\"},{arg=\"1\",name=\"arg_ptr\"},{name=\"test_str\"},{name=\"var_e\"},{name=\"ptr\"}\]")
    # Test -stack-list-variables: use 1 or --all-values
    self.runCmd("-stack-list-variables 1")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"arg_str\",value=\".*?String.*?\"},{arg=\"1\",name=\"arg_ptr\",value=\".*?\"},{name=\"test_str\",value=\".*?Rakaposhi.*?\"},{name=\"var_e\",value=\"24\"},{name=\"ptr\",value=\".*?\"}\]")
    self.runCmd("-stack-list-variables --all-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"arg_str\",value=\".*?String.*?\"},{arg=\"1\",name=\"arg_ptr\",value=\".*?\"},{name=\"test_str\",value=\".*?Rakaposhi.*?\"},{name=\"var_e\",value=\"24\"},{name=\"ptr\",value=\".*?\"}\]")
    # Test -stack-list-variables: use 2 or --simple-values
    self.runCmd("-stack-list-variables 2")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"arg_str\",type=\"const char \*\",value=\".*?String.*?\"},{arg=\"1\",name=\"arg_ptr\",type=\"int \*\",value=\".*?\"},{name=\"test_str\",type=\"const char \*\",value=\".*?Rakaposhi.*?\"},{name=\"var_e\",type=\"int\",value=\"24\"},{name=\"ptr\",type=\"int \*\",value=\".*?\"}\]")
    self.runCmd("-stack-list-variables --simple-values")
    self.expect("\^done,variables=\[{arg=\"1\",name=\"arg_str\",type=\"const char \*\",value=\".*?String.*?\"},{arg=\"1\",name=\"arg_ptr\",type=\"int \*\",value=\".*?\"},{name=\"test_str\",type=\"const char \*\",value=\".*?Rakaposhi.*?\"},{name=\"var_e\",type=\"int\",value=\"24\"},{name=\"ptr\",type=\"int \*\",value=\".*?\"}\]")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_stack_info_depth(self):
    """Test that 'lldb-mi --interpreter' can show the depth of the stack."""
    self.spawnLldbMi(args = None)

    # Load the inferior and run it to a breakpoint on main.
    startup = (
        ("-file-exec-and-symbols %s" % self.myexe, ("\^done",)),
        ("-break-insert -f main", ("\^done,bkpt={number=\"1\"",)),
        ("-exec-run", ("\^running", "\*stopped,reason=\"breakpoint-hit\"")),
    )
    for command, patterns in startup:
        self.runCmd(command)
        for pattern in patterns:
            self.expect(pattern)

    # With no max-depth argument the full (non-zero) depth is reported.
    self.runCmd("-stack-info-depth")
    self.expect("\^done,depth=\"[1-9]\"")

    # max-depth should restrict the depth check, but lldb-mi currently
    # ignores the argument, so only issue the command without checking.
    #FIXME: max-depth argument is ignored
    self.runCmd("-stack-info-depth 1")
    #self.expect("\^done,depth=\"1\"")

    # An invalid max-depth should be rejected, but is likewise ignored.
    #FIXME: max-depth argument is ignored
    self.runCmd("-stack-info-depth -1")
    #self.expect("\^error")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
@skipUnlessDarwin
def test_lldbmi_stack_info_frame(self):
    """Test that 'lldb-mi --interpreter' can show information about current frame.

    Checks the error path (no process), the normal path (stopped at a
    breakpoint), and that frame info follows -stack-select-frame.
    """
    self.spawnLldbMi(args = None)
    # Test that -stack-info-frame fails when program isn't running
    self.runCmd("-stack-info-frame")
    self.expect("\^error,msg=\"Command 'stack-info-frame'\. Invalid process during debug session\"")
    # Load executable
    self.runCmd("-file-exec-and-symbols %s" % self.myexe)
    self.expect("\^done")
    # Run to main
    self.runCmd("-break-insert -f main")
    self.expect("\^done,bkpt={number=\"1\"")
    self.runCmd("-exec-run")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test that -stack-info-frame works when program was stopped on BP
    self.runCmd("-stack-info-frame")
    self.expect("\^done,frame=\{level=\"0\",addr=\"0x[0-9a-f]+\",func=\"main\",file=\"main\.cpp\",fullname=\".+?main\.cpp\",line=\"\d+\"\}")
    # Select frame #1
    self.runCmd("-stack-select-frame 1")
    self.expect("\^done")
    # Test that -stack-info-frame works when specified frame was selected
    # (frame #1 is outside user code, hence file/fullname "??" and line -1)
    self.runCmd("-stack-info-frame")
    self.expect("\^done,frame=\{level=\"1\",addr=\"0x[0-9a-f]+\",func=\".+?\",file=\"\?\?\",fullname=\"\?\?\",line=\"-1\"\}")
    # Test that -stack-info-frame fails when an argument is specified
    #FIXME: unknown argument is ignored
    self.runCmd("-stack-info-frame unknown_arg")
    #self.expect("\^error")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_stack_list_frames(self):
    """Test that 'lldb-mi --interpreter' can list the frames on the stack."""
    self.spawnLldbMi(args = None)

    # Load the inferior and run it to a breakpoint on main.
    startup = (
        ("-file-exec-and-symbols %s" % self.myexe, ("\^done",)),
        ("-break-insert -f main", ("\^done,bkpt={number=\"1\"",)),
        ("-exec-run", ("\^running", "\*stopped,reason=\"breakpoint-hit\"")),
    )
    for command, patterns in startup:
        self.runCmd(command)
        for pattern in patterns:
            self.expect(pattern)

    # Asking for frames 0..0 should describe exactly frame #0: main().
    self.runCmd("-stack-list-frames 0 0")
    self.expect("\^done,stack=\[frame=\{level=\"0\",addr=\"0x[0-9a-f]+\",func=\"main\",file=\"main\.cpp\",fullname=\".+?main\.cpp\",line=\"\d+\"\}\]")
@lldbmi_test
@expectedFailureWindows("llvm.org/pr22274: need a pexpect replacement for windows")
@skipIfFreeBSD # llvm.org/pr22411: Failure presumably due to known thread races
def test_lldbmi_stack_select_frame(self):
    """Test that 'lldb-mi --interpreter' can choose the current frame.

    Checks error handling (missing/invalid frame argument) and that
    selecting frames #0 and #1 changes what -stack-info-frame reports.
    """
    self.spawnLldbMi(args = None)
    # Load executable
    self.runCmd("-file-exec-and-symbols %s" % self.myexe)
    self.expect("\^done")
    # Run to main
    self.runCmd("-break-insert -f main")
    self.expect("\^done,bkpt={number=\"1\"")
    self.runCmd("-exec-run")
    self.expect("\^running")
    self.expect("\*stopped,reason=\"breakpoint-hit\"")
    # Test that -stack-select-frame requires 1 mandatory argument
    self.runCmd("-stack-select-frame")
    self.expect("\^error,msg=\"Command 'stack-select-frame'\. Command Args\. Validation failed. Mandatory args not found: frame\"")
    # Test that -stack-select-frame fails on invalid frame number
    self.runCmd("-stack-select-frame 99")
    self.expect("\^error,msg=\"Command 'stack-select-frame'\. Frame ID invalid\"")
    # Test that current frame is #0
    self.runCmd("-stack-info-frame")
    self.expect("\^done,frame=\{level=\"0\",addr=\"0x[0-9a-f]+\",func=\"main\",file=\"main\.cpp\",fullname=\".+?main\.cpp\",line=\"\d+\"\}")
    # Test that -stack-select-frame can select the selected frame
    self.runCmd("-stack-select-frame 0")
    self.expect("\^done")
    # Test that current frame is still #0
    self.runCmd("-stack-info-frame")
    self.expect("\^done,frame=\{level=\"0\",addr=\"0x[0-9a-f]+\",func=\"main\",file=\"main\.cpp\",fullname=\".+?main\.cpp\",line=\"\d+\"\}")
    # Test that -stack-select-frame can select frame #1 (parent frame)
    self.runCmd("-stack-select-frame 1")
    self.expect("\^done")
    # Test that current frame is #1
    # Note that message is different in Darwin and Linux:
    # Darwin: "^done,frame={level=\"1\",addr=\"0x[0-9a-f]+\",func=\"start\",file=\"??\",fullname=\"??\",line=\"-1\"}"
    # Linux: "^done,frame={level=\"1\",addr=\"0x[0-9a-f]+\",func=\".+\",file=\".+\",fullname=\".+\",line=\"\d+\"}"
    self.runCmd("-stack-info-frame")
    self.expect("\^done,frame=\{level=\"1\",addr=\"0x[0-9a-f]+\",func=\".+?\",file=\".+?\",fullname=\".+?\",line=\"(-1|\d+)\"\}")
    # Test that -stack-select-frame can select frame #0 (child frame)
    self.runCmd("-stack-select-frame 0")
    self.expect("\^done")
    # Test that current frame is #0 and it has the same information
    self.runCmd("-stack-info-frame")
    self.expect("\^done,frame=\{level=\"0\",addr=\"0x[0-9a-f]+\",func=\"main\",file=\"main\.cpp\",fullname=\".+?main\.cpp\",line=\"\d+\"\}")
# Allow running this test module directly, outside the lldb test driver.
if __name__ == '__main__':
    unittest2.main()
| 52.2423
| 334
| 0.577352
|
acfe14ad3c7f9e47eacc80d7f7c65c14a9d810d8
| 4,077
|
py
|
Python
|
rubin_sim/maf/web/mafTracking.py
|
RileyWClarke/flarubin
|
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
|
[
"MIT"
] | null | null | null |
rubin_sim/maf/web/mafTracking.py
|
RileyWClarke/flarubin
|
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
|
[
"MIT"
] | null | null | null |
rubin_sim/maf/web/mafTracking.py
|
RileyWClarke/flarubin
|
eb7b1ee21c828523f8a5374fe4510fe6e5ec2a2a
|
[
"MIT"
] | null | null | null |
from builtins import object
import os
from collections import OrderedDict
import numpy as np
import rubin_sim.maf.db as db
from .mafRunResults import MafRunResults
__all__ = ['MafTracking']
class MafTracking(object):
    """
    Class to read MAF's tracking SQLite database (tracking a set of MAF runs)
    and handle the output for web display.
    """
    def __init__(self, database=None):
        """
        Instantiate the (multi-run) layout visualization class.

        Parameters
        ----------
        database : str
            Path to the sqlite tracking database file.
            If not set, looks for 'trackingDb_sqlite.db' file in current directory.
        """
        if database is None:
            database = os.path.join(os.getcwd(), 'trackingDb_sqlite.db')
        # Read in the results database.
        tdb = db.Database(database=database, longstrings=True)
        cols = ['mafRunId', 'opsimRun', 'opsimGroup', 'mafComment', 'opsimComment', 'dbFile',
                'mafDir', 'opsimVersion', 'opsimDate', 'mafVersion', 'mafDate']
        self.runs = tdb.query_columns('runs', colnames=cols)
        self.runs = self.sortRuns(self.runs, order=['mafRunId', 'opsimRun', 'mafComment'])
        # Cache of MafRunResults objects, keyed by mafRunId (see getRun).
        self.runsPage = {}

    def runInfo(self, run):
        """
        Provide the tracking database information relevant for a given run in a format
        that the jinja2 templates can use.

        Parameters
        ----------
        run : numpy.NDarray
            One line from self.runs

        Returns
        -------
        OrderedDict
            Ordered dict version of the numpy structured array.
        """
        runInfo = OrderedDict()
        runInfo['OpsimRun'] = run['opsimRun']
        runInfo['OpsimGroup'] = run['opsimGroup']
        runInfo['MafComment'] = run['mafComment']
        runInfo['OpsimComment'] = run['opsimComment']
        # Pair the relative path (for links) with the bare filename (for display).
        runInfo['SQLite File'] = [os.path.relpath(run['dbFile']), os.path.split(run['dbFile'])[1]]
        runInfo['ResultsDb'] = os.path.relpath(os.path.join(run['mafDir'], 'resultsDb_sqlite.db'))
        runInfo['MafDir'] = run['mafDir']
        runInfo['OpsimVersion'] = run['opsimVersion']
        runInfo['OpsimDate'] = run['opsimDate']
        runInfo['MafVersion'] = run['mafVersion']
        runInfo['MafDate'] = run['mafDate']
        return runInfo

    def sortRuns(self, runs, order=None):
        """
        Sort the numpy array of run data.

        Parameters
        ----------
        runs : numpy.NDarray
            The runs from self.runs to sort.
        order : list, optional
            The fields to use to sort the runs array.
            Defaults to ['opsimRun', 'mafComment', 'mafRunId'].

        Returns
        -------
        numpy.NDarray
            A sorted numpy array.
        """
        # None sentinel instead of a mutable list default (PEP 8 best practice).
        if order is None:
            order = ['opsimRun', 'mafComment', 'mafRunId']
        return np.sort(runs, order=order)

    def getRun(self, mafRunId):
        """
        Set up a mafRunResults object to read and handle the data from an individual run.
        Caches the mafRunResults object, meaning the metric information from a particular run
        is only read once from disk.

        Parameters
        ----------
        mafRunId : int
            mafRunId value in the tracking database corresponding to a particular MAF run.

        Returns
        -------
        MafRunResults
            A MafRunResults object containing the information about a particular run.
            Stored internally in self.runsPage dict, but also passed back to the tornado server.
        """
        # Normalize mafRunId to an int: tornado form values arrive as
        # dicts/lists of strings rather than plain integers.
        if not isinstance(mafRunId, int):
            if isinstance(mafRunId, dict):
                mafRunId = int(mafRunId['runId'][0][0])
            elif isinstance(mafRunId, list):
                mafRunId = int(mafRunId[0])
            else:
                # e.g. a bare string run id from a query parameter.
                mafRunId = int(mafRunId)
        # Return the cached results object if this run was already loaded.
        if mafRunId in self.runsPage:
            return self.runsPage[mafRunId]
        match = (self.runs['mafRunId'] == mafRunId)
        mafDir = self.runs[match]['mafDir'][0]
        runName = self.runs[match]['opsimRun'][0]
        # The tracking db stores 'NULL' (string) for runs without a name.
        if runName == 'NULL':
            runName = None
        self.runsPage[mafRunId] = MafRunResults(mafDir, runName)
        return self.runsPage[mafRunId]
| 35.763158
| 98
| 0.595536
|
acfe14f4e909268a30eea0877bae104ba47ac3aa
| 1,526
|
py
|
Python
|
tests/utils/cgroup_tests.py
|
shareablee/apm-agent-python
|
29f12ceb410b3c1a7f933b29dcecccf628dbbb6c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/utils/cgroup_tests.py
|
shareablee/apm-agent-python
|
29f12ceb410b3c1a7f933b29dcecccf628dbbb6c
|
[
"BSD-3-Clause"
] | null | null | null |
tests/utils/cgroup_tests.py
|
shareablee/apm-agent-python
|
29f12ceb410b3c1a7f933b29dcecccf628dbbb6c
|
[
"BSD-3-Clause"
] | null | null | null |
import mock
import pytest
from elasticapm.utils import cgroup, compat
# One (cgroup file line, expected metadata) pair per container-runtime layout:
# plain docker, systemd-scoped docker, kubernetes besteffort pod, crio pod.
_CGROUP_CASES = [
    (
        "12:devices:/docker/051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76",
        {"container": {"id": "051e2ee0bce99116029a13df4a9e943137f19f957f38ac02d6bad96f9b700f76"}},
    ),
    (
        "1:name=systemd:/system.slice/docker-cde7c2bab394630a42d73dc610b9c57415dced996106665d427f6d0566594411.scope",
        {"container": {"id": "cde7c2bab394630a42d73dc610b9c57415dced996106665d427f6d0566594411"}},
    ),
    (
        "1:name=systemd:/kubepods/besteffort/pode9b90526-f47d-11e8-b2a5-080027b9f4fb/15aa6e53-b09a-40c7-8558-c6c31e36c88a",
        {
            "container": {"id": "15aa6e53-b09a-40c7-8558-c6c31e36c88a"},
            "pod": {"uid": "e9b90526-f47d-11e8-b2a5-080027b9f4fb"},
        },
    ),
    (
        "1:name=systemd:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod90d81341_92de_11e7_8cf2_507b9d4141fa.slice/crio-2227daf62df6694645fee5df53c1f91271546a9560e8600a525690ae252b7f63.scope",
        {
            "container": {"id": "2227daf62df6694645fee5df53c1f91271546a9560e8600a525690ae252b7f63"},
            "pod": {"uid": "90d81341_92de_11e7_8cf2_507b9d4141fa"},
        },
    ),
]


@pytest.mark.parametrize("test_input,expected", _CGROUP_CASES)
def test_cgroup_parsing(test_input, expected):
    """parse_cgroups() extracts container/pod metadata from a cgroup line."""
    parsed = cgroup.parse_cgroups(compat.StringIO(test_input))
    assert parsed == expected
| 40.157895
| 211
| 0.663172
|
acfe152ca87830bfb13de195af2b019b2b70d379
| 370
|
py
|
Python
|
python-binance/unit10/03.py
|
sharebook-kr/learningspoons-bootcamp-finance
|
0288f3f3b39f54420e4e9987f1de12892dc680ea
|
[
"MIT"
] | 9
|
2020-10-25T15:13:32.000Z
|
2022-03-26T11:27:21.000Z
|
python-binance/unit10/03.py
|
sharebook-kr/learningspoons-bootcamp-finance
|
0288f3f3b39f54420e4e9987f1de12892dc680ea
|
[
"MIT"
] | null | null | null |
python-binance/unit10/03.py
|
sharebook-kr/learningspoons-bootcamp-finance
|
0288f3f3b39f54420e4e9987f1de12892dc680ea
|
[
"MIT"
] | 7
|
2021-03-01T11:06:45.000Z
|
2022-03-14T07:06:04.000Z
|
import ccxt
import pprint

# Binance API credentials: key on the first line, secret on the second.
with open("../api.txt") as f:
    api_key, secret = (line.strip() for line in f.readlines()[:2])

# Authenticated client against Binance USDT-margined futures.
binance = ccxt.binance(config={
    'apiKey': api_key,
    'secret': secret,
    'enableRateLimit': True,
    'options': {
        'defaultType': 'future'
    }
})

# Fetch and pretty-print the current BTC/USDT ticker.
btc = binance.fetch_ticker("BTC/USDT")
pprint.pprint(btc)
| 19.473684
| 38
| 0.608108
|
acfe1565b38b39e0bca223feb621bcb7e6128a7c
| 490
|
py
|
Python
|
py/tests/testHouseRobber/test_HouseRobber.py
|
zcemycl/algoTest
|
9518fb2b60fd83c85aeb2ab809ff647aaf643f0a
|
[
"MIT"
] | 1
|
2022-01-26T16:33:45.000Z
|
2022-01-26T16:33:45.000Z
|
py/tests/testHouseRobber/test_HouseRobber.py
|
zcemycl/algoTest
|
9518fb2b60fd83c85aeb2ab809ff647aaf643f0a
|
[
"MIT"
] | null | null | null |
py/tests/testHouseRobber/test_HouseRobber.py
|
zcemycl/algoTest
|
9518fb2b60fd83c85aeb2ab809ff647aaf643f0a
|
[
"MIT"
] | 1
|
2022-01-26T16:35:44.000Z
|
2022-01-26T16:35:44.000Z
|
import unittest
from parameterized import parameterized as p
from solns.houseRobber.houseRobber import *
# (houses, expected maximum loot) — shared by both implementations.
_CASES = [
    [[1, 2, 3, 1], 4],
    [[2, 7, 9, 3, 1], 12],
    [[1, 2], 2],
]


class UnitTest_HouseRobber(unittest.TestCase):
    """Both house-robber solutions must agree on the shared cases."""

    @p.expand(_CASES)
    def test_naive(self, nums, expected):
        self.assertEqual(Solution.naive(nums), expected)

    @p.expand(_CASES)
    def test_2max(self, nums, expected):
        self.assertEqual(Solution.twomax(nums), expected)
| 32.666667
| 57
| 0.638776
|
acfe1586681dcde2695f27a98cfef945220c0742
| 3,278
|
py
|
Python
|
test/test_TCCNet.py
|
EmbeddedML-EDAGroup/PIT
|
02897f6977b481d3072e9aa915aec0fe43faeb02
|
[
"Apache-2.0"
] | 2
|
2021-12-18T21:04:29.000Z
|
2022-01-04T14:14:27.000Z
|
test/test_TCCNet.py
|
EmbeddedML-EDAGroup/PIT
|
02897f6977b481d3072e9aa915aec0fe43faeb02
|
[
"Apache-2.0"
] | null | null | null |
test/test_TCCNet.py
|
EmbeddedML-EDAGroup/PIT
|
02897f6977b481d3072e9aa915aec0fe43faeb02
|
[
"Apache-2.0"
] | null | null | null |
#*----------------------------------------------------------------------------*
#* Copyright (C) 2021 Politecnico di Torino, Italy *
#* SPDX-License-Identifier: Apache-2.0 *
#* *
#* Licensed under the Apache License, Version 2.0 (the "License"); *
#* you may not use this file except in compliance with the License. *
#* You may obtain a copy of the License at *
#* *
#* http://www.apache.org/licenses/LICENSE-2.0 *
#* *
#* Unless required by applicable law or agreed to in writing, software *
#* distributed under the License is distributed on an "AS IS" BASIS, *
#* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. *
#* See the License for the specific language governing permissions and *
#* limitations under the License. *
#* *
#* Author: Matteo Risso <matteo.risso@polito.it> *
#*----------------------------------------------------------------------------*
import unittest
from model.TCCNet import TCCNet
from torchinfo import summary
import torch
import json
import pdb
class TestTCCNet(unittest.TestCase):
    """Smoke tests for TCCNet construction and architecture summaries."""

    # Single source of truth for the config path; test_plain_architecture
    # previously opened 'config_NinaProDB1.json' (missing the config/
    # directory) while the other tests used 'config/config_NinaProDB1.json'.
    _CONFIG = 'config/config_NinaProDB1.json'

    def test_object_instantiation(self):
        """TCCNet can be instantiated from the NinaProDB1 configuration."""
        with open(self._CONFIG, 'r') as f:
            arguments = json.load(f)
        #learned_dil = [2, 1, 2, 16, 32, 64]
        #model = TCCNet('NinaProDB1', arguments['arch']['args'], learned_dil=learned_dil)
        model = TCCNet('NinaProDB1', arguments['arch']['args'])

    def test_plain_architecture(self):
        """Summarize the plain (non-NAS) architecture and report FLOPs."""
        with open(self._CONFIG, 'r') as f:
            arguments = json.load(f)
        model = TCCNet('NinaProDB1', arguments['arch']['args'], do_nas=False)
        summ = summary(model,
                       (30, 150, 10),
                       verbose = 2,
                       col_width = 16,
                       col_names=['kernel_size', 'input_size', 'output_size', 'num_params', 'mult_adds']
                       )
        # torchinfo counts multiply-accumulates; x2 converts MACs to FLOPs.
        print('FLOPs: {}'.format(summ.total_mult_adds * 2))

    def test_learned_architecture(self):
        """Summarize a fixed, NAS-learned architecture and report FLOPs."""
        with open(self._CONFIG, 'r') as f:
            arguments = json.load(f)
        nas_config = arguments['nas']['nas_config']
        # Per-layer hyperparameters found by a previous NAS run.
        learned_dil = [1, 1, 2, 2, 2, 2, 4]
        learned_rf = [3, 3, 7, 13, 15, 63, 45]
        learned_ch = [32, 32, 10, 31, 12, 41, 82]
        model = TCCNet('NinaProDB1', arguments['arch']['args'], do_nas=False, nas_config=nas_config,
                       learned_dil=learned_dil, learned_rf=learned_rf, learned_ch=learned_ch)
        summ = summary(model,
                       (30, 150, 10),
                       verbose = 2,
                       col_width = 16,
                       col_names=['kernel_size', 'input_size', 'output_size', 'num_params', 'mult_adds']
                       )
        # torchinfo counts multiply-accumulates; x2 converts MACs to FLOPs.
        print('FLOPs: {}'.format(summ.total_mult_adds * 2))
| 48.925373
| 100
| 0.480476
|
acfe15c1121082f8c69cce3b3f09607f77297a92
| 12,901
|
py
|
Python
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/customer_extension_setting_service/transports/grpc.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/customer_extension_setting_service/transports/grpc.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/services/services/customer_extension_setting_service/transports/grpc.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import gapic_v1 # type: ignore
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.ads.googleads.v7.resources.types import customer_extension_setting
from google.ads.googleads.v7.services.types import customer_extension_setting_service
from .base import CustomerExtensionSettingServiceTransport, DEFAULT_CLIENT_INFO
class CustomerExtensionSettingServiceGrpcTransport(CustomerExtensionSettingServiceTransport):
    """gRPC backend transport for CustomerExtensionSettingService.
    Service to manage customer extension settings.
    This class defines the same methods as the primary client, so the
    primary client can load the underlying transport implementation
    and call it.
    It sends protocol buffers over the wire using gRPC (which is built on
    top of HTTP/2); the ``grpcio`` package must be installed.
    """
    def __init__(self, *,
            host: str = 'googleads.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            credentials_file: str = None,
            scopes: Sequence[str] = None,
            channel: grpc.Channel = None,
            api_mtls_endpoint: str = None,
            client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
            ssl_channel_credentials: grpc.ChannelCredentials = None,
            quota_project_id: Optional[str] = None,
            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
            ) -> None:
        """Instantiate the transport.
        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
                This argument is ignored if ``channel`` is provided.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is ignored if ``channel`` is provided.
            scopes (Optional(Sequence[str])): A list of scopes. This argument is
                ignored if ``channel`` is provided.
            channel (Optional[grpc.Channel]): A ``Channel`` instance through
                which to make calls.
            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
                If provided, it overrides the ``host`` argument and tries to create
                a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
                Deprecated. A callback to provide client SSL certificate bytes and
                private key bytes, both in PEM format. It is ignored if
                ``api_mtls_endpoint`` is None.
            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
                for grpc channel. It is ignored if ``channel`` is provided.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
              creation failed for any reason.
        """
        self._ssl_channel_credentials = ssl_channel_credentials
        if channel:
            # Sanity check: Ensure that channel and credentials are not both
            # provided.
            # NOTE(review): ``False`` (not ``None``) is deliberately passed down
            # to the base constructor — presumably to suppress its default
            # credentials lookup; confirm against the base transport class.
            credentials = False
            # If a channel was explicitly provided, set it.
            self._grpc_channel = channel
            self._ssl_channel_credentials = None
        elif api_mtls_endpoint:
            # Deprecated mTLS path: derive the host from the mTLS endpoint and
            # build client-side SSL credentials.
            warnings.warn("api_mtls_endpoint and client_cert_source are deprecated", DeprecationWarning)
            host = api_mtls_endpoint if ":" in api_mtls_endpoint else api_mtls_endpoint + ":443"
            if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id)
            # Create SSL credentials with client_cert_source or application
            # default SSL credentials.
            if client_cert_source:
                cert, key = client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
            else:
                ssl_credentials = SslCredentials().ssl_credentials
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                credentials_file=credentials_file,
                ssl_credentials=ssl_credentials,
                scopes=scopes or self.AUTH_SCOPES,
                quota_project_id=quota_project_id,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            self._ssl_channel_credentials = ssl_credentials
        else:
            # Default path: plain TLS channel against ``host``.
            host = host if ":" in host else host + ":443"
            if credentials is None:
                credentials, _ = google.auth.default(scopes=self.AUTH_SCOPES)
            # NOTE(review): unlike the mTLS branch above, this branch does not
            # forward ``credentials_file``, ``quota_project_id`` or the caller's
            # ``scopes`` to create_channel — verify this asymmetry is intended.
            # create a new channel. The provided one is ignored.
            self._grpc_channel = type(self).create_channel(
                host,
                credentials=credentials,
                ssl_credentials=ssl_channel_credentials,
                scopes=self.AUTH_SCOPES,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
        # Stub cache: populated lazily by the RPC properties below.
        self._stubs = {}  # type: Dict[str, Callable]
        # Run the base constructor.
        super().__init__(
            host=host,
            credentials=credentials,
            client_info=client_info,
        )
    @classmethod
    def create_channel(cls,
            host: str = 'googleads.googleapis.com',
            credentials: ga_credentials.Credentials = None,
            scopes: Optional[Sequence[str]] = None,
            **kwargs) -> grpc.Channel:
        """Create and return a gRPC channel object.
        Args:
            host (Optional[str]): The host for the channel to use.
            credentials (Optional[~.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
                service. These are only used when credentials are not specified and
                are passed to :func:`google.auth.default`.
            kwargs (Optional[dict]): Keyword arguments, which are passed to the
                channel creation.
        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return grpc_helpers.create_channel(
            host,
            credentials=credentials,
            scopes=scopes or cls.AUTH_SCOPES,
            **kwargs
        )
    def close(self):
        """Close the underlying gRPC channel, releasing its connections."""
        self.grpc_channel.close()
    @property
    def grpc_channel(self) -> grpc.Channel:
        """Return the channel designed to connect to this service.
        """
        return self._grpc_channel
    @property
    def get_customer_extension_setting(self) -> Callable[
            [customer_extension_setting_service.GetCustomerExtensionSettingRequest],
            customer_extension_setting.CustomerExtensionSetting]:
        r"""Return a callable for the get customer extension setting method over gRPC.
        Returns the requested customer extension setting in full detail.
        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `HeaderError <>`__
        `InternalError <>`__ `QuotaError <>`__ `RequestError <>`__
        Returns:
            Callable[[~.GetCustomerExtensionSettingRequest],
                ~.CustomerExtensionSetting]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'get_customer_extension_setting' not in self._stubs:
            self._stubs['get_customer_extension_setting'] = self.grpc_channel.unary_unary(
                '/google.ads.googleads.v7.services.CustomerExtensionSettingService/GetCustomerExtensionSetting',
                request_serializer=customer_extension_setting_service.GetCustomerExtensionSettingRequest.serialize,
                response_deserializer=customer_extension_setting.CustomerExtensionSetting.deserialize,
            )
        return self._stubs['get_customer_extension_setting']
    @property
    def mutate_customer_extension_settings(self) -> Callable[
            [customer_extension_setting_service.MutateCustomerExtensionSettingsRequest],
            customer_extension_setting_service.MutateCustomerExtensionSettingsResponse]:
        r"""Return a callable for the mutate customer extension
        settings method over gRPC.
        Creates, updates, or removes customer extension settings.
        Operation statuses are returned.
        List of thrown errors: `AuthenticationError <>`__
        `AuthorizationError <>`__ `CollectionSizeError <>`__
        `CriterionError <>`__ `DatabaseError <>`__ `DateError <>`__
        `DistinctError <>`__ `ExtensionSettingError <>`__
        `FieldError <>`__ `HeaderError <>`__ `IdError <>`__
        `InternalError <>`__ `ListOperationError <>`__
        `MutateError <>`__ `NewResourceCreationError <>`__
        `NotEmptyError <>`__ `NullError <>`__ `OperatorError <>`__
        `QuotaError <>`__ `RangeError <>`__ `RequestError <>`__
        `SizeLimitError <>`__ `StringFormatError <>`__
        `StringLengthError <>`__ `UrlFieldError <>`__
        Returns:
            Callable[[~.MutateCustomerExtensionSettingsRequest],
                ~.MutateCustomerExtensionSettingsResponse]:
                A function that, when called, will call the underlying RPC
                on the server.
        """
        # Generate a "stub function" on-the-fly which will actually make
        # the request.
        # gRPC handles serialization and deserialization, so we just need
        # to pass in the functions for each.
        if 'mutate_customer_extension_settings' not in self._stubs:
            self._stubs['mutate_customer_extension_settings'] = self.grpc_channel.unary_unary(
                '/google.ads.googleads.v7.services.CustomerExtensionSettingService/MutateCustomerExtensionSettings',
                request_serializer=customer_extension_setting_service.MutateCustomerExtensionSettingsRequest.serialize,
                response_deserializer=customer_extension_setting_service.MutateCustomerExtensionSettingsResponse.deserialize,
            )
        return self._stubs['mutate_customer_extension_settings']
__all__ = (
    'CustomerExtensionSettingServiceGrpcTransport',
)
__all__ = (
'CustomerExtensionSettingServiceGrpcTransport',
)
| 46.574007
| 125
| 0.644679
|
acfe160a9ac125caa8a5eff1a96c40381886cfdd
| 819
|
py
|
Python
|
benchmarks.py
|
HumanCompatibleAI/population-irl
|
c0881829adb750a9e43e90ce632851eed3e3a5e5
|
[
"MIT"
] | 18
|
2018-07-26T05:36:24.000Z
|
2022-02-25T11:45:31.000Z
|
benchmarks.py
|
HumanCompatibleAI/population-irl
|
c0881829adb750a9e43e90ce632851eed3e3a5e5
|
[
"MIT"
] | 9
|
2018-04-22T22:05:22.000Z
|
2022-01-17T02:39:35.000Z
|
benchmarks.py
|
HumanCompatibleAI/population-irl
|
c0881829adb750a9e43e90ce632851eed3e3a5e5
|
[
"MIT"
] | 2
|
2019-04-20T01:09:41.000Z
|
2020-04-01T09:39:04.000Z
|
from contextlib import contextmanager
from joblib import Memory
import hermes.backend.redis
import time
import numpy as np

# Two caching backends under comparison: joblib's disk-based Memory cache and
# hermes' redis-backed cache (expects a redis server listening on port 6380).
jcache = Memory('/tmp/foo-cache').cache
hcache = hermes.Hermes(hermes.backend.redis.Backend, ttl=None, port='6380', db=0)


@jcache
def f(n):
    """Return an n x n random matrix, memoized on disk via joblib."""
    return np.random.randn(n, n)


@hcache
def g(n):
    """Return an n x n random matrix, memoized in redis via hermes."""
    return np.random.randn(n, n)


@contextmanager
def timer(label):
    """Print the wall-clock duration of the enclosed block, tagged with *label*."""
    start = time.time()
    yield
    end = time.time()
    print('{} - {}s'.format(label, end - start))


# Benchmark: first call at each size is a cache miss, second call a hit.
with timer("f miss 1000"):
    f(1000)
with timer("f hit 1000"):
    f(1000)
with timer("g miss 1000"):
    g(1000)
with timer("g hit 1000"):
    g(1000)
with timer("f miss 5000"):
    f(5000)
with timer("f hit 5000"):
    # BUG FIX: was f(1000) — that re-measured the (already cached) 1000 case,
    # so the "f hit 5000" figure never exercised the 5000-entry cache hit.
    f(5000)
with timer("g miss 5000"):
    g(5000)
with timer("g hit 5000"):
    g(5000)
| 18.2
| 81
| 0.6337
|
acfe16b3d0d0c29b4d62b804d53bd839c871bbaa
| 334
|
py
|
Python
|
1221. Split a String in Balanced Strings.py
|
sonalsrivas/Leetcode-Solutions-Oct2020
|
32ae8fba6aaf3e6ce47f7b3de13907f4d30a92ab
|
[
"MIT"
] | null | null | null |
1221. Split a String in Balanced Strings.py
|
sonalsrivas/Leetcode-Solutions-Oct2020
|
32ae8fba6aaf3e6ce47f7b3de13907f4d30a92ab
|
[
"MIT"
] | null | null | null |
1221. Split a String in Balanced Strings.py
|
sonalsrivas/Leetcode-Solutions-Oct2020
|
32ae8fba6aaf3e6ce47f7b3de13907f4d30a92ab
|
[
"MIT"
] | null | null | null |
class Solution:
    def balancedStringSplit(self, s: str) -> int:
        """Return the maximum number of balanced substrings s can be split into.

        A substring is balanced when it contains equal numbers of 'L' and 'R'.
        Greedy: track a running balance (+1 for 'L', -1 for 'R'); every time it
        returns to zero, a balanced prefix can be cut off.
        """
        balance = 0
        splits = 0
        for ch in s:
            balance += 1 if ch == 'L' else -1
            if balance == 0:
                splits += 1
        return splits
| 22.266667
| 50
| 0.338323
|
acfe16bca499ca8386ca3ad5dac423b7ceccb8f8
| 3,966
|
py
|
Python
|
kolibri/core/content/management/commands/exportcontent.py
|
techZM/kolibri
|
617e4c382e446b16a968e8add7f1766f8cd7c54a
|
[
"MIT"
] | null | null | null |
kolibri/core/content/management/commands/exportcontent.py
|
techZM/kolibri
|
617e4c382e446b16a968e8add7f1766f8cd7c54a
|
[
"MIT"
] | null | null | null |
kolibri/core/content/management/commands/exportcontent.py
|
techZM/kolibri
|
617e4c382e446b16a968e8add7f1766f8cd7c54a
|
[
"MIT"
] | 1
|
2021-07-26T11:38:29.000Z
|
2021-07-26T11:38:29.000Z
|
import logging
import os
from ...utils import import_export_content
from ...utils import paths
from ...utils import transfer
from kolibri.core.content.errors import InvalidStorageFilenameError
from kolibri.core.tasks.management.commands.base import AsyncCommand
logger = logging.getLogger(__name__)
class Command(AsyncCommand):
    """Export a channel's content files from local storage to a destination folder.

    Copies every storage file belonging to the channel (optionally filtered by
    node IDs) into ``<destination>``, reporting byte-level progress and
    supporting cooperative cancellation.
    """

    def add_arguments(self, parser):
        """Register the command-line arguments for the export command."""
        # BUG FIX: the help texts were copy-pasted from importcontent and
        # described importing; they now describe this export command.
        node_ids_help_text = """
        Specify one or more node IDs to export. Only the files associated to those node IDs will be exported.
        Make sure to call this near the end of the argument list.
        e.g.
        kolibri manage exportcontent <channel id> <destination> --node_ids <id1>,<id2>, [<ids>,...]
        """
        parser.add_argument(
            "--node_ids", "-n",
            # Split the comma separated string we get, into a list of strings
            type=lambda x: x.split(","),
            default=[],
            required=False,
            dest="node_ids",
            help=node_ids_help_text,
        )
        exclude_node_ids_help_text = """
        Specify one or more node IDs to exclude. Files associated to those node IDs will not be exported.
        Make sure to call this near the end of the argument list.
        e.g.
        kolibri manage exportcontent <channel id> <destination> --exclude_node_ids <id1>,<id2>, [<ids>,...]
        """
        parser.add_argument(
            "--exclude_node_ids",
            type=lambda x: x.split(","),
            default=[],
            required=False,
            dest="exclude_node_ids",
            help=exclude_node_ids_help_text
        )
        parser.add_argument("channel_id", type=str)
        parser.add_argument("destination", type=str)

    def handle_async(self, *args, **options):
        """Copy the channel's storage files to the destination, with progress.

        Skips files with invalid storage names and files already present at the
        destination with the expected size. On cancellation, removes any files
        that were fully exported during this run.
        """
        channel_id = options["channel_id"]
        data_dir = os.path.realpath(options["destination"])
        node_ids = options["node_ids"]
        exclude_node_ids = options["exclude_node_ids"]
        logger.info("Exporting content for channel id {} to {}".format(channel_id, data_dir))
        files, total_bytes_to_transfer = import_export_content.get_files_to_transfer(
            channel_id, node_ids, exclude_node_ids, True)
        exported_files = []
        with self.start_progress(total=total_bytes_to_transfer) as overall_progress_update:
            for f in files:
                if self.is_cancelled():
                    break
                filename = f.get_filename()
                try:
                    srcpath = paths.get_content_storage_file_path(filename)
                    dest = paths.get_content_storage_file_path(filename, datafolder=data_dir)
                except InvalidStorageFilenameError:
                    # If any files have an invalid storage file name, don't export them.
                    overall_progress_update(f.file_size)
                    continue
                # if the file already exists, add its size to our overall progress, and skip
                if os.path.isfile(dest) and os.path.getsize(dest) == f.file_size:
                    overall_progress_update(f.file_size)
                    continue
                copy = transfer.FileCopy(srcpath, dest)
                with copy:
                    with self.start_progress(total=copy.total_size) as file_cp_progress_update:
                        for chunk in copy:
                            if self.is_cancelled():
                                copy.cancel()
                                break
                            length = len(chunk)
                            overall_progress_update(length)
                            file_cp_progress_update(length)
                        else:
                            # for/else: only record the file when the copy
                            # loop ran to completion (was not cancelled).
                            exported_files.append(dest)
        if self.is_cancelled():
            # Cancelled: clean up any files already exported during this run.
            for dest in exported_files:
                os.remove(dest)
            self.cancel()
| 36.385321
| 109
| 0.577156
|
acfe1718fc564218c658dc2e635a252ef7bbd619
| 4,604
|
py
|
Python
|
electrumx/lib/server_base.py
|
cyppper/electrumx-wayf
|
376e545ddc36635a99f4c6db6e427efa02993e2f
|
[
"MIT"
] | null | null | null |
electrumx/lib/server_base.py
|
cyppper/electrumx-wayf
|
376e545ddc36635a99f4c6db6e427efa02993e2f
|
[
"MIT"
] | null | null | null |
electrumx/lib/server_base.py
|
cyppper/electrumx-wayf
|
376e545ddc36635a99f4c6db6e427efa02993e2f
|
[
"MIT"
] | 1
|
2021-12-14T16:29:01.000Z
|
2021-12-14T16:29:01.000Z
|
# Copyright (c) 2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Base class of servers'''
import asyncio
import os
import platform
import re
import signal
import sys
import time
from contextlib import suppress
from functools import partial
from typing import TYPE_CHECKING
from aiorpcx import spawn
from electrumx.lib.util import class_logger
if TYPE_CHECKING:
from electrumx.server.env import Env
class ServerBase:
    '''Base class server implementation.
    Derived classes are expected to:
    - set PYTHON_MIN_VERSION and SUPPRESS_MESSAGE_REGEX as appropriate
    - implement the serve() coroutine, called from the run() method.
    Upon return the event loop runs until the shutdown signal is received.
    '''
    # Regexes for noisy asyncio/SSL messages that are suppressed in
    # on_exception() below.
    SUPPRESS_MESSAGE_REGEX = re.compile('SSL handshake|Fatal read error on|'
                                        'SSL error in data received|'
                                        'socket.send() raised exception')
    SUPPRESS_TASK_REGEX = re.compile('accept_connection2')
    PYTHON_MIN_VERSION = (3, 7)
    def __init__(self, env: 'Env'):
        '''Save the environment, perform basic sanity checks, and set the
        event loop policy.

        Raises:
            RuntimeError: if the Python version is too old, or if running as
                root without the ALLOW_ROOT override.
        '''
        # First asyncio operation must be to set the event loop policy
        # as this replaces the event loop
        asyncio.set_event_loop_policy(env.loop_policy)
        self.logger = class_logger(__name__, self.__class__.__name__)
        version_str = ' '.join(sys.version.splitlines())
        self.logger.info(f'Python version: {version_str}')
        self.env = env
        self.start_time = 0
        # Sanity checks
        if sys.version_info < self.PYTHON_MIN_VERSION:
            mvs = '.'.join(str(part) for part in self.PYTHON_MIN_VERSION)
            raise RuntimeError(f'Python version >= {mvs} is required')
        if platform.system() == 'Windows':
            # Windows has no geteuid(); skip the root check entirely.
            pass
        elif os.geteuid() == 0 and not env.allow_root:
            # NOTE(review): "shoud" typo below is part of the runtime message;
            # left unchanged here since this edit touches comments only.
            raise RuntimeError('RUNNING AS ROOT IS STRONGLY DISCOURAGED!\n'
                               'You shoud create an unprivileged user account '
                               'and use that.\n'
                               'To continue as root anyway, restart with '
                               'environment variable ALLOW_ROOT non-empty')
    async def serve(self, shutdown_event: asyncio.Event):
        '''Override to provide the main server functionality.
        Run as a task that will be cancelled to request shutdown.
        Setting the event also shuts down the server.
        '''
    def on_exception(self, loop, context):
        '''Suppress spurious messages it appears we cannot control.'''
        message = context.get('message')
        if message and self.SUPPRESS_MESSAGE_REGEX.match(message):
            return
        if self.SUPPRESS_TASK_REGEX.match(repr(context.get('task'))):
            return
        # Anything not matched above is handled by asyncio's default handler.
        loop.default_exception_handler(context)
    async def run(self):
        '''Run the server application:
        - record start time
        - install SIGINT and SIGTERM handlers to trigger shutdown_event
        - set loop's exception handler to suppress unwanted messages
        - run the event loop until serve() completes
        '''
        # Both closures capture shutdown_event, which is bound later in this
        # coroutine — safe because they only run after the binding exists.
        def on_signal(signame):
            shutdown_event.set()
            self.logger.warning(f'received {signame} signal, initiating shutdown')
        async def serve():
            try:
                await self.serve(shutdown_event)
            finally:
                # Ensure shutdown proceeds even if serve() raises or returns.
                shutdown_event.set()
        self.start_time = time.time()
        loop = asyncio.get_event_loop()
        shutdown_event = asyncio.Event()
        if platform.system() != 'Windows':
            # No signals on Windows
            for signame in ('SIGINT', 'SIGTERM'):
                loop.add_signal_handler(getattr(signal, signame),
                                        partial(on_signal, signame))
        loop.set_exception_handler(self.on_exception)
        # Start serving and wait for shutdown, log receipt of the event
        server_task = await spawn(serve, daemon=True)
        try:
            await shutdown_event.wait()
        except KeyboardInterrupt:
            self.logger.warning('received keyboard interrupt, initiating shutdown')
        self.logger.info('shutting down')
        server_task.cancel()
        try:
            with suppress(asyncio.CancelledError):
                await server_task
        finally:
            self.logger.info('shutdown complete')
| 34.878788
| 83
| 0.624674
|
acfe1747e5b1d4a06ef0d4a731a54864d5ccd72f
| 3,498
|
py
|
Python
|
fastestimator/op/tensorop/meta/fuse.py
|
Phillistan16/fastestimator
|
54c9254098aee89520814ed54b6e6016b821424f
|
[
"Apache-2.0"
] | null | null | null |
fastestimator/op/tensorop/meta/fuse.py
|
Phillistan16/fastestimator
|
54c9254098aee89520814ed54b6e6016b821424f
|
[
"Apache-2.0"
] | null | null | null |
fastestimator/op/tensorop/meta/fuse.py
|
Phillistan16/fastestimator
|
54c9254098aee89520814ed54b6e6016b821424f
|
[
"Apache-2.0"
] | 1
|
2020-04-28T12:16:10.000Z
|
2020-04-28T12:16:10.000Z
|
# Copyright 2019 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Any, Dict, List, Optional, Set, TypeVar, Union
import tensorflow as tf
import torch
from fastestimator.network import BaseNetwork
from fastestimator.op.tensorop.tensorop import TensorOp
from fastestimator.util.traceability_util import traceable
from fastestimator.util.util import to_list
Tensor = TypeVar('Tensor', tf.Tensor, torch.Tensor)
Model = TypeVar('Model', tf.keras.Model, torch.nn.Module)
@traceable()
class Fuse(TensorOp):
    """Run a sequence of TensorOps as a single Op.
    Args:
        ops: A sequence of TensorOps to run. They must all share the same mode. It also doesn't support scheduled ops at
            the moment, though the subnet itself may be scheduled.
    Raises:
        ValueError: If `ops` are invalid.
    """
    def __init__(self, ops: Union[TensorOp, List[TensorOp]]) -> None:
        ops = to_list(ops)
        if len(ops) < 1:
            raise ValueError("Fuse requires at least one op")
        # Aggregate the fused op's interface: inputs are keys consumed before
        # any earlier op in the chain produces them; outputs are all produced keys.
        inputs = []
        outputs = []
        mode = ops[0].mode
        self.last_retain_idx = 0
        self.models = set()
        self.loss_keys = set()
        for idx, op in enumerate(ops):
            if op.mode != mode:
                raise ValueError(f"All Fuse ops must share the same mode, but got {mode} and {op.mode}")
            for inp in op.inputs:
                if inp not in inputs and inp not in outputs:
                    inputs.append(inp)
            for out in op.outputs:
                if out not in outputs:
                    outputs.append(out)
            if op.fe_retain_graph(True) is not None:  # Set all of the internal ops to retain
                self.last_retain_idx = idx  # Keep tabs on the last one since it might be set to False
            self.models |= op.get_fe_models()
            self.loss_keys |= op.get_fe_loss_keys()
        super().__init__(inputs=inputs, outputs=outputs, mode=mode)
        self.ops = ops
    def build(self, framework: str, device: Optional[torch.device] = None) -> None:
        # Forward the build call to every wrapped op.
        for op in self.ops:
            op.build(framework, device)
    def get_fe_models(self) -> Set[Model]:
        # Union of models gathered from the wrapped ops in __init__.
        return self.models
    def get_fe_loss_keys(self) -> Set[str]:
        # Union of loss keys gathered from the wrapped ops in __init__.
        return self.loss_keys
    def fe_retain_graph(self, retain: Optional[bool] = None) -> Optional[bool]:
        # Delegate to the last graph-retaining op so that only the final
        # backward pass can release the graph.
        return self.ops[self.last_retain_idx].fe_retain_graph(retain)
    def __getstate__(self) -> Dict[str, List[Dict[Any, Any]]]:
        # Serialize only the wrapped ops' states (used by traceability).
        return {'ops': [elem.__getstate__() if hasattr(elem, '__getstate__') else {} for elem in self.ops]}
    def forward(self, data: List[Tensor], state: Dict[str, Any]) -> List[Tensor]:
        # Re-key positional inputs, run the fused chain via the network's
        # batch-forward helper, then return outputs in declared order.
        data = {key: elem for key, elem in zip(self.inputs, data)}
        BaseNetwork._forward_batch(data, state, self.ops)
        return [data[key] for key in self.outputs]
| 40.674419
| 120
| 0.641795
|
acfe182f820564d5789e2bba482a4d054167270d
| 24,157
|
py
|
Python
|
tests/sagemaker/mock/test_sagemaker_service_mock.py
|
brucebcampbell/mlflow
|
9aca8e27198f16ce4fa1e7a0a502554f2f81068b
|
[
"Apache-2.0"
] | 10,351
|
2018-07-31T02:52:49.000Z
|
2022-03-31T23:33:13.000Z
|
tests/sagemaker/mock/test_sagemaker_service_mock.py
|
brucebcampbell/mlflow
|
9aca8e27198f16ce4fa1e7a0a502554f2f81068b
|
[
"Apache-2.0"
] | 3,733
|
2018-07-31T01:38:51.000Z
|
2022-03-31T23:56:25.000Z
|
tests/sagemaker/mock/test_sagemaker_service_mock.py
|
brucebcampbell/mlflow
|
9aca8e27198f16ce4fa1e7a0a502554f2f81068b
|
[
"Apache-2.0"
] | 2,596
|
2018-07-31T06:38:39.000Z
|
2022-03-31T23:56:32.000Z
|
import boto3
import pytest
from tests.helper_functions import set_boto_credentials # pylint: disable=unused-import
from tests.sagemaker.mock import mock_sagemaker
@pytest.fixture
def sagemaker_client():
    """Return a boto3 SageMaker client pinned to us-west-2 for these tests."""
    return boto3.client("sagemaker", region_name="us-west-2")
def create_sagemaker_model(sagemaker_client, model_name):
    """Create a minimal SageMaker model named *model_name* via *sagemaker_client*.

    Uses fixed sample values for the execution role and container image;
    returns the raw create_model response.
    """
    primary_container = {
        "Image": "012345678910.dkr.ecr.us-west-2.amazonaws.com/sample-container",
    }
    return sagemaker_client.create_model(
        ModelName=model_name,
        ExecutionRoleArn="arn:aws:iam::012345678910:role/sample-role",
        PrimaryContainer=primary_container,
    )
def create_endpoint_config(sagemaker_client, endpoint_config_name, model_name):
    """Create a single-variant SageMaker endpoint config for *model_name*.

    Returns the raw create_endpoint_config response.
    """
    variant = {
        "VariantName": "sample-variant",
        "ModelName": model_name,
        "InitialInstanceCount": 1,
        "InstanceType": "ml.m4.xlarge",
        "InitialVariantWeight": 1.0,
    }
    return sagemaker_client.create_endpoint_config(
        EndpointConfigName=endpoint_config_name,
        ProductionVariants=[variant],
    )
@mock_sagemaker
def test_created_model_is_listed_by_list_models_function(sagemaker_client):
    """A model created via create_model appears in list_models output."""
    model_name = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
    models_response = sagemaker_client.list_models()
    assert "Models" in models_response
    models = models_response["Models"]
    assert all(["ModelName" in model for model in models])
    assert model_name in [model["ModelName"] for model in models]
@mock_sagemaker
def test_create_model_returns_arn_containing_model_name(sagemaker_client):
    """create_model responds with a ModelArn embedding the model name."""
    model_name = "sample-model"
    model_create_response = create_sagemaker_model(
        sagemaker_client=sagemaker_client, model_name=model_name
    )
    assert "ModelArn" in model_create_response
    assert model_name in model_create_response["ModelArn"]
@mock_sagemaker
def test_creating_model_with_name_already_in_use_raises_exception(sagemaker_client):
    """Creating two models with the same name raises ValueError from the mock."""
    model_name = "sample-model-name"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
    with pytest.raises(ValueError):
        create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
@mock_sagemaker
def test_all_models_are_listed_after_creating_many_models(sagemaker_client):
    """All 100 created models are present in list_models output."""
    model_names = []
    for i in range(100):
        model_name = "sample-model-{idx}".format(idx=i)
        model_names.append(model_name)
        create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
    listed_models = sagemaker_client.list_models()["Models"]
    listed_model_names = [model["ModelName"] for model in listed_models]
    for model_name in model_names:
        assert model_name in listed_model_names
@mock_sagemaker
def test_describe_model_response_contains_expected_attributes(sagemaker_client):
    """describe_model echoes back the name, role ARN and primary container."""
    model_name = "sample-model"
    execution_role_arn = "arn:aws:iam::012345678910:role/sample-role"
    primary_container = {
        "Image": "012345678910.dkr.ecr.us-west-2.amazonaws.com/sample-container",
    }
    sagemaker_client.create_model(
        ModelName=model_name,
        ExecutionRoleArn=execution_role_arn,
        PrimaryContainer=primary_container,
    )
    describe_model_response = sagemaker_client.describe_model(ModelName=model_name)
    assert "CreationTime" in describe_model_response
    assert "ModelArn" in describe_model_response
    assert "ExecutionRoleArn" in describe_model_response
    assert describe_model_response["ExecutionRoleArn"] == execution_role_arn
    assert "ModelName" in describe_model_response
    assert describe_model_response["ModelName"] == model_name
    assert "PrimaryContainer" in describe_model_response
    assert describe_model_response["PrimaryContainer"] == primary_container
@mock_sagemaker
def test_describe_model_throws_exception_for_nonexistent_model(sagemaker_client):
    """describe_model on an unknown name raises ValueError from the mock."""
    with pytest.raises(ValueError):
        sagemaker_client.describe_model(ModelName="nonexistent-model")
@mock_sagemaker
def test_model_is_no_longer_listed_after_deletion(sagemaker_client):
    """delete_model removes the model from list_models output."""
    model_name = "sample-model-name"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
    sagemaker_client.delete_model(ModelName=model_name)
    listed_models = sagemaker_client.list_models()["Models"]
    listed_model_names = [model["ModelName"] for model in listed_models]
    assert model_name not in listed_model_names
@mock_sagemaker
def test_created_endpoint_config_is_listed_by_list_endpoints_function(sagemaker_client):
    """A created endpoint config appears in list_endpoint_configs output."""
    model_name = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
    endpoint_config_name = "sample-config"
    create_endpoint_config(
        sagemaker_client=sagemaker_client,
        endpoint_config_name=endpoint_config_name,
        model_name=model_name,
    )
    endpoint_configs_response = sagemaker_client.list_endpoint_configs()
    assert "EndpointConfigs" in endpoint_configs_response
    endpoint_configs = endpoint_configs_response["EndpointConfigs"]
    assert all(["EndpointConfigName" in endpoint_config for endpoint_config in endpoint_configs])
    assert endpoint_config_name in [
        endpoint_config["EndpointConfigName"] for endpoint_config in endpoint_configs
    ]
@mock_sagemaker
def test_create_endpoint_config_returns_arn_containing_config_name(sagemaker_client):
    """create_endpoint_config responds with an ARN embedding the config name."""
    model_name = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
    endpoint_config_name = "sample-config"
    create_config_response = create_endpoint_config(
        sagemaker_client=sagemaker_client,
        endpoint_config_name=endpoint_config_name,
        model_name=model_name,
    )
    assert "EndpointConfigArn" in create_config_response
    assert endpoint_config_name in create_config_response["EndpointConfigArn"]
@mock_sagemaker
def test_creating_endpoint_config_with_name_already_in_use_raises_exception(sagemaker_client):
    """Creating two endpoint configs with the same name raises ValueError."""
    model_name = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
    endpoint_config_name = "sample-config"
    create_endpoint_config(
        sagemaker_client=sagemaker_client,
        endpoint_config_name=endpoint_config_name,
        model_name=model_name,
    )
    with pytest.raises(ValueError):
        create_endpoint_config(
            sagemaker_client=sagemaker_client,
            endpoint_config_name=endpoint_config_name,
            model_name=model_name,
        )
@mock_sagemaker
def test_all_endpoint_configs_are_listed_after_creating_many_configs(sagemaker_client):
    """All 100 created endpoint configs are present in list_endpoint_configs."""
    model_name = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
    endpoint_config_names = []
    for i in range(100):
        endpoint_config_name = "sample-config-{idx}".format(idx=i)
        endpoint_config_names.append(endpoint_config_name)
        create_endpoint_config(
            sagemaker_client=sagemaker_client,
            endpoint_config_name=endpoint_config_name,
            model_name=model_name,
        )
    listed_endpoint_configs = sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
    listed_endpoint_config_names = [
        endpoint_config["EndpointConfigName"] for endpoint_config in listed_endpoint_configs
    ]
    for endpoint_config_name in endpoint_config_names:
        assert endpoint_config_name in listed_endpoint_config_names
@mock_sagemaker
def test_describe_endpoint_config_response_contains_expected_attributes(sagemaker_client):
    """describe_endpoint_config echoes back the config name and variants."""
    model_name = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
    endpoint_config_name = "sample-config"
    production_variants = [
        {
            "VariantName": "sample-variant",
            "ModelName": model_name,
            "InitialInstanceCount": 1,
            "InstanceType": "ml.m4.xlarge",
            "InitialVariantWeight": 1.0,
        },
    ]
    sagemaker_client.create_endpoint_config(
        EndpointConfigName=endpoint_config_name, ProductionVariants=production_variants,
    )
    describe_endpoint_config_response = sagemaker_client.describe_endpoint_config(
        EndpointConfigName=endpoint_config_name
    )
    assert "CreationTime" in describe_endpoint_config_response
    assert "EndpointConfigArn" in describe_endpoint_config_response
    assert "EndpointConfigName" in describe_endpoint_config_response
    assert describe_endpoint_config_response["EndpointConfigName"] == endpoint_config_name
    assert "ProductionVariants" in describe_endpoint_config_response
    assert describe_endpoint_config_response["ProductionVariants"] == production_variants
@mock_sagemaker
def test_describe_endpoint_config_throws_exception_for_nonexistent_config(sagemaker_client):
    """describe_endpoint_config on an unknown name raises ValueError."""
    with pytest.raises(ValueError):
        sagemaker_client.describe_endpoint_config(EndpointConfigName="nonexistent-config")
@mock_sagemaker
def test_endpoint_config_is_no_longer_listed_after_deletion(sagemaker_client):
    """delete_endpoint_config removes the config from list_endpoint_configs."""
    model_name = "sample-model-name"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
    endpoint_config_name = "sample-config"
    create_endpoint_config(
        sagemaker_client=sagemaker_client,
        endpoint_config_name=endpoint_config_name,
        model_name=model_name,
    )
    sagemaker_client.delete_endpoint_config(EndpointConfigName=endpoint_config_name)
    listed_endpoint_configs = sagemaker_client.list_endpoint_configs()["EndpointConfigs"]
    listed_endpoint_config_names = [
        endpoint_config["EndpointConfigName"] for endpoint_config in listed_endpoint_configs
    ]
    assert endpoint_config_name not in listed_endpoint_config_names
@mock_sagemaker
def test_created_endpoint_is_listed_by_list_endpoints_function(sagemaker_client):
model_name = "sample-model"
create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model_name)
endpoint_config_name = "sample-config"
create_endpoint_config(
sagemaker_client=sagemaker_client,
endpoint_config_name=endpoint_config_name,
model_name=model_name,
)
endpoint_name = "sample-endpoint"
sagemaker_client.create_endpoint(
EndpointConfigName=endpoint_config_name,
EndpointName=endpoint_name,
Tags=[{"Key": "Some Key", "Value": "Some Value"}],
)
endpoints_response = sagemaker_client.list_endpoints()
assert "Endpoints" in endpoints_response
endpoints = endpoints_response["Endpoints"]
assert all(["EndpointName" in endpoint for endpoint in endpoints])
assert endpoint_name in [endpoint["EndpointName"] for endpoint in endpoints]
@mock_sagemaker
def test_create_endpoint_returns_arn_containing_endpoint_name(sagemaker_client):
    """The ARN returned by create_endpoint must embed the endpoint name."""
    model = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)
    config_name = "sample-config"
    create_endpoint_config(
        sagemaker_client=sagemaker_client,
        endpoint_config_name=config_name,
        model_name=model,
    )

    endpoint_name = "sample-endpoint"
    response = sagemaker_client.create_endpoint(
        EndpointConfigName=config_name,
        EndpointName=endpoint_name,
        Tags=[{"Key": "Some Key", "Value": "Some Value"}],
    )
    assert "EndpointArn" in response
    assert endpoint_name in response["EndpointArn"]
@mock_sagemaker
def test_creating_endpoint_with_name_already_in_use_raises_exception(sagemaker_client):
    """Creating two endpoints with the same name must fail on the second call."""
    model = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)
    config_name = "sample-config"
    create_endpoint_config(
        sagemaker_client=sagemaker_client,
        endpoint_config_name=config_name,
        model_name=model,
    )

    endpoint_kwargs = dict(
        EndpointConfigName=config_name,
        EndpointName="sample-endpoint",
        Tags=[{"Key": "Some Key", "Value": "Some Value"}],
    )
    sagemaker_client.create_endpoint(**endpoint_kwargs)
    with pytest.raises(ValueError):
        sagemaker_client.create_endpoint(**endpoint_kwargs)
@mock_sagemaker
def test_all_endpoint_are_listed_after_creating_many_endpoints(sagemaker_client):
    """Every endpoint created in bulk must appear in list_endpoints."""
    model = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)
    config_name = "sample-config"
    create_endpoint_config(
        sagemaker_client=sagemaker_client,
        endpoint_config_name=config_name,
        model_name=model,
    )

    created_names = ["sample-endpoint-{0}".format(index) for index in range(100)]
    for name in created_names:
        sagemaker_client.create_endpoint(
            EndpointConfigName=config_name,
            EndpointName=name,
            Tags=[{"Key": "Some Key", "Value": "Some Value"}],
        )

    listed_names = {
        endpoint["EndpointName"]
        for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
    }
    for name in created_names:
        assert name in listed_names
@mock_sagemaker
def test_describe_endpoint_response_contains_expected_attributes(sagemaker_client):
    """describe_endpoint must report timing, ARN, status and variant fields."""
    model = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)
    config_name = "sample-config"
    production_variants = [
        {
            "VariantName": "sample-variant",
            "ModelName": model,
            "InitialInstanceCount": 1,
            "InstanceType": "ml.m4.xlarge",
            "InitialVariantWeight": 1.0,
        },
    ]
    sagemaker_client.create_endpoint_config(
        EndpointConfigName=config_name, ProductionVariants=production_variants,
    )
    endpoint_name = "sample-endpoint"
    sagemaker_client.create_endpoint(
        EndpointName=endpoint_name, EndpointConfigName=config_name,
    )

    response = sagemaker_client.describe_endpoint(EndpointName=endpoint_name)
    for attribute in (
        "CreationTime",
        "LastModifiedTime",
        "EndpointArn",
        "EndpointStatus",
        "ProductionVariants",
    ):
        assert attribute in response
@mock_sagemaker
def test_describe_endpoint_throws_exception_for_nonexistent_endpoint(sagemaker_client):
    """Describing an endpoint that was never created must raise ValueError."""
    missing_endpoint_name = "nonexistent-endpoint"
    with pytest.raises(ValueError):
        sagemaker_client.describe_endpoint(EndpointName=missing_endpoint_name)
@mock_sagemaker
def test_endpoint_is_no_longer_listed_after_deletion(sagemaker_client):
    """A deleted endpoint must disappear from list_endpoints."""
    model = "sample-model-name"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)
    config_name = "sample-config"
    create_endpoint_config(
        sagemaker_client=sagemaker_client,
        endpoint_config_name=config_name,
        model_name=model,
    )
    endpoint_name = "sample-endpoint"
    sagemaker_client.create_endpoint(
        EndpointConfigName=config_name, EndpointName=endpoint_name,
    )

    sagemaker_client.delete_endpoint(EndpointName=endpoint_name)

    remaining_names = {
        endpoint["EndpointName"]
        for endpoint in sagemaker_client.list_endpoints()["Endpoints"]
    }
    assert endpoint_name not in remaining_names
@mock_sagemaker
def test_update_endpoint_modifies_config_correctly(sagemaker_client):
    """update_endpoint must switch the endpoint to the newly supplied config."""
    model = "sample-model-name"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)
    first_config = "sample-config-1"
    second_config = "sample-config-2"
    for config_name in (first_config, second_config):
        create_endpoint_config(
            sagemaker_client=sagemaker_client,
            endpoint_config_name=config_name,
            model_name=model,
        )
    endpoint_name = "sample-endpoint"
    sagemaker_client.create_endpoint(
        EndpointConfigName=first_config, EndpointName=endpoint_name,
    )
    before = sagemaker_client.describe_endpoint(EndpointName=endpoint_name)
    assert before["EndpointConfigName"] == first_config

    sagemaker_client.update_endpoint(
        EndpointName=endpoint_name, EndpointConfigName=second_config
    )

    after = sagemaker_client.describe_endpoint(EndpointName=endpoint_name)
    assert after["EndpointConfigName"] == second_config
@mock_sagemaker
def test_update_endpoint_with_nonexistent_config_throws_exception(sagemaker_client):
    """Switching an endpoint to an unknown config must raise ValueError."""
    model = "sample-model-name"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)
    config_name = "sample-config"
    create_endpoint_config(
        sagemaker_client=sagemaker_client,
        endpoint_config_name=config_name,
        model_name=model,
    )
    endpoint_name = "sample-endpoint"
    sagemaker_client.create_endpoint(
        EndpointConfigName=config_name, EndpointName=endpoint_name,
    )

    with pytest.raises(ValueError):
        sagemaker_client.update_endpoint(
            EndpointName=endpoint_name, EndpointConfigName="nonexistent-config"
        )
@mock_sagemaker
def test_created_transform_job_is_listed_by_list_transform_jobs_function(sagemaker_client):
    """A created transform job must appear in list_transform_jobs."""
    model = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)

    job_name = "sample-job"
    sagemaker_client.create_transform_job(
        TransformJobName=job_name,
        ModelName=model,
        TransformInput={
            "DataSource": {
                "S3DataSource": {"S3DataType": "Some Data Type", "S3Uri": "Some Input Uri"}
            }
        },
        TransformOutput={"S3OutputPath": "Some Output Path"},
        TransformResources={"InstanceType": "Some Instance Type", "InstanceCount": 1},
        Tags=[{"Key": "Some Key", "Value": "Some Value"}],
    )

    response = sagemaker_client.list_transform_jobs()
    assert "TransformJobSummaries" in response
    summaries = response["TransformJobSummaries"]
    assert all("TransformJobName" in summary for summary in summaries)
    assert job_name in [summary["TransformJobName"] for summary in summaries]
@mock_sagemaker
def test_create_transform_job_returns_arn_containing_transform_job_name(sagemaker_client):
    """The ARN returned by create_transform_job must embed the job name."""
    model = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)

    job_name = "sample-job"
    response = sagemaker_client.create_transform_job(
        TransformJobName=job_name,
        ModelName=model,
        TransformInput={
            "DataSource": {
                "S3DataSource": {"S3DataType": "Some Data Type", "S3Uri": "Some Input Uri"}
            }
        },
        TransformOutput={"S3OutputPath": "Some Output Path"},
        TransformResources={"InstanceType": "Some Instance Type", "InstanceCount": 1},
        Tags=[{"Key": "Some Key", "Value": "Some Value"}],
    )
    assert "TransformJobArn" in response
    assert job_name in response["TransformJobArn"]
@mock_sagemaker
def test_creating_transform_job_with_name_already_in_use_raises_exception(sagemaker_client):
    """Creating two transform jobs with the same name must fail on the second call."""
    model = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)

    job_kwargs = dict(
        TransformJobName="sample-job",
        ModelName=model,
        TransformInput={
            "DataSource": {
                "S3DataSource": {"S3DataType": "Some Data Type", "S3Uri": "Some Input Uri"}
            }
        },
        TransformOutput={"S3OutputPath": "Some Output Path"},
        TransformResources={"InstanceType": "Some Instance Type", "InstanceCount": 1},
        Tags=[{"Key": "Some Key", "Value": "Some Value"}],
    )
    sagemaker_client.create_transform_job(**job_kwargs)
    with pytest.raises(ValueError):
        sagemaker_client.create_transform_job(**job_kwargs)
@mock_sagemaker
def test_all_transform_jobs_are_listed_after_creating_many_transform_jobs(sagemaker_client):
    """Every transform job created in bulk must appear in list_transform_jobs."""
    model = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)
    transform_input = {
        "DataSource": {
            "S3DataSource": {"S3DataType": "Some Data Type", "S3Uri": "Some Input Uri"}
        }
    }
    transform_output = {"S3OutputPath": "Some Output Path"}
    transform_resources = {"InstanceType": "Some Instance Type", "InstanceCount": 1}

    created_names = ["sample-job-{0}".format(index) for index in range(100)]
    for name in created_names:
        sagemaker_client.create_transform_job(
            TransformJobName=name,
            ModelName=model,
            TransformInput=transform_input,
            TransformOutput=transform_output,
            TransformResources=transform_resources,
            Tags=[{"Key": "Some Key", "Value": "Some Value"}],
        )

    listed_names = {
        summary["TransformJobName"]
        for summary in sagemaker_client.list_transform_jobs()["TransformJobSummaries"]
    }
    for name in created_names:
        assert name in listed_names
@mock_sagemaker
def test_describe_transform_job_response_contains_expected_attributes(sagemaker_client):
    """describe_transform_job must report name, timing, ARN, status and model."""
    model = "sample-model"
    create_sagemaker_model(sagemaker_client=sagemaker_client, model_name=model)

    job_name = "sample-job"
    sagemaker_client.create_transform_job(
        TransformJobName=job_name,
        ModelName=model,
        TransformInput={
            "DataSource": {
                "S3DataSource": {"S3DataType": "Some Data Type", "S3Uri": "Some Input Uri"}
            }
        },
        TransformOutput={"S3OutputPath": "Some Output Path"},
        TransformResources={"InstanceType": "Some Instance Type", "InstanceCount": 1},
        Tags=[{"Key": "Some Key", "Value": "Some Value"}],
    )

    response = sagemaker_client.describe_transform_job(TransformJobName=job_name)
    for attribute in (
        "TransformJobName",
        "CreationTime",
        "TransformJobArn",
        "TransformJobStatus",
        "ModelName",
    ):
        assert attribute in response
@mock_sagemaker
def test_describe_transform_job_throws_exception_for_nonexistent_transform_job(sagemaker_client):
    """Describing a transform job that was never created must raise ValueError."""
    with pytest.raises(ValueError):
        sagemaker_client.describe_transform_job(TransformJobName="nonexistent-job")
| 36.109118
| 99
| 0.754729
|
acfe186166ae756c23073434c5dc7207b0905a2a
| 7,815
|
py
|
Python
|
examples/dockpot/hpfeeds/hpfeeds.py
|
connectthefuture/docker-hacks
|
d7ea13522188233d5e8a97179d2b0a872239f58d
|
[
"MIT"
] | null | null | null |
examples/dockpot/hpfeeds/hpfeeds.py
|
connectthefuture/docker-hacks
|
d7ea13522188233d5e8a97179d2b0a872239f58d
|
[
"MIT"
] | 1
|
2021-03-20T04:49:20.000Z
|
2021-03-20T04:49:20.000Z
|
examples/dockpot/hpfeeds/hpfeeds.py
|
connectthefuture/docker-hacks
|
d7ea13522188233d5e8a97179d2b0a872239f58d
|
[
"MIT"
] | null | null | null |
from twisted.python import log
from twisted.internet import threads
import os
import struct
import hashlib
import json
import socket
import uuid
import datetime
BUFSIZ = 16384
OP_ERROR = 0
OP_INFO = 1
OP_AUTH = 2
OP_PUBLISH = 3
OP_SUBSCRIBE = 4
MAXBUF = 1024**2
SIZES = {
OP_ERROR: 5+MAXBUF,
OP_INFO: 5+256+20,
OP_AUTH: 5+256+20,
OP_PUBLISH: 5+MAXBUF,
OP_SUBSCRIBE: 5+256*2,
}
HONSSHAUTHCHAN = 'honssh.auth'
HONSSHSESHCHAN = 'honssh.sessions'
class BadClient(Exception):
    """Raised when the broker/peer violates the hpfeeds protocol (e.g. oversized frame)."""
    pass
# packs a string with 1 byte length field
def strpack8(x):
    """Prefix *x* with a single length byte (so len(x) must fit in 0-255)."""
    if isinstance(x, str):
        x = x.encode('latin1')
    return struct.pack('!B', len(x)) + x
# unpacks a string with 1 byte length field
def strunpack8(x):
    """Split a length-prefixed field off the front of *x*; return (field, rest)."""
    length = x[0]
    return x[1:1 + length], x[1 + length:]
def msghdr(op, data):
    """Frame *data* in the hpfeeds wire format: !i total length (incl. 5-byte header) + !B opcode."""
    total_len = 5 + len(data)
    return struct.pack('!iB', total_len, op) + data
def msgpublish(ident, chan, data):
    """Build a PUBLISH frame: ident and channel are length-prefixed, payload is raw."""
    payload = strpack8(ident) + strpack8(chan) + data
    return msghdr(OP_PUBLISH, payload)
def msgsubscribe(ident, chan):
    """Build a SUBSCRIBE frame; note the channel is appended raw (no length prefix)."""
    if isinstance(chan, str):
        chan = chan.encode('latin1')
    return msghdr(OP_SUBSCRIBE, strpack8(ident) + chan)
def msgauth(rand, ident, secret):
    """Build an AUTH frame: length-prefixed ident + SHA1(server nonce + secret)."""
    digest = hashlib.sha1(bytes(rand) + secret).digest()
    return msghdr(OP_AUTH, strpack8(ident) + digest)
class FeedUnpack(object):
    """Incremental frame decoder for the hpfeeds wire protocol.

    Feed raw socket bytes in with feed(); iterate over the object to pull
    out complete (opcode, payload) frames. Iteration ends (StopIteration)
    as soon as the buffer no longer holds a full frame.

    NOTE(review): relies on the Python-2-only ``buffer()`` builtin and the
    Python 2 ``next()`` iterator protocol; not Python-3 compatible as-is.
    """
    def __init__(self):
        self.buf = bytearray()
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol; unpack() raises StopIteration when
        # the buffer is starved, which cleanly ends a for-loop over self.
        return self.unpack()
    def feed(self, data):
        # Append newly received bytes; frames are extracted lazily in unpack().
        self.buf.extend(data)
    def unpack(self):
        """Pop and return one (opcode, payload) frame, or raise StopIteration."""
        if len(self.buf) < 5:
            raise StopIteration('No message.')
        # Header: 4-byte big-endian total length (including header) + 1-byte opcode.
        ml, opcode = struct.unpack('!iB', buffer(self.buf,0,5))
        if ml > SIZES.get(opcode, MAXBUF):
            # Per-opcode size cap; a peer exceeding it is misbehaving.
            raise BadClient('Not respecting MAXBUF.')
        if len(self.buf) < ml:
            raise StopIteration('No message.')
        data = bytearray(buffer(self.buf, 5, ml-5))
        del self.buf[:ml]
        return opcode, data
class hpclient(object):
    """Blocking hpfeeds client used by HPLogger to publish events to a broker.

    Handshake: the server sends OP_INFO (name + nonce); we reply OP_AUTH with
    SHA1(nonce + secret); after that OP_PUBLISH frames can be sent.
    """

    def __init__(self, server, port, ident, secret):
        log.msg('[HPFEEDS] - hpfeeds client init broker {0}:{1}, identifier {2}'.format(server, port, ident))
        self.server, self.port = server, int(port)
        self.ident, self.secret = ident.encode('latin1'), secret.encode('latin1')
        self.unpacker = FeedUnpack()
        self.state = 'INIT'
        # Initialise file-transfer state *before* connect() so the attributes
        # exist on every code path (previously they were assigned afterwards).
        self.sendfiles = []
        self.filehandle = None
        self.connect()

    def connect(self):
        """Open a TCP connection to the broker; on failure leave self.s = None."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.settimeout(3)
        try:
            self.s.connect((self.server, self.port))
        except socket.error:  # narrowed from a bare except
            log.msg('[HPFEEDS] - hpfeeds client could not connect to broker.')
            self.s = None
        else:
            self.s.settimeout(None)
            self.handle_established()

    def send(self, data):
        # Best-effort: silently drop writes while disconnected (original behavior).
        if not self.s:
            return
        self.s.send(data)

    def close(self):
        self.s.close()
        self.s = None

    def handle_established(self):
        """Drive the handshake: read until OP_INFO is processed, then poll briefly for errors."""
        log.msg('[HPFEEDS] - hpclient established')
        while self.state != 'GOTINFO':
            self.read()

        # quickly try to see if there was an error message
        self.s.settimeout(0.5)
        self.read()
        self.s.settimeout(None)

    def read(self):
        """Receive one chunk from the socket and dispatch any complete frames."""
        if not self.s:
            return
        try:
            d = self.s.recv(BUFSIZ)
        except socket.timeout:
            return

        if not d:
            # Peer closed the connection.
            self.close()
            return

        self.unpacker.feed(d)
        try:
            for opcode, data in self.unpacker:
                log.msg('[HPFEEDS] - hpclient msg opcode {0} data {1}'.format(opcode, data))
                if opcode == OP_INFO:
                    name, rand = strunpack8(data)
                    log.msg('[HPFEEDS] - hpclient server name {0} rand {1}'.format(name, rand))
                    self.send(msgauth(rand, self.ident, self.secret))
                    self.state = 'GOTINFO'
                elif opcode == OP_PUBLISH:
                    ident, data = strunpack8(data)
                    chan, data = strunpack8(data)
                    log.msg('[HPFEEDS] - publish to {0} by {1}: {2}'.format(chan, ident, data))
                elif opcode == OP_ERROR:
                    log.err('[HPFEEDS] - errormessage from server: {0}'.format(data))
                else:
                    log.err('[HPFEEDS] - unknown opcode message: {0}'.format(opcode))
        except BadClient:
            log.err('[HPFEEDS] - unpacker error, disconnecting.')
            self.close()

    def publish(self, channel, **kwargs):
        """Publish kwargs as a JSON object on *channel*; reconnect once on failure."""
        try:
            self.send(msgpublish(self.ident, channel, json.dumps(kwargs).encode('latin1')))
        except Exception as e:  # was 'except Exception, e' (Python-2-only syntax)
            log.err('[HPFEEDS] - connection to hpfriends lost: {0}'.format(e))
            log.err('[HPFEEDS] - connecting')
            self.connect()
            self.send(msgpublish(self.ident, channel, json.dumps(kwargs).encode('latin1')))

    def sendfile(self, filepath):
        # does not read complete binary into memory, read and send chunks
        if not self.filehandle:
            # BUG FIX: previously called self.sendfileheader(i.file) where
            # 'i' was undefined, raising NameError on the first transfer.
            self.sendfileheader(filepath)
            self.sendfiledata()
        else:
            self.sendfiles.append(filepath)

    def sendfileheader(self, filepath):
        """Open *filepath* and send the PUBLISH header announcing its size."""
        self.filehandle = open(filepath, 'rb')
        fsize = os.stat(filepath).st_size
        # NOTE(review): UNIQUECHAN is not defined anywhere in this module, so
        # this raises NameError if file transfer is used — define the target
        # channel name before enabling this feature.
        headc = strpack8(self.ident) + strpack8(UNIQUECHAN)
        headh = struct.pack('!iB', 5 + len(headc) + fsize, OP_PUBLISH)
        self.send(headh + headc)

    def sendfiledata(self):
        """Send the next chunk of the open file, or advance to the queued file."""
        tmp = self.filehandle.read(BUFSIZ)
        if not tmp:
            if self.sendfiles:
                fp = self.sendfiles.pop(0)
                self.sendfileheader(fp)
            else:
                self.filehandle = None
                # NOTE(review): handle_io_in is not defined on this class —
                # this call raises AttributeError when a transfer completes.
                self.handle_io_in(b'')
        else:
            self.send(tmp)
class HPLogger():
    """Collects HonSSH auth/session events and publishes them to hpfeeds."""

    def start(self, cfg):
        """Read broker settings from *cfg* and return a connected hpclient."""
        log.msg('[HPFEEDS] - hpfeeds DBLogger start')
        server = cfg.get('hpfeeds', 'server')
        port = cfg.get('hpfeeds', 'port')
        ident = cfg.get('hpfeeds', 'identifier')
        secret = cfg.get('hpfeeds', 'secret')
        return hpclient(server, port, ident, secret)

    def setClient(self, hpClient, cfg):
        """Store the hpfeeds client and the configured sensor name."""
        self.sensor_name = cfg.get('honeypot', 'sensor_name')
        self.client = hpClient

    def createSession(self, session, peerIP, peerPort, hostIP, hostPort):
        """Begin tracking a new SSH session; returns the session id unchanged."""
        self.sessionMeta = {'sensor_name': self.sensor_name, 'uuid': session, 'startTime': self.getDateTime(), 'channels': []}
        self.sessionMeta['connection'] = {'peerIP': peerIP, 'peerPort': peerPort, 'hostIP': hostIP, 'hostPort': hostPort, 'version': None}
        return session

    def handleConnectionLost(self):
        """Stamp the session end time and publish the full session metadata."""
        log.msg('[HPFEEDS] - publishing metadata to hpfeeds')
        meta = self.sessionMeta
        meta['endTime'] = self.getDateTime()
        log.msg("[HPFEEDS] - sessionMeta: " + str(meta))
        # Publish off the reactor thread; hpclient does blocking socket I/O.
        threads.deferToThread(self.client.publish, HONSSHSESHCHAN, **meta)

    def _publishAuth(self, username, password, success):
        # Shared implementation of handleLoginFailed/handleLoginSucceeded
        # (previously duplicated verbatim apart from the 'success' flag).
        authMeta = {'sensor_name': self.sensor_name, 'datetime': self.getDateTime(), 'username': username, 'password': password, 'success': success}
        log.msg('[HPFEEDS] - authMeta: ' + str(authMeta))
        threads.deferToThread(self.client.publish, HONSSHAUTHCHAN, **authMeta)

    def handleLoginFailed(self, username, password):
        """Publish a failed authentication attempt."""
        self._publishAuth(username, password, False)

    def handleLoginSucceeded(self, username, password):
        """Publish a successful authentication."""
        self._publishAuth(username, password, True)

    def channelOpened(self, uuid, channelName):
        """Record a newly opened channel on the current session."""
        # The 'uuid' parameter shadows the module-level uuid import; kept for
        # interface compatibility with existing callers.
        self.sessionMeta['channels'].append({'name': channelName, 'uuid': uuid, 'startTime': self.getDateTime(), 'commands': []})

    def channelClosed(self, uuid, ttylog=None):
        """Stamp the channel end time and attach its hex-encoded tty log, if any."""
        chan = self.findChannel(uuid)
        chan['endTime'] = self.getDateTime()
        if ttylog is not None:
            # Context manager ensures the log file is closed even on error.
            with open(ttylog, 'rb') as fp:
                ttydata = fp.read()
            chan['ttylog'] = ttydata.encode('hex')  # Python 2 'hex' codec

    def handleCommand(self, uuid, command):
        """Append a timestamped command to the channel's history."""
        chan = self.findChannel(uuid)
        chan['commands'].append([self.getDateTime(), command])

    def handleClientVersion(self, version):
        """Record the SSH client's version string for the session."""
        self.sessionMeta['connection']['version'] = version

    def getDateTime(self):
        """Return the current local time as YYYYMMDD_HHMMSS_mmm."""
        return datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")[:-3]

    def findChannel(self, uuid):
        """Return the channel dict with the given uuid, or None if not found."""
        for chan in self.sessionMeta['channels']:
            if chan['uuid'] == uuid:
                return chan
| 31.135458
| 145
| 0.656174
|
acfe1892bec2764dc6fc618080257041386e1545
| 13,734
|
py
|
Python
|
signbank/registration/forms.py
|
codev/bslsignbank
|
5c8f48c4aa14f48483b38f922f4bc4a4d4eda33e
|
[
"BSD-3-Clause"
] | 4
|
2018-01-27T17:28:59.000Z
|
2019-11-06T17:59:33.000Z
|
signbank/registration/forms.py
|
codev/bslsignbank
|
5c8f48c4aa14f48483b38f922f4bc4a4d4eda33e
|
[
"BSD-3-Clause"
] | 2
|
2020-02-12T00:09:31.000Z
|
2020-06-05T17:51:51.000Z
|
signbank/registration/forms.py
|
codev/bslsignbank
|
5c8f48c4aa14f48483b38f922f4bc4a4d4eda33e
|
[
"BSD-3-Clause"
] | null | null | null |
"""
Forms and validation code for user registration.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
from django.conf import settings
from models import RegistrationProfile, UserProfile
import re
alnum_re = re.compile(r'^\w+$')
# I put this on all required fields, because it's easier to pick up
# on them with CSS or JavaScript if they have a class of "required"
# in the HTML. Your mileage may vary. If/when Django ticket #3515
# lands in trunk, this will no longer be necessary.
attrs_reqd = { 'class': 'required form-control' }
attrs_default = {'class': 'form-control'}
class RegistrationForm(forms.Form):
    """
    Form for registering a new user account.

    Validates that the requested username is not already in use, and
    requires the password to be entered twice to catch typos.

    Subclasses should feel free to add any additional validation they
    need, but should either preserve the base ``save()`` or implement
    a ``save()`` which accepts the ``profile_callback`` keyword
    argument and passes it through to
    ``RegistrationProfile.objects.create_inactive_user()``.
    """
    username = forms.CharField(max_length=30,
                               widget=forms.TextInput(attrs=attrs_reqd),
                               label=_(u'Username'))
    email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_reqd,
                                                               maxlength=75)
                                                    ),
                             label=_(u'Your Email Address'))
    password1 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_reqd),
                                label=_(u'Password'))
    password2 = forms.CharField(widget=forms.PasswordInput(attrs=attrs_reqd),
                                label=_(u'Password (again)'))

    def clean_username(self):
        """
        Validates that the username is alphanumeric and is not already
        in use.
        """
        try:
            user = User.objects.get(username__exact=self.cleaned_data['username'])
        except User.DoesNotExist:
            return self.cleaned_data['username']
        raise forms.ValidationError(_(u'This username is already taken. Please choose another.'))

    def clean_password2(self):
        """
        Validates that the two password inputs match.
        """
        if 'password1' in self.cleaned_data and 'password2' in self.cleaned_data:
            if self.cleaned_data['password1'] == self.cleaned_data['password2']:
                return self.cleaned_data['password2']
        raise forms.ValidationError(_(u'You must type the same password each time'))

    def save(self, profile_callback=None):
        """
        Creates the new ``User`` and ``RegistrationProfile``, and
        returns the ``User``.

        This is essentially a light wrapper around
        ``RegistrationProfile.objects.create_inactive_user()``,
        feeding it the form data and a profile callback (see the
        documentation on ``create_inactive_user()`` for details) if
        supplied.
        """
        # BUG FIX: a stray ``barf`` statement here previously raised
        # NameError *after* the inactive user had been created.
        new_user = RegistrationProfile.objects.create_inactive_user(username=self.cleaned_data['username'],
                                                                    password=self.cleaned_data['password1'],
                                                                    email=self.cleaned_data['email'],
                                                                    profile_callback=profile_callback)
        return new_user
class RegistrationFormTermsOfService(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which adds a required checkbox
    for agreeing to a site's Terms of Service.
    """
    tos = forms.BooleanField(widget=forms.CheckboxInput(attrs=attrs_reqd),
                             label=_(u'I have read and agree to the Terms of Service'))

    def clean_tos(self):
        """Reject the submission unless the Terms of Service box was ticked."""
        if not self.cleaned_data.get('tos', False):
            raise forms.ValidationError(_(u'You must agree to the terms to register'))
        return self.cleaned_data['tos']
class RegistrationFormUniqueEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which enforces uniqueness of
    email addresses.
    """

    def clean_email(self):
        """Reject addresses that already belong to an existing account."""
        try:
            User.objects.get(email__exact=self.cleaned_data['email'])
        except User.DoesNotExist:
            return self.cleaned_data['email']
        raise forms.ValidationError(_(u'This email address is already in use. Please supply a different email address.'))
class RegistrationFormNoFreeEmail(RegistrationForm):
    """
    Subclass of ``RegistrationForm`` which disallows registration with
    email addresses from popular free webmail services; moderately
    useful for preventing automated spam registrations.

    To change the list of banned domains, subclass this form and
    override the attribute ``bad_domains``.
    """
    bad_domains = ['aim.com', 'aol.com', 'email.com', 'gmail.com',
                   'googlemail.com', 'hotmail.com', 'hushmail.com',
                   'msn.com', 'mail.ru', 'mailinator.com', 'live.com']

    def clean_email(self):
        """Reject addresses whose domain appears in ``bad_domains``."""
        address = self.cleaned_data['email']
        domain = address.split('@')[1]
        if domain in self.bad_domains:
            raise forms.ValidationError(_(u'Registration using free email addresses is prohibited. Please supply a different email address.'))
        return address
import re
import time
class BirthYearField(forms.Field):
    """A form field for entry of a year of birth,
    must be before this year and not more than 110 years ago"""
    # BUG FIX: anchored so trailing garbage (e.g. "1984abc") is rejected here
    # instead of matching the first four digits and then crashing with an
    # uncaught ValueError in int() below. Raw string avoids the invalid
    # escape-sequence deprecation for "\d".
    year_re = re.compile(r"^\d{4}$")

    def clean(self, value):
        """Validate *value* and return the birth year as an int.

        Raises forms.ValidationError for missing, malformed, future, or
        implausibly old (more than 110 years ago) values.
        """
        if not value:
            raise forms.ValidationError('Enter a four digit year, eg. 1984.')
        if not self.year_re.match(str(value)):
            raise forms.ValidationError('%s is not a valid year.' % value )
        year = int(value)
        # check not after this year
        thisyear = time.localtime()[0]
        if year > thisyear:
            raise forms.ValidationError("%s is in the future, please enter your year of birth." % value )
        # or that this person isn't over 110
        if year < thisyear-110:
            raise forms.ValidationError("If you were born in %s you are now %s years old! Please enter your real birth year." % (year, thisyear-year))
        return year
from models import backgroundChoices, learnedChoices, schoolChoices, teachercommChoices
yesnoChoices = ((1, 'yes'), (0, 'no'))
import string
def t(message):
    """Substitute the $country and $language placeholders using project settings."""
    return string.Template(message).substitute(
        country=settings.COUNTRY_NAME,
        language=settings.LANGUAGE_NAME,
    )
class RegistrationFormAuslan(RegistrationFormUniqueEmail):
    """
    Registration form for the site

    Extends the unique-email form with demographic questions whose answers
    are stored on the user's UserProfile in save().
    """
    # Hidden and optional: the real username is derived from the email
    # address inside save() below.
    username = forms.CharField(widget=forms.HiddenInput, required=False)
    firstname = forms.CharField(label=t("Firstname"), max_length=50)
    lastname = forms.CharField(label=t("Lastname"), max_length=50)
    yob = BirthYearField(label=t("What year were you born?"))
    australian = forms.ChoiceField(yesnoChoices, label=t("Do you live in ${country}?"))
    postcode = forms.CharField(label=t("If you live in $country, what is your postcode?"),
                               max_length=20, required=False)
    background = forms.MultipleChoiceField(backgroundChoices, label=_("Which of the following best describes you?"))
    researcher_credentials = forms.CharField(label=t("(OPTIONAL) If you would like access to advanced SignBank features, e.g. advanced search and detail view of signs, please give evidence of your researcher status here (e.g. link to your university staff profile page, or evidence that you are a research student)."), widget=forms.Textarea, required=False)
    auslan_user = forms.ChoiceField(yesnoChoices, label=t("Do you use $language?"), required=False)
    learned = forms.ChoiceField(label=t("If you use $language, when did you learn sign language?"),
                                choices=learnedChoices, required=False)
    deaf = forms.ChoiceField(yesnoChoices, label=t("Are you a deaf person?"))
    schooltype = forms.ChoiceField(label=t("What sort of school do you (or did you) attend?"),
                                   choices=schoolChoices, required=False)
    school = forms.CharField(label=t("Which school do you (or did you) attend?"),
                             max_length=50, required=False)
    teachercomm = forms.ChoiceField(label=t("How do (or did) your teachers communicate with you?"),
                                    choices=teachercommChoices,
                                    required=False)

    def save(self, profile_callback=None):
        """
        Creates the new ``User`` and ``RegistrationProfile``, and
        returns the ``User``.

        Also create the userprofile with additional info from the form.

        Differs from the default by using the email address as the username.
        """
        # construct a username based on the email address
        # need to truncate to 30 chars
        username = self.cleaned_data['email'].replace('@','').replace('.','')
        username = username[:30]
        # Get the indices of the selected backgrounds to help decide if this is a researcher
        background_list = ",".join(self.cleaned_data['background'])
        new_user = RegistrationProfile.objects.create_inactive_user(username=username,
                                                                    password=self.cleaned_data['password1'],
                                                                    email=self.cleaned_data['email'],
                                                                    firstname=self.cleaned_data['firstname'],
                                                                    lastname=self.cleaned_data['lastname'],
                                                                    profile_callback=profile_callback,
                                                                    is_researcher=UserProfile.is_researcher_from_background(background_list))
        # now also create the userprofile for this user with
        # the extra information from the form
        # NOTE(review): ChoiceField values arrive as strings, hence the
        # == '1' comparisons for the yes/no questions below.
        profile = UserProfile(user=new_user,
                              yob=self.cleaned_data['yob'],
                              australian=self.cleaned_data['australian'] == '1',
                              postcode=self.cleaned_data['postcode'],
                              background=background_list,
                              researcher_credentials=self.cleaned_data['researcher_credentials'],
                              auslan_user=self.cleaned_data['auslan_user'] == '1',
                              learned=self.cleaned_data['learned'],
                              deaf=self.cleaned_data['deaf'] == '1',
                              schooltype=self.cleaned_data['schooltype'],
                              school=self.cleaned_data['school'],
                              teachercomm=self.cleaned_data['teachercomm'],
                              data_protection_agree=True)
        profile.save()
        return new_user
from django.views.decorators.cache import never_cache
from django.contrib.auth import authenticate
class EmailAuthenticationForm(forms.Form):
    """
    Base class for authenticating users. Extend this to get a form that accepts
    username/password logins.
    """
    email = forms.CharField(label=_("Email"), max_length=100)
    password = forms.CharField(label=_("Password"), widget=forms.PasswordInput)

    def __init__(self, request=None, *args, **kwargs):
        """
        If request is passed in, the form will validate that cookies are
        enabled. Note that the request (a HttpRequest object) must have set a
        cookie with the key TEST_COOKIE_NAME and value TEST_COOKIE_VALUE before
        running this validation.
        """
        self.request = request
        self.user_cache = None
        super(EmailAuthenticationForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Authenticate the credentials and verify the account and cookies."""
        email = self.cleaned_data.get('email')
        password = self.cleaned_data.get('password')

        if email and password:
            self.user_cache = authenticate(username=email, password=password)
            if self.user_cache is None:
                raise forms.ValidationError(_("Please enter a correct email and password. Note that password is case-sensitive."))
            if not self.user_cache.is_active:
                raise forms.ValidationError(_("This account is inactive."))

        # TODO: determine whether this should move to its own method.
        if self.request and not self.request.session.test_cookie_worked():
            raise forms.ValidationError(_("Your Web browser doesn't appear to have cookies enabled. Cookies are required for logging in."))

        return self.cleaned_data

    def get_user_id(self):
        return self.user_cache.id if self.user_cache else None

    def get_user(self):
        return self.user_cache
| 41.36747
| 357
| 0.612567
|
acfe1a62d91c8939cc602b13dfc188765a31c4ff
| 11,234
|
py
|
Python
|
futu/quote/quote_response_handler.py
|
zhuzhenping/py-futu-api
|
540cf951738e387fd001064a76ceef6284c75d41
|
[
"Apache-2.0"
] | 1
|
2021-01-10T00:54:39.000Z
|
2021-01-10T00:54:39.000Z
|
futu/quote/quote_response_handler.py
|
GOGOYAO/py-futu-api
|
540cf951738e387fd001064a76ceef6284c75d41
|
[
"Apache-2.0"
] | null | null | null |
futu/quote/quote_response_handler.py
|
GOGOYAO/py-futu-api
|
540cf951738e387fd001064a76ceef6284c75d41
|
[
"Apache-2.0"
] | 1
|
2021-02-17T17:46:36.000Z
|
2021-02-17T17:46:36.000Z
|
# -*- coding: utf-8 -*-
import pandas as pd
from futu.common import RspHandlerBase
from futu.quote.quote_query import *
class StockQuoteHandlerBase(RspHandlerBase):
    """Asynchronous handler for pushed quote updates on subscribed codes.

    Subclass and override ``on_recv_rsp``; call the base implementation first
    to decode the push into a DataFrame, then apply your own logic::

        class StockQuoteTest(StockQuoteHandlerBase):
            def on_recv_rsp(self, rsp_str):
                ret_code, content = super(StockQuoteTest, self).on_recv_rsp(rsp_str)
                if ret_code != RET_OK:
                    return RET_ERROR, content
                # ... custom handling of content ...
                return RET_OK, content
    """

    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        """Decode the protobuf push; return (RET_OK, rows) or (error code, message)."""
        ret_code, msg, quote_list = StockQuoteQuery.unpack_rsp(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, msg
        return RET_OK, quote_list

    def on_recv_rsp(self, rsp_pb):
        """Invoked on a worker thread for each quote push.

        :param rsp_pb: raw protobuf message; subclasses pass it straight through.
        :return: (RET_OK, DataFrame) on success, else (error code, message) —
                 same shape as get_stock_quote's return value.
        """
        ret_code, content = self.parse_rsp_pb(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, content

        col_list = [
            'code', 'data_date', 'data_time', 'last_price', 'open_price',
            'high_price', 'low_price', 'prev_close_price', 'volume',
            'turnover', 'turnover_rate', 'amplitude', 'suspension',
            'listing_date', 'price_spread', 'dark_status', 'sec_status', 'strike_price',
            'contract_size', 'open_interest', 'implied_volatility',
            'premium', 'delta', 'gamma', 'vega', 'theta', 'rho',
            'net_open_interest', 'expiry_date_distance', 'contract_nominal_value',
            'owner_lot_multiplier', 'option_area_type', 'contract_multiplier',
        ]
        # Pre- and after-market columns are appended in the same order the
        # protobuf field maps declare them.
        for field_map in (pb_field_map_PreAfterMarketData_pre,
                          pb_field_map_PreAfterMarketData_after):
            col_list.extend(row[0] for row in field_map)

        return RET_OK, pd.DataFrame(content, columns=col_list)
class OrderBookHandlerBase(RspHandlerBase):
    """
    Asynchronous handler for pushed real-time order-book updates.
    .. code:: python
        class OrderBookTest(OrderBookHandlerBase):
            def on_recv_rsp(self, rsp_str):
                ret_code, data = super(OrderBookTest,self).on_recv_rsp(rsp_str)
                if ret_code != RET_OK:
                    print("OrderBookTest: error, msg: %s" % data)
                    return RET_ERROR, data
                print("OrderBookTest ", data) # OrderBookTest's own handling logic
                return RET_OK, data
    """
    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        # Decode the protobuf push; second element is the error message on
        # failure, the order-book payload on success.
        ret_code, msg, order_book = OrderBookQuery.unpack_rsp(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, msg
        else:
            return RET_OK, order_book
    def on_recv_rsp(self, rsp_pb):
        """
        Called after a real-time order-book push is received; override this
        method in a derived class.
        NOTE: this callback runs on a separate worker thread.
        :param rsp_pb: derived classes do not need to touch this parameter
        :return: see the return value of get_order_book
        """
        ret_code, content = self.parse_rsp_pb(rsp_pb)
        if ret_code == RET_OK:
            # Log successful pushes only (base-class hook).
            self.on_recv_log(content)
        return ret_code, content
class CurKlineHandlerBase(RspHandlerBase):
    """
    Asynchronous handler for pushed real-time K-line (candlestick) data.
    .. code:: python
        class CurKlineTest(CurKlineHandlerBase):
            def on_recv_rsp(self, rsp_str):
                ret_code, data = super(CurKlineTest,self).on_recv_rsp(rsp_str)
                if ret_code != RET_OK:
                    print("CurKlineTest: error, msg: %s" % data)
                    return RET_ERROR, data
                print("CurKlineTest ", data) # CurKlineTest's own handling logic
                return RET_OK, data
    """
    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        # Decode the protobuf push into a list of K-line records.
        ret_code, msg, kline_list = CurKlinePush.unpack_rsp(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, msg
        else:
            return RET_OK, kline_list
    def on_recv_rsp(self, rsp_pb):
        """
        Called after a real-time K-line push is received; override this
        method in a derived class.
        NOTE: this callback runs on a separate worker thread.
        :param rsp_pb: derived classes do not need to touch this parameter
        :return: see the return value of get_cur_kline
        """
        ret_code, content = self.parse_rsp_pb(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, content
        else:
            col_list = [
                'code', 'time_key', 'open', 'close', 'high', 'low', 'volume',
                'turnover', 'k_type'
            ]
            kline_frame_table = pd.DataFrame(content, columns=col_list)
            return RET_OK, kline_frame_table
class TickerHandlerBase(RspHandlerBase):
    """
    Asynchronous handler for pushed tick-by-tick trade data.
    .. code:: python
        class TickerTest(TickerHandlerBase):
            def on_recv_rsp(self, rsp_str):
                ret_code, data = super(TickerTest,self).on_recv_rsp(rsp_str)
                if ret_code != RET_OK:
                    print("TickerTest: error, msg: %s" % data)
                    return RET_ERROR, data
                print("TickerTest ", data) # TickerTest's own handling logic
                return RET_OK, data
    """
    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        # Decode the protobuf push into a list of tick records.
        ret_code, msg, ticker_list = TickerQuery.unpack_rsp(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, msg
        else:
            return RET_OK, ticker_list
    def on_recv_rsp(self, rsp_pb):
        """
        Called after a tick-by-tick push is received; override this method
        in a derived class.
        NOTE: this callback runs on a separate worker thread.
        :param rsp_pb: derived classes do not need to touch this parameter
        :return: see the return value of get_rt_ticker
        """
        ret_code, content = self.parse_rsp_pb(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, content
        else:
            # Log successful pushes (base-class hook), then tabulate.
            self.on_recv_log(content)
            col_list = [
                'code', 'time', 'price', 'volume', 'turnover',
                "ticker_direction", 'sequence', 'type', 'push_data_type',
            ]
            ticker_frame_table = pd.DataFrame(content, columns=col_list)
            return RET_OK, ticker_frame_table
class RTDataHandlerBase(RspHandlerBase):
    """
    Asynchronous handler for pushed intraday (time-sharing) data.
    .. code:: python
        class RTDataTest(RTDataHandlerBase):
            def on_recv_rsp(self, rsp_str):
                ret_code, data = super(RTDataTest,self).on_recv_rsp(rsp_str)
                if ret_code != RET_OK:
                    print("RTDataTest: error, msg: %s" % data)
                    return RET_ERROR, data
                print("RTDataTest ", data) # RTDataTest's own handling logic
                return RET_OK, data
    """
    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        # Decode the protobuf push into a list of intraday records.
        ret_code, msg, rt_data_list = RtDataQuery.unpack_rsp(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, msg
        else:
            return RET_OK, rt_data_list
    def on_recv_rsp(self, rsp_pb):
        """
        Called after an intraday-data push is received; override this
        method in a derived class.
        NOTE: this callback runs on a separate worker thread.
        :param rsp_pb: derived classes do not need to touch this parameter
        :return: see the return value of get_rt_data
        """
        ret_code, content = self.parse_rsp_pb(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, content
        else:
            col_list = [
                'code', 'time', 'is_blank', 'opened_mins', 'cur_price',
                "last_close", 'avg_price', 'turnover', 'volume'
            ]
            rt_data_table = pd.DataFrame(content, columns=col_list)
            return RET_OK, rt_data_table
class BrokerHandlerBase(RspHandlerBase):
    """
    Asynchronous handler for pushed broker-queue data.
    .. code:: python
        class BrokerTest(BrokerHandlerBase):
            def on_recv_rsp(self, rsp_str):
                ret_code, data = super(BrokerTest,self).on_recv_rsp(rsp_str)
                if ret_code != RET_OK:
                    print("BrokerTest: error, msg: %s" % data)
                    return RET_ERROR, data
                print("BrokerTest ", data) # BrokerTest's own handling logic
                return RET_OK, data
    """
    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        # NOTE(review): the 3-tuple is unpacked unconditionally, so
        # unpack_rsp is presumably expected to return a 3-element payload
        # even on error — verify against BrokerQueueQuery.unpack_rsp.
        ret_code, msg, (stock_code, bid_content,
                        ask_content) = BrokerQueueQuery.unpack_rsp(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, msg
        else:
            return RET_OK, (stock_code, bid_content, ask_content)
    def on_recv_rsp(self, rsp_pb):
        """
        Called after a broker-queue push is received; override this method
        in a derived class.
        NOTE: this callback runs on a separate worker thread.
        :param rsp_pb: derived classes do not need to touch this parameter
        :return: on success (RET_OK, stock_code, [bid_frame_table, ask_frame_table]);
                 see get_broker_queue_ for the meaning of the frame tables.
                 On failure (RET_ERROR, ERR_MSG, None).
        """
        ret_code, content = self.parse_rsp_pb(rsp_pb)
        if ret_code != RET_OK:
            return ret_code, content, None
        else:
            self.on_recv_log(content)
            stock_code, bid_content, ask_content = content
            bid_list = [
                'code', 'bid_broker_id', 'bid_broker_name', 'bid_broker_pos'
            ]
            ask_list = [
                'code', 'ask_broker_id', 'ask_broker_name', 'ask_broker_pos'
            ]
            bid_frame_table = pd.DataFrame(bid_content, columns=bid_list)
            ask_frame_table = pd.DataFrame(ask_content, columns=ask_list)
            return ret_code, stock_code, [bid_frame_table, ask_frame_table]
class KeepAliveHandlerBase(RspHandlerBase):
    """Base class for handling KeepAlive pushes from the server."""
    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        # Decode the protobuf; the payload slot carries the error message
        # on failure and the keep-alive timestamp on success.
        ret_code, msg, alive_time = KeepAlive.unpack_rsp(rsp_pb)
        if ret_code == RET_OK:
            return RET_OK, alive_time
        return ret_code, msg
    def on_recv_rsp(self, rsp_pb):
        """Receive-response callback: forward the parsed result unchanged."""
        return self.parse_rsp_pb(rsp_pb)
class SysNotifyHandlerBase(RspHandlerBase):
    """Base class for handling system-notification pushes."""
    @classmethod
    def parse_rsp_pb(cls, rsp_pb):
        # Delegate protobuf decoding; the (ret_code, content) pair is
        # passed through unchanged.
        return SysNotifyPush.unpack_rsp(rsp_pb)
    def on_recv_rsp(self, rsp_pb):
        """Receive-response callback: forward the parsed result as-is."""
        return self.parse_rsp_pb(rsp_pb)
class AsyncHandler_InitConnect(RspHandlerBase):
    """Async handler for the InitConnect response.
    (The original docstring read "AsyncHandler_TrdSubAccPush", which
    appears to be a copy-paste leftover — this class handles InitConnect.)
    """
    def __init__(self, notify_obj=None):
        # Optional observer that is informed of the connection result.
        self._notify_obj = notify_obj
        super(AsyncHandler_InitConnect, self).__init__()
    def on_recv_rsp(self, rsp_pb):
        """Receive-response callback: decode the InitConnect reply and
        notify the registered observer, if any."""
        ret_code, msg, conn_info_map = InitConnect.unpack_rsp(rsp_pb)
        if self._notify_obj is not None:
            self._notify_obj.on_async_init_connect(
                ret_code, msg, conn_info_map)
        return ret_code, msg
#
# class OrderDetailHandlerBase(RspHandlerBase):
# def __init__(self):
# super(OrderDetailHandlerBase, self).__init__()
#
# def on_recv_rsp(self, rsp_pb):
# """receive response callback function"""
# ret_code, msg, data = OrderDetail.unpack_rsp(rsp_pb)
#
# if ret_code != RET_OK:
# return ret_code, msg
# else:
# return ret_code, data
| 30.610354
| 121
| 0.591953
|
acfe1ac011231b65faec0b61444184297866b0e1
| 848
|
py
|
Python
|
mysite/blog/migrations/0007_auto_20190520_1318.py
|
Kiraeraser/My_Blog
|
0e47fd2bf72ccfea12a0220ef780779543c33f03
|
[
"MIT"
] | null | null | null |
mysite/blog/migrations/0007_auto_20190520_1318.py
|
Kiraeraser/My_Blog
|
0e47fd2bf72ccfea12a0220ef780779543c33f03
|
[
"MIT"
] | null | null | null |
mysite/blog/migrations/0007_auto_20190520_1318.py
|
Kiraeraser/My_Blog
|
0e47fd2bf72ccfea12a0220ef780779543c33f03
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-05-20 07:48
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Adds creation/update/publish timestamps to the BlogPost model.
    dependencies = [
        ('blog', '0006_blogpost_user'),
    ]
    operations = [
        migrations.AddField(
            model_name='blogpost',
            name='Timestamp',
            # The one-off default (timezone.now) back-fills existing rows;
            # preserve_default=False drops it again after the migration.
            # NOTE(review): the capitalized field name 'Timestamp' breaks
            # snake_case convention but must match the model definition —
            # renaming would require a follow-up migration.
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='blogpost',
            name='publish_date',
            # Nullable: a post may be saved without being published.
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='blogpost',
            name='updated',
            # auto_now refreshes the value on every save().
            field=models.DateTimeField(auto_now=True),
        ),
    ]
| 27.354839
| 94
| 0.571934
|
acfe1bfa5eaf00a4ef0a10bfaf8cd68cd79cb9e9
| 6,695
|
bzl
|
Python
|
build/container.bzl
|
vass-engineering/cert-manager
|
7fbdd6487646e812fe74c0c05503805b5d9d4751
|
[
"Apache-2.0"
] | null | null | null |
build/container.bzl
|
vass-engineering/cert-manager
|
7fbdd6487646e812fe74c0c05503805b5d9d4751
|
[
"Apache-2.0"
] | null | null | null |
build/container.bzl
|
vass-engineering/cert-manager
|
7fbdd6487646e812fe74c0c05503805b5d9d4751
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The Jetstack cert-manager contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
load(":platforms.bzl", "go_platform_constraint")
load("@bazel_tools//tools/build_defs/pkg:pkg.bzl", "pkg_tar")
# multi_arch_container produces a private internal container_image, multiple
# arch-specific tagged container_bundles (named NAME-ARCH), an alias
# from NAME to the appropriately NAME-ARCH container_bundle target, and a
# genrule for NAME.tar copying the appropriate NAME-ARCH container bundle
# tarball output for the currently-configured architecture.
# Additionally, if docker_push_tags is provided, uses multi_arch_container_push
# to create container_bundles named push-NAME-ARCH with the provided push tags,
# along with a push-NAME docker_push target.
# Args:
# name: name used for the alias; the internal container_image and
# container_bundles are based on this name
# architectures: list of architectures (in GOARCH naming parlance) to
# configure
# base: base image to use for the containers. The format string {ARCH} will
# be replaced with the configured GOARCH.
# docker_tags: list of docker tags to apply to the image. The format string
# {ARCH} will be replaced with the configured GOARCH; any stamping variables
# should be escaped, e.g. {{STABLE_MY_VAR}}.
# docker_push_tags: list of docker tags to apply to the image for pushing.
# The format string {ARCH} will be replaced with the configured GOARCH;
# any stamping variables should be escaped, e.g. {{STABLE_MY_VAR}}.
# tags: will be applied to all targets
# visibility: will be applied only to the container_bundles; the internal
# container_image is private
# All other args will be applied to the internal container_image.
def multi_arch_container(
        name,
        architectures,
        base,
        docker_tags,
        stamp = True,
        docker_push_tags = None,
        tags = None,
        visibility = None,
        user = "0",
        **kwargs):
    # Internal per-arch go_image; base and architecture are chosen via
    # select() on the currently configured platform.
    go_image(
        name = "%s-internal-notimestamp" % name,
        base = select({
            go_platform_constraint(os = "linux", arch = arch): base.format(ARCH = arch)
            for arch in architectures
        }),
        architecture = select({
            go_platform_constraint(os = "linux", arch = arch): arch
            for arch in architectures
        }),
        stamp = stamp,
        tags = tags,
        user = user,
        visibility = ["//visibility:private"],
        **kwargs
    )
    # Create a tar file containing the created license files
    pkg_tar(
        name = "%s.license_tar" % name,
        srcs = ["//:LICENSE", "//:LICENSES"],
        package_dir = "licenses",
    )
    # Final image: the go_image layered with the license tarball.
    container_image(
        name = "%s.image" % name,
        base = ":%s-internal-notimestamp" % name,
        tars = [":%s.license_tar" % name],
        stamp = stamp,
        tags = tags,
        user = user,
        architecture = select({
            go_platform_constraint(os = "linux", arch = arch): arch
            for arch in architectures
        }),
        visibility = ["//visibility:public"],
    )
    # One tagged bundle per architecture (NAME-ARCH).
    for arch in architectures:
        container_bundle(
            name = "%s-%s" % (name, arch),
            images = {
                docker_tag.format(ARCH = arch): ":%s.image" % name
                for docker_tag in docker_tags
            },
            tags = tags,
            visibility = visibility,
        )
    # NAME resolves to the bundle matching the configured architecture.
    native.alias(
        name = name,
        tags = tags,
        actual = select({
            go_platform_constraint(os = "linux", arch = arch): "%s-%s" % (name, arch)
            for arch in architectures
        }),
    )
    # NAME.tar copies the arch-appropriate bundle tarball into bindir.
    native.genrule(
        name = "gen_%s.tar" % name,
        outs = ["%s.tar" % name],
        tags = tags,
        srcs = select({
            go_platform_constraint(os = "linux", arch = arch): ["%s-%s.tar" % (name, arch)]
            for arch in architectures
        }),
        cmd = "cp $< $@",
        output_to_bindir = True,
    )
    if docker_push_tags:
        multi_arch_container_push(
            name = name,
            architectures = architectures,
            docker_tags_images = {docker_push_tag: ":%s.image" % name for docker_push_tag in docker_push_tags},
            tags = tags,
        )
# multi_arch_container_push creates container_bundles named push-NAME-ARCH for
# the provided architectures, populating them with the images directory.
# It additionally creates a push-NAME docker_push rule which can be run to
# push the images to a Docker repository.
# Args:
# name: name used for targets created by this macro; the internal
# container_bundles are based on this name
# architectures: list of architectures (in GOARCH naming parlance) to
# configure
# docker_tags_images: dictionary mapping docker tag to the corresponding
# container_image target. The format string {ARCH} will be replaced
# in tags with the configured GOARCH; any stamping variables should be
# escaped, e.g. {{STABLE_MY_VAR}}.
# tags: applied to container_bundle targets
def multi_arch_container_push(
        name,
        architectures,
        docker_tags_images,
        tags = None):
    # One push bundle per architecture (push-NAME-ARCH).
    for arch in architectures:
        container_bundle(
            name = "push-%s-%s" % (name, arch),
            images = {tag.format(ARCH = arch): image for tag, image in docker_tags_images.items()},
            tags = tags,
            visibility = ["//visibility:private"],
        )
    # NOTE(review): this alias reuses `name` verbatim, which appears to
    # collide with the alias of the same name declared by
    # multi_arch_container when both macros run for the same `name` —
    # verify this is intentional (Bazel rejects duplicate targets).
    native.alias(
        name = name,
        tags = tags,
        actual = select({
            go_platform_constraint(os = "linux", arch = arch): "push-%s-%s" % (name, arch)
            for arch in architectures
        }),
    )
    # `bazel run push-NAME` pushes the arch-appropriate bundle.
    docker_push(
        name = "push-%s" % name,
        tags = tags,
        bundle = select({
            go_platform_constraint(os = "linux", arch = arch): "push-%s-%s" % (name, arch)
            for arch in architectures
        }),
    )
| 37.61236
| 111
| 0.634205
|
acfe1cd211f19de8f5215bacb7a2f174c4d3c686
| 995
|
py
|
Python
|
test/test_host.py
|
jlk/qualys-cs-python-client
|
e2e39fd64d41fd6671d45343843ef36fa3ab59a4
|
[
"Apache-2.0"
] | null | null | null |
test/test_host.py
|
jlk/qualys-cs-python-client
|
e2e39fd64d41fd6671d45343843ef36fa3ab59a4
|
[
"Apache-2.0"
] | null | null | null |
test/test_host.py
|
jlk/qualys-cs-python-client
|
e2e39fd64d41fd6671d45343843ef36fa3ab59a4
|
[
"Apache-2.0"
] | 1
|
2020-05-15T04:20:48.000Z
|
2020-05-15T04:20:48.000Z
|
# coding: utf-8
"""
Container Security APIs
All features of the Container Security are available through REST APIs.<br/>Access support information at www.qualys.com/support/<br/><br/><b>Permissions:</b><br/>User must have the Container module enabled<br/>User must have API ACCESS permission # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import qualys_cs_api
from qualys_cs_api.models.host import Host # noqa: E501
from qualys_cs_api.rest import ApiException
class TestHost(unittest.TestCase):
    """Unit-test scaffold for the ``Host`` model (generated stub)."""
    def setUp(self):
        """No fixtures are needed yet."""
        pass
    def tearDown(self):
        """Nothing to clean up."""
        pass
    def testHost(self):
        """Placeholder test for constructing a Host instance."""
        # FIXME: construct object with mandatory attributes with example values
        # model = qualys_cs_api.models.host.Host()  # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| 24.875
| 265
| 0.697487
|
acfe1cfbce688f3925797a8217f487b78cc7fc30
| 6,101
|
py
|
Python
|
nuplan/planning/metrics/evaluation_metrics/common/ego_safety_performance.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 128
|
2021-12-06T15:41:14.000Z
|
2022-03-29T13:16:32.000Z
|
nuplan/planning/metrics/evaluation_metrics/common/ego_safety_performance.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 28
|
2021-12-11T08:11:31.000Z
|
2022-03-25T02:35:43.000Z
|
nuplan/planning/metrics/evaluation_metrics/common/ego_safety_performance.py
|
motional/nuplan-devkit
|
e39029e788b17f47f2fcadb774098ef8fbdd0d67
|
[
"Apache-2.0"
] | 14
|
2021-12-11T04:12:26.000Z
|
2022-03-24T06:38:30.000Z
|
from typing import Dict, List, Optional
from nuplan.planning.metrics.evaluation_metrics.base.metric_base import MetricBase
from nuplan.planning.metrics.evaluation_metrics.common.drivable_area_violation import DrivableAreaViolationStatistics
from nuplan.planning.metrics.evaluation_metrics.common.ego_at_fault_collisions import EgoAtFaultCollisionStatistics
from nuplan.planning.metrics.evaluation_metrics.common.ego_min_distance_to_lead_agent import EgoMinDistanceToLeadAgent
from nuplan.planning.metrics.evaluation_metrics.common.time_to_collision import TimeToCollisionStatistics
from nuplan.planning.metrics.metric_result import MetricStatistics, MetricStatisticsType, Statistic, TimeSeries
from nuplan.planning.scenario_builder.abstract_scenario import AbstractScenario
from nuplan.planning.simulation.history.simulation_history import SimulationHistory
class EgoSafetyStatistics(MetricBase):
    """
    Ego safety performance metric. We assume that ego and other tracks do not drive in reverse mode (backwards).
    Checks if:
    1. Ego does not have an at_fault_collision, and
    2. Ego does not get too close to the front agent, and
    3. Ego maintains a minimum TTC greater than a given threshold, and
    4. Ego drives in drivable area.
    """
    def __init__(
        self,
        name: str,
        category: str,
        time_to_collision_metric: TimeToCollisionStatistics,
        drivable_area_violation_metric: DrivableAreaViolationStatistics,
        ego_at_fault_collisions_metric: EgoAtFaultCollisionStatistics,
        ego_min_distance_to_lead_agent_metric: EgoMinDistanceToLeadAgent,
    ):
        """
        Initializes the EgoSafetyStatistics class
        :param name: Metric name
        :param category: Metric category
        :param time_to_collision_metric: time to collision metric
        :param drivable_area_violation_metric: drivable area violation metric
        :param ego_at_fault_collisions_metric: Ego at fault collisions metric
        :param ego_min_distance_to_lead_agent_metric: Minimum distance between ego and the front agent
        """
        super().__init__(name=name, category=category)
        # The four sub-metrics below must already contain results when
        # check_ego_safety_performance is called (asserted there).
        self._time_to_collision = time_to_collision_metric
        self._drivable_area_violation = drivable_area_violation_metric
        self._at_fault_collisions = ego_at_fault_collisions_metric
        self._min_distance_to_lead_agent = ego_min_distance_to_lead_agent_metric
    def compute_score(
        self,
        scenario: AbstractScenario,
        metric_statistics: Dict[str, Statistic],
        time_series: Optional[TimeSeries] = None,
    ) -> float:
        """Inherited, see superclass."""
        # Return 1.0 if safe, otherwise 0
        return float(metric_statistics[MetricStatisticsType.BOOLEAN].value)
    def check_ego_safety_performance(self, history: SimulationHistory, scenario: AbstractScenario) -> bool:
        """
        We assume that ego and other tracks do not drive in reverse mode (backwards).
        Returns True if:
        1. Ego does not have an at_fault_collision, and
        2. Ego does not get too close to the front agent, and
        3. Ego maintains a minimum TTC greater than a threshold, and
        4. Ego drives in drivable area,
        Otherwise returns False
        NOTE: `history` and `scenario` are unused here — every input comes
        from the pre-computed sub-metrics' results.
        :param history: History from a simulation engine.
        :param scenario: Scenario running this metric.
        :return True if safety performance is acceptable else False.
        """
        # Load pre-calculated violations from ego_at_fault_collision metric
        assert (
            self._at_fault_collisions.results
        ), "ego_at_fault_collisions metric must be run prior to calling {}".format(self.name)
        ego_at_fault_metric_count = self._at_fault_collisions.results[0].statistics[MetricStatisticsType.COUNT].value
        if ego_at_fault_metric_count > 0:
            return False
        # Load pre-calculated violations from ego_min_distance_to_lead_agent metric
        assert (
            self._min_distance_to_lead_agent.results
        ), "ego_min_distance_to_lead_agent metric must be run prior to calling {}".format(self.name)
        distance_to_lead_agents_within_bound = (
            self._min_distance_to_lead_agent.results[0].statistics[MetricStatisticsType.BOOLEAN].value
        )
        if not distance_to_lead_agents_within_bound:
            return False
        # Load pre-calculated TTC within bound from time_to_collision metric
        assert self._time_to_collision.results, "time_to_collision metric must be run prior to calling {}".format(
            self.name
        )
        time_to_collision_within_bound = (
            self._time_to_collision.results[0].statistics[MetricStatisticsType.BOOLEAN].value
        )
        if not time_to_collision_within_bound:
            return False
        # Load pre-calculated drivable area violation from drivable_area_violation metric
        assert (
            self._drivable_area_violation.results
        ), "drivable_area_violation metric must be run prior to calling {}".format(self.name)
        number_of_drivable_area_violation = (
            self._drivable_area_violation.results[0].statistics[MetricStatisticsType.COUNT].value
        )
        if number_of_drivable_area_violation > 0:
            return False
        return True
    def compute(self, history: SimulationHistory, scenario: AbstractScenario) -> List[MetricStatistics]:
        """
        Returns the estimated metric
        :param history: History from a simulation engine
        :param scenario: Scenario running this metric
        :return: the estimated metric.
        """
        safety_performance_metric = self.check_ego_safety_performance(history=history, scenario=scenario)
        statistics = {
            MetricStatisticsType.BOOLEAN: Statistic(
                name="ego_safety_performance", unit="boolean", value=safety_performance_metric
            )
        }
        results = self._construct_metric_results(metric_statistics=statistics, time_series=None, scenario=scenario)
        return results  # type: ignore
| 47.294574
| 118
| 0.726602
|
acfe1db0fcf76b7589f61cd623217e422adc45ed
| 2,512
|
py
|
Python
|
numba/tests/test_remove_dead.py
|
ehsantn/numba
|
4749ef7ccc630b7f649ec972497bc5b7fca79303
|
[
"BSD-2-Clause",
"MIT"
] | 1
|
2021-08-14T13:48:12.000Z
|
2021-08-14T13:48:12.000Z
|
numba/tests/test_remove_dead.py
|
ehsantn/numba
|
4749ef7ccc630b7f649ec972497bc5b7fca79303
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
numba/tests/test_remove_dead.py
|
ehsantn/numba
|
4749ef7ccc630b7f649ec972497bc5b7fca79303
|
[
"BSD-2-Clause",
"MIT"
] | null | null | null |
#
# Copyright (c) 2017 Intel Corporation
# SPDX-License-Identifier: BSD-2-Clause
#
from numba import compiler, typing
from numba.targets import cpu
from numba import types
from numba.targets.registry import cpu_target
from numba import config
from numba.annotations import type_annotations
from numba.ir_utils import copy_propagate, apply_copy_propagate, get_name_var_table, remove_dels, remove_dead
from numba import ir
from numba import unittest_support as unittest
def test_will_propagate(b, z, w):
    """IR fixture for the copy-propagation / dead-code tests below.
    The body is deliberately wasteful: `y` is never read, and after copy
    propagation the constant in `x = 3` is folded into `a = 2 * x`,
    leaving the assignment to `x` dead — test1 asserts it gets removed.
    Do NOT "clean up" this function; the dead code is the point.
    """
    x = 3
    if b > 0:
        y = z + w
    else:
        y = 0
    a = 2 * x
    return a < b
def null_func(a,b,c,d):
    """Deliberate no-op callback passed to apply_copy_propagate below;
    the bare `False` expression statement does nothing."""
    False
def findLhsAssign(func_ir, var):
    """Return True iff any block of *func_ir* contains an ir.Assign whose
    target variable is named *var*, else False."""
    for block in func_ir.blocks.values():
        for inst in block.body:
            if isinstance(inst, ir.Assign) and inst.target.name == var:
                return True
    return False
class TestRemoveDead(unittest.TestCase):
    """End-to-end check of copy_propagate + remove_dead on numba IR."""
    def test1(self):
        """After copy propagation, `x = 3` in test_will_propagate becomes
        dead and remove_dead must eliminate it."""
        typingctx = typing.Context()
        targetctx = cpu.CPUContext(typingctx)
        # Build untyped numba IR from the Python fixture function.
        test_ir = compiler.run_frontend(test_will_propagate)
        #print("Num blocks = ", len(test_ir.blocks))
        #print(test_ir.dump())
        with cpu_target.nested_context(typingctx, targetctx):
            typingctx.refresh()
            targetctx.refresh()
            args = (types.int64, types.int64, types.int64)
            # Type the IR so copy propagation has a typemap to work with.
            typemap, return_type, calltypes = compiler.type_inference_stage(typingctx, test_ir, args, None)
            #print("typemap = ", typemap)
            #print("return_type = ", return_type)
            type_annotation = type_annotations.TypeAnnotation(
                func_ir=test_ir,
                typemap=typemap,
                calltypes=calltypes,
                lifted=(),
                lifted_from=None,
                args=args,
                return_type=return_type,
                html_output=config.HTML)
            # Del instructions must be stripped before the rewrites run.
            remove_dels(test_ir.blocks)
            in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
            #print("in_cps = ", in_cps)
            #print("out_cps = ", out_cps)
            apply_copy_propagate(test_ir.blocks, in_cps, get_name_var_table(test_ir.blocks), typemap, calltypes, null_func, None)
            #print(test_ir.dump())
            #print("findAssign = ", findAssign(test_ir, "x"))
            remove_dead(test_ir.blocks, test_ir.arg_names)
            #print(test_ir.dump())
            # The now-dead assignment to "x" must be gone.
            self.assertFalse(findLhsAssign(test_ir, "x"))
if __name__ == "__main__":
unittest.main()
| 34.410959
| 129
| 0.631369
|
acfe1e308bea257178661cadf8068ca29ab1bb30
| 5,444
|
py
|
Python
|
Question_41_50/answers/answer_44.py
|
OverHall27/Gasyori100knock
|
341c528eb4c0789034898ee1f7d0a4b2f8b23eff
|
[
"MIT"
] | 1
|
2019-09-02T11:02:44.000Z
|
2019-09-02T11:02:44.000Z
|
Question_41_50/answers/answer_44.py
|
OverHall27/Gasyori100knock
|
341c528eb4c0789034898ee1f7d0a4b2f8b23eff
|
[
"MIT"
] | 7
|
2020-08-31T18:15:30.000Z
|
2021-06-25T15:42:29.000Z
|
Question_41_50/answers/answer_44.py
|
OverHall27/Gasyori100knock
|
341c528eb4c0789034898ee1f7d0a4b2f8b23eff
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import matplotlib.pyplot as plt
def Canny(img):
    """Canny edge detection on a BGR image.
    Pipeline: grayscale -> Gaussian blur -> Sobel gradients -> edge
    strength/direction -> angle quantization -> non-maximum suppression
    -> hysteresis thresholding.
    Bug fixes vs. the original:
    * sobel_filter padded with the *unsmoothed* `gray` closure variable
      instead of its `img` argument, silently discarding the blur.
    * angle_quantization wrote 95 for vertical edges while NMS tests for
      90, so vertical edges were never suppressed correctly (and the NMS
      offsets could be stale/undefined on the first pixel).
    * `np.float` / `np.int` aliases (removed in NumPy 1.24) replaced.
    :param img: H x W x 3 BGR image array.
    :return: H x W float array whose entries are 0 or 255.
    """
    # Gray scale
    def BGR2GRAY(img):
        b = img[:, :, 0].copy()
        g = img[:, :, 1].copy()
        r = img[:, :, 2].copy()
        # ITU-R BT.709 luma weights
        out = 0.2126 * r + 0.7152 * g + 0.0722 * b
        out = out.astype(np.uint8)
        return out
    # Gaussian filter for grayscale
    def gaussian_filter(img, K_size=3, sigma=1.3):
        if len(img.shape) == 3:
            H, W, C = img.shape
        else:
            img = np.expand_dims(img, axis=-1)
            H, W, C = img.shape
        # Zero padding
        pad = K_size // 2
        out = np.zeros([H + pad * 2, W + pad * 2, C], dtype=np.float64)
        out[pad: pad + H, pad: pad + W] = img.copy().astype(np.float64)
        # prepare Gaussian kernel, then normalize it to sum to 1
        K = np.zeros((K_size, K_size), dtype=np.float64)
        for x in range(-pad, -pad + K_size):
            for y in range(-pad, -pad + K_size):
                K[y + pad, x + pad] = np.exp(-(x ** 2 + y ** 2) / (2 * (sigma ** 2)))
        K /= (sigma * np.sqrt(2 * np.pi))
        K /= K.sum()
        tmp = out.copy()
        # filtering
        for y in range(H):
            for x in range(W):
                for c in range(C):
                    out[pad + y, pad + x, c] = np.sum(K * tmp[y: y + K_size, x: x + K_size, c])
        out = out[pad: pad + H, pad: pad + W].astype(np.uint8)
        out = out[..., 0]
        return out
    # sobel filter
    def sobel_filter(img, K_size=3):
        H, W = img.shape
        # Zero padding
        pad = K_size // 2
        out = np.zeros((H + pad * 2, W + pad * 2), dtype=np.float64)
        # BUG FIX: pad with the argument that was passed in (the blurred
        # image), not the outer `gray` closure variable.
        out[pad: pad + H, pad: pad + W] = img.copy().astype(np.float64)
        tmp = out.copy()
        out_v = out.copy()
        out_h = out.copy()
        # Sobel vertical
        Kv = [[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]]
        # Sobel horizontal
        Kh = [[1., 0., -1.], [2., 0., -2.], [1., 0., -1.]]
        # filtering
        for y in range(H):
            for x in range(W):
                out_v[pad + y, pad + x] = np.sum(Kv * (tmp[y: y + K_size, x: x + K_size]))
                out_h[pad + y, pad + x] = np.sum(Kh * (tmp[y: y + K_size, x: x + K_size]))
        # NOTE(review): clipping at 0 discards negative gradients (edges of
        # the opposite polarity); kept to preserve the original behaviour.
        out_v = np.clip(out_v, 0, 255)
        out_h = np.clip(out_h, 0, 255)
        out_v = out_v[pad: pad + H, pad: pad + W].astype(np.uint8)
        out_h = out_h[pad: pad + H, pad: pad + W].astype(np.uint8)
        return out_v, out_h
    def get_edge_tan(fx, fy):
        # get edge strength
        edge = np.sqrt(np.power(fx.astype(np.float32), 2) + np.power(fy.astype(np.float32), 2))
        edge = np.clip(edge, 0, 255)
        # avoid division by zero in fy / fx
        fx = np.maximum(fx, 1e-5)
        # get edge angle
        tan = np.arctan(fy / fx)
        return edge, tan
    def angle_quantization(tan):
        angle = np.zeros_like(tan, dtype=np.uint8)
        angle[np.where((tan > -0.4142) & (tan <= 0.4142))] = 0
        angle[np.where((tan > 0.4142) & (tan < 2.4142))] = 45
        # BUG FIX: was 95, which never matched the `angle == 90` branch in
        # non_maximum_suppression (vertical edges fell through).
        angle[np.where((tan >= 2.4142) | (tan <= -2.4142))] = 90
        angle[np.where((tan > -2.4142) & (tan <= -0.4142))] = 135
        return angle
    def non_maximum_suppression(angle, edge):
        H, W = angle.shape
        for y in range(H):
            for x in range(W):
                # pick the neighbour pair along the gradient direction
                if angle[y, x] == 0:
                    dx1, dy1, dx2, dy2 = -1, 0, 1, 0
                elif angle[y, x] == 45:
                    dx1, dy1, dx2, dy2 = -1, 1, 1, -1
                elif angle[y, x] == 90:
                    dx1, dy1, dx2, dy2 = 0, -1, 0, 1
                else:  # 135
                    dx1, dy1, dx2, dy2 = -1, -1, 1, 1
                # clamp the offsets at the image border
                if x == 0:
                    dx1 = max(dx1, 0)
                    dx2 = max(dx2, 0)
                if x == W - 1:
                    dx1 = min(dx1, 0)
                    dx2 = min(dx2, 0)
                if y == 0:
                    dy1 = max(dy1, 0)
                    dy2 = max(dy2, 0)
                if y == H - 1:
                    dy1 = min(dy1, 0)
                    dy2 = min(dy2, 0)
                # suppress pixels that are not the local maximum (ties kept)
                if max(max(edge[y, x], edge[y + dy1, x + dx1]), edge[y + dy2, x + dx2]) != edge[y, x]:
                    edge[y, x] = 0
        return edge
    def hysterisis(edge, HT=100, LT=30):
        H, W = edge.shape
        # Hysteresis threshold: strong edges kept, weak edges dropped
        edge[edge >= HT] = 255
        edge[edge <= LT] = 0
        _edge = np.zeros((H + 2, W + 2), dtype=np.float32)
        _edge[1:H + 1, 1:W + 1] = edge
        # 8-nearest-neighbour mask (centre excluded)
        nn = np.array(((1., 1., 1.), (1., 0., 1.), (1., 1., 1.)), dtype=np.float32)
        for y in range(1, H + 2):
            for x in range(1, W + 2):
                if _edge[y, x] < LT or _edge[y, x] > HT:
                    continue
                # promote an in-between pixel only if a strong neighbour exists
                if np.max(_edge[y - 1:y + 2, x - 1:x + 2] * nn) >= HT:
                    _edge[y, x] = 255
                else:
                    _edge[y, x] = 0
        edge = _edge[1:H + 1, 1:W + 1]
        return edge
    # grayscale
    gray = BGR2GRAY(img)
    # gaussian filtering
    gaussian = gaussian_filter(gray, K_size=5, sigma=1.4)
    # sobel filtering
    fy, fx = sobel_filter(gaussian, K_size=3)
    # get edge strength, angle
    edge, tan = get_edge_tan(fx, fy)
    # angle quantization
    angle = angle_quantization(tan)
    # non maximum suppression
    edge = non_maximum_suppression(angle, edge)
    # hysterisis threshold
    out = hysterisis(edge)
    return out
def Hough_Line_step1(edge):
    """Step 1 of a Hough line transform: accumulate votes in (rho, theta).
    Fixes vs. the original: the removed NumPy alias ``np.int`` is replaced
    (deleted in NumPy 1.24) and the unused ``drho`` variable is dropped.
    :param edge: H x W array; pixels equal to 255 are treated as edge points.
    :return: vote table of shape (rho_max, 180), dtype uint8.
    """
    # Voting
    def voting(edge):
        H, W = edge.shape
        dtheta = 1  # angular resolution in degrees
        # get rho max length (the image diagonal)
        rho_max = np.ceil(np.sqrt(H ** 2 + W ** 2)).astype(np.int64)
        # hough table
        hough = np.zeros((rho_max, 180), dtype=np.int64)
        # get index of edge
        ind = np.where(edge == 255)
        # hough transformation
        for y, x in zip(ind[0], ind[1]):
            for theta in range(0, 180, dtheta):
                # get polar coordinates
                t = np.pi / 180 * theta
                rho = int(x * np.cos(t) + y * np.sin(t))
                # vote; a negative rho wraps to the end of the table,
                # matching the original behaviour
                hough[rho, theta] += 1
        out = hough.astype(np.uint8)
        return out
    # voting
    out = voting(edge)
    return out
# Read image
# NOTE(review): cv2.imread returns None for a missing file, which would
# crash on .astype below — confirm "thorino.jpg" ships with the exercise.
img = cv2.imread("thorino.jpg").astype(np.float32)
# Canny edge detection
edge = Canny(img)
# Hough voting table (step 1 only: no peak extraction yet)
out = Hough_Line_step1(edge)
out = out.astype(np.uint8)
# Save result
cv2.imwrite("out.jpg", out)
cv2.imshow("result", out)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 22.220408
| 89
| 0.555474
|
acfe1edac0cb4e117407d423c9cb7b9a86addff6
| 2,997
|
py
|
Python
|
payloader.py
|
pyrat3/nyan-payload
|
b1e6ee3b8a421864dd4b3fc695874dad403c4675
|
[
"MIT"
] | 2
|
2021-02-11T02:59:47.000Z
|
2021-02-20T09:36:36.000Z
|
payloader.py
|
pyrat3/nyan-payload
|
b1e6ee3b8a421864dd4b3fc695874dad403c4675
|
[
"MIT"
] | null | null | null |
payloader.py
|
pyrat3/nyan-payload
|
b1e6ee3b8a421864dd4b3fc695874dad403c4675
|
[
"MIT"
] | 2
|
2021-03-24T02:06:19.000Z
|
2021-04-06T07:33:57.000Z
|
import base64
import inspect
import os
import stager
class Payloader:
    """Builds a self-extracting Python payload from the files in
    ``nyan_cat_folder``: the `stager` module's source plus a call that
    re-creates those files, base64-encoded, optionally wrapped in an
    ``exec`` one-liner and a ``python -c`` shell command."""
    def __init__(self, nyan_cat_folder="nyan-cat-code", add_exec_wrapper=True, add_python_bash_wrapper=True,
                 write_to_file=True, payload_file_name="python_nyan_cat_payload.txt"):
        # Folder whose (top-level) files get embedded in the payload.
        self.nyan_cat_folder = nyan_cat_folder
        self.add_exec_wrapper = add_exec_wrapper
        self.add_python_bash_wrapper = add_python_bash_wrapper
        self.write_to_file = write_to_file
        self.payload_file_name = payload_file_name
        # Call appended to the stager source; `stager` is presumed to
        # define write_files(...) — TODO confirm against stager.py.
        self.function_call_to_append = "write_files({})"
        self.exec_wrapper = "import base64;exec(base64.b64decode('{}'))"
        self.python_bash_wrapper = """python -c "{}" """
        self.source = ""
    def get_file_paths(self):
        """Return paths of the regular files directly inside the folder
        (subdirectories are skipped, not recursed into)."""
        files_in_folder = os.listdir(self.nyan_cat_folder)
        file_paths = []
        for file in files_in_folder:
            file_path = os.path.join(self.nyan_cat_folder, file)
            if os.path.isfile(file_path):
                file_paths.append(file_path)
        return file_paths
    def file_to_base64(self, file_path):
        """Return (base64-encoded bytes of the file, its basename)."""
        with open(file_path, "rb") as file:
            content = file.read()
            base64_content = base64.b64encode(content)
        return base64_content, os.path.basename(file_path)
    def all_files_to_base64(self):
        """Encode every file found by get_file_paths()."""
        file_paths = self.get_file_paths()
        return [self.file_to_base64(file_path) for file_path in file_paths]
    def write_payload_to_file(self):
        """Persist self.source to payload_file_name (if enabled)."""
        if self.write_to_file:
            with open(self.payload_file_name, "w") as file:
                file.write(self.source)
    def do_add_eval_wrapper(self):
        """Wrap the base64 source in an `exec(base64.b64decode(...))` stub."""
        if self.add_exec_wrapper:
            self.source = self.exec_wrapper.format(self.source)
    def do_add_python_bash_wrapper(self):
        """Wrap the current payload in a `python -c "..."` shell command."""
        if self.add_python_bash_wrapper:
            self.source = self.python_bash_wrapper.format(self.source)
    def stager_source(self):
        """Build the base64-encoded stager script: the `stager` module's
        source with a write_files(<encoded files>) call appended."""
        files = self.all_files_to_base64()
        _source, *_ = inspect.getsourcelines(stager)
        source = _source.copy()
        formated_function_call = self.function_call_to_append.format(files)
        source.append(formated_function_call)
        source = "".join(source)
        source = source.encode("UTF-8")
        source = base64.b64encode(source)
        self.source = source.decode("UTF-8")
    def make_payload(self):
        """Run the full pipeline: build source, apply wrappers, write out."""
        self.stager_source()
        self.do_add_eval_wrapper()
        self.do_add_python_bash_wrapper()
        self.write_payload_to_file()
def go():
    """Generate all three payload variants and write each one to its own file.

    Variants, in order: raw base64 payload only, payload with the exec wrapper,
    and the fully wrapped ``python -c`` command.
    """
    variants = [
        Payloader(add_python_bash_wrapper=False, add_exec_wrapper=False,
                  payload_file_name="python_nyan_cat_payload_only.txt"),
        Payloader(add_python_bash_wrapper=False,
                  payload_file_name="python_nyan_cat_payload_with_exec_wrapper.txt"),
        Payloader(),
    ]
    for variant in variants:
        variant.make_payload()
| 36.54878
| 108
| 0.674675
|
acfe1efda29ae03097b9abc03fef93594190a70f
| 9,253
|
py
|
Python
|
solo/methods/simsiam.py
|
pantheon5100/simsimpp
|
147d5cdaa986d1da1608efb6cf663826bfd57053
|
[
"MIT"
] | 3
|
2021-08-23T12:47:50.000Z
|
2022-01-16T02:06:34.000Z
|
solo/methods/simsiam.py
|
pantheon5100/simsimpp
|
147d5cdaa986d1da1608efb6cf663826bfd57053
|
[
"MIT"
] | null | null | null |
solo/methods/simsiam.py
|
pantheon5100/simsimpp
|
147d5cdaa986d1da1608efb6cf663826bfd57053
|
[
"MIT"
] | null | null | null |
import argparse
from typing import Any, Dict, List, Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from solo.losses.simsiam import simsiam_loss_func
from solo.methods.base import BaseModel
from solo.losses.vicreg import covariance_loss
def value_constrain(x, type=None):
    """Optionally squash tensor *x* into (-1, 1).

    ``type="tanh"`` applies tanh, ``type="sigmoid"`` applies the rescaled
    sigmoid ``2*sigmoid(x) - 1``; any other value (including ``None``)
    returns *x* unchanged.
    """
    if type == "tanh":
        return torch.tanh(x)
    if type == "sigmoid":
        return 2 * torch.sigmoid(x) - 1
    return x
class BaisLayer(nn.Module):
    """Predictor layer that L2-normalizes input, then optionally adds a learnable
    bias and/or applies a (constrained) linear map.

    Note: class name kept as-is ("Bais" typo) for checkpoint / caller compatibility.
    ``self.bias`` remains either ``False`` (disabled) or an ``nn.Parameter`` of
    shape ``(1, output_dim)``, exactly as before.

    Args:
        output_dim: feature dimensionality of the input/output.
        bias: if True, add a learnable bias after normalization.
        weight_matrix: if True, apply a bias-free linear layer after the bias step.
        constrain_type: constraint applied to bias/weight values before each
            forward pass; one of "none", "sigmoid", "tanh" (see value_constrain).
    """

    def __init__(self, output_dim, bias=False, weight_matrix=False, constrain_type="none"):
        super(BaisLayer, self).__init__()
        self.constrain_type = constrain_type
        self.weight_matrix = weight_matrix
        if weight_matrix:
            self.w = nn.Linear(output_dim, output_dim, bias=False)
        # BUGFIX: the original stored the flag and the Parameter in the same
        # attribute and tested `if self.bias:` in forward(); truth-testing a
        # multi-element Parameter raises "Boolean value of Tensor is ambiguous"
        # whenever bias=True and output_dim > 1. Keep a plain bool flag instead.
        self.use_bias = bias
        self.bias = nn.Parameter(torch.zeros(1, output_dim)) if bias else bias

    def forward(self, x):
        # Project onto the unit sphere along the feature dimension.
        x = F.normalize(x, dim=-1)
        if self.use_bias:
            # Re-constrain the bias values in place (detached, so the constraint
            # itself is not differentiated through).
            self.bias.data = value_constrain(self.bias.data, type=self.constrain_type).detach()
            x = x + self.bias
        if self.weight_matrix:
            # Same in-place constraint for the linear weights.
            self.w.weight.data = value_constrain(self.w.weight.data, type=self.constrain_type).detach()
            x = self.w(x)
        return x
class SimSiam(BaseModel):
    """SimSiam model with extra centering/residual diagnostics logged each step."""

    def __init__(
        self,
        output_dim: int,
        proj_hidden_dim: int,
        pred_hidden_dim: int,
        BL: bool,
        **kwargs,
    ):
        """Implements SimSiam (https://arxiv.org/abs/2011.10566).

        Args:
            output_dim (int): number of dimensions of projected features.
            proj_hidden_dim (int): number of neurons of the hidden layers of the projector.
            pred_hidden_dim (int): number of neurons of the hidden layers of the predictor.
            BL (bool): if True, replace the MLP predictor with a single BaisLayer.
        """
        super().__init__(**kwargs)
        # projector: 3-layer MLP, BN+ReLU after the first two linear layers
        self.projector = nn.Sequential(
            nn.Linear(self.features_dim, proj_hidden_dim, bias=False),
            nn.BatchNorm1d(proj_hidden_dim),
            nn.ReLU(),
            nn.Linear(proj_hidden_dim, proj_hidden_dim, bias=False),
            nn.BatchNorm1d(proj_hidden_dim),
            nn.ReLU(),
            nn.Linear(proj_hidden_dim, output_dim),
            # nn.BatchNorm1d(output_dim, affine=False),
        )
        # self.projector[6].bias.requires_grad = False  # hack: not use bias as it is followed by BN
        # predictor: either the standard 2-layer MLP, or a single BaisLayer (BL=True)
        if not BL:
            self.predictor = nn.Sequential(
                nn.Linear(output_dim, pred_hidden_dim, bias=False),
                nn.BatchNorm1d(pred_hidden_dim),
                nn.ReLU(),
                nn.Linear(pred_hidden_dim, output_dim),
            )
        elif BL:
            self.predictor = nn.Sequential(
                BaisLayer(output_dim, bias=False, weight_matrix=False, constrain_type="none"),
            )

    @staticmethod
    def add_model_specific_args(parent_parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Register SimSiam-specific CLI arguments on top of the base model's."""
        parent_parser = super(SimSiam, SimSiam).add_model_specific_args(parent_parser)
        parser = parent_parser.add_argument_group("simsiam")
        # projector
        parser.add_argument("--output_dim", type=int, default=128)
        parser.add_argument("--proj_hidden_dim", type=int, default=2048)
        # predictor
        parser.add_argument("--BL", action="store_true")
        SUPPORTED_VALUE_CONSTRAIN = ["none", "sigmoid", "tanh"]
        parser.add_argument("--constrain", choices=SUPPORTED_VALUE_CONSTRAIN, type=str)
        parser.add_argument("--pred_hidden_dim", type=int, default=512)
        return parent_parser

    @property
    def learnable_params(self) -> List[dict]:
        """Adds projector and predictor parameters to the parent's learnable parameters.

        Returns:
            List[dict]: list of learnable parameters.
        """
        # "static_lr" keeps the predictor's learning rate fixed (SimSiam recipe).
        extra_learnable_params: List[dict] = [
            {"params": self.projector.parameters()},
            {"params": self.predictor.parameters(), "static_lr": True},
        ]
        return super().learnable_params + extra_learnable_params

    def forward(self, X: torch.Tensor, *args, **kwargs) -> Dict[str, Any]:
        """Performs the forward pass of the encoder, the projector and the predictor.

        Args:
            X (torch.Tensor): a batch of images in the tensor format.

        Returns:
            Dict[str, Any]:
                a dict containing the outputs of the parent
                and the projected and predicted features.
        """
        out = super().forward(X, *args, **kwargs)
        z = self.projector(out["feats"])
        p = self.predictor(z)
        return {**out, "z": z, "p": p}

    def training_step(self, batch: Sequence[Any], batch_idx: int) -> torch.Tensor:
        """Training step for SimSiam reusing BaseModel training step.

        Args:
            batch (Sequence[Any]): a batch of data in the format of [img_indexes, [X], Y], where
                [X] is a list of size self.num_crops containing batches of images
            batch_idx (int): index of the batch

        Returns:
            torch.Tensor: total loss composed of SimSiam loss and classification loss
        """
        out = super().training_step(batch, batch_idx)
        class_loss = out["loss"]
        feats1, feats2 = out["feats"]
        z1 = self.projector(feats1)
        z2 = self.projector(feats2)
        p1 = self.predictor(z1)
        p2 = self.predictor(z2)
        # ------- contrastive loss -------
        # symmetric negative cosine similarity between each prediction and the
        # stop-gradient projection of the other view
        neg_cos_sim = simsiam_loss_func(p1, z2) / 2 + simsiam_loss_func(p2, z1) / 2
        # calculate std of features
        z1_std = F.normalize(z1, dim=-1).std(dim=0).mean()
        z2_std = F.normalize(z2, dim=-1).std(dim=0).mean()
        z_std = (z1_std + z2_std) / 2
        with torch.no_grad():
            # normalize the vector to make it comparable
            z1 = F.normalize(z1, dim=-1)
            z2 = F.normalize(z2, dim=-1)
            # batch-mean "center" and per-sample residual of the normalized features
            centervector = ((z1 + z2) / 2).mean(dim=0)
            residualvector = z2 - centervector
            # import pdb; pdb.set_trace()
            # NOTE(review): 2048 is hard-coded here; presumably assumes
            # output_dim == 2048 — confirm against the training config.
            ZvsC = F.cosine_similarity(z2, centervector.expand(z2.size(0), 2048), dim=-1).mean()
            ZvsR = F.cosine_similarity(z2, residualvector, dim=-1).mean()
            CvsR = F.cosine_similarity(centervector.expand(z2.size(0), 2048), residualvector, dim=-1).mean()
            ratio_RvsW = (torch.linalg.norm(residualvector, dim=1, ord=2) / torch.linalg.norm(z2, dim=1, ord=2)).mean()
            ratio_CvsW = (torch.linalg.norm(centervector.expand(z2.size(0), 2048), dim=1, ord=2) / torch.linalg.norm(z2, dim=1, ord=2)).mean()
            # NOTE(review): self.onestepbeforecentering / self.previouscentering are
            # not initialized in this class — presumably set up elsewhere (BaseModel
            # or a setup hook); verify before the first training step.
            CS1vsCc = F.cosine_similarity(self.onestepbeforecentering, centervector.reshape(1, -1))
            CS1minusCcvsCc = F.cosine_similarity(centervector.reshape(1, -1) - self.onestepbeforecentering, centervector.reshape(1, -1))
            CS1minusCcvsCS1 = F.cosine_similarity(centervector.reshape(1, -1) - self.onestepbeforecentering, self.onestepbeforecentering)
            # self.recod_epoch[self.trainer.global_step - self.trainer.current_epoch * 195] = CS1minusCcvsCc.cpu()
            # CS1minusCcvsCc = F.cosine_similarity(self.onestepbeforecentering, centervector.reshape(1, -1))
            # if self.trainer.is_last_batch:
            #     import numpy as np
            #     np.savetxt( f"BS{self.trainer.current_epoch}.txt", self.recod_epoch.numpy(),)
            self.onestepbeforecentering = centervector.reshape(1, -1)
            # NOTE(review): "ZvsR_norm" appears twice below — the second assignment
            # wins (same value here, so behavior is unaffected, but likely a typo
            # for another metric name).
            new_metric_log = {"ZvsC_norm": ZvsC,
                              "ZvsR_norm": ZvsR,
                              "ratio_RvsW_norm": ratio_RvsW,
                              "ZvsR_norm": ZvsR,
                              "ratio_CvsW_norm": ratio_CvsW,
                              "CvsR_norm": CvsR,
                              "CS1vsCc": CS1vsCc,
                              "CS1minusCcvsCc": CS1minusCcvsCc,
                              "CS1minusCcvsCS1": CS1minusCcvsCS1,
                              }
            if self.trainer.global_step % 100 == 0:
                # center drift relative to the snapshot taken 100 steps ago
                CpvsCc = F.cosine_similarity(self.previouscentering, centervector.reshape(1, -1))
                self.previouscentering = centervector.reshape(1, -1).clone()
                new_metric_log.update({"CpvsCc_norm": CpvsCc})
        # calculate std of features
        # NOTE(review): z_std is recomputed here with the same expressions as
        # above (z1/z2 were already normalized, normalize is idempotent) —
        # redundant but harmless.
        z1_std = F.normalize(z1, dim=-1).std(dim=0).mean()
        z2_std = F.normalize(z2, dim=-1).std(dim=0).mean()
        z_std = (z1_std + z2_std) / 2
        with torch.no_grad():
            cov_loss = covariance_loss(z1, z2)
            mean_z = (z1.abs().mean(dim=1) + z2.abs().mean(dim=1)).mean() / 2
            z1 = F.normalize(z1, dim=-1)
            z2 = F.normalize(z2, dim=-1)
            norm_cov_loss = covariance_loss(z1, z2)
            norm_mean_z = (z1.abs().mean(dim=1) + z2.abs().mean(dim=1)).mean() / 2
        metrics = {
            "neg_cos_sim": neg_cos_sim,
            "train_z_std": z_std,
            "cov_loss": cov_loss,
            "norm_cov_loss": norm_cov_loss,
            "mean_z": mean_z,
            "norm_mean_z": norm_mean_z,
        }
        metrics.update(new_metric_log)
        self.log_dict(metrics, on_epoch=True, sync_dist=True)
        return neg_cos_sim + class_loss
| 36.573123
| 142
| 0.596779
|
acfe21012e83c9ede1d90e01718b177313a43182
| 13,370
|
py
|
Python
|
chemdataextractor_batteries/chemdataextractor/doc/table.py
|
MB9991/test_demo-
|
ca3df4ecf20f7a26a621f68caf668f2e726a737d
|
[
"MIT"
] | 199
|
2016-10-07T06:55:23.000Z
|
2022-03-29T09:50:03.000Z
|
chemdataextractor/doc/table.py
|
qingtong00/ChemDataExtractor
|
349a3bea965f2073141d62043b89319222e46af1
|
[
"MIT"
] | 29
|
2016-10-04T08:56:05.000Z
|
2022-03-06T19:36:55.000Z
|
chemdataextractor/doc/table.py
|
qingtong00/ChemDataExtractor
|
349a3bea965f2073141d62043b89319222e46af1
|
[
"MIT"
] | 95
|
2016-10-10T14:24:27.000Z
|
2022-03-16T18:30:00.000Z
|
# -*- coding: utf-8 -*-
"""
chemdataextractor.doc.table
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Table document elements.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from collections import defaultdict
from ..model import Compound, ModelList
from ..parse.table import CompoundHeadingParser, CompoundCellParser, UvvisAbsHeadingParser, UvvisAbsCellParser, \
QuantumYieldHeadingParser, QuantumYieldCellParser, UvvisEmiHeadingParser, UvvisEmiCellParser, ExtinctionCellParser, \
ExtinctionHeadingParser, FluorescenceLifetimeHeadingParser, FluorescenceLifetimeCellParser, \
ElectrochemicalPotentialHeadingParser, ElectrochemicalPotentialCellParser, IrHeadingParser, IrCellParser, \
SolventCellParser, SolventHeadingParser, SolventInHeadingParser, UvvisAbsEmiQuantumYieldHeadingParser, \
UvvisAbsEmiQuantumYieldCellParser, MeltingPointHeadingParser, MeltingPointCellParser, GlassTransitionHeadingParser, GlassTransitionCellParser, TempInHeadingParser, \
UvvisAbsDisallowedHeadingParser, UvvisEmiQuantumYieldHeadingParser, UvvisEmiQuantumYieldCellParser
# TODO: Sort out the above import... import module instead
from ..nlp.tag import NoneTagger
from ..nlp.tokenize import FineWordTokenizer
from ..utils import memoized_property
from .element import CaptionedElement
from .text import Sentence
log = logging.getLogger(__name__)
class Table(CaptionedElement):
    """A document table: caption, heading rows, data rows and footnotes, with
    record extraction driven by per-column heading/cell parser pairs."""

    #: Table cell parsers. Each tuple is (heading_parser, [cell_parser, [disallowed_heading_parser]]);
    #: the heading parser decides whether the cell parser applies to a column.
    parsers = [
        (CompoundHeadingParser(), CompoundCellParser()),
        (UvvisAbsEmiQuantumYieldHeadingParser(), UvvisAbsEmiQuantumYieldCellParser()),
        (UvvisEmiQuantumYieldHeadingParser(), UvvisEmiQuantumYieldCellParser()),
        (UvvisEmiHeadingParser(), UvvisEmiCellParser()),
        (UvvisAbsHeadingParser(), UvvisAbsCellParser(), UvvisAbsDisallowedHeadingParser()),
        (IrHeadingParser(), IrCellParser()),
        (ExtinctionHeadingParser(), ExtinctionCellParser()),
        (QuantumYieldHeadingParser(), QuantumYieldCellParser()),
        (FluorescenceLifetimeHeadingParser(), FluorescenceLifetimeCellParser()),
        (ElectrochemicalPotentialHeadingParser(), ElectrochemicalPotentialCellParser()),
        (MeltingPointHeadingParser(), MeltingPointCellParser()),
        (GlassTransitionHeadingParser(), GlassTransitionCellParser()),
        (SolventHeadingParser(), SolventCellParser()),
        (SolventInHeadingParser(),),
        (TempInHeadingParser(),)
    ]

    def __init__(self, caption, label=None, headings=None, rows=None, footnotes=None, **kwargs):
        """Create a table.

        :param caption: caption element for the table.
        :param label: optional table label.
        :param headings: list of heading rows, each a list of Cell.
        :param rows: list of data rows, each a list of Cell.
        :param footnotes: list of footnote elements referenced by cells/caption.
        """
        super(Table, self).__init__(caption=caption, label=label, **kwargs)
        self.headings = headings if headings is not None else []  # list(list(Cell))
        self.rows = rows if rows is not None else []  # list(list(Cell))
        self.footnotes = footnotes if footnotes is not None else []

    @property
    def document(self):
        """The Document this table belongs to."""
        return self._document

    @document.setter
    def document(self, document):
        # Propagate the owning document to the caption and every cell.
        self._document = document
        self.caption.document = document
        for row in self.headings:
            for cell in row:
                cell.document = document
        for row in self.rows:
            for cell in row:
                cell.document = document

    def serialize(self):
        """Convert Table element to python dictionary."""
        data = {
            'type': self.__class__.__name__,
            'caption': self.caption.serialize(),
            'headings': [[cell.serialize() for cell in hrow] for hrow in self.headings],
            'rows': [[cell.serialize() for cell in row] for row in self.rows],
        }
        return data

    def _repr_html_(self):
        """Render the table as an HTML string for notebook display.

        NOTE(review): the opened ``<tr>`` elements are never closed; browsers
        tolerate this, but the markup is not well-formed.
        """
        html_lines = ['<table class="table">']
        html_lines.append(self.caption._repr_html_())
        html_lines.append('<thead>')
        for hrow in self.headings:
            html_lines.append('<tr>')
            for cell in hrow:
                html_lines.append('<th>' + cell.text + '</th>')
        html_lines.append('</thead>')
        html_lines.append('<tbody>')
        for row in self.rows:
            html_lines.append('<tr>')
            for cell in row:
                html_lines.append('<td>' + cell.text + '</td>')
        html_lines.append('</tbody>')
        html_lines.append('</table>')
        return '\n'.join(html_lines)

    @property
    def records(self):
        """Chemical records that have been parsed from the table.

        Two phases: (1) scan heading columns to pick a value parser per column
        and collect contextual "header compounds"; (2) parse each data row,
        merging header, footnote and caption context into one Compound per row.
        """
        caption_records = self.caption.records
        # Parse headers to extract contextual data and determine value parser for the column
        value_parsers = {}
        header_compounds = defaultdict(list)
        table_records = ModelList()
        seen_compound_col = False
        log.debug('Parsing table headers')
        # zip(*self.headings) iterates columns of the heading rows.
        for i, col_headings in enumerate(zip(*self.headings)):
            # log.info('Considering column %s' % i)
            for parsers in self.parsers:
                log.debug(parsers)
                heading_parser = parsers[0]
                value_parser = parsers[1] if len(parsers) > 1 else None
                disallowed_parser = parsers[2] if len(parsers) > 2 else None
                allowed = False
                disallowed = False
                for cell in col_headings:
                    log.debug(cell.tagged_tokens)
                    results = list(heading_parser.parse(cell.tagged_tokens))
                    if results:
                        allowed = True
                        log.debug('Heading column %s: Match %s: %s' % (i, heading_parser.__class__.__name__, [c.serialize() for c in results]))
                        # Results from every parser are stored as header compounds
                        header_compounds[i].extend(results)
                        # Referenced footnote records are also stored
                        for footnote in self.footnotes:
                            # print('%s - %s - %s' % (footnote.id, cell.references, footnote.id in cell.references))
                            if footnote.id in cell.references:
                                log.debug('Adding footnote %s to column %s: %s' % (footnote.id, i, [c.serialize() for c in footnote.records]))
                                # print('Footnote records: %s' % [c.to_primitive() for c in footnote.records])
                                header_compounds[i].extend(footnote.records)
                    # Check if the disallowed parser matches this cell
                    if disallowed_parser and list(disallowed_parser.parse(cell.tagged_tokens)):
                        log.debug('Column %s: Disallowed %s' % (i, heading_parser.__class__.__name__))
                        disallowed = True
                # If heading parser matches and disallowed parser doesn't, store the value parser
                if allowed and not disallowed and value_parser and i not in value_parsers:
                    if isinstance(value_parser, CompoundCellParser):
                        # Only take the first compound col
                        if seen_compound_col:
                            continue
                        seen_compound_col = True
                    log.debug('Column %s: Value parser: %s' % (i, value_parser.__class__.__name__))
                    value_parsers[i] = value_parser
                    # Stop after value parser is assigned?
        # for hrow in self.headings:
        #     for i, cell in enumerate(hrow):
        #         log.debug(cell.tagged_tokens)
        #         for heading_parser, value_parser in self.parsers:
        #             results = list(heading_parser.parse(cell.tagged_tokens))
        #             if results:
        #                 log.debug('Heading column %s: Match %s: %s' % (i, heading_parser.__class__.__name__, [c.to_primitive() for c in results]))
        #                 # Results from every parser are stored as header compounds
        #                 header_compounds[i].extend(results)
        #             if results and value_parser and i not in value_parsers:
        #                 if isinstance(value_parser, CompoundCellParser):
        #                     # Only take the first compound col
        #                     if seen_compound_col:
        #                         continue
        #                     seen_compound_col = True
        #                 value_parsers[i] = value_parser
        #                 break  # Stop after first heading parser matches
        #         # Referenced footnote records are also stored
        #         for footnote in self.footnotes:
        #             # print('%s - %s - %s' % (footnote.id, cell.references, footnote.id in cell.references))
        #             if footnote.id in cell.references:
        #                 log.debug('Adding footnote %s to column %s: %s' % (footnote.id, i, [c.to_primitive() for c in footnote.records]))
        #                 # print('Footnote records: %s' % [c.to_primitive() for c in footnote.records])
        #                 header_compounds[i].extend(footnote.records)
        # If no parsers, skip processing table
        if value_parsers:
            # If no CompoundCellParser() in value_parsers and value_parsers[0] == [] then set CompoundCellParser()
            if not seen_compound_col and 0 not in value_parsers:
                log.debug('No compound column found in table, assuming first column')
                value_parsers[0] = CompoundCellParser()
            for row in self.rows:
                row_compound = Compound()
                # Keep cell records that are contextual to merge at the end
                contextual_cell_compounds = []
                for i, cell in enumerate(row):
                    log.debug(cell.tagged_tokens)
                    if i in value_parsers:
                        results = list(value_parsers[i].parse(cell.tagged_tokens))
                        if results:
                            log.debug('Cell column %s: Match %s: %s' % (i, value_parsers[i].__class__.__name__, [c.serialize() for c in results]))
                            # For each result, merge in values from elsewhere
                            for result in results:
                                # Merge each header_compounds[i]
                                for header_compound in header_compounds[i]:
                                    if header_compound.is_contextual:
                                        result.merge_contextual(header_compound)
                                # Merge footnote compounds
                                for footnote in self.footnotes:
                                    if footnote.id in cell.references:
                                        for footnote_compound in footnote.records:
                                            result.merge_contextual(footnote_compound)
                                if result.is_contextual:
                                    # Don't merge cell as a value compound if there are no values
                                    contextual_cell_compounds.append(result)
                                else:
                                    row_compound.merge(result)
                # Merge contextual information from cells
                for contextual_cell_compound in contextual_cell_compounds:
                    row_compound.merge_contextual(contextual_cell_compound)
                # If no compound name/label, try take from previous row
                if not row_compound.names and not row_compound.labels and table_records:
                    prev = table_records[-1]
                    row_compound.names = prev.names
                    row_compound.labels = prev.labels
                # Merge contextual information from caption into the full row
                for caption_compound in caption_records:
                    if caption_compound.is_contextual:
                        row_compound.merge_contextual(caption_compound)
                # And also merge from any footnotes that are referenced from the caption
                for footnote in self.footnotes:
                    if footnote.id in self.caption.references:
                        # print('Footnote records: %s' % [c.to_primitive() for c in footnote.records])
                        for fn_compound in footnote.records:
                            row_compound.merge_contextual(fn_compound)
                log.debug(row_compound.serialize())
                # Only keep rows that actually produced data.
                if row_compound.serialize():
                    table_records.append(row_compound)
        # TODO: If no rows have name or label, see if one is in the caption
        # Include non-contextual caption records in the final output
        caption_records = [c for c in caption_records if not c.is_contextual]
        table_records += caption_records
        return table_records
# TODO: extend abbreviations property to include footnotes
# TODO: Resolve footnote records into headers
class Cell(Sentence):
    """A single table cell: finely tokenized, with NER and abbreviation
    detection disabled (records come from the parent Table, not the cell)."""

    word_tokenizer = FineWordTokenizer()
    # pos_tagger = NoneTagger()
    ner_tagger = NoneTagger()

    @memoized_property
    def abbreviation_definitions(self):
        """Empty list. Abbreviation detection is disabled within table cells."""
        return []

    @property
    def records(self):
        """Empty list. Individual cells don't provide records, this is handled by the parent Table."""
        return []
| 50.836502
| 169
| 0.599402
|
acfe212df01b352350e2422679b6e0679de7a04f
| 4,017
|
py
|
Python
|
test/programytest/parser/pattern/nodes_tests/test_iset.py
|
whackur/chatbot
|
bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7
|
[
"MIT"
] | 2
|
2018-06-16T09:32:22.000Z
|
2019-07-21T13:16:00.000Z
|
test/programytest/parser/pattern/nodes_tests/test_iset.py
|
whackur/chatbot
|
bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7
|
[
"MIT"
] | 3
|
2020-07-16T04:00:42.000Z
|
2021-03-31T18:52:22.000Z
|
test/programytest/parser/pattern/nodes_tests/test_iset.py
|
whackur/chatbot
|
bb4b4dace89f1f8aae2b6377bf7d2601e66af7a7
|
[
"MIT"
] | 4
|
2018-06-29T23:50:44.000Z
|
2020-11-05T08:13:47.000Z
|
from programytest.parser.base import ParserTestsBaseClass
from programy.parser.pattern.nodes.iset import PatternISetNode
from programy.dialog.dialog import Sentence
from programy.parser.exceptions import ParserException
class PatternSetNodeTests(ParserTestsBaseClass):
    """Tests for PatternISetNode: construction (text vs attribute), error cases,
    node-type predicates, matching, and word-list parsing.

    NOTE(review): assertEquals is a deprecated alias of assertEqual; behavior
    is identical, but unittest emits DeprecationWarning on newer Pythons.
    """

    def test_init_with_text(self):
        # Words supplied as the element's text content.
        node = PatternISetNode({}, "test1, test2, test3")
        self.assertIsNotNone(node)
        self.assertEquals("TEST1", node.words[0])
        self.assertEquals("TEST2", node.words[1])
        self.assertEquals("TEST3", node.words[2])

    def test_init_with_attribs(self):
        # Words supplied via the "words" attribute instead of text.
        node = PatternISetNode({"words": "test1, test2, test3"}, "")
        self.assertIsNotNone(node)
        self.assertEquals("TEST1", node.words[0])
        self.assertEquals("TEST2", node.words[1])
        self.assertEquals("TEST3", node.words[2])

    def test_init_with_invalid_attribs(self):
        # Unknown attribute and no text -> ParserException.
        with self.assertRaises(ParserException) as raised:
            node = PatternISetNode({"unknwon": "test1"}, "")
        self.assertEqual(str(raised.exception), "Invalid iset node, no words specified as attribute or text")

    def test_init_with_nothing(self):
        # Neither attributes nor text -> ParserException.
        with self.assertRaises(ParserException) as raised:
            node = PatternISetNode({}, "")
        self.assertEqual(str(raised.exception), "Invalid iset node, no words specified as attribute or text")

    def test_init(self):
        # Full behavioral check: predicates, words, equivalence, matching and
        # string/XML rendering.
        node = PatternISetNode([], "test1, test2, test3")
        self.assertIsNotNone(node)
        self.assertFalse(node.is_root())
        self.assertFalse(node.is_priority())
        self.assertFalse(node.is_wildcard())
        self.assertFalse(node.is_zero_or_more())
        self.assertFalse(node.is_one_or_more())
        self.assertFalse(node.is_set())
        self.assertFalse(node.is_bot())
        self.assertFalse(node.is_template())
        self.assertFalse(node.is_that())
        self.assertFalse(node.is_topic())
        self.assertFalse(node.is_wildcard())
        self.assertTrue(node.is_iset())
        self.assertIsNotNone(node.children)
        self.assertFalse(node.has_children())
        self.assertIsNotNone(node.words)
        self.assertEquals(3, len(node.words))
        self.assertEquals("TEST1", node.words[0])
        self.assertEquals("TEST2", node.words[1])
        self.assertEquals("TEST3", node.words[2])
        self.assertTrue(node.equivalent(PatternISetNode([], "test1, test2, test3")))
        # Each word of the sentence matches a member of the set; position 3 is
        # out of range and must not match.
        sentence = Sentence(self._client_context.brain.tokenizer, "TEST1 TEST2 TEST3")
        result = node.equals(self._client_context, sentence, 0)
        self.assertTrue(result.matched)
        result = node.equals(self._client_context, sentence, 1)
        self.assertTrue(result.matched)
        result = node.equals(self._client_context, sentence, 2)
        self.assertTrue(result.matched)
        result = node.equals(self._client_context, sentence, 3)
        self.assertFalse(result.matched)
        self.assertEqual(node.to_string(), "ISET [P(0)^(0)#(0)C(0)_(0)*(0)To(0)Th(0)Te(0)] words=[TEST1,TEST2,TEST3]")
        self.assertEqual('<iset words="TEST1. TEST2. TEST3"></iset>\n', node.to_xml(self._client_context))

    def test_parse_words(self):
        # Word list parsing: single word, no spaces, and padded whitespace.
        node = PatternISetNode([], "test1")
        self.assertIsNotNone(node)
        self.assertIsNotNone(node.words)
        self.assertEquals(1, len(node.words))
        self.assertEquals("TEST1", node.words[0])
        node = PatternISetNode([], "test1,test2")
        self.assertIsNotNone(node)
        self.assertIsNotNone(node.words)
        self.assertEquals(2, len(node.words))
        self.assertEquals("TEST1", node.words[0])
        self.assertEquals("TEST2", node.words[1])
        node = PatternISetNode([], " test1, test2 , test3 ")
        self.assertIsNotNone(node)
        self.assertIsNotNone(node.words)
        self.assertEquals(3, len(node.words))
        self.assertEquals("TEST1", node.words[0])
        self.assertEquals("TEST2", node.words[1])
        self.assertEquals("TEST3", node.words[2])
| 41.412371
| 118
| 0.665671
|
acfe217226f8a1c787404aa7352a5ec6cd100e03
| 5,798
|
py
|
Python
|
tests/io/test_memory_data_set.py
|
yhzqb/kedro
|
619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab
|
[
"Apache-2.0"
] | 1
|
2021-08-24T14:23:18.000Z
|
2021-08-24T14:23:18.000Z
|
tests/io/test_memory_data_set.py
|
yhzqb/kedro
|
619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab
|
[
"Apache-2.0"
] | null | null | null |
tests/io/test_memory_data_set.py
|
yhzqb/kedro
|
619d7f0ccb51895d3bb43d30e3dee9d4d0cebcab
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018-2019 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=unused-argument
import numpy as np
import pandas as pd
import pytest
from kedro.io import DataSetError, MemoryDataSet
def _update_data(data, idx, jdx, value):
if isinstance(data, pd.DataFrame):
data.iloc[idx, jdx] = value
return data
if isinstance(data, np.ndarray):
data[idx, jdx] = value
return data
return data # pragma: no cover
def _check_equals(data1, data2):
if isinstance(data1, pd.DataFrame) and isinstance(data2, pd.DataFrame):
return data1.equals(data2)
if isinstance(data1, np.ndarray) and isinstance(data2, np.ndarray):
return np.array_equal(data1, data2)
return False # pragma: no cover
@pytest.fixture
def dummy_numpy_array():
    # 2x3 integer array used as sample data.
    return np.array([[1, 4, 5], [2, 5, 6]])


@pytest.fixture
def dummy_dataframe():
    # DataFrame with the same values as dummy_numpy_array, column-wise.
    return pd.DataFrame({"col1": [1, 2], "col2": [4, 5], "col3": [5, 6]})


@pytest.fixture(params=["dummy_dataframe", "dummy_numpy_array"])
def input_data(request):
    # Parametrized fixture: resolves each named fixture above in turn, so
    # dependent tests run once per data type.
    return request.getfixturevalue(request.param)


@pytest.fixture
def new_data():
    # Replacement data with the same shape/columns but string values.
    return pd.DataFrame({"col1": ["a", "b"], "col2": ["c", "d"], "col3": ["e", "f"]})


@pytest.fixture
def memory_data_set(input_data):
    # A MemoryDataSet pre-loaded with the parametrized input data.
    return MemoryDataSet(data=input_data)
class TestMemoryDataSet:
    """Tests for MemoryDataSet covering load/save, copy semantics, empty
    construction, error paths, repr and existence checks."""

    def test_load(self, memory_data_set, input_data):
        """Test basic load"""
        loaded_data = memory_data_set.load()
        assert _check_equals(loaded_data, input_data)

    def test_save(self, memory_data_set, input_data, new_data):
        """Test overriding the data set"""
        memory_data_set.save(data=new_data)
        reloaded = memory_data_set.load()
        assert not _check_equals(reloaded, input_data)
        assert _check_equals(reloaded, new_data)

    def test_load_modify_original_data(self, memory_data_set, input_data):
        """Check that the data set object is not updated when the original
        object is changed."""
        input_data = _update_data(input_data, 1, 1, -5)
        assert not _check_equals(memory_data_set.load(), input_data)

    def test_save_modify_original_data(self, memory_data_set, new_data):
        """Check that the data set object is not updated when the original
        object is changed."""
        memory_data_set.save(new_data)
        new_data = _update_data(new_data, 1, 1, "new value")
        assert not _check_equals(memory_data_set.load(), new_data)

    @pytest.mark.parametrize(
        "input_data", ["dummy_dataframe", "dummy_numpy_array"], indirect=True
    )
    def test_load_returns_new_object(self, memory_data_set, input_data):
        """Test that consecutive loads point to different objects in case of a
        pandas DataFrame and numpy array"""
        loaded_data = memory_data_set.load()
        reloaded_data = memory_data_set.load()
        assert _check_equals(loaded_data, input_data)
        assert _check_equals(reloaded_data, input_data)
        # Identity check: load() must hand out copies, not the stored object.
        assert loaded_data is not reloaded_data

    def test_create_without_data(self):
        """Test instantiation without data"""
        assert MemoryDataSet() is not None

    def test_loading_none(self):
        """Check the error when attempting to load the data set that doesn't
        contain any data"""
        pattern = r"Data for MemoryDataSet has not been saved yet\."
        with pytest.raises(DataSetError, match=pattern):
            MemoryDataSet().load()

    def test_saving_none(self):
        """Check the error when attempting to save the data set without
        providing the data"""
        pattern = r"Saving `None` to a `DataSet` is not allowed"
        with pytest.raises(DataSetError, match=pattern):
            MemoryDataSet().save(None)

    @pytest.mark.parametrize(
        "input_data,expected",
        [
            ("dummy_dataframe", "MemoryDataSet(data=<DataFrame>)"),
            ("dummy_numpy_array", "MemoryDataSet(data=<ndarray>)"),
        ],
        indirect=["input_data"],
    )
    def test_str_representation(self, memory_data_set, input_data, expected):
        """Test string representation of the data set"""
        assert expected in str(memory_data_set)

    def test_exists(self, new_data):
        """Test `exists` method invocation"""
        data_set = MemoryDataSet()
        assert not data_set.exists()
        data_set.save(new_data)
        assert data_set.exists()
| 37.166667
| 85
| 0.697482
|
acfe21775b10bae82db509de40df1053a975586b
| 6,124
|
py
|
Python
|
pytorch_toolkit/face_recognition/evaluate_landmarks.py
|
JinYAnGHe/openvino_training_extensions
|
a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee
|
[
"Apache-2.0"
] | null | null | null |
pytorch_toolkit/face_recognition/evaluate_landmarks.py
|
JinYAnGHe/openvino_training_extensions
|
a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee
|
[
"Apache-2.0"
] | null | null | null |
pytorch_toolkit/face_recognition/evaluate_landmarks.py
|
JinYAnGHe/openvino_training_extensions
|
a0b4456a3c9fe6c1b7eabc9d5eb4e74d01453dee
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from collections import OrderedDict
import csv
from tensorboardX import SummaryWriter
import glog as log
import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision.transforms import transforms as t
from tqdm import tqdm
from datasets import IBUG
from model.common import models_landmarks
from utils.landmarks_augmentation import Rescale, ToTensor
from utils.utils import load_model_state
def evaluate(val_loader, model):
    """Calculates average error.

    Runs *model* over every batch of *val_loader* and accumulates the
    normalized landmark regression error.

    Parameters
    ----------
    val_loader : torch.utils.data.DataLoader
        yields dicts with 'img' and 'landmarks' tensors
    model : torch.nn.Module
        landmark regressor producing 136 values (68 x/y pairs) per sample

    Returns
    -------
    tuple
        (mean normalized error, per-landmark mean error as ndarray,
        failure rate = share of samples whose mean error exceeds 0.1)
    """
    total_loss = 0.
    total_pp_error = 0.
    failures_num = 0
    items_num = 0
    for _, data in enumerate(tqdm(val_loader), 0):
        data, gt_landmarks = data['img'].cuda(), data['landmarks'].cuda()
        predicted_landmarks = model(data)
        # flatten ground truth to (batch, 136) to match the network output
        gt_landmarks = gt_landmarks.view(-1, 136)
        loss = predicted_landmarks - gt_landmarks
        items_num += loss.shape[0]
        n_points = loss.shape[1] // 2
        # per-point Euclidean distance between prediction and ground truth
        per_point_error = loss.data.view(-1, n_points, 2)
        per_point_error = torch.norm(per_point_error, p=2, dim=2)
        avg_error = torch.sum(per_point_error, 1) / n_points
        # normalize by inter-ocular distance; slices 72:74 and 90:92 are
        # presumably the eye-corner coordinates of the 68-point markup —
        # TODO(review): confirm against the dataset's landmark ordering
        eyes_dist = torch.norm(gt_landmarks[:, 72:74] - gt_landmarks[:, 90:92], p=2, dim=1).reshape(-1)
        per_point_error = torch.div(per_point_error, eyes_dist.view(-1, 1))
        total_pp_error += torch.sum(per_point_error, 0)
        avg_error = torch.div(avg_error, eyes_dist)
        # a sample counts as a failure when its normalized error exceeds 0.1
        failures_num += torch.nonzero(avg_error > 0.1).shape[0]
        total_loss += torch.sum(avg_error)
    return total_loss / items_num, (total_pp_error / items_num).data.cpu().numpy(), float(failures_num) / items_num
def start_evaluation_300w(args):
    """Evaluate a series of training snapshots on the IBUG test split.

    Iterates over snapshots saved every 200 iterations (0..12000),
    computes landmark metrics for each and logs them to TensorBoard.
    """
    dataset = IBUG(args.val, args.v_land, test=True)
    dataset.transform = t.Compose([Rescale((112, 112)), ToTensor(switch_rb=True)])
    val_loader = DataLoader(dataset, batch_size=args.val_batch_size, num_workers=4, shuffle=False, pin_memory=True)
    writer = SummaryWriter('./logs_landm/LandNet-68-single-ibug')
    for i in range(0, 12001, 200):
        model = models_landmarks['mobilelandnet']()
        # assert args.snapshot is not None
        # NOTE(review): the logged file name ("LandNet_{}") differs from the
        # snapshot actually loaded below ("LandNet-68single_{}") — confirm
        # which naming scheme is intended.
        log.info('Testing snapshot ' + "./snapshots/LandNet_{}.pt".format(str(i)) + ' ...')
        model = load_model_state(model, "./snapshots/LandNet-68single_{}.pt".format(str(i)), args.device, eval_state=True)
        model.eval()
        cudnn.benchmark = True
        # model = torch.nn.DataParallel(model)
        log.info('Face landmarks model:')
        log.info(model)
        avg_err, per_point_avg_err, failures_rate = evaluate(val_loader, model)
        log.info('Avg RMSE error: {}'.format(avg_err))
        log.info('Per landmark RMSE error: {}'.format(per_point_avg_err))
        log.info('Failure rate: {}'.format(failures_rate))
        # info[i] = (avg_err.cpu().item(), failures_rate)
        writer.add_scalar('Quality/Avg_error', avg_err, i)
        writer.add_scalar('Quality/Failure_rate', failures_rate, i)
    # print(info)
    # write_csv(info)
def write_csv(dic):
    """Write the keys of *dic* as one CSV row to ``result.csv``.

    ``csv.writer.writerow`` iterates the dict, so only the keys are
    written; pass ``dic.items()`` at the call site for key/value pairs.
    """
    # newline='' is required by the csv module to avoid spurious blank
    # lines on platforms that translate line endings
    with open("result.csv", "w", newline="") as outfile:
        writer = csv.writer(outfile)
        writer.writerow(dic)
def start_evaluation(args):
    """Launches the evaluation process"""
    # NOTE(review): VGGFace2, CelebA and NDG are not imported in this module
    # (only IBUG is), so every branch below raises NameError — restore the
    # dataset imports before using this entry point.
    if args.dataset == 'vgg':
        dataset = VGGFace2(args.val, args.v_list, args.v_land, landmarks_training=True)
    elif args.dataset == 'celeb':
        dataset = CelebA(args.val, args.v_land, test=True)
    else:
        dataset = NDG(args.val, args.v_land)
    if dataset.have_landmarks:
        log.info('Use alignment for the train data')
        dataset.transform = t.Compose([Rescale((48, 48)), ToTensor(switch_rb=True)])
    else:
        # evaluation is meaningless without ground-truth landmarks
        exit()
    val_loader = DataLoader(dataset, batch_size=args.val_batch_size, num_workers=4, shuffle=False, pin_memory=True)
    model = models_landmarks['landnet']()
    assert args.snapshot is not None
    log.info('Testing snapshot ' + args.snapshot + ' ...')
    model = load_model_state(model, args.snapshot, args.device, eval_state=True)
    model.eval()
    cudnn.benchmark = True
    model = torch.nn.DataParallel(model, device_ids=[args.device])
    log.info('Face landmarks model:')
    log.info(model)
    avg_err, per_point_avg_err, failures_rate = evaluate(val_loader, model)
    log.info('Avg RMSE error: {}'.format(avg_err))
    log.info('Per landmark RMSE error: {}'.format(per_point_avg_err))
    log.info('Failure rate: {}'.format(failures_rate))
def main():
    """Build the command-line parser and run evaluation on the chosen device."""
    parser = argparse.ArgumentParser(
        description='Evaluation script for landmarks detection network')
    add = parser.add_argument
    add('--device', '-d', default=0, type=int)
    add('--val_data_root', dest='val', required=True, type=str,
        help='Path to val data.')
    add('--val_list', dest='v_list', required=False, type=str,
        help='Path to test data image list.')
    add('--val_landmarks', dest='v_land', default='', required=False,
        type=str, help='Path to landmarks for test images.')
    add('--val_batch_size', type=int, default=1, help='Validation batch size.')
    add('--snapshot', type=str, default=None, help='Snapshot to evaluate.')
    add('--dataset', choices=['vgg', 'celeb', 'ngd'], type=str, default='vgg',
        help='Dataset.')
    arguments = parser.parse_args()

    with torch.cuda.device(arguments.device):
        # start_evaluation(arguments)
        start_evaluation_300w(arguments)
| 41.100671
| 122
| 0.687786
|
acfe220b48f3bfc8886094ad0f50f4bf7cf2bf6e
| 24,964
|
py
|
Python
|
ema_workbench/analysis/plotting_util.py
|
sid-marain/EMAworkbench
|
49b6d963170fbd15b0fb5adba773b5cc3d86b5b6
|
[
"BSD-3-Clause"
] | null | null | null |
ema_workbench/analysis/plotting_util.py
|
sid-marain/EMAworkbench
|
49b6d963170fbd15b0fb5adba773b5cc3d86b5b6
|
[
"BSD-3-Clause"
] | null | null | null |
ema_workbench/analysis/plotting_util.py
|
sid-marain/EMAworkbench
|
49b6d963170fbd15b0fb5adba773b5cc3d86b5b6
|
[
"BSD-3-Clause"
] | 1
|
2020-02-18T23:11:14.000Z
|
2020-02-18T23:11:14.000Z
|
'''
Plotting utility functions
'''
from __future__ import (absolute_import, print_function, division,
unicode_literals)
import copy
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats.kde as kde
import seaborn as sns
import six
from scipy.stats import gaussian_kde, scoreatpercentile
from ..util import EMAError, info, warning
# .. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
# module-wide defaults and string constants; the plotting functions in this
# module dispatch on these constants rather than on raw strings
COLOR_LIST = sns.color_palette()
'''Default color list'''

sns.set_palette(COLOR_LIST)

TIME = "TIME"
'''Default key for time'''

ENVELOPE = 'envelope'
'''constant for plotting envelopes'''

LINES = 'lines'
'''constant for plotting lines'''

ENV_LIN = "env_lin"
'''constant for plotting envelopes with lines'''

KDE = 'kde'
'''constant for plotting density as a kernel density estimate'''

HIST = 'hist'
'''constant for plotting density as a histogram'''

BOXPLOT = 'boxplot'
'''constant for plotting density as a boxplot'''

VIOLIN = 'violin'
'''constant for plotting density as a violin plot, which combines a
Gaussian density estimate with a boxplot'''

# used for legend
LINE = 'line'
PATCH = 'patch'
SCATTER = 'scatter'

# see http://matplotlib.sourceforge.net/users/customizing.html for details
#mpl.rcParams['savefig.dpi'] = 600
#mpl.rcParams['axes.formatter.limits'] = (-5, 5)
#mpl.rcParams['font.family'] = 'serif'
#mpl.rcParams['font.serif'] = 'Times New Roman'
#mpl.rcParams['font.size'] = 12.0
# ==============================================================================
# actual plotting functions
# ==============================================================================
def plot_envelope(ax, j, time, value, fill=False):
    '''
    Helper function, responsible for plotting an envelope.

    Parameters
    ----------
    ax : axes instance
    j : int
        index used to pick a color from the palette
    time : ndarray
    value : ndarray
    fill : bool
        shade the band between the extremes instead of drawing two curves

    '''
    # the envelope is bounded by the element-wise extremes over all runs
    lower = np.min(value, axis=0)
    upper = np.max(value, axis=0)
    color = get_color(j)

    if not fill:
        ax.plot(time, lower, c=color)
        ax.plot(time, upper, c=color)
    else:
        ax.fill_between(time, lower, upper, facecolor=color, alpha=0.3)
def plot_histogram(ax, values, log):
    '''
    Helper function, responsible for plotting a histogram

    Parameters
    ----------
    ax : axes instance
    values : ndarray or list of ndarray
    log : bool
        plot the counts on a log scale

    '''
    # a list of datasets means a grouped histogram: one color per group
    if isinstance(values, list):
        color = [get_color(i) for i in range(len(values))]
    else:
        color = get_color(0)
    a = ax.hist(values,
                bins=11,
                orientation='horizontal',
                histtype='bar',
                density=True,
                color=color,
                log=log)
    # on a linear axis only show the first and last tick
    if not log:
        ax.set_xticks([0, ax.get_xbound()[1]])
    return a
def plot_kde(ax, values, log):
    '''
    Helper function, responsible for plotting a KDE.

    Parameters
    ----------
    ax : axes instance
    values : list of ndarray
        one density curve is drawn per entry
    log : bool
        use a logarithmic x axis

    '''
    for j, value in enumerate(values):
        color = get_color(j)
        kde_x, kde_y = determine_kde(value)
        ax.plot(kde_x, kde_y, c=color, ms=1, markevery=20)

    if log:
        ax.set_xscale('log')
    else:
        # show only the two extreme ticks, formatted to 2 significant digits
        ax.set_xticks([int(0),
                       ax.get_xaxis().
                       get_view_interval()[1]])
        labels = ["{0:.2g}".format(0), "{0:.2g}".format(ax.get_xlim()[1])]
        ax.set_xticklabels(labels)
def plot_boxplots(ax, values, log, group_labels=None):
    '''
    helper function for plotting a boxplot

    Parameters
    ----------
    ax : axes instance
    values : ndarray or list of ndarray
    log : bool
        ignored; boxplots are always drawn on a linear scale
    group_labels : list of str, optional

    '''
    if log:
        warning("log option ignored for boxplot")

    ax.boxplot(values)
    if not group_labels:
        return
    ax.set_xticklabels(group_labels, rotation='vertical')
def plot_violinplot(ax, value, log, group_labels=None):
    '''
    helper function for plotting violin plots on axes

    Parameters
    ----------
    ax : axes instance
    value : list of ndarray
        one violin is drawn per entry
    log : bool
        ignored; violins are always drawn on a linear scale
    group_labels : list of str, optional

    '''
    if log:
        warning("log option ignored for violin plot")

    pos = range(len(value))

    for data, p in zip(value, pos):
        if len(data) > 0:
            kde = gaussian_kde(data)  # calculates the kernel density
            # support for violin; the sample count must be an int — the
            # previous float literal (250.) raises TypeError on modern NumPy
            x = np.linspace(np.min(data), np.max(data), 250)
            v = kde.evaluate(x)  # violin profile (density curve)
            scl = 1 / (v.max() / 0.4)
            v = v * scl  # scaling the violin to the available space
            ax.fill_betweenx(
                x, p - v, p + v, facecolor=get_color(p), alpha=0.6, lw=1.5)

            # dotted marks at the quartiles, dashed mark at the median
            for percentile in [25, 75]:
                quant = scoreatpercentile(data.ravel(), percentile)
                q_x = kde.evaluate(quant) * scl
                q_x = [p - q_x, p + q_x]
                ax.plot(q_x, [quant, quant], linestyle=":", c='k')
            med = np.median(data)
            m_x = kde.evaluate(med) * scl
            m_x = [p - m_x, p + m_x]
            ax.plot(m_x, [med, med], linestyle="--", c='k', lw=1.5)

    if group_labels:
        # prepend an empty label: tick 0 sits left of the first violin
        labels = group_labels[:]
        labels.insert(0, '')
        ax.set_xticklabels(labels, rotation='vertical')
def group_density(ax_d, density, outcomes, outcome_to_plot, group_labels,
                  log=False, index=-1):
    '''
    helper function for plotting densities in case of grouped data

    Parameters
    ----------
    ax_d : axes instance
    density : {HIST, BOXPLOT, VIOLIN, KDE}
    outcomes : dict
    outcome_to_plot : str
    group_labels : list of str
    log : bool, optional
    index : int, optional
        time index at which the density is taken (default: last step)

    Raises
    ------
    EMAError
        if density is unknown

    '''
    # the per-group value extraction was identical in every branch,
    # so do it once instead of repeating it four times
    values = [outcomes[key][outcome_to_plot][:, index] for key in
              group_labels]

    if density == HIST:
        plot_histogram(ax_d, values, log)
    elif density == BOXPLOT:
        plot_boxplots(ax_d, values, log, group_labels)
    elif density == VIOLIN:
        plot_violinplot(ax_d, values, log, group_labels=group_labels)
    elif density == KDE:
        plot_kde(ax_d, values, log)
    else:
        raise EMAError("unknown density type: {}".format(density))
def simple_density(density, value, ax_d, ax, log):
    '''
    Helper function, responsible for producing a density plot

    Parameters
    ----------
    density : {HIST, BOXPLOT, VIOLIN, KDE}
    value : ndarray
    ax_d : axes instance
        the axes receiving the density plot
    ax : axes instance
        the main axes whose y range the density plot is aligned with
    log : bool

    '''
    # the density is always taken over the final time step; KDE and VIOLIN
    # expect the column wrapped in a list, HIST and BOXPLOT take it bare
    end_states = value[:, -1]
    if density == KDE:
        plot_kde(ax_d, [end_states], log)
    elif density == HIST:
        plot_histogram(ax_d, end_states, log)
    elif density == BOXPLOT:
        plot_boxplots(ax_d, end_states, log)
    elif density == VIOLIN:
        plot_violinplot(ax_d, [end_states], log)
    else:
        raise EMAError("unknown density plot type")

    # align the density plot's y range with the main axes
    interval = ax.get_yaxis().get_view_interval()
    ax_d.get_yaxis().set_view_interval(interval[0], interval[1])
    ax_d.set_ylim(bottom=interval[0], top=interval[1])
def simple_kde(outcomes, outcomes_to_show, colormap, log, minima, maxima):
    '''
    Helper function for generating a density heatmap over time

    Parameters
    ----------
    outcomes : dict
    outcomes_to_show : list of str
    colormap : str
    log : bool
        log-transform the densities to compress their dynamic range
    minima : dict
        per-outcome lower bound for the KDE support
    maxima : dict
        per-outcome upper bound for the KDE support

    Returns
    -------
    tuple
        the figure and a dict mapping outcome name to its axes

    '''
    size_kde = 100  # resolution of the KDE along the value axis
    fig, axes = plt.subplots(len(outcomes_to_show), squeeze=False)
    axes = axes[:, 0]
    axes_dict = {}
    # do the plotting
    for outcome_to_plot, ax in zip(outcomes_to_show, axes):
        axes_dict[outcome_to_plot] = ax
        outcome = outcomes[outcome_to_plot]
        kde_over_time = np.zeros(shape=(size_kde, outcome.shape[1]))
        ymin = minima[outcome_to_plot]
        ymax = maxima[outcome_to_plot]
        # make kde over time: one normalized density column per time step
        for j in range(outcome.shape[1]):
            kde_x = determine_kde(outcome[:, j], size_kde, ymin, ymax)[0]
            kde_x = kde_x/np.max(kde_x)
            if log:
                kde_x = np.log(kde_x+1)
            kde_over_time[:, j] = kde_x
        # reverse the rows so low values end up at the bottom of the heatmap
        sns.heatmap(kde_over_time[::-1,:], ax=ax, cmap=colormap, cbar=True)
        ax.set_xticklabels([])
        ax.set_yticklabels([])
        ax.set_xlabel("time")
        ax.set_ylabel(outcome_to_plot)
    return fig, axes_dict
def make_legend(categories,
                ax,
                ncol=3,
                legend_type=LINE,
                alpha=1):
    '''
    Helper function responsible for making the legend

    Parameters
    ----------
    categories : str or tuple
                 the categories in the legend
    ax : axes instance
         the axes with which the legend is associated
    ncol : int
           the number of columns to use
    legend_type : {LINES, SCATTER, PATCH}
                  whether the legend is linked to lines, patches, or scatter
                  plots
    alpha : float
            the alpha of the artists

    '''
    some_identifiers = []
    labels = []
    for i, category in enumerate(categories):
        color = get_color(i)
        # build a proxy artist of the requested kind for each category
        if legend_type == LINE:
            artist = plt.Line2D([0, 1], [0, 1], color=color,
                                alpha=alpha)  # TODO
        elif legend_type == SCATTER:
            # marker_obj = mpl.markers.MarkerStyle('o')
            # path = marker_obj.get_path().transformed(
            #     marker_obj.get_transform())
            # artist = mpl.collections.PathCollection((path,),
            #     sizes = [20],
            #     facecolors = COLOR_LIST[i],
            #     edgecolors = 'k',
            #     offsets = (0,0)
            #     )
            # TODO work arround, should be a proper proxyartist for scatter legends
            artist = mpl.lines.Line2D([0], [0], linestyle="none",
                                      c=color, marker='o')
        elif legend_type == PATCH:
            artist = plt.Rectangle((0, 0), 1, 1, edgecolor=color,
                                   facecolor=color, alpha=alpha)

        some_identifiers.append(artist)

        # interval specifiers are rendered as "lower - upper"
        if type(category) == tuple:
            label = '%.2f - %.2f' % category
        else:
            label = category

        labels.append(str(label))

    # place the legend above the axes, spread over the full width
    ax.legend(some_identifiers, labels, ncol=ncol,
              loc=3, borderaxespad=0.1,
              mode='expand', bbox_to_anchor=(0., 1.1, 1., .102))
def determine_kde(data,
                  size_kde=1000,
                  ymin=None,
                  ymax=None):
    '''
    Helper function responsible for performing a KDE

    Parameters
    ----------
    data : ndarray
    size_kde : int, optional
        number of points at which the density is evaluated
    ymin : float, optional
        lower bound of the support; inferred from the data if not given
    ymax : float, optional
        upper bound of the support; inferred from the data if not given

    Returns
    -------
    ndarray
        x values for kde
    ndarray
        y values for kde

    ..note:: x and y values are based on rotation as used in density
             plots for end states.

    '''
    # compare against None explicitly: 0 is a valid bound, but the previous
    # `if not ymin:` truthiness check silently replaced it with the data min
    if ymin is None:
        ymin = np.min(data)
    if ymax is None:
        ymax = np.max(data)

    kde_y = np.linspace(ymin, ymax, size_kde)

    try:
        # use the gaussian_kde imported at module level instead of the
        # deprecated scipy.stats.kde submodule
        kde_x = gaussian_kde(data)
        kde_x = kde_x.evaluate(kde_y)
    except Exception as e:
        # a singular dataset (e.g. all-identical values) cannot be fit;
        # fall back to an all-zero density instead of failing the plot
        warning(e)
        kde_x = np.zeros(kde_y.shape)

    return kde_x, kde_y
def filter_scalar_outcomes(outcomes):
    '''
    Helper function that removes non time series outcomes from all the
    outcomes.

    Parameters
    ----------
    outcomes : dict

    Returns
    -------
    dict
        the filtered outcomes

    '''
    filtered = {}
    for name, data in outcomes.items():
        # keep only 2-d (runs x time) arrays; report everything scalar
        if data.ndim >= 2:
            filtered[name] = data
        else:
            info(("{} not shown because it is "
                  "not time series data").format(name))
    return filtered
def determine_time_dimension(outcomes):
    '''
    helper function for determining or creating time dimension

    Parameters
    ----------
    outcomes : dict

    Returns
    -------
    tuple
        the time axis (or None) and the outcomes without the 'TIME' entry

    '''
    try:
        # use the explicit TIME outcome when present, dropping it from the
        # dict so it is not treated as a regular outcome
        time = outcomes.pop('TIME')[0, :]
    except KeyError:
        # otherwise synthesize an index-based axis from the first 2-d outcome
        time = None
        for data in outcomes.values():
            if data.ndim == 2:
                time = np.arange(0, data.shape[1])
                break
        if time is None:
            info("no time dimension found in results")
    return time, outcomes
def group_results(experiments, outcomes, group_by, grouping_specifiers,
                  grouping_labels):
    '''
    Helper function that takes the experiments and results and returns a list
    based on groupings. Each element in the dictionary contains the experiments
    and results for a particular group, the key is the grouping specifier.

    Parameters
    ----------
    experiments : recarray
    outcomes : dict
    group_by : str
               The column in the experiments array to which the grouping
               specifiers apply. If the name is 'index' it is assumed that the
               grouping specifiers are valid indices for numpy.ndarray.
    grouping_specifiers : iterable
                An iterable of grouping specifiers. A grouping
                specifier is a unique identifier in case of grouping by
                categorical uncertainties. It is a tuple in case of
                grouping by a parameter uncertainty. In this cose, the code
                treats the tuples as half open intervals, apart from the
                last entry, which is treated as closed on both sides.
                In case of 'index', the iterable should be a dictionary
                with the name for each group as key and the value being a
                valid index for numpy.ndarray.

    Returns
    -------
    dict
        A dictionary with the experiments and results for each group, the
        grouping specifier is used as key

    ..note:: In case of grouping by parameter uncertainty, the list of
             grouping specifiers is sorted. The traversal assumes half open
             intervals, where the upper limit of each interval is open, except
             for the last interval which is closed.

    '''
    groups = {}
    # the column is only needed (and only defined) when not grouping on index
    if group_by != 'index':
        column_to_group_by = experiments.loc[:, group_by]
    for label, specifier in zip(grouping_labels, grouping_specifiers):
        if isinstance(specifier, tuple):
            # the grouping is a continuous uncertainty
            lower_limit, upper_limit = specifier

            # check whether it is the last grouping specifier
            if grouping_specifiers.index(specifier) ==\
                    len(grouping_specifiers)-1:
                # last case: the interval is closed on both sides
                logical = (column_to_group_by >= lower_limit) &\
                          (column_to_group_by <= upper_limit)
            else:
                logical = (column_to_group_by >= lower_limit) &\
                          (column_to_group_by < upper_limit)
        elif group_by == 'index':
            # the grouping is based on indices
            logical = specifier
        else:
            # the grouping is an integer or categorical uncertainty
            logical = column_to_group_by == specifier

        # slice every outcome down to the rows selected by this group
        group_outcomes = {}
        for key, value in outcomes.items():
            value = value[logical]
            group_outcomes[key] = value
        groups[label] = (experiments.loc[logical,:], group_outcomes)

    return groups
def make_continuous_grouping_specifiers(array, nr_of_groups=5):
    '''
    Helper function for discretesizing a continuous array. By default, the
    array is split into 5 equally wide intervals.

    Parameters
    ----------
    array : ndarray
            a 1-d array that is to be turned into discrete intervals.
    nr_of_groups : int, optional

    Returns
    -------
    list of tuples
        list of tuples with the lower and upper bound of the intervals.

    .. note:: this code only produces intervals. :func:`group_results` uses
              these intervals in half-open fashion, apart from the last
              interval: [a, b), [b,c), [c,d]. That is, both the end point
              and the start point of the range of the continuous array are
              included.

    '''
    minimum = np.min(array)
    maximum = np.max(array)

    # np.linspace guarantees the first and last edge are exactly the data
    # minimum and maximum; the previous `minimum + step * x` arithmetic could
    # miss the maximum by a rounding error (and verified this only with
    # `assert`s, which are stripped under -O)
    edges = np.linspace(minimum, maximum, nr_of_groups + 1)
    return [(edges[i], edges[i + 1]) for i in range(nr_of_groups)]
def prepare_pairs_data(experiments, outcomes,
                       outcomes_to_show=None,
                       group_by=None,
                       grouping_specifiers=None,
                       point_in_time=-1,
                       filter_scalar=True):
    '''Helper function for preparing data for pair-wise plotting.

    Parameters
    ----------
    experiments : DataFrame
    outcomes : dict
    outcomes_to_show : list of str, optional
    group_by : str, optional
    grouping_specifiers : iterable, optional
    point_in_time : int, optional
        time value at which to slice time series outcomes; -1 selects the
        final time step, None disables slicing
    filter_scalar : bool, optional

    '''
    if isinstance(outcomes_to_show, six.string_types):
        raise EMAError(
            "for pair wise plotting, more than one outcome needs to be provided")

    outcomes, outcomes_to_show, time, grouping_labels = prepare_data(
        experiments, outcomes, outcomes_to_show, group_by,
        grouping_specifiers, filter_scalar)

    def filter_outcomes(outcomes, point_in_time):
        # slice every time series outcome down to a single point in time
        new_outcomes = {}
        for key, value in outcomes.items():
            if len(value.shape) == 2:
                new_outcomes[key] = value[:, point_in_time]
            else:
                new_outcomes[key] = value
        return new_outcomes

    # compare against None explicitly: the previous `if point_in_time:`
    # truthiness check silently skipped slicing when time step 0 was asked for
    if point_in_time is not None:
        if point_in_time != -1:
            point_in_time = np.where(time == point_in_time)
        if group_by:
            new_outcomes = {}
            for key, value in outcomes.items():
                new_outcomes[key] = filter_outcomes(value, point_in_time)
            outcomes = new_outcomes
        else:
            outcomes = filter_outcomes(outcomes, point_in_time)
    return outcomes, outcomes_to_show, grouping_labels
def prepare_data(experiments, outcomes, outcomes_to_show=None,
                 group_by=None, grouping_specifiers=None,
                 filter_scalar=True):
    '''Helper function for preparing datasets prior to plotting

    Parameters
    ----------
    experiments : DataFrame
    outcomes : dict
    outcomes_to_show : list of str, optional
    group_by : str, optional
    grouping_specifiers : iterable, optional
    filter_scalar : bool, optional

    '''
    # work on copies so the caller's data is never mutated
    experiments = experiments.copy()
    outcomes = copy.copy(outcomes)

    time, outcomes = determine_time_dimension(outcomes)

    temp_outcomes = {}

    # remove outcomes that are not to be shown
    # NOTE(review): temp_outcomes is filled but never used afterwards, so
    # this loop effectively only validates that every requested outcome
    # exists (raising KeyError otherwise) — confirm whether the selection
    # was meant to replace `outcomes`.
    if outcomes_to_show:
        if isinstance(outcomes_to_show, six.string_types):
            outcomes_to_show = [outcomes_to_show]
        for entry in outcomes_to_show:
            temp_outcomes[entry] = outcomes[entry]

    # filter the outcomes to exclude scalar values
    if filter_scalar:
        outcomes = filter_scalar_outcomes(outcomes)
    if not outcomes_to_show:
        outcomes_to_show = outcomes.keys()

    # group the data if desired
    if group_by:
        if not grouping_specifiers:
            # no grouping specifier, so infer from the data
            if group_by == 'index':
                raise EMAError(("no grouping specifiers provided while "
                                "trying to group on index"))
            else:
                column_to_group_by = experiments[group_by]
                # np.object was removed from NumPy's public API (>= 1.24);
                # the builtin `object` is the exact equivalent dtype check
                if (column_to_group_by.dtype == object) or\
                        (column_to_group_by.dtype == 'category'):
                    grouping_specifiers = set(column_to_group_by)
                else:
                    # NOTE(review): grouping_specifiers is falsy here, so it
                    # is passed as nr_of_groups=None — confirm intent
                    grouping_specifiers = make_continuous_grouping_specifiers(
                        column_to_group_by, grouping_specifiers)
            grouping_labels = grouping_specifiers = sorted(grouping_specifiers)
        else:
            if isinstance(grouping_specifiers, six.string_types):
                grouping_specifiers = [grouping_specifiers]
                grouping_labels = grouping_specifiers
            elif isinstance(grouping_specifiers, dict):
                grouping_labels = sorted(grouping_specifiers.keys())
                grouping_specifiers = [grouping_specifiers[key] for key in
                                       grouping_labels]
            else:
                grouping_labels = grouping_specifiers

        outcomes = group_results(experiments, outcomes, group_by,
                                 grouping_specifiers, grouping_labels)

        # keep only the grouped outcomes, dropping the experiments slices
        new_outcomes = {}
        for key, value in outcomes.items():
            new_outcomes[key] = value[1]
        outcomes = new_outcomes
    else:
        grouping_labels = []

    return outcomes, outcomes_to_show, time, grouping_labels
def do_titles(ax, titles, outcome):
    '''
    Helper function for setting the title on an ax

    Parameters
    ----------
    ax : axes instance
    titles : dict
        a dict which maps outcome names to titles; an empty dict means
        "use the outcome name itself", a non-dict disables titling
    outcome : str
        the outcome plotted in the ax.

    '''
    if not isinstance(titles, dict):
        return

    if not titles:
        ax.set_title(outcome)
        return

    try:
        title = titles[outcome]
    except KeyError:
        warning(
            "key error in do_titles, no title provided for `%s`" % (outcome))
        title = outcome
    ax.set_title(title)
def do_ylabels(ax, ylabels, outcome):
    '''
    Helper function for setting the y labels on an ax

    Parameters
    ----------
    ax : axes instance
    ylabels : dict
        a dict which maps outcome names to y labels; an empty dict means
        "use the outcome name itself", a non-dict disables labelling
    outcome : str
        the outcome plotted in the ax.

    '''
    if not isinstance(ylabels, dict):
        return

    if not ylabels:
        ax.set_ylabel(outcome)
        return

    try:
        label = ylabels[outcome]
    except KeyError:
        warning(
            "key error in do_ylabels, no ylabel provided for `%s`" % (outcome))
        label = outcome
    ax.set_ylabel(label)
def make_grid(outcomes_to_show, density=False):
    '''
    Helper function for making the grid that specifies the size and location
    of the various axes.

    Parameters
    ----------
    outcomes_to_show : list of str
                       the list of outcomes to show
    density : bool, optional
              reserve a narrow second column for a density plot

    Returns
    -------
    tuple
        the figure and the gridspec with one row per outcome

    '''
    # make the plotting grid
    if density:
        # main plot gets 4/5 of the width, the density plot the rest
        grid = gridspec.GridSpec(len(outcomes_to_show), 2,
                                 width_ratios=[4, 1])
    else:
        grid = gridspec.GridSpec(len(outcomes_to_show), 1)
    grid.update(wspace=0.1,
                hspace=0.4)

    figure = plt.figure()
    return figure, grid
def get_color(index):
    '''Return the palette color for *index*, wrapping around when the index
    exceeds the length of the color list.

    '''
    return COLOR_LIST[index % len(COLOR_LIST)]
| 29.473436
| 98
| 0.562851
|
acfe22a396fc75af8f7bc847917ebbb3d5246d06
| 13,551
|
py
|
Python
|
Lib/test/test_userdict.py
|
jimmyyu2004/jython
|
5b4dc2d54d01a6fda8c55d07b2608167e7a40769
|
[
"CNRI-Jython"
] | 332
|
2015-08-22T12:43:56.000Z
|
2022-03-17T01:05:43.000Z
|
Lib/test/test_userdict.py
|
Pandinosaurus/jython3
|
def4f8ec47cb7a9c799ea4c745f12badf92c5769
|
[
"CNRI-Jython"
] | 36
|
2015-05-30T08:39:19.000Z
|
2022-03-04T20:42:33.000Z
|
Lib/test/test_userdict.py
|
Pandinosaurus/jython3
|
def4f8ec47cb7a9c799ea4c745f12badf92c5769
|
[
"CNRI-Jython"
] | 74
|
2015-05-29T17:18:53.000Z
|
2022-01-15T14:06:44.000Z
|
# Check every path through every method of UserDict
import test.support, unittest
from sets import Set
import UserDict
class TestMappingProtocol(unittest.TestCase):
    # This base class can be used to check that an object conforms to the
    # mapping protocol

    # Functions that can be useful to override to adapt to dictionary
    # semantics
    # NOTE(review): this module mixes Python 2 idioms (cmp, sets.Set, the
    # UserDict module) with 2to3-style rewrites; it targets Jython's
    # Python-2-compatible runtime.
    _tested_class = dict  # which class is being tested

    def _reference(self):
        """Return a dictionary of values which are invariant by storage
        in the object under test."""
        return {1:2, "key1":"value1", "key2":(1, 2, 3)}

    def _empty_mapping(self):
        """Return an empty mapping object"""
        return self._tested_class()

    def _full_mapping(self, data):
        """Return a mapping object with the value contained in data
        dictionary"""
        x = self._empty_mapping()
        for key, value in list(data.items()):
            x[key] = value
        return x

    def __init__(self, *args, **kw):
        unittest.TestCase.__init__(self, *args, **kw)
        self.reference = self._reference().copy()
        # one (key, value) pair is set aside in self.other so tests have a
        # key that is guaranteed NOT to be in the mapping under test
        key, value = self.reference.popitem()
        self.other = {key:value}

    def test_read(self):
        # Test for read only operations on mapping
        p = self._empty_mapping()
        p1 = dict(p)  # workaround for singleton objects
        d = self._full_mapping(self.reference)
        if d is p:
            p = p1

        # Indexing
        for key, value in list(self.reference.items()):
            self.assertEqual(d[key], value)
        knownkey = list(self.other.keys())[0]
        self.assertRaises(KeyError, lambda: d[knownkey])

        # len
        self.assertEqual(len(p), 0)
        self.assertEqual(len(d), len(self.reference))

        # has_key
        for k in self.reference:
            self.assertTrue(k in d)
            self.assertTrue(k in d)
        for k in self.other:
            self.assertFalse(k in d)
            self.assertFalse(k in d)

        # cmp (Python 2 / Jython only)
        self.assertEqual(cmp(p, p), 0)
        self.assertEqual(cmp(d, d), 0)
        self.assertEqual(cmp(p, d), -1)
        self.assertEqual(cmp(d, p), 1)

        # __non__zero__
        if p: self.fail("Empty mapping must compare to False")
        if not d: self.fail("Full mapping must compare to True")

        # keys(), items(), iterkeys() ...
        def check_iterandlist(iter, lst, ref):
            self.assertTrue(hasattr(iter, 'next'))
            self.assertTrue(hasattr(iter, '__iter__'))
            x = list(iter)
            self.assertTrue(Set(x)==Set(lst)==Set(ref))
        check_iterandlist(iter(d.keys()), list(d.keys()), list(self.reference.keys()))
        check_iterandlist(iter(d), list(d.keys()), list(self.reference.keys()))
        check_iterandlist(iter(d.values()), list(d.values()), list(self.reference.values()))
        check_iterandlist(iter(d.items()), list(d.items()), list(self.reference.items()))

        # get
        key, value = next(iter(d.items()))
        knownkey, knownvalue = next(iter(self.other.items()))
        self.assertEqual(d.get(key, knownvalue), value)
        self.assertEqual(d.get(knownkey, knownvalue), knownvalue)
        self.assertFalse(knownkey in d)

    def test_write(self):
        # Test for write operations on mapping
        p = self._empty_mapping()

        # Indexing
        for key, value in list(self.reference.items()):
            p[key] = value
            self.assertEqual(p[key], value)
        for key in list(self.reference.keys()):
            del p[key]
            self.assertRaises(KeyError, lambda: p[key])
        p = self._empty_mapping()

        # update
        p.update(self.reference)
        self.assertEqual(dict(p), self.reference)
        d = self._full_mapping(self.reference)

        # setdefaullt
        key, value = next(iter(d.items()))
        knownkey, knownvalue = next(iter(self.other.items()))
        self.assertEqual(d.setdefault(key, knownvalue), value)
        self.assertEqual(d[key], value)
        self.assertEqual(d.setdefault(knownkey, knownvalue), knownvalue)
        self.assertEqual(d[knownkey], knownvalue)

        # pop
        self.assertEqual(d.pop(knownkey), knownvalue)
        self.assertFalse(knownkey in d)
        self.assertRaises(KeyError, d.pop, knownkey)
        default = 909
        d[knownkey] = knownvalue
        self.assertEqual(d.pop(knownkey, default), knownvalue)
        self.assertFalse(knownkey in d)
        self.assertEqual(d.pop(knownkey, default), default)

        # popitem
        key, value = d.popitem()
        self.assertFalse(key in d)
        self.assertEqual(value, self.reference[key])
        p=self._empty_mapping()
        self.assertRaises(KeyError, p.popitem)
# shared fixture dictionaries used by the UserDict tests below
d0 = {}
d1 = {"one": 1}
d2 = {"one": 1, "two": 2}
d3 = {"one": 1, "two": 3, "three": 5}
d4 = {"one": None, "two": None}
d5 = {"one": 1, "two": 1}
class UserDictTest(TestMappingProtocol):
    # exercise the full mapping protocol against IterableUserDict
    _tested_class = UserDict.IterableUserDict

    def test_all(self):
        # Test constructors
        u = UserDict.UserDict()
        u0 = UserDict.UserDict(d0)
        u1 = UserDict.UserDict(d1)
        u2 = UserDict.IterableUserDict(d2)
        uu = UserDict.UserDict(u)
        uu0 = UserDict.UserDict(u0)
        uu1 = UserDict.UserDict(u1)
        uu2 = UserDict.UserDict(u2)

        # keyword arg constructor
        self.assertEqual(UserDict.UserDict(one=1, two=2), d2)
        # item sequence constructor
        self.assertEqual(UserDict.UserDict([('one', 1), ('two', 2)]), d2)
        self.assertEqual(UserDict.UserDict(dict=[('one', 1), ('two', 2)]), d2)
        # both together
        self.assertEqual(UserDict.UserDict([('one', 1), ('two', 2)], two=3, three=5), d3)

        # alternate constructor
        self.assertEqual(UserDict.UserDict.fromkeys('one two'.split()), d4)
        self.assertEqual(UserDict.UserDict().fromkeys('one two'.split()), d4)
        self.assertEqual(UserDict.UserDict.fromkeys('one two'.split(), 1), d5)
        self.assertEqual(UserDict.UserDict().fromkeys('one two'.split(), 1), d5)
        self.assertTrue(u1.fromkeys('one two'.split()) is not u1)
        self.assertTrue(isinstance(u1.fromkeys('one two'.split()), UserDict.UserDict))
        self.assertTrue(isinstance(u2.fromkeys('one two'.split()), UserDict.IterableUserDict))

        # Test __repr__
        # zyasoft - the below is not necessarily true, we cannot
        # depend on the ordering of how the string is constructed;
        # unless we require that it be sorted, or otherwise ordered in
        # some consistent fashion
        # for repr, we can use eval, so that's what we will do here
        # self.assertEqual(str(u0), str(d0))
        # self.assertEqual(repr(u1), repr(d1))
        # self.assertEqual(`u2`, `d2`)
        self.assertEqual(eval(repr(u1)), eval(repr(d1)))
        self.assertEqual(eval(repr(u2)), eval(repr(d2)))
        # end zyasoft ~

        # Test __cmp__ and __len__
        all = [d0, d1, d2, u, u0, u1, u2, uu, uu0, uu1, uu2]
        for a in all:
            for b in all:
                self.assertEqual(cmp(a, b), cmp(len(a), len(b)))

        # Test __getitem__
        self.assertEqual(u2["one"], 1)
        self.assertRaises(KeyError, u1.__getitem__, "two")

        # Test __setitem__
        u3 = UserDict.UserDict(u2)
        u3["two"] = 2
        u3["three"] = 3

        # Test __delitem__
        del u3["three"]
        self.assertRaises(KeyError, u3.__delitem__, "three")

        # Test clear
        u3.clear()
        self.assertEqual(u3, {})

        # Test copy()
        u2a = u2.copy()
        self.assertEqual(u2a, u2)
        u2b = UserDict.UserDict(x=42, y=23)
        u2c = u2b.copy()  # making a copy of a UserDict is special cased
        self.assertEqual(u2b, u2c)

        class MyUserDict(UserDict.UserDict):
            def display(self): print(self)

        m2 = MyUserDict(u2)
        m2a = m2.copy()
        self.assertEqual(m2a, m2)

        # SF bug #476616 -- copy() of UserDict subclass shared data
        m2['foo'] = 'bar'
        self.assertNotEqual(m2a, m2)

        # zyasoft - changed the following three assertions to use sets
        # to remove order dependency
        # Test keys, items, values
        self.assertEqual(set(u2.keys()), set(d2.keys()))
        self.assertEqual(set(u2.items()), set(d2.items()))
        self.assertEqual(set(u2.values()), set(d2.values()))

        # Test has_key and "in".
        for i in list(u2.keys()):
            self.assertTrue(i in u2)
            self.assertTrue(i in u2)
            self.assertEqual(i in u1, i in d1)
            self.assertEqual(i in u1, i in d1)
            self.assertEqual(i in u0, i in d0)
            self.assertEqual(i in u0, i in d0)

        # Test update
        t = UserDict.UserDict()
        t.update(u2)
        self.assertEqual(t, u2)

        # update must also accept any object exposing an items() method
        class Items:
            def items(self):
                return (("x", 42), ("y", 23))

        t = UserDict.UserDict()
        t.update(Items())
        self.assertEqual(t, {"x": 42, "y": 23})

        # Test get
        for i in list(u2.keys()):
            self.assertEqual(u2.get(i), u2[i])
            self.assertEqual(u1.get(i), d1.get(i))
            self.assertEqual(u0.get(i), d0.get(i))

        # Test "in" iteration.
        for i in range(20):
            u2[i] = str(i)
        ikeys = []
        for k in u2:
            ikeys.append(k)
        keys = list(u2.keys())
        self.assertEqual(Set(ikeys), Set(keys))

        # Test setdefault
        t = UserDict.UserDict()
        self.assertEqual(t.setdefault("x", 42), 42)
        self.assertTrue("x" in t)
        self.assertEqual(t.setdefault("x", 23), 42)

        # Test pop
        t = UserDict.UserDict(x=42)
        self.assertEqual(t.pop("x"), 42)
        self.assertRaises(KeyError, t.pop, "x")
        self.assertEqual(t.pop("x", 1), 1)
        t["x"] = 42
        self.assertEqual(t.pop("x", 1), 42)

        # Test popitem
        t = UserDict.UserDict(x=42)
        self.assertEqual(t.popitem(), ("x", 42))
        self.assertRaises(KeyError, t.popitem)
##########################
# Test Dict Mixin
class SeqDict(UserDict.DictMixin):
    """Dictionary lookalike implemented with two parallel lists.

    Used to test and demonstrate DictMixin: only __getitem__,
    __setitem__, __delitem__ and keys() are implemented here; the
    DictMixin base supplies the rest of the mapping protocol.
    """
    def __init__(self):
        # Parallel lists: self.valuelist[i] holds the value for
        # self.keylist[i].
        self.keylist = []
        self.valuelist = []
    def __getitem__(self, key):
        try:
            i = self.keylist.index(key)
        except ValueError:
            # Fix: include the missing key in the exception instead of
            # raising a bare KeyError, matching dict's behaviour and
            # making failures debuggable.
            raise KeyError(key)
        return self.valuelist[i]
    def __setitem__(self, key, value):
        try:
            # Existing key: replace the value in place.
            i = self.keylist.index(key)
            self.valuelist[i] = value
        except ValueError:
            # New key: append to both lists to keep them aligned.
            self.keylist.append(key)
            self.valuelist.append(value)
    def __delitem__(self, key):
        try:
            i = self.keylist.index(key)
        except ValueError:
            # Fix: carry the key in the exception (was a bare KeyError).
            raise KeyError(key)
        self.keylist.pop(i)
        self.valuelist.pop(i)
    def keys(self):
        # Return a copy so callers cannot mutate our internal state.
        return list(self.keylist)
class UserDictMixinTest(TestMappingProtocol):
    """Exercise every method DictMixin derives from the four primitives
    implemented by SeqDict (__getitem__/__setitem__/__delitem__/keys)."""
    _tested_class = SeqDict
    def test_all(self):
        ## Setup test and verify working of the test class
        # check init
        s = SeqDict()
        # exercise setitem
        s[10] = 'ten'
        s[20] = 'twenty'
        s[30] = 'thirty'
        # exercise delitem
        del s[20]
        # check getitem and setitem
        self.assertEqual(s[10], 'ten')
        # check keys() and delitem
        self.assertEqual(list(s.keys()), [10, 30])
        ## Now, test the DictMixin methods one by one
        # has_key
        self.assertTrue(10 in s)
        self.assertTrue(20 not in s)
        # __contains__
        self.assertTrue(10 in s)
        self.assertTrue(20 not in s)
        # __iter__
        self.assertEqual([k for k in s], [10, 30])
        # __len__
        self.assertEqual(len(s), 2)
        # iteritems
        self.assertEqual(list(s.items()), [(10, 'ten'), (30, 'thirty')])
        # iterkeys
        self.assertEqual(list(s.keys()), [10, 30])
        # itervalues
        self.assertEqual(list(s.values()), ['ten', 'thirty'])
        # values
        self.assertEqual(list(s.values()), ['ten', 'thirty'])
        # items
        self.assertEqual(list(s.items()), [(10, 'ten'), (30, 'thirty')])
        # get
        self.assertEqual(s.get(10), 'ten')
        self.assertEqual(s.get(15, 'fifteen'), 'fifteen')
        self.assertEqual(s.get(15), None)
        # setdefault: returns existing value for present keys,
        # inserts and returns the default for missing keys
        self.assertEqual(s.setdefault(40, 'forty'), 'forty')
        self.assertEqual(s.setdefault(10, 'null'), 'ten')
        del s[40]
        # pop
        self.assertEqual(s.pop(10), 'ten')
        self.assertTrue(10 not in s)
        s[10] = 'ten'
        self.assertEqual(s.pop("x", 1), 1)
        s["x"] = 42
        self.assertEqual(s.pop("x", 1), 42)
        # popitem: removes some (key, value) pair; re-insert afterwards
        # so the dict content is known for the following checks
        k, v = s.popitem()
        self.assertTrue(k not in s)
        s[k] = v
        # clear
        s.clear()
        self.assertEqual(len(s), 0)
        # empty popitem
        self.assertRaises(KeyError, s.popitem)
        # update
        s.update({10: 'ten', 20:'twenty'})
        self.assertEqual(s[10], 'ten')
        self.assertEqual(s[20], 'twenty')
        # cmp: equality must hold against a plain dict and against
        # another SeqDict built in a different insertion order
        self.assertEqual(s, {10: 'ten', 20:'twenty'})
        t = SeqDict()
        t[20] = 'twenty'
        t[10] = 'ten'
        self.assertEqual(s, t)
def test_main():
    """Run all mapping-protocol test cases via the regrtest driver."""
    test.support.run_unittest(
        TestMappingProtocol,
        UserDictTest,
        UserDictMixinTest
    )
# Allow running this test module directly as a script.
if __name__ == "__main__":
    test_main()
| 32.41866
| 94
| 0.569404
|
acfe22ebd520349224b38b91fecd5c843d71eaa5
| 1,457
|
py
|
Python
|
pytorchtrain.py
|
sebftw/PyTorch
|
b42bb7987020ed5b7c095ee1bd18ebbcc63e9e8a
|
[
"MIT"
] | null | null | null |
pytorchtrain.py
|
sebftw/PyTorch
|
b42bb7987020ed5b7c095ee1bd18ebbcc63e9e8a
|
[
"MIT"
] | null | null | null |
pytorchtrain.py
|
sebftw/PyTorch
|
b42bb7987020ed5b7c095ee1bd18ebbcc63e9e8a
|
[
"MIT"
] | null | null | null |
import torch, torchvision
import torch.nn as nn
import torchvision.transforms as transforms
import matplotlib.pyplot as plt
import numpy as np
import torch.optim as optim
# Train a small LeNet-style CNN on MNIST for one epoch and save the weights.
transform = transforms.ToTensor()
trainset = torchvision.datasets.MNIST('mnist', download = True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size = 128, shuffle=True, num_workers=0)
#See for later on PyTorch: https://jacobgil.github.io/deeplearning/pruning-deep-learning
# https://cs231n.github.io/convolutional-networks/
# Convolutional feature extractor: two conv+ReLU+maxpool stages.
# For 28x28 MNIST input this produces a 50-channel 4x4 feature map.
conv = torch.nn.Sequential(
        #nn.Dropout(),
        nn.Conv2d(1, 20, 5, 1),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(2, 2),
        nn.Conv2d(20, 50, 5, 1),
        nn.ReLU(),
        nn.MaxPool2d(2, 2)
        )
# Fully-connected classifier head: 4*4*50 features -> 500 -> 10 logits.
fullc = torch.nn.Sequential(
        nn.Linear(4*4*50, 500),
        nn.ReLU(),
        nn.Linear(500, 10))
loss_fn = torch.nn.CrossEntropyLoss()
# Optimize the parameters of both sub-networks jointly.
parameterlist = list(conv.parameters()) + list(fullc.parameters())
loss_val = np.zeros(len(trainloader))
optimizer = optim.Adagrad(parameterlist, lr=1e-2)
# One pass over the training set; record the per-batch loss.
for i, (x, y) in enumerate(trainloader, 0):
    optimizer.zero_grad()
    y_pred = conv(x)
    # Flatten the conv output before the linear layers.
    y_pred = fullc(y_pred.view(-1, 4 * 4 * 50))
    loss = loss_fn(y_pred, y)
    loss_val[i] = loss.item()
    print(i, loss_val[i])
    loss.backward()
    optimizer.step()
# Persist both sub-networks' weights separately.
torch.save(conv.state_dict(), 'pytorchtestnet-conv.pt')
torch.save(fullc.state_dict(), 'pytorchtestnet-fullc.pt')
#%%
# Plot the loss curve, ignoring early outlier batches (loss >= 1).
plt.plot(loss_val[loss_val < 1])
plt.show()
| 24.694915
| 98
| 0.697323
|
acfe23a6427f152c5d8d1f977998c5bf2809c508
| 2,425
|
py
|
Python
|
profiles_api/models.py
|
maze76/profiles-rest-api
|
684829ca064873bd5318a85717bd641c735683b5
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
maze76/profiles-rest-api
|
684829ca064873bd5318a85717bd641c735683b5
|
[
"MIT"
] | null | null | null |
profiles_api/models.py
|
maze76/profiles-rest-api
|
684829ca064873bd5318a85717bd641c735683b5
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import PermissionsMixin
from django.contrib.auth.models import BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
    """Manager providing creation helpers for UserProfile objects."""

    def create_user(self, email, name, password=None):
        """Create, save and return a regular user profile.

        Raises ValueError when no email address is supplied.
        """
        if not email:
            raise ValueError('User must have an email address')
        # Normalize the email (lower-cases the domain part) before storing.
        profile = self.model(email=self.normalize_email(email), name=name)
        # Hash the password instead of storing it in clear text.
        profile.set_password(password)
        profile.save(using=self._db)
        return profile

    def create_superuser(self, email, name, password):
        """Create and save a new superuser with the given details."""
        # Reuse create_user so validation and password hashing apply.
        profile = self.create_user(email, name, password)
        profile.is_superuser = True
        profile.is_staff = True
        profile.save(using=self._db)
        return profile
# Create your models here.
class UserProfile(AbstractBaseUser, PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True) #unique only one same user in the system
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True) #by default users are activated but we can deactivate them later if we need to
is_staff = models.BooleanField(default=False) #determine if user hava access to django admin etc.
objects = UserProfileManager()
USERNAME_FIELD = 'email' #we replace default username field with created EmailField
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve full name of user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Return string representation of our user"""
return self.email
class ProfileFeedItem(models.Model):
    """Profile status update posted by a user."""
    # Link each feed item to its author; deleting the user deletes
    # their feed items (CASCADE).
    user_profile = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE
    )
    status_text = models.CharField(max_length=255)
    # Timestamp set automatically when the item is first created.
    created_on = models.DateTimeField(auto_now_add=True)
    def __str__(self):
        """Return the model as a string"""
        return self.status_text
| 33.219178
| 128
| 0.696495
|
acfe241d4612118c944c1e45a2eabae8b20389d7
| 1,812
|
py
|
Python
|
moltres/power_mesh.py
|
khurrumsaleem/msr-nts-benchmark
|
d742ce84fc8ef2730f55d27b55104e7ea696d23f
|
[
"CC0-1.0"
] | null | null | null |
moltres/power_mesh.py
|
khurrumsaleem/msr-nts-benchmark
|
d742ce84fc8ef2730f55d27b55104e7ea696d23f
|
[
"CC0-1.0"
] | null | null | null |
moltres/power_mesh.py
|
khurrumsaleem/msr-nts-benchmark
|
d742ce84fc8ef2730f55d27b55104e7ea696d23f
|
[
"CC0-1.0"
] | 1
|
2021-11-09T10:42:06.000Z
|
2021-11-09T10:42:06.000Z
|
# Generate MOOSE input fragments for an 8x8 upper-triangular grid of
# fuel channels: a [Mesh] block that carves 10x10 subdomains out of
# full_mesh.e, and matching fission-heat postprocessors.
with open("power_mesh.i", "w") as f:
    f.write("[Mesh]\n")
    f.write("  [./file_mesh]\n")
    f.write("    type = FileMeshGenerator\n")
    f.write("    file = full_mesh.e\n")
    f.write("  []\n")
    # Each generator takes the previous one as input, forming a chain.
    input = "file_mesh"
    # Block ids 1 and 2 are assumed taken by the base mesh; start at 3.
    blk_id = 3
    for i in range(1,9):
        for j in range(1,9):
            # Only the upper triangle (j >= i) is generated — presumably
            # exploiting a symmetry of the core; TODO confirm.
            if j < i:
                continue
            i_s, j_s = str(i), str(j)
            header = "channel" + i_s + j_s
            # 10-unit-wide cells; rows count down from y = 160.
            bl_x, bl_y = 0 + (j-1) * 10, 160 - i * 10
            tr_x, tr_y = 0 + j * 10, 160 - (i-1) * 10
            f.write("  [./" + header + "]\n")
            f.write("    type = SubdomainBoundingBoxGenerator\n")
            f.write("    input = " + input + "\n")
            f.write("    bottom_left = '" + str(bl_x) + " " + str(bl_y)
                    + " 0'\n")
            f.write("    top_right = '" + str(tr_x) + " " + str(tr_y)
                    + " 0'\n")
            f.write("    block_id = " + str(blk_id) + "\n")
            f.write("    block_name = fuel" + i_s + j_s + "\n")
            f.write("    location = inside\n")
            f.write("    restricted_subdomains = '1'\n")
            f.write("  []\n")
            input = header
            blk_id += 1
    f.write("[]")
# One fission-heat postprocessor per generated fuel block.
with open("power_postprocessor.i", "w") as f:
    for i in range(1,9):
        for j in range(1,9):
            if j < i:
                continue
            i_s, j_s = str(i), str(j)
            header = "channel" + i_s + j_s
            f.write("  [./" + header + "]\n")
            f.write("    type = ElmIntegTotFissHeatPostprocessor\n")
            f.write("    block = 'fuel" + i_s + j_s + "'\n")
            f.write("  []\n")
# Print the space-separated list of generated block names for pasting
# into other input files.
block = ""
for i in range(1,9):
    for j in range(1,9):
        if j < i:
            continue
        block += " fuel" + str(i) + str(j)
print(block)
| 35.529412
| 71
| 0.419426
|
acfe2656352c61dc20b2932c55d282ebd779659c
| 74
|
py
|
Python
|
.history/py/UserInput_20201230123600.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
.history/py/UserInput_20201230123600.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
.history/py/UserInput_20201230123600.py
|
minefarmer/Comprehensive-Python
|
f97b9b83ec328fc4e4815607e6a65de90bb8de66
|
[
"Unlicense"
] | null | null | null |
person = input("Enter your name: ")
print("Hello ", person)
richnesscarl
| 14.8
| 35
| 0.702703
|
acfe26573e2ec597c7c9d38533b84cc311d48622
| 25,553
|
py
|
Python
|
docxcompose/composer.py
|
antonio-quarta/docxcompose
|
042e15d24d456d51092e55155a1381c0f1021a69
|
[
"MIT"
] | null | null | null |
docxcompose/composer.py
|
antonio-quarta/docxcompose
|
042e15d24d456d51092e55155a1381c0f1021a69
|
[
"MIT"
] | null | null | null |
docxcompose/composer.py
|
antonio-quarta/docxcompose
|
042e15d24d456d51092e55155a1381c0f1021a69
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from copy import deepcopy
from docx.opc.constants import CONTENT_TYPE as CT
from docx.opc.constants import RELATIONSHIP_TYPE as RT
from docx.opc.oxml import serialize_part_xml
from docx.opc.packuri import PackURI
from docx.opc.part import Part
from docx.oxml import parse_xml
from docx.oxml.section import CT_SectPr
from docx.parts.numbering import NumberingPart
from docxcompose.image import ImageWrapper
from docxcompose.properties import CustomProperties
from docxcompose.utils import NS
from docxcompose.utils import xpath
import os.path
import random
import re
# Splits a part name like '/word/image3' into its base name and
# trailing numeric index (index group is None when absent).
FILENAME_IDX_RE = re.compile('([a-zA-Z/_-]+)([1-9][0-9]*)?')
# Extracts the numeric suffix of a relationship id such as 'rId7'.
RID_IDX_RE = re.compile('rId([0-9]*)')
# Relationship types that add_referenced_parts skips because they are
# handled by dedicated methods (add_images) or deliberately dropped
# (headers/footers of the appended document).
REFERENCED_PARTS_IGNORED_RELTYPES = set([
    RT.IMAGE,
    RT.HEADER,
    RT.FOOTER,
])
# Part relationship types whose XML may reference styles that must be
# copied into the composed document as well.
PART_RELTYPES_WITH_STYLES = [
    RT.FOOTNOTES,
]
class Composer(object):
    """Composes docx documents by appending other documents to a base
    document, copying over everything the appended content references:
    styles, numberings, images, diagrams, shapes, footnotes and other
    related parts, while renumbering ids so they stay unique."""
    def __init__(self, doc):
        # The base (composed) document and its OPC package.
        self.doc = doc
        self.pkg = doc.part.package
        # When True, the first numbering of each appended document is
        # restarted at 1 (see restart_first_numbering).
        self.restart_numbering = True
        self.reset_reference_mapping()
    def reset_reference_mapping(self):
        """Reset per-append state: numbering id maps and the set of
        styles whose numbering has already been restarted."""
        self.num_id_mapping = {}
        self.anum_id_mapping = {}
        self._numbering_restarted = set()
    def append(self, doc, remove_property_fields=True):
        """Append the given document."""
        index = self.append_index()
        self.insert(index, doc, remove_property_fields=remove_property_fields)
    def insert(self, index, doc, remove_property_fields=True):
        """Insert the given document at the given index."""
        self.reset_reference_mapping()
        # Remove custom property fields but keep the values
        if remove_property_fields:
            cprops = CustomProperties(doc)
            for name in cprops.keys():
                cprops.dissolve_fields(name)
        self._create_style_id_mapping(doc)
        for element in doc.element.body:
            # Skip section properties; sections of the composed document
            # are handled separately (fix_section_types).
            if isinstance(element, CT_SectPr):
                continue
            element = deepcopy(element)
            self.doc.element.body.insert(index, element)
            # Pull in everything the inserted element references.
            self.add_referenced_parts(doc.part, self.doc.part, element)
            self.add_styles(doc, element)
            self.add_numberings(doc, element)
            self.restart_first_numbering(doc, element)
            self.add_images(doc, element)
            self.add_diagrams(doc, element)
            self.add_shapes(doc, element)
            self.add_footnotes(doc, element)
            self.remove_header_and_footer_references(doc, element)
            index += 1
        self.add_styles_from_other_parts(doc)
        # Ids must be unique document-wide, so renumber after inserting.
        self.renumber_bookmarks()
        self.renumber_docpr_ids()
        self.renumber_nvpicpr_ids()
        self.fix_section_types(doc)
    def save(self, filename):
        """Save the composed document to the given filename."""
        self.doc.save(filename)
    def append_index(self):
        """Return the body index at which appended content is inserted:
        just before the final section properties, if present."""
        section_props = self.doc.element.body.xpath('w:sectPr')
        if section_props:
            return self.doc.element.body.index(section_props[0])
        return len(self.doc.element.body)
    def add_referenced_parts(self, src_part, dst_part, element):
        """Copy parts referenced via r:id attributes in `element` from
        `src_part` to `dst_part` and rewrite the ids accordingly."""
        rid_elements = xpath(element, './/*[@r:id]')
        for rid_element in rid_elements:
            rid = rid_element.get('{%s}id' % NS['r'])
            rel = src_part.rels[rid]
            # Images, headers and footers are handled elsewhere.
            if rel.reltype in REFERENCED_PARTS_IGNORED_RELTYPES:
                continue
            new_rel = self.add_relationship(src_part, dst_part, rel)
            rid_element.set('{%s}id' % NS['r'], new_rel.rId)
    def add_relationship(self, src_part, dst_part, relationship):
        """Add relationship and its target part"""
        if relationship.is_external:
            # External relationships carry no part; just add the link.
            new_rid = dst_part.rels.get_or_add_ext_rel(
                relationship.reltype, relationship.target_ref)
            return dst_part.rels[new_rid]
        part = relationship.target_part
        # Determine next partname
        name = FILENAME_IDX_RE.match(part.partname).group(1)
        used_part_numbers = [
            FILENAME_IDX_RE.match(p.partname).group(2)
            for p in dst_part.package.iter_parts()
            if p.partname.startswith(name)
        ]
        used_part_numbers = [
            int(idx) for idx in used_part_numbers if idx is not None]
        # Pick the lowest unused index for the new part name.
        for n in range(1, len(used_part_numbers)+2):
            if n not in used_part_numbers:
                next_part_number = n
                break
        next_partname = PackURI('%s%d.%s' % (
            name, next_part_number, part.partname.ext))
        new_part = Part(
            next_partname, part.content_type, part.blob,
            dst_part.package)
        new_rel = dst_part.rels.get_or_add(relationship.reltype, new_part)
        # Sort relationships by rId to get the same rId when adding them to the
        # new part. This avoids fixing references.
        def sort_key(r):
            match = RID_IDX_RE.match(r.rId)
            return int(match.group(1))
        for rel in sorted(part.rels.values(), key=sort_key):
            self.add_relationship(part, new_part, rel)
        return new_rel
    def add_diagrams(self, doc, element):
        """Relate the four diagram parts (data, layout, quick style,
        colors) referenced by dgm:relIds elements to our document."""
        dgm_rels = xpath(element, './/dgm:relIds[@r:dm]')
        for dgm_rel in dgm_rels:
            for item, rt_type in (
                ('dm', RT.DIAGRAM_DATA),
                ('lo', RT.DIAGRAM_LAYOUT),
                ('qs', RT.DIAGRAM_QUICK_STYLE),
                ('cs', RT.DIAGRAM_COLORS)
            ):
                dm_rid = dgm_rel.get('{%s}%s' % (NS['r'], item))
                dm_part = doc.part.rels[dm_rid].target_part
                new_rid = self.doc.part.relate_to(dm_part, rt_type)
                dgm_rel.set('{%s}%s' % (NS['r'], item), new_rid)
    def add_images(self, doc, element):
        """Add images from the given document used in the given element."""
        blips = xpath(
            element, '(.//a:blip|.//asvg:svgBlip)[@r:embed]')
        for blip in blips:
            rid = blip.get('{%s}embed' % NS['r'])
            img_part = doc.part.rels[rid].target_part
            # Deduplicate by content hash: reuse an existing image part
            # with identical bytes instead of adding a copy.
            new_img_part = self.pkg.image_parts._get_by_sha1(img_part.sha1)
            if new_img_part is None:
                image = ImageWrapper(img_part)
                new_img_part = self.pkg.image_parts._add_image_part(image)
            new_rid = self.doc.part.relate_to(new_img_part, RT.IMAGE)
            blip.set('{%s}embed' % NS['r'], new_rid)
            # handle external reference as images can be embedded and have an
            # external reference
            rid = blip.get('{%s}link' % NS['r'])
            if rid:
                rel = doc.part.rels[rid]
                new_rel = self.add_relationship(None, self.doc.part, rel)
                blip.set('{%s}link' % NS['r'], new_rel.rId)
    def add_shapes(self, doc, element):
        """Add image data of VML shapes used in the given element,
        deduplicating by content hash like add_images."""
        shapes = xpath(element, './/v:shape/v:imagedata')
        for shape in shapes:
            rid = shape.get('{%s}id' % NS['r'])
            img_part = doc.part.rels[rid].target_part
            new_img_part = self.pkg.image_parts._get_by_sha1(img_part.sha1)
            if new_img_part is None:
                image = ImageWrapper(img_part)
                new_img_part = self.pkg.image_parts._add_image_part(image)
            new_rid = self.doc.part.relate_to(new_img_part, RT.IMAGE)
            shape.set('{%s}id' % NS['r'], new_rid)
    def add_footnotes(self, doc, element):
        """Add footnotes from the given document used in the given element."""
        footnotes_refs = element.findall('.//w:footnoteReference', NS)
        if not footnotes_refs:
            return
        footnote_part = doc.part.rels.part_with_reltype(RT.FOOTNOTES)
        my_footnote_part = self.footnote_part()
        footnotes = parse_xml(my_footnote_part.blob)
        next_id = len(footnotes) + 1
        for ref in footnotes_refs:
            # Copy the referenced footnote and renumber both the copy
            # and the reference to the next free id in our part.
            id_ = ref.get('{%s}id' % NS['w'])
            element = parse_xml(footnote_part.blob)
            footnote = deepcopy(element.find('.//w:footnote[@w:id="%s"]' % id_, NS))
            footnotes.append(footnote)
            footnote.set('{%s}id' % NS['w'], str(next_id))
            ref.set('{%s}id' % NS['w'], str(next_id))
            next_id += 1
        self.add_referenced_parts(footnote_part, my_footnote_part, element)
        my_footnote_part._blob = serialize_part_xml(footnotes)
    def footnote_part(self):
        """The footnote part of the document."""
        try:
            footnote_part = self.doc.part.rels.part_with_reltype(RT.FOOTNOTES)
        except KeyError:
            # Create a new empty footnotes part
            partname = PackURI('/word/footnotes.xml')
            content_type = CT.WML_FOOTNOTES
            xml_path = os.path.join(
                os.path.dirname(__file__), 'templates', 'footnotes.xml')
            with open(xml_path, 'rb') as f:
                xml_bytes = f.read()
            footnote_part = Part(
                partname, content_type, xml_bytes, self.doc.part.package)
            self.doc.part.relate_to(footnote_part, RT.FOOTNOTES)
        return footnote_part
    def mapped_style_id(self, style_id):
        """Map a style id of the appended document to the id of the
        same-named style in the composed document (see
        _create_style_id_mapping); fall back to the given id."""
        if style_id not in self._style_id2name:
            return style_id
        return self._style_name2id.get(
            self._style_id2name[style_id], style_id)
    def _create_style_id_mapping(self, doc):
        # Style ids are language-specific, but names not (always), WTF?
        # The inserted document may have another language than the composed one.
        # Thus we map the style id using the style name.
        self._style_id2name = {s.style_id: s.name for s in doc.styles}
        self._style_name2id = {s.name: s.style_id for s in self.doc.styles}
    def add_styles_from_other_parts(self, doc):
        """Add styles referenced from non-body parts (e.g. footnotes)
        of the given document."""
        for reltype in PART_RELTYPES_WITH_STYLES:
            try:
                el = parse_xml(doc.part.rels.part_with_reltype(reltype).blob)
            except (KeyError, ValueError):
                # Part absent or unparsable: nothing to copy.
                pass
            else:
                self.add_styles(doc, el)
    def add_styles(self, doc, element):
        """Add styles from the given document used in the given element."""
        our_style_ids = [s.style_id for s in self.doc.styles]
        # de-duplicate ids and keep order to make sure tests are not flaky
        used_style_ids = list(OrderedDict.fromkeys([e.val for e in xpath(
            element, './/w:tblStyle|.//w:pStyle|.//w:rStyle')]))
        for style_id in used_style_ids:
            our_style_id = self.mapped_style_id(style_id)
            if our_style_id not in our_style_ids:
                style_element = deepcopy(doc.styles.element.get_by_id(style_id))
                if style_element is not None:
                    self.doc.styles.element.append(style_element)
                    self.add_numberings(doc, style_element)
                    # Also add linked styles
                    linked_style_ids = xpath(style_element, './/w:link/@w:val')
                    if linked_style_ids:
                        linked_style_id = linked_style_ids[0]
                        our_linked_style_id = self.mapped_style_id(linked_style_id)
                        if our_linked_style_id not in our_style_ids:
                            our_linked_style = doc.styles.element.get_by_id(
                                linked_style_id)
                            if our_linked_style is not None:
                                self.doc.styles.element.append(deepcopy(
                                    our_linked_style))
            else:
                # Create a mapping for abstractNumIds used in existing styles
                # This is used when adding numberings to avoid having multiple
                # <w:abstractNum> elements for the same style.
                style_element = doc.styles.element.get_by_id(style_id)
                if style_element is not None:
                    num_ids = xpath(style_element, './/w:numId/@w:val')
                    if num_ids:
                        anum_ids = xpath(
                            doc.part.numbering_part.element,
                            './/w:num[@w:numId="%s"]/w:abstractNumId/@w:val' % num_ids[0])
                        if anum_ids:
                            our_style_element = self.doc.styles.element.get_by_id(our_style_id)
                            our_num_ids = xpath(our_style_element, './/w:numId/@w:val')
                            if our_num_ids:
                                numbering_part = self.numbering_part()
                                our_anum_ids = xpath(
                                    numbering_part.element,
                                    './/w:num[@w:numId="%s"]/w:abstractNumId/@w:val' % our_num_ids[0])
                                if our_anum_ids:
                                    self.anum_id_mapping[int(anum_ids[0])] = int(our_anum_ids[0])
            # Replace language-specific style id with our style id
            if our_style_id != style_id and our_style_id is not None:
                style_elements = xpath(
                    element,
                    './/w:tblStyle[@w:val="%(styleid)s"]|'
                    './/w:pStyle[@w:val="%(styleid)s"]|'
                    './/w:rStyle[@w:val="%(styleid)s"]' % dict(styleid=style_id))
                for el in style_elements:
                    el.val = our_style_id
            # Update our style ids
            our_style_ids = [s.style_id for s in self.doc.styles]
    def add_numberings(self, doc, element):
        """Add numberings from the given document used in the given element."""
        # Search for numbering references
        num_ids = set([n.val for n in xpath(element, './/w:numId')])
        if not num_ids:
            return
        next_num_id, next_anum_id = self._next_numbering_ids()
        src_numbering_part = doc.part.numbering_part
        for num_id in num_ids:
            if num_id in self.num_id_mapping:
                continue
            # Find the referenced <w:num> element
            res = src_numbering_part.element.xpath(
                './/w:num[@w:numId="%s"]' % num_id)
            if not res:
                continue
            num_element = deepcopy(res[0])
            num_element.numId = next_num_id
            self.num_id_mapping[num_id] = next_num_id
            anum_id = num_element.xpath('//w:abstractNumId')[0]
            if anum_id.val not in self.anum_id_mapping:
                # Find the referenced <w:abstractNum> element
                res = src_numbering_part.element.xpath(
                    './/w:abstractNum[@w:abstractNumId="%s"]' % anum_id.val)
                if not res:
                    continue
                anum_element = deepcopy(res[0])
                self.anum_id_mapping[anum_id.val] = next_anum_id
                anum_id.val = next_anum_id
                # anum_element.abstractNumId = next_anum_id
                anum_element.set('{%s}abstractNumId' % NS['w'], str(next_anum_id))
                # Make sure we have a unique nsid so numberings restart properly
                nsid = anum_element.find('.//w:nsid', NS)
                if nsid is not None:
                    nsid.set(
                        '{%s}val' % NS['w'],
                        "{0:08X}".format(int(10**8 * random.random()))
                    )
                self._insert_abstract_num(anum_element)
            else:
                anum_id.val = self.anum_id_mapping[anum_id.val]
            self._insert_num(num_element)
        # Fix references
        for num_id_ref in xpath(element, './/w:numId'):
            num_id_ref.val = self.num_id_mapping.get(
                num_id_ref.val, num_id_ref.val)
    def _next_numbering_ids(self):
        """Return a (numId, abstractNumId) pair of the next unused
        numbering ids in our numbering part."""
        numbering_part = self.numbering_part()
        # Determine next unused numId (numbering starts with 1)
        current_num_ids = [
            n.numId for n in xpath(numbering_part.element, './/w:num')]
        if current_num_ids:
            next_num_id = max(current_num_ids) + 1
        else:
            next_num_id = 1
        # Determine next unused abstractNumId (numbering starts with 0)
        current_anum_ids = [
            int(n) for n in
            xpath(numbering_part.element, './/w:abstractNum/@w:abstractNumId')]
        if current_anum_ids:
            next_anum_id = max(current_anum_ids) + 1
        else:
            next_anum_id = 0
        return next_num_id, next_anum_id
    def _insert_num(self, element):
        # Find position of last <w:num> element and insert after that
        numbering_part = self.numbering_part()
        nums = numbering_part.element.xpath('.//w:num')
        if nums:
            num_index = numbering_part.element.index(nums[-1])
            numbering_part.element.insert(num_index, element)
        else:
            numbering_part.element.append(element)
    def _insert_abstract_num(self, element):
        # Find position of first <w:num> element
        # We'll insert <w:abstractNum> before that
        numbering_part = self.numbering_part()
        nums = numbering_part.element.xpath('.//w:num')
        if nums:
            anum_index = numbering_part.element.index(nums[0])
        else:
            anum_index = 0
        numbering_part.element.insert(anum_index, element)
    def _replace_mapped_num_id(self, old_id, new_id):
        """Replace a mapped numId with a new one."""
        for key, value in self.num_id_mapping.items():
            if value == old_id:
                self.num_id_mapping[key] = new_id
                return
    def numbering_part(self):
        """The numbering part of the document."""
        try:
            numbering_part = self.doc.part.rels.part_with_reltype(RT.NUMBERING)
        except KeyError:
            # Create a new empty numbering part
            partname = PackURI('/word/numbering.xml')
            content_type = CT.WML_NUMBERING
            xml_path = os.path.join(
                os.path.dirname(__file__), 'templates', 'numbering.xml')
            with open(xml_path, 'rb') as f:
                xml_bytes = f.read()
            element = parse_xml(xml_bytes)
            numbering_part = NumberingPart(
                partname, content_type, element, self.doc.part.package)
            self.doc.part.relate_to(numbering_part, RT.NUMBERING)
        return numbering_part
    def restart_first_numbering(self, doc, element):
        """Restart numbering at 1 for the first numbered paragraph style
        of an appended document, unless it is a heading or a bullet."""
        if not self.restart_numbering:
            return
        style_id = xpath(element, './/w:pStyle/@w:val')
        if not style_id:
            return
        style_id = style_id[0]
        # Only restart once per style per appended document.
        if style_id in self._numbering_restarted:
            return
        style_element = self.doc.styles.element.get_by_id(style_id)
        if style_element is None:
            return
        outline_lvl = xpath(style_element, './/w:outlineLvl')
        if outline_lvl:
            # Styles with an outline level are probably headings.
            # Do not restart numbering of headings
            return
        # if there is a numId referenced from the paragraph, that numId is
        # relevant, otherwise fall back to the style's numId
        local_num_id = xpath(element, './/w:numPr/w:numId/@w:val')
        if local_num_id:
            num_id = local_num_id[0]
        else:
            style_num_id = xpath(style_element, './/w:numId/@w:val')
            if not style_num_id:
                return
            num_id = style_num_id[0]
        numbering_part = self.numbering_part()
        num_element = xpath(
            numbering_part.element,
            './/w:num[@w:numId="%s"]' % num_id)
        if not num_element:
            # Styles with no numbering element should not be processed
            return
        anum_id = xpath(num_element[0], './/w:abstractNumId/@w:val')[0]
        anum_element = xpath(
            numbering_part.element,
            './/w:abstractNum[@w:abstractNumId="%s"]' % anum_id)
        num_fmt = xpath(
            anum_element[0], './/w:lvl[@w:ilvl="0"]/w:numFmt/@w:val')
        # Do not restart numbering of bullets
        if num_fmt and num_fmt[0] == 'bullet':
            return
        # Clone the <w:num> and override level 0 to start at 1 again.
        new_num_element = deepcopy(num_element[0])
        lvl_override = parse_xml(
            '<w:lvlOverride xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main"'
            ' w:ilvl="0"><w:startOverride w:val="1"/></w:lvlOverride>')
        new_num_element.append(lvl_override)
        next_num_id, next_anum_id = self._next_numbering_ids()
        new_num_element.numId = next_num_id
        self._insert_num(new_num_element)
        paragraph_props = xpath(element, './/w:pPr/w:pStyle[@w:val="%s"]/parent::w:pPr' % style_id)
        num_pr = xpath(paragraph_props[0], './/w:numPr')
        if num_pr:
            num_pr = num_pr[0]
            previous_num_id = num_pr.numId.val
            self._replace_mapped_num_id(previous_num_id, next_num_id)
            num_pr.numId.val = next_num_id
        else:
            num_pr = parse_xml(
                '<w:numPr xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main">'
                '<w:ilvl w:val="0"/><w:numId w:val="%s"/></w:numPr>' % next_num_id)
            paragraph_props[0].append(num_pr)
        self._numbering_restarted.add(style_id)
    def header_part(self, content=None):
        """The header part of the document."""
        header_rels = [
            rel for rel in self.doc.part.rels.values() if rel.reltype == RT.HEADER]
        next_id = len(header_rels) + 1
        # Create a new header part
        partname = PackURI('/word/header%s.xml' % next_id)
        content_type = CT.WML_HEADER
        if not content:
            # Fall back to the bundled empty header template.
            xml_path = os.path.join(
                os.path.dirname(__file__), 'templates', 'header.xml')
            with open(xml_path, 'rb') as f:
                content = f.read()
        header_part = Part(
            partname, content_type, content, self.doc.part.package)
        self.doc.part.relate_to(header_part, RT.HEADER)
        return header_part
    def footer_part(self, content=None):
        """The footer part of the document."""
        footer_rels = [
            rel for rel in self.doc.part.rels.values() if rel.reltype == RT.FOOTER]
        next_id = len(footer_rels) + 1
        # Create a new header part
        partname = PackURI('/word/footer%s.xml' % next_id)
        content_type = CT.WML_FOOTER
        if not content:
            # Fall back to the bundled empty footer template.
            xml_path = os.path.join(
                os.path.dirname(__file__), 'templates', 'footer.xml')
            with open(xml_path, 'rb') as f:
                content = f.read()
        footer_part = Part(
            partname, content_type, content, self.doc.part.package)
        self.doc.part.relate_to(footer_part, RT.FOOTER)
        return footer_part
    def remove_header_and_footer_references(self, doc, element):
        """Drop header/footer references from appended content so the
        composed document keeps its own headers and footers."""
        refs = xpath(
            element, './/w:headerReference|.//w:footerReference')
        for ref in refs:
            ref.getparent().remove(ref)
    def renumber_bookmarks(self):
        """Reassign sequential ids to all bookmark start/end markers."""
        bookmarks_start = xpath(self.doc.element.body, './/w:bookmarkStart')
        bookmark_id = 0
        for bookmark in bookmarks_start:
            bookmark.set('{%s}id' % NS['w'], str(bookmark_id))
            bookmark_id += 1
        # Both runs start at 0 so start/end pairs keep matching ids.
        bookmarks_end = xpath(self.doc.element.body, './/w:bookmarkEnd')
        bookmark_id = 0
        for bookmark in bookmarks_end:
            bookmark.set('{%s}id' % NS['w'], str(bookmark_id))
            bookmark_id += 1
    def renumber_docpr_ids(self):
        # Ensure that non-visual drawing properties have a unique id
        doc_prs = xpath(
            self.doc.element.body, './/wp:docPr')
        doc_pr_id = 1
        for doc_pr in doc_prs:
            doc_pr.id = doc_pr_id
            doc_pr_id += 1
        # Continue the same sequence through header and footer parts.
        parts = [
            rel.target_part for rel in self.doc.part.rels.values()
            if rel.reltype in [RT.HEADER, RT.FOOTER, ]
        ]
        for part in parts:
            doc_prs = xpath(part.element, './/wp:docPr')
            for doc_pr in doc_prs:
                doc_pr.id = doc_pr_id
                doc_pr_id += 1
    def renumber_nvpicpr_ids(self):
        # Ensure that non-visual image properties have a unique id
        c_nv_prs = xpath(
            self.doc.element.body, './/pic:cNvPr')
        c_nv_pr_id = 1
        for c_nv_pr in c_nv_prs:
            c_nv_pr.id = c_nv_pr_id
            c_nv_pr_id += 1
        # Continue the same sequence through header and footer parts.
        parts = [
            rel.target_part for rel in self.doc.part.rels.values()
            if rel.reltype in [RT.HEADER, RT.FOOTER, ]
        ]
        for part in parts:
            c_nv_prs = xpath(part.element, './/pic:cNvPr')
            for c_nv_pr in c_nv_prs:
                c_nv_pr.id = c_nv_pr_id
                c_nv_pr_id += 1
    def fix_section_types(self, doc):
        # The section type determines how the contents of the section will be
        # placed relative to the *previous* section.
        # The last section always stays at the end. Therefore we need to adjust
        # the type of first new section.
        # We also need to change the type of the last section of the composed
        # document to the one from the appended document.
        # TODO: Support when inserting document at an arbitrary position
        if len(self.doc.sections) == 1 or len(doc.sections) == 1:
            return
        first_new_section_idx = len(self.doc.sections) - len(doc.sections)
        self.doc.sections[first_new_section_idx].start_type = self.doc.sections[-1].start_type
        self.doc.sections[-1].start_type = doc.sections[-1].start_type
| 40.624801
| 102
| 0.584628
|
acfe28bc0525b439dce26134f5db9a693351fdeb
| 11,369
|
py
|
Python
|
benchmarks/mceditlib/time_relight_manmade.py
|
elcarrion06/mcedit2
|
4bb98da521447b6cf43d923cea9f00acf2f427e9
|
[
"BSD-3-Clause"
] | 673
|
2015-01-02T02:08:13.000Z
|
2022-03-24T19:38:14.000Z
|
benchmarks/mceditlib/time_relight_manmade.py
|
ozzhates/mcedit2
|
4bb98da521447b6cf43d923cea9f00acf2f427e9
|
[
"BSD-3-Clause"
] | 526
|
2015-01-01T02:10:53.000Z
|
2022-02-06T16:24:21.000Z
|
benchmarks/mceditlib/time_relight_manmade.py
|
ozzhates/mcedit2
|
4bb98da521447b6cf43d923cea9f00acf2f427e9
|
[
"BSD-3-Clause"
] | 231
|
2015-01-01T16:47:30.000Z
|
2022-03-31T21:51:55.000Z
|
import numpy
import sys
import time
from benchmarks import bench_temp_level
from mceditlib.selection import BoundingBox
from mceditlib.worldeditor import WorldEditor
from mceditlib import relight
def do_copy(dim, station, relight):
    """Copy the station schematic into `dim` and return the union of the
    destination bounding boxes.

    `relight` is forwarded to copyBlocks' updateLights argument (False,
    "all" or "section" in this benchmark). NOTE(review): this parameter
    shadows the imported `relight` module inside this function.
    """
    # With times=1 this is a single copy; raise `times` for a grid of
    # copies spaced by the station footprint.
    times = 1
    boxes = []
    for x in range(times):
        for z in range(times):
            origin = (x * station.bounds.width, 54, z * station.bounds.length)
            boxes.append(BoundingBox(origin, station.bounds.size))
            dim.copyBlocks(station, station.bounds, origin, create=True, updateLights=relight)
    # Union all destination boxes into one covering selection (py2 reduce).
    return reduce(lambda a, b: a.union(b), boxes)
def manmade_relight(test):
    """Benchmark relighting of a copied station schematic.

    `test` selects which strategy to time: "post" (relight after the
    copy), "smart" (relight all sections in copyBlocks), "section"
    (relight per section in copyBlocks), or "all" for every strategy.
    """
    world = bench_temp_level("AnvilWorld")
    dim = world.getDimension()
    stationEditor = WorldEditor("test_files/station.schematic")
    station = stationEditor.getDimension()
    # Time an unlit copy first; this baseline is subtracted from the
    # relight-in-copyBlocks timings below.
    startCopy = time.time()
    box = do_copy(dim, station, False)
    copyTime = time.time() - startCopy
    print("Copy took %f seconds. Reducing relight-in-copyBlocks times by this much." % copyTime)
    # Collect every (cx, cy, cz) chunk-section touched by the copy.
    positions = []
    for cx, cz in box.chunkPositions():
        for cy in box.sectionPositions(cx, cz):
            positions.append((cx, cy, cz))
    assert len(positions) > box.chunkCount
    if test == "post" or test == "all":
        def postCopy(): # profiling
            start = time.time()
            count = 0
            print("Relighting outside of copyBlocks. Updating %d cells" % (len(positions) * 16 * 16 * 16))
            for cx, cy, cz in positions:
                # Build world coordinates for all 16x16x16 cells of the section.
                indices = numpy.indices((16, 16, 16), numpy.int32)
                indices.shape = 3, 16*16*16
                indices += ([cx << 4], [cy << 4], [cz << 4])
                x, y, z = indices
                relight.updateLightsByCoord(dim, x, y, z)
                count += 1
            t = time.time() - start
            print "Relight manmade building (outside copyBlocks): " \
                  "%d (out of %d) chunk-sections in %.02f seconds (%f sections per second; %dms per section)" \
                  % (count, len(positions), t, count / t, 1000 * t / count)
        postCopy()
    if test == "smart" or test == "all":
        def allSections():
            # Fresh world so earlier runs don't skew the timing.
            world = bench_temp_level("AnvilWorld")
            dim = world.getDimension()
            start = time.time()
            do_copy(dim, station, "all")
            t = time.time() - start - copyTime
            print "Relight manmade building (in copyBlocks, all sections): " \
                  "%d chunk-sections in %.02f seconds (%f sections per second; %dms per section)" \
                  % (len(positions), t, len(positions) / t, 1000 * t / len(positions))
        allSections()
    if test == "section" or test == "all":
        def perSection():
            # Fresh world so earlier runs don't skew the timing.
            world = bench_temp_level("AnvilWorld")
            dim = world.getDimension()
            start = time.time()
            do_copy(dim, station, "section")
            t = time.time() - start - copyTime
            print "Relight manmade building (in copyBlocks, for each section): " \
                  "%d chunk-sections in %.02f seconds (%f sections per second; %dms per section)" \
                  % (len(positions), t, len(positions) / t, 1000 * t / len(positions))
        perSection()
if __name__ == '__main__':
    # Usage: time_relight_manmade.py [method] [test]
    #   method: relight implementation to benchmark (e.g. "cython")
    #   test:   strategy to time ("post", "smart", "section", or "all")
    if len(sys.argv) > 1:
        method = sys.argv[1]
        print "Using method", method
        relight.setMethod(method)
    if len(sys.argv) > 2:
        test = sys.argv[2]
    else:
        test = "all"
    manmade_relight(test)
"""
Conclusion:
Much time is spent in the "post" method which updates all cells in the selection box, calling
updateLights on cells whose opacity values did not change. This is evidenced by the time spent in
"drawLights", which must be called because updateLights doesn't know the previous block type in
that cell.
copyBlocksFrom has been modified to find the cells whose lighting or opacity value did change,
and passing only those cells to updateLights. This is more than twice as fast, and updating
all changed cells at once is even faster, presumably because changes to following chunks will
invalidate lighting data computed by previous chunks.
Because updateLights does not know what the previous cell's opacity values were (it does know the
cell's current light value, so it can skip spreadLight if the new brightness didn't exceed that),
clients of updateLights should take care to find only cells whose opacity values changed.
copyBlocksFrom stores all changed cell positions, which could lead to MemoryErrors for very large
copies. Instead of storing all positions, it should periodically call updateLights whenever the
position list exceeds a threshold. This "batch-update" method should be an acceptable compromise
between updating for each section (suffering invalidation costs), and updating all sections
at once after the copy (risking MemoryErrors and possibly paying additional chunk loading costs)
Updating lights for chunks whose neighbors have not been copied yet will cause wasted effort.
It helps to describe this graphically. This is the current visitation order:
(area is 24x12, and 34 chunks have been copied so far)
************************
**********..............
........................
........................
........................
........................
........................
........................
........................
........................
........................
........................
'.' represents chunks that are yet to be copied.
'*' represents chunks that have been copied.
If a batched lighting update is called at this point, these are the chunks that, when they are
copied over later, will invalidate parts of the previous update:
************************
**********--------------
----------+.............
........................
........................
........................
........................
........................
........................
........................
........................
........................
'-' represents chunks that when edited will invalidate the previous lighting update applied
to the '*' chunks. There are 24 such chunks.
'+' represents chunks that when edited will invalidate at most half of a previous chunk's
update.
So let's say 24.5 chunks are invalidated later. Out of 34 chunks, that is not very good at all.
That number is roughly proportional to the width of the selection box.
The current visitation order is thus:
1234567890abcdefghijklmn
opqrstuvwx--------------
----------+.............
........................
........................
........................
........................
........................
........................
........................
........................
........................
A possibly improved visitation order:
12efghuvwx-.............
43dcjits--+.............
589bknor-...............
670almpq-...............
--------+...............
........................
........................
........................
........................
........................
........................
........................
13 full chunks and two half-chunks are invalidated, for a total of 15 chunks out of 34.
At least it's less than half.
This number is roughly proportional to the square root of the number of chunks copied so far.
The order of chunks visited by copyBlocksFrom is linear. When it calls updateLights for a chunk,
the chunks adjacent to that chunk (and ahead of that chunk in the order) will have to redo part
of this chunk's lighting for the current chunk when they are copied. To minimize wasted effort,
a chunk order that resembles a space-filling curve such as a Hilbert curve may be
applicable. The goal is to reduce the number of chunks who have neighbors yet to be copied at the
time the batched update is performed.
Maybe we can do better. What if, instead of batch-updating ALL of the chunks copied so far,
we only batch-update the ones we know won't be invalidated later?
The cells that need update are currently just tossed in a list. Instead, associate them with
their chunk position. Keep track of which chunks we have copied, and how many of their
eight neighbors have already been copied too. Only issue a batch update for chunks where all eight
neighbors are copied. If we use the original visitation order, then for very large copies, we may
reach the threshold before any neighbors have been copied. The new visitation order would avoid
this as, for most chunks, it will visit all of a chunk's neighbors very soon after that chunk.
In fact, it may not be necessary to batch-update at all if we can update a chunk as soon as all its
neighbors are ready.
Output:
Using method cython
INFO:mceditlib.block_copy:Copying 3103771 blocks from BoundingBox(origin=Vector(0, 0, 0), size=Vector(113, 121, 227)) to (0, 54, 0)
INFO:mceditlib.block_copy:Copying: Chunk 20/120...
INFO:mceditlib.block_copy:Copying: Chunk 40/120...
INFO:mceditlib.block_copy:Copying: Chunk 60/120...
INFO:mceditlib.block_copy:Copying: Chunk 80/120...
INFO:mceditlib.block_copy:Copying: Chunk 100/120...
INFO:mceditlib.block_copy:Copying: Chunk 120/120...
INFO:mceditlib.block_copy:Duration: 1.292s, 120/120 chunks, 10.77ms per chunk (92.88 chunks per second)
INFO:mceditlib.block_copy:Copied 0/0 entities and 293/293 tile entities
Copy took 1.292000 seconds. Reducing relight-in-copyBlocks times by this much.
Relighting outside of copyBlocks. Updating 3932160 cells
Relight manmade building (outside copyBlocks): 960 (out of 960) chunk-sections in 71.49 seconds (13.428639 sections per second; 74ms per section)
INFO:mceditlib.block_copy:Copying 3103771 blocks from BoundingBox(origin=Vector(0, 0, 0), size=Vector(113, 121, 227)) to (0, 54, 0)
INFO:mceditlib.block_copy:Copying: Chunk 20/120...
INFO:mceditlib.block_copy:Copying: Chunk 40/120...
INFO:mceditlib.block_copy:Copying: Chunk 60/120...
INFO:mceditlib.block_copy:Copying: Chunk 80/120...
INFO:mceditlib.block_copy:Copying: Chunk 100/120...
INFO:mceditlib.block_copy:Copying: Chunk 120/120...
INFO:mceditlib.block_copy:Duration: 1.318s, 120/120 chunks, 10.98ms per chunk (91.05 chunks per second)
INFO:mceditlib.block_copy:Copied 0/0 entities and 293/293 tile entities
INFO:mceditlib.block_copy:Updating all at once for 969 sections (646338 cells)
INFO:mceditlib.block_copy:Lighting complete.
INFO:mceditlib.block_copy:Duration: 16.979s, 968 sections, 17.54ms per section (57.01 sections per second)
Relight manmade building (in copyBlocks, all sections): 960 chunk-sections in 17.01 seconds (56.444027 sections per second; 17ms per section)
INFO:mceditlib.block_copy:Copying 3103771 blocks from BoundingBox(origin=Vector(0, 0, 0), size=Vector(113, 121, 227)) to (0, 54, 0)
INFO:mceditlib.block_copy:Copying: Chunk 20/120...
INFO:mceditlib.block_copy:Copying: Chunk 40/120...
INFO:mceditlib.block_copy:Copying: Chunk 60/120...
INFO:mceditlib.block_copy:Copying: Chunk 80/120...
INFO:mceditlib.block_copy:Copying: Chunk 100/120...
INFO:mceditlib.block_copy:Copying: Chunk 120/120...
Relight manmade building (in copyBlocks, for each section): 960 chunk-sections in 26.12 seconds (36.757667 sections per second; 27ms per section)
INFO:mceditlib.block_copy:Duration: 27.408s, 120/120 chunks, 228.40ms per chunk (4.38 chunks per second)
INFO:mceditlib.block_copy:Copied 0/0 entities and 293/293 tile entities
"""
| 42.901887
| 145
| 0.647814
|
acfe298332f1340b7bf9aa2e8a743742181cd5be
| 17,096
|
py
|
Python
|
paddlenlp/transformers/skep/tokenizer.py
|
haohongxiang/PaddleNLP
|
c862e9c3a4d49caf00f4de81bdfae36aba9b636e
|
[
"Apache-2.0"
] | 1
|
2021-10-14T05:35:00.000Z
|
2021-10-14T05:35:00.000Z
|
paddlenlp/transformers/skep/tokenizer.py
|
haohongxiang/PaddleNLP
|
c862e9c3a4d49caf00f4de81bdfae36aba9b636e
|
[
"Apache-2.0"
] | null | null | null |
paddlenlp/transformers/skep/tokenizer.py
|
haohongxiang/PaddleNLP
|
c862e9c3a4d49caf00f4de81bdfae36aba9b636e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import shutil
from paddle.utils import try_import
from paddlenlp.transformers import BasicTokenizer, PretrainedTokenizer, WordpieceTokenizer
from paddlenlp.utils.log import logger
from paddlenlp.utils.env import MODEL_HOME
__all__ = ['SkepTokenizer', ]
def bytes_to_unicode():
    """Return a dict mapping each of the 256 byte values to a unicode char.

    Bytes that are printable (and neither control nor whitespace characters)
    map to themselves; the remaining bytes are mapped, in increasing byte
    order, to code points starting at 256 so that every byte gets a distinct
    printable representative.  This is the standard byte-level BPE vocabulary
    construction, which avoids UNKs without a huge unicode vocab and keeps
    the BPE code away from whitespace/control characters it cannot handle.
    """
    # Byte values that keep their own character: '!'..'~', '¡'..'¬', '®'..'ÿ'.
    keep = set(range(33, 127)) | set(range(161, 173)) | set(range(174, 256))
    mapping = {}
    shift = 0
    for byte in range(256):
        if byte in keep:
            mapping[byte] = chr(byte)
        else:
            # Shift non-printable bytes past 255, assigned in byte order.
            mapping[byte] = chr(256 + shift)
            shift += 1
    return mapping
def get_pairs(word):
    """Return the set of symbol pairs in a word.

    Args:
        word (tuple): tuple of symbols (symbols being variable-length strings).

    Returns:
        set: every adjacent (left, right) symbol pair.  An empty or
        single-symbol word yields the empty set (the original raised
        IndexError on an empty word).
    """
    # zip pairs each symbol with its successor; robust to len(word) < 2.
    return set(zip(word, word[1:]))
class BpeEncoder(object):
    """Byte-level BPE encoder/decoder (GPT-2 style) backed by an id vocab
    (JSON) and a merges file."""

    def __init__(self, encoder_json_file, vocab_bpe_file, errors='replace'):
        """
        Constructs a BpeEncoder.

        Args:
            encoder_json_file (`str`): The path to bpe encode json file.
            vocab_bpe_file (`str`): The path to bpe vocab file.
            errors (`str`): error policy passed to `bytes.decode` when
                mapping ids back to text.
        """
        # token string -> id, and its inverse.
        self.encoder = self.__get_encoder(encoder_json_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        # byte value <-> printable unicode stand-in (byte-level BPE trick).
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        # merge pair -> priority; lower rank merges first.
        self.bpe_ranks = self.__get_bpe_ranks(vocab_bpe_file)
        self.cache = {}
        re = try_import("regex")
        # GPT-2 pre-tokenization pattern: contractions, letter runs, digit
        # runs, other symbol runs, and whitespace.
        self.pat = re.compile(
            r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"""
        )

    def __get_encoder(self, encoder_json_file):
        # Load the token -> id vocabulary from JSON.
        with open(encoder_json_file, 'r') as f:
            encoder = json.load(f)
        return encoder

    def __get_bpe_ranks(self, vocab_bpe_file):
        # Parse the merges file: one "left right" pair per line; the first
        # line is a header and the trailing empty line is skipped ([1:-1]).
        with open(vocab_bpe_file, 'r', encoding="utf-8") as f:
            bpe_data = f.read()
        bpe_merges = [
            tuple(merge_str.split()) for merge_str in bpe_data.split('\n')[1:-1]
        ]
        bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        return bpe_ranks

    def bpe(self, token):
        """
        Apply BPE merges to one pre-tokenized token; return the resulting
        symbols joined by single spaces.  Results are memoized in self.cache.
        """
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            # Single-character token: nothing to merge (not cached).
            return token
        while True:
            # Best-ranked adjacent pair; unknown pairs rank as +inf.
            bigram = min(
                pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    # Jump to the next occurrence of `first`.
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                except:
                    # No further occurrence: keep the remainder verbatim.
                    new_word.extend(word[i:])
                    break
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    # Merge this (first, second) occurrence into one symbol.
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def encode(self, text):
        """
        Encode `text` into a list of BPE token ids.
        """
        bpe_tokens = []
        re = try_import("regex")
        for token in re.findall(self.pat, text):
            # Map raw utf-8 bytes to their printable stand-ins before BPE.
            token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
            bpe_tokens.extend(self.encoder[bpe_token]
                              for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def decode(self, tokens):
        """
        Decode a list of BPE token ids back into text.
        """
        text = ''.join([self.decoder[token] for token in tokens])
        # Undo the byte->unicode stand-in mapping, then decode utf-8.
        text = bytearray([self.byte_decoder[c] for c in text]).decode(
            'utf-8', errors=self.errors)
        return text
class SkepTokenizer(PretrainedTokenizer):
    r"""
    Constructs a Skep tokenizer. It uses a basic tokenizer to do punctuation
    splitting, lower casing and so on, and follows a WordPiece tokenizer to
    tokenize as subwords.

    Args:
        vocab_file (str):
            The vocabulary file path (ends with '.txt') required to instantiate
            a `WordpieceTokenizer`.
        bpe_vocab_file (str, optional):
            The vocabulary file path of a `BpeTokenizer`. Defaults to `None`.
        bpe_json_file (str, optional):
            The json file path of a `BpeTokenizer`. Defaults to `None`.
        do_lower_case (bool, optional):
            Whether the basic tokenizer lower-cases input. Defaults to `True`.
        use_bpe_encoder (bool, optional):
            Whether or not to use BPE Encoder. Defaults to `False`.
        need_token_type_id (bool, optional):
            Whether or not to use token type id. Defaults to `True`.
        add_two_sep_token_inter (bool, optional):
            Whether or not to add two different `sep_token`. Defaults to `False`.
        unk_token (str, optional):
            The special token for unknown words.
            Defaults to "[UNK]".
        sep_token (str, optional):
            The special token for separator token.
            Defaults to "[SEP]".
        pad_token (str, optional):
            The special token for padding.
            Defaults to "[PAD]".
        cls_token (str, optional):
            The special token for cls.
            Defaults to "[CLS]".
        mask_token (str, optional):
            The special token for mask.
            Defaults to "[MASK]".

    Examples:
        .. code-block::

            from paddlenlp.transformers import SkepTokenizer
            tokenizer = SkepTokenizer.from_pretrained('skep_ernie_2.0_large_en')
            encoded_inputs = tokenizer('He was a puppeteer')
            # encoded_inputs:
            # {
            #   'input_ids': [101, 2002, 2001, 1037, 13997, 11510, 102],
            #   'token_type_ids': [0, 0, 0, 0, 0, 0, 0]
            # }
    """
    resource_files_names = {
        "vocab_file": "vocab.txt",
        "bpe_vocab_file": "vocab.bpe",
        "bpe_json_file": "encoder.json"
    }  # for save_pretrained
    pretrained_resource_files_map = {
        "vocab_file": {
            "skep_ernie_1.0_large_ch":
            "https://paddlenlp.bj.bcebos.com/models/transformers/skep/skep_ernie_1.0_large_ch.vocab.txt",
            "skep_ernie_2.0_large_en":
            "https://paddlenlp.bj.bcebos.com/models/transformers/skep/skep_ernie_2.0_large_en.vocab.txt",
            "skep_roberta_large_en":
            "https://paddlenlp.bj.bcebos.com/models/transformers/skep/skep_roberta_large_en.vocab.txt",
        },
        # Only the RoBERTa-based model tokenizes with byte-level BPE; the
        # ERNIE-based models have no BPE resources (None).
        "bpe_vocab_file": {
            "skep_ernie_1.0_large_ch": None,
            "skep_ernie_2.0_large_en": None,
            "skep_roberta_large_en":
            "https://paddlenlp.bj.bcebos.com/models/transformers/skep/skep_roberta_large_en.vocab.bpe",
        },
        "bpe_json_file": {
            "skep_ernie_1.0_large_ch": None,
            "skep_ernie_2.0_large_en": None,
            "skep_roberta_large_en":
            "https://paddlenlp.bj.bcebos.com/models/transformers/skep/skep_roberta_large_en.encoder.json",
        }
    }
    pretrained_init_configuration = {
        "skep_ernie_1.0_large_ch": {
            "do_lower_case": True,
            "use_bpe_encoder": False,
            "need_token_type_id": True,
            "add_two_sep_token_inter": False,
        },
        "skep_ernie_2.0_large_en": {
            "do_lower_case": True,
            "use_bpe_encoder": False,
            "need_token_type_id": True,
            "add_two_sep_token_inter": False,
        },
        "skep_roberta_large_en": {
            "do_lower_case": True,
            "use_bpe_encoder": True,
            "need_token_type_id": False,
            "add_two_sep_token_inter": True,
        },
    }

    def __init__(self,
                 vocab_file,
                 bpe_vocab_file=None,
                 bpe_json_file=None,
                 do_lower_case=True,
                 use_bpe_encoder=False,
                 need_token_type_id=True,
                 add_two_sep_token_inter=False,
                 unk_token="[UNK]",
                 sep_token="[SEP]",
                 pad_token="[PAD]",
                 cls_token="[CLS]",
                 mask_token="[MASK]"):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the "
                "vocabulary from a pretrained model please use "
                "`tokenizer = SkepTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
                .format(vocab_file))
        self.vocab_file = vocab_file
        self.bpe_vocab_file = bpe_vocab_file
        self.bpe_json_file = bpe_json_file
        self.vocab = self.load_vocabulary(
            vocab_file, unk_token=unk_token, pad_token=pad_token)
        self.use_bpe_encoder = use_bpe_encoder
        self.need_token_type_id = need_token_type_id
        self.add_two_sep_token_inter = add_two_sep_token_inter

        if not self.use_bpe_encoder:
            # WordPiece path (ERNIE-based models).
            self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
            self.wordpiece_tokenizer = WordpieceTokenizer(
                vocab=self.vocab, unk_token=unk_token)
        else:
            # BPE path (RoBERTa-based model): both resources are required.
            # BUGFIX: the original `(a and b) is not None` wrongly passed
            # when a falsy-but-not-None path (e.g. "") was given; check each
            # argument explicitly.
            assert bpe_vocab_file is not None and bpe_json_file is not None, (
                "bpe_vocab_file and bpe_json_file must be not None.")
            if os.path.isfile(bpe_vocab_file) and os.path.isfile(bpe_json_file):
                self.bpe_tokenizer = BpeEncoder(bpe_json_file, bpe_vocab_file)

    @property
    def vocab_size(self):
        r"""
        Return the size of vocabulary.

        Returns:
            int: the size of vocabulary.
        """
        return len(self.vocab)

    def _tokenize(self, text):
        r"""
        End-to-end tokenization for Skep models.

        Args:
            text (str): The text to be tokenized.

        Returns:
            list: A list of string representing converted tokens.
        """
        split_tokens = []
        if not self.use_bpe_encoder:
            # basic (punctuation/lower-casing) then WordPiece sub-tokens.
            for token in self.basic_tokenizer.tokenize(text):
                for sub_token in self.wordpiece_tokenizer.tokenize(token):
                    split_tokens.append(sub_token)
        else:
            # BPE produces integer ids; they are stringified to keep the
            # return type uniform.
            for token in self.bpe_tokenizer.encode(text):
                split_tokens.append(str(token))
        return split_tokens

    def tokenize(self, text):
        """
        Converts a string to a list of tokens.

        Args:
            text (str): The text to be tokenized.

        Returns:
            List(str): A list of string representing converted tokens.

        Examples:
            .. code-block::

                from paddlenlp.transformers import SkepTokenizer
                tokenizer = SkepTokenizer.from_pretrained('skep_ernie_2.0_large_en')
                tokens = tokenizer.tokenize('He was a puppeteer')
                '''
                ['he', 'was', 'a', 'puppet', '##eer']
                '''
        """
        return self._tokenize(text)

    def num_special_tokens_to_add(self, pair=False):
        r"""
        Returns the number of added tokens when encoding a sequence with special tokens.

        Args:
            pair (bool, optional):
                Returns the number of added tokens in the case of a sequence
                pair if set to True, returns the number of added tokens in the case of a single sequence if set to False.
                Defaults to False.

        Returns:
            int: Number of tokens added to sequences
        """
        # Measure on empty sequences: the remaining length is exactly the
        # number of special tokens inserted.
        token_ids_0 = []
        token_ids_1 = []
        return len(
            self.build_inputs_with_special_tokens(token_ids_0, token_ids_1
                                                  if pair else None))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        r"""
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens.

        A skep_ernie_1.0_large_ch/skep_ernie_2.0_large_en sequence has the following format:

        - single sequence: ``[CLS] X [SEP]``
        - pair of sequences: ``[CLS] A [SEP] B [SEP]``

        A skep_roberta_large_en sequence has the following format:

        - single sequence: ``[CLS] X [SEP]``
        - pair of sequences: ``[CLS] A [SEP] [SEP] B [SEP]``

        Args:
            token_ids_0 (List[int]):
                List of IDs to which the special tokens will be added.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs.
                Defaults to `None`.

        Returns:
            list[int]: List of input_id with the appropriate special tokens.
        """
        if not self.add_two_sep_token_inter:
            if token_ids_1 is None:
                return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
            _cls = [self.cls_token_id]
            _sep = [self.sep_token_id]
            return _cls + token_ids_0 + _sep + token_ids_1 + _sep
        else:
            # RoBERTa-style: double separator between the pair.
            if token_ids_1 is None:
                return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
            _cls = [self.cls_token_id]
            _sep = [self.sep_token_id]
            return _cls + token_ids_0 + _sep + _sep + token_ids_1 + _sep

    def create_token_type_ids_from_sequences(self,
                                             token_ids_0,
                                             token_ids_1=None):
        r"""
        Create a mask from the two sequences passed to be used in a sequence-pair classification task.

        A skep_ernie_1.0_large_ch/skep_ernie_2.0_large_en sequence pair mask has the following format:
        ::

            0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
            | first sequence    | second sequence |

        If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).

        note: There is no need token type ids for skep_roberta_large_ch model.

        Args:
            token_ids_0 (List[int]):
                List of IDs.
            token_ids_1 (List[int], optional):
                Optional second list of IDs for sequence pairs.
                Defaults to `None`.

        Returns:
            List[int]: List of token_type_id according to the given sequence(s).
        """
        if self.need_token_type_id:
            _sep = [self.sep_token_id]
            _cls = [self.cls_token_id]
            if token_ids_1 is None:
                return len(_cls + token_ids_0 + _sep) * [0]
            return len(_cls + token_ids_0 + _sep) * [0] + len(token_ids_1 +
                                                              _sep) * [1]
        else:
            # For model skep-roberta-large-en, token type ids is no need.
            return None

    def save_resources(self, save_directory):
        """
        Save tokenizer related resources to files under `save_directory`.

        Args:
            save_directory (str): Directory to save files into.
        """
        for name, file_name in self.resource_files_names.items():
            save_path = os.path.join(save_directory, file_name)
            source_file = getattr(self, name)
            # BPE resources are None for the ERNIE-based models; skip them.
            if source_file is not None:
                shutil.copyfile(source_file, save_path)
| 36.765591
| 121
| 0.573117
|
acfe298b41bb34f45c7c31913bea74fc33ae90d5
| 5,600
|
py
|
Python
|
Assignment 2/Code/main.py
|
Palak-Dhanadia/Artificial-Intelligence
|
a6acf9c2bccab3f6b0ce71b485b8b9d1e575e2ed
|
[
"MIT"
] | null | null | null |
Assignment 2/Code/main.py
|
Palak-Dhanadia/Artificial-Intelligence
|
a6acf9c2bccab3f6b0ce71b485b8b9d1e575e2ed
|
[
"MIT"
] | null | null | null |
Assignment 2/Code/main.py
|
Palak-Dhanadia/Artificial-Intelligence
|
a6acf9c2bccab3f6b0ce71b485b8b9d1e575e2ed
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import torchvision.datasets as dsets
import torch.nn.functional as F
import matplotlib.pyplot as plt
# Download FashionMNIST (28x28 grayscale, 10 classes) and wrap both splits
# in non-shuffling batch loaders.
train_set = torchvision.datasets.FashionMNIST(root=".", train=True,
                                              download=True, transform=transforms.ToTensor())
test_set = torchvision.datasets.FashionMNIST(root=".", train=False,
                                             download=True, transform=transforms.ToTensor())
training_loader = torch.utils.data.DataLoader(train_set, batch_size=32,
                                              shuffle=False)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=32,
                                          shuffle=False)
# Fixed seed so weight initialisation (and hence results) is reproducible.
torch.manual_seed(0)
# set true if using on google colab with runtime type = GPU
use_cuda = True
# defining convolutional neural network and the learnable parameters
class CNN(nn.Module):
    """Two conv+pool stages followed by two fully connected layers, sized
    for 28x28 single-channel FashionMNIST images (10 output classes)."""

    def __init__(self):
        super(CNN, self).__init__()
        # Layers are created and Xavier-initialised strictly in order so the
        # RNG stream (and thus the seeded weights) matches the original.
        # conv1: 1 input channel -> 32 feature maps, 5x5 kernel.
        self.conv1 = nn.Conv2d(1, 32, 5)
        nn.init.xavier_normal_(self.conv1.weight)
        # conv2: 32 -> 64 feature maps, 5x5 kernel.
        self.conv2 = nn.Conv2d(32, 64, 5)
        nn.init.xavier_normal_(self.conv2.weight)
        # After two conv (28->24->stride ... ->8) + 2x2 pool stages a 28x28
        # input is reduced to 64 channels of 4x4.
        self.fc1 = nn.Linear(64 * 4 * 4, 256)
        nn.init.xavier_normal_(self.fc1.weight)
        self.fc2 = nn.Linear(256, 10)
        nn.init.xavier_normal_(self.fc2.weight)
        # dropout layer
        # self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        """Return raw class scores (logits); softmax is left to the loss."""
        pooled1 = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        pooled2 = F.max_pool2d(F.relu(self.conv2(pooled1)), (2, 2))
        # Flatten every sample to a single feature vector.
        flat = pooled2.view(-1, self.num_flat_features(pooled2))
        hidden = F.relu(self.fc1(flat))
        # dropout on the 2nd fully connected layer
        # return self.dropout(self.fc2(hidden))
        return self.fc2(hidden)

    def num_flat_features(self, x):
        """Number of features per sample when x is flattened (batch dim excluded)."""
        count = 1
        for dim_size in x.size()[1:]:
            count *= dim_size
        return count
model = CNN()
# use google colab gpu if resources are available
if use_cuda and torch.cuda.is_available():
    model.cuda()
# defining learning rate
learning_rate = 0.1
# defining loss function; CrossEntropyLoss applies log-softmax internally,
# matching the raw logits returned by CNN.forward.
criterion = nn.CrossEntropyLoss()
# defining stochastic gradient descent for weights update
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
# evaluation method to calculate the accuracy of the model on training and test set
def evaluation(model, data_loader):
    """Return the accuracy (percent, float) of `model` over `data_loader`.

    `data_loader` yields (inputs, labels) batch pairs.  Improvements over the
    original:
      * inference runs under `torch.no_grad()`, so no autograd graph is
        built or retained during evaluation;
      * batches follow the model's own parameter device instead of consulting
        the module-level `use_cuda` flag, decoupling this function from
        globals (same effective device, since the model is moved to CUDA
        exactly when that flag is set and CUDA is available).
    """
    # change the mode of the model to evaluation
    model.eval()
    device = next(model.parameters()).device
    total, correct = 0, 0
    with torch.no_grad():
        for inputs, labels in data_loader:
            inputs = inputs.to(device)
            labels = labels.to(device)
            outputs = model(inputs)
            # Predicted class = index of the max logit per row.
            _, pred = torch.max(outputs, 1)
            total += labels.size(0)
            correct += (pred == labels).sum().item()
    return 100 * (correct / total)
# Accumulated per-epoch training loss.
total_loss_list = []
# Per-epoch accuracy on the training set.
train_acc_list = []
# Per-epoch accuracy on the test set.
test_acc_list = []
# epochs
num_epochs = 50
# training loop
for epoch in range(num_epochs):
    # per-batch losses for this epoch
    loss_list = []
    for i, (images, labels) in enumerate(training_loader):
        # change the mode of the model to training
        model.train()
        # use google colab gpu if resources are available
        if use_cuda and torch.cuda.is_available():
            images = images.cuda()
            labels = labels.cuda()
        # Run the forward pass
        outputs = model(images)
        # calculate loss
        loss = criterion(outputs, labels)
        # append loss to the loss list
        loss_list.append(loss.item())
        # set gradient buffers to zero
        optimizer.zero_grad()
        # Backprop and perform SGD optimisation
        loss.backward()
        optimizer.step()
    # Training Accuracy (full pass over the training set each epoch —
    # expensive, but mirrors the test-set measurement)
    train_acc = evaluation(model, training_loader)
    # Testing Accuracy
    test_acc = evaluation(model, test_loader)
    train_acc_list.append(train_acc)
    test_acc_list.append(test_acc)
    # get loss for the epoch and append to the total loss list
    total_loss_list.append(sum(loss_list))
    print('Epoch [{}/{}], Loss: {:.4f}, Train Accuracy: {:.2f}%, Test Accuracy: {:.2f}%'
          .format(epoch + 1, num_epochs, sum(loss_list), train_acc, test_acc))

# Plot accuracy curves, then the per-epoch loss curve.
plt.plot(train_acc_list, label="Train Acc")
plt.plot(test_acc_list, label="Test Acc")
plt.title('Test and Train Accuracy at LR={}'.format(learning_rate))
plt.legend()
plt.show()

plt.plot(total_loss_list, label="Loss")
plt.title('Loss per epoch at LR={}'.format(learning_rate))
plt.legend()
plt.show()
| 35.897436
| 94
| 0.631786
|
acfe29e1035c3997f2a2ae8a465d19b99c5497d9
| 177
|
py
|
Python
|
problem0233.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
problem0233.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
problem0233.py
|
kmarcini/Project-Euler-Python
|
d644e8e1ec4fac70a9ab407ad5e1f0a75547c8d3
|
[
"BSD-3-Clause"
] | null | null | null |
###########################
#
# #233 Lattice points on a circle - Project Euler
# https://projecteuler.net/problem=233
#
# Code by Kevin Marciniak
#
###########################
| 19.666667
| 49
| 0.491525
|
acfe2ac94af4287c9eea02da6e36771b0e7aa4cc
| 3,128
|
py
|
Python
|
sc2/common/utils.py
|
srsohn/msgi
|
e665861f2d08f41b7dad16588447203e5010145a
|
[
"MIT"
] | 10
|
2020-04-14T01:56:43.000Z
|
2022-03-25T12:55:30.000Z
|
sc2/common/utils.py
|
srsohn/msgi
|
e665861f2d08f41b7dad16588447203e5010145a
|
[
"MIT"
] | null | null | null |
sc2/common/utils.py
|
srsohn/msgi
|
e665861f2d08f41b7dad16588447203e5010145a
|
[
"MIT"
] | 1
|
2020-11-02T18:07:59.000Z
|
2020-11-02T18:07:59.000Z
|
import torch
import numpy as np
class dotdict(dict):
    """A dict whose entries can also be read, written, and deleted as
    attributes.  Reading a missing key via attribute access returns None
    (mirroring dict.get), while `del d.key` raises KeyError for missing keys."""

    def __getattr__(self, name):
        # Only invoked when normal attribute lookup fails.
        return self.get(name)

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        del self[key]
def _sample_int_layer_wise(nbatch, high, low):
assert(high.dim()==1 and low.dim()==1)
ndim = len(high)
out_list = []
for d in range(ndim):
out_list.append( np.random.randint(low[d], high[d]+1, (nbatch,1 ) ) )
return np.concatenate(out_list, axis=1)
def _sample_layer_wise(nbatch, high, low):
assert(high.dim()==1 and low.dim()==1)
nsample = len(high)
base = torch.rand( nbatch, nsample )
return base*(high - low) + low
def _transform(input_tensor, mapping):
if input_tensor.dim()==1:
input_tensor = input_tensor.unsqueeze(-1)
return torch.gather(mapping, 1, input_tensor)
def _to_multi_hot(index_tensor, max_dim, device): # number-to-onehot or numbers-to-multihot
if type(index_tensor)==np.ndarray:
index_tensor = torch.from_numpy(index_tensor)
if len(index_tensor.shape)==1:
out = (index_tensor.unsqueeze(1) == torch.arange(max_dim).reshape(1, max_dim).to(device))
else:
out = (index_tensor == torch.arange(max_dim).reshape(1, max_dim).to(device))
return out
def batch_bin_encode(bin_tensor, device):
    """Encode a binary vector (or batch of them) into arbitrary-precision ints.

    The feature axis is consumed 50 bits at a time (`unit`) so each partial
    value fits in the int64 arithmetic of `batch_bin_encode_64`; chunks are
    combined with Python bigint arithmetic (earlier chunks end up in the
    more-significant bits).

    Args:
        bin_tensor: 1-D (dim,) or 2-D (nbatch, dim) tensor/array of 0/1
            values; anything else aborts via assert.
        device: device passed through to `batch_bin_encode_64`.

    Returns:
        A list of ints for 2-D input, a single int for 1-D input.
    """
    dim = len(bin_tensor.shape)
    feat_dim = bin_tensor.shape[-1]
    bias = 0
    unit = 50  # bits per chunk; keeps each partial value inside int64 range
    if dim == 2:
        NB = bin_tensor.shape[0]
        output = [0]*NB
        num_iter = feat_dim//unit + 1
        for i in range(num_iter):
            ed = min(feat_dim, bias + unit)
            out = batch_bin_encode_64(bin_tensor[:, bias:ed], device)
            out_list = out.tolist()
            # Shift previous chunks left by `unit` bits, add this chunk.
            output = [output[j]*pow(2, unit) + val for j, val in enumerate(out_list)]
            bias += unit
            if ed == feat_dim:
                break
        return output
    elif dim == 1:
        output = 0
        num_iter = feat_dim//unit + 1
        for i in range(num_iter):
            ed = min(feat_dim, bias + unit)
            out = batch_bin_encode_64(bin_tensor[bias:ed], device)
            # Same shift-and-add combination as the batched branch.
            output = output*pow(2, unit)+out
            bias += unit
            if ed == feat_dim:
                break
        return output
    else:
        print('Input type error!')
        print('input_type=')
        print(type(bin_tensor))
        print('input_shape=', bin_tensor.shape)
        assert(False)
def batch_bin_encode_64(bin_tensor, device):
    """Encode short {0,1} vectors (last dim <= ~63 bits) as ints, LSB first.

    Torch input: 2-D (Nbatch x dim) -> long tensor of codes; 1-D -> Python int.
    Numpy input: encoded via a plain dot product.  Other types abort.
    """
    if type(bin_tensor) == torch.Tensor:
        weights = torch.from_numpy(1 << np.arange(bin_tensor.shape[-1])).to(device)
        as_long = bin_tensor.type(torch.long)
        if bin_tensor.dim() == 2:
            return torch.mv(as_long, weights)
        return torch.dot(as_long, weights).item()
    elif type(bin_tensor) == np.ndarray:
        return bin_tensor.dot(1 << np.arange(bin_tensor.shape[-1]))
    else:
        print('Input type error!')
        print('input_type=')
        print(type(bin_tensor))
        print('input_shape=', bin_tensor.shape)
        assert False
| 34
| 131
| 0.61445
|
acfe2be19e644b99973038225182474c17a7594c
| 14,197
|
py
|
Python
|
lasagne/networks/lenet.py
|
kzhai/Lasagne
|
67b0cd4ea4920f0339892e979b62413605e9fb71
|
[
"MIT"
] | null | null | null |
lasagne/networks/lenet.py
|
kzhai/Lasagne
|
67b0cd4ea4920f0339892e979b62413605e9fb71
|
[
"MIT"
] | null | null | null |
lasagne/networks/lenet.py
|
kzhai/Lasagne
|
67b0cd4ea4920f0339892e979b62413605e9fb71
|
[
"MIT"
] | null | null | null |
import logging
import numpy
from lasagne import Xinit
from lasagne import init
from lasagne import layers
logger = logging.getLogger(__name__)
__all__ = [
"LeNetFromSpecifications",
"LeNetFromPretrainedModel",
#
"AdaptiveLeNetFromSpecifications",
"AdaptiveLeNetFromPretrainedModel",
]
def LeNetFromSpecifications(input_layer,
                            conv_filters,
                            conv_nonlinearities,
                            # convolution_filter_sizes=None,
                            # maxpooling_sizes=None,
                            pool_modes,
                            dense_dimensions,
                            dense_nonlinearities,
                            layer_activation_types,
                            layer_activation_parameters,
                            layer_activation_styles,
                            conv_kernel_sizes=(5, 5),
                            conv_strides=(1, 1),
                            conv_pads=2,
                            pool_kernel_sizes=(3, 3),
                            pool_strides=(2, 2),
                            ):
    """Build a LeNet-style network on top of *input_layer*.

    Stacks one dropout -> conv (-> optional pool) block per entry of
    *conv_filters*, then one dropout -> dense block per entry of
    *dense_dimensions*.  The three ``layer_activation_*`` lists are consumed
    in order, one entry per conv/dense layer, and configure the dropout
    layer inserted in front of it.

    NOTE(review): all conv layers share ``conv_kernel_sizes`` /
    ``conv_strides`` / ``conv_pads`` (likewise for pooling) -- these are
    single values, not per-layer lists.
    """
    assert len(layer_activation_types) == len(dense_nonlinearities) + len(conv_nonlinearities)
    assert len(layer_activation_parameters) == len(dense_nonlinearities) + len(conv_nonlinearities)
    assert len(layer_activation_styles) == len(dense_nonlinearities) + len(conv_nonlinearities)
    assert len(conv_filters) == len(conv_nonlinearities)
    assert len(conv_filters) == len(pool_modes)
    dropout_layer_index = 0
    neural_network = input_layer
    for conv_layer_index in range(len(conv_filters)):
        # dropout in front of the conv layer, with per-unit activation probabilities
        input_layer_shape = layers.get_output_shape(neural_network)[1:]
        previous_layer_shape = numpy.prod(input_layer_shape)
        activation_probability = layers.sample_activation_probability(previous_layer_shape,
                                                                      layer_activation_styles[dropout_layer_index],
                                                                      layer_activation_parameters[
                                                                          dropout_layer_index])
        activation_probability = numpy.reshape(activation_probability, input_layer_shape)
        neural_network = layer_activation_types[dropout_layer_index](neural_network,
                                                                     activation_probability=activation_probability)
        dropout_layer_index += 1
        conv_filter = conv_filters[conv_layer_index]
        conv_nonlinearity = conv_nonlinearities[conv_layer_index]
        # conv_filter_size = convolution_filter_sizes[conv_layer_index]
        conv_kernel_size = conv_kernel_sizes
        conv_stride = conv_strides
        conv_pad = conv_pads
        # Convolutional layer with 32 kernels of size 5x5. Strided and padded convolutions are supported as well see the docstring.
        neural_network = layers.Conv2DLayer(neural_network,
                                            W=init.GlorotUniform(gain=Xinit.GlorotUniformGain[conv_nonlinearity]),
                                            b=init.Constant(0.),
                                            nonlinearity=conv_nonlinearity,
                                            num_filters=conv_filter,
                                            filter_size=conv_kernel_size,
                                            stride=conv_stride,
                                            pad=conv_pad,
                                            )
        pool_mode = pool_modes[conv_layer_index]
        if pool_mode is not None:
            pool_kernel_size = pool_kernel_sizes
            pool_stride = pool_strides
            # Max-pooling layer of factor 2 in both dimensions; skipped when the
            # feature map is already smaller than the pooling window.
            # ``continue`` is safe: pooling is the last statement of the loop body.
            filter_size_for_pooling = layers.get_output_shape(neural_network)[2:]
            if numpy.any(filter_size_for_pooling < pool_kernel_size):
                print("warning: filter size %s is smaller than pooling size %s, skip pooling layer" % (
                    layers.get_output_shape(neural_network), pool_kernel_size))
                continue
            neural_network = layers.Pool2DLayer(neural_network, pool_size=pool_kernel_size, stride=pool_stride,
                                                mode=pool_mode)
    # neural_network = layers.ReshapeLayer(neural_network, (-1, numpy.prod(layers.get_output_shape(neural_network)[1:])))
    assert len(dense_dimensions) == len(dense_nonlinearities)
    for dense_layer_index in range(len(dense_dimensions)):
        # dropout in front of each dense layer
        input_layer_shape = layers.get_output_shape(neural_network)[1:]
        previous_layer_shape = numpy.prod(input_layer_shape)
        activation_probability = layers.sample_activation_probability(previous_layer_shape,
                                                                      layer_activation_styles[dropout_layer_index],
                                                                      layer_activation_parameters[
                                                                          dropout_layer_index])
        activation_probability = numpy.reshape(activation_probability, input_layer_shape)
        neural_network = layer_activation_types[dropout_layer_index](neural_network,
                                                                     activation_probability=activation_probability)
        dropout_layer_index += 1
        layer_shape = dense_dimensions[dense_layer_index]
        layer_nonlinearity = dense_nonlinearities[dense_layer_index]
        neural_network = layers.DenseLayer(neural_network,
                                           layer_shape,
                                           W=init.GlorotUniform(gain=Xinit.GlorotUniformGain[layer_nonlinearity]),
                                           # This is ONLY for CIFAR-10 dataset.
                                           # W=init.HeNormal('relu'),
                                           nonlinearity=layer_nonlinearity)
    return neural_network
def LeNetFromPretrainedModel(input_layer, pretrained_network):
    """Rebuild a LeNet on top of *input_layer*, reusing the layer structure
    and parameters of *pretrained_network*.

    NOTE(review): the previous version re-initialised convolutional weights
    with GlorotUniform instead of reusing ``layer.W``/``layer.b``, which was
    inconsistent with the DenseLayer branch and defeated the purpose of a
    "from pretrained model" constructor; conv layers now share the
    pretrained parameters as well.
    """
    neural_network = input_layer
    for layer in layers.get_all_layers(pretrained_network):
        if isinstance(layer, layers.DenseLayer):
            neural_network = layers.DenseLayer(neural_network, layer.num_units, W=layer.W, b=layer.b,
                                               nonlinearity=layer.nonlinearity)
        if isinstance(layer, layers.BernoulliDropoutLayer):
            neural_network = layers.BernoulliDropoutLayer(neural_network,
                                                          activation_probability=layer.activation_probability)
        if isinstance(layer, layers.Conv2DLayer):
            # share the pretrained filters, matching the DenseLayer handling
            neural_network = layers.Conv2DLayer(neural_network,
                                                W=layer.W,
                                                b=layer.b,
                                                nonlinearity=layer.nonlinearity,
                                                num_filters=layer.num_filters,
                                                filter_size=layer.filter_size,
                                                stride=layer.stride,
                                                pad=layer.pad,
                                                )
        if isinstance(layer, layers.Pool2DLayer):
            neural_network = layers.Pool2DLayer(neural_network, pool_size=layer.pool_size, stride=layer.stride,
                                                mode=layer.mode)
    return neural_network
# Alias: the adaptive variant needs no separate specification builder -- the
# dropout layer types are supplied by the caller via layer_activation_types.
AdaptiveLeNetFromSpecifications = LeNetFromSpecifications
def AdaptiveLeNetFromPretrainedModel(input_layer, pretrained_network):
    """Rebuild a LeNet from *pretrained_network*, swapping BernoulliDropout
    layers for AdaptiveDropout layers while reusing all trained parameters.

    NOTE(review): as in LeNetFromPretrainedModel, conv layers previously
    re-initialised their weights with GlorotUniform instead of reusing the
    pretrained ``layer.W``/``layer.b``; they now share parameters like the
    DenseLayer branch does.
    """
    neural_network = input_layer
    for layer in layers.get_all_layers(pretrained_network):
        if isinstance(layer, layers.DenseLayer):
            neural_network = layers.DenseLayer(neural_network, layer.num_units, W=layer.W, b=layer.b,
                                               nonlinearity=layer.nonlinearity)
        if isinstance(layer, layers.BernoulliDropoutLayer):
            # the one behavioral difference from LeNetFromPretrainedModel
            neural_network = layers.AdaptiveDropoutLayer(neural_network,
                                                         activation_probability=layer.activation_probability)
        if isinstance(layer, layers.Conv2DLayer):
            neural_network = layers.Conv2DLayer(neural_network,
                                                W=layer.W,
                                                b=layer.b,
                                                nonlinearity=layer.nonlinearity,
                                                num_filters=layer.num_filters,
                                                filter_size=layer.filter_size,
                                                stride=layer.stride,
                                                pad=layer.pad,
                                                )
        if isinstance(layer, layers.Pool2DLayer):
            neural_network = layers.Pool2DLayer(neural_network, pool_size=layer.pool_size, stride=layer.stride,
                                                mode=layer.mode)
    return neural_network
'''
def AdaptiveLeNet(input_layer,
conv_filters,
conv_nonlinearities,
# convolution_filter_sizes=None,
pool_modes,
dense_dimensions,
dense_nonlinearities,
layer_activation_types,
layer_activation_parameters,
layer_activation_styles,
conv_kernel_sizes=(5, 5),
conv_strides=(1, 1),
conv_pads=2,
pool_kernel_sizes=(3, 3),
pool_strides=(2, 2),
):
assert len(layer_activation_types) == len(dense_nonlinearities) + len(conv_nonlinearities)
assert len(layer_activation_parameters) == len(dense_nonlinearities) + len(conv_nonlinearities)
assert len(layer_activation_styles) == len(dense_nonlinearities) + len(conv_nonlinearities)
assert len(conv_filters) == len(conv_nonlinearities)
assert len(conv_filters) == len(pool_modes)
dropout_layer_index = 0
neural_network = input_layer
for conv_layer_index in range(len(conv_filters)):
input_layer_shape = layers.get_output_shape(neural_network)[1:]
previous_layer_shape = numpy.prod(input_layer_shape)
activation_probability = layers.sample_activation_probability(previous_layer_shape,
layer_activation_styles[dropout_layer_index],
layer_activation_parameters[
dropout_layer_index])
activation_probability = numpy.reshape(activation_probability, input_layer_shape)
neural_network = layer_activation_types[dropout_layer_index](neural_network,
activation_probability=activation_probability)
dropout_layer_index += 1
conv_filter = conv_filters[conv_layer_index]
conv_nonlinearity = conv_nonlinearities[conv_layer_index]
# conv_filter_size = convolution_filter_sizes[conv_layer_index]
conv_kernel_size = conv_kernel_sizes
conv_stride = conv_strides
conv_pad = conv_pads
# Convolutional layer with 32 kernels of size 5x5. Strided and padded convolutions are supported as well see the docstring.
neural_network = layers.Conv2DLayer(neural_network,
W=init.GlorotUniform(gain=init.GlorotUniformGain[conv_nonlinearity]),
# This is ONLY for CIFAR-10 dataset.
# W=init.Uniform(0.1**(1+len(convolution_filters)-conv_layer_index)),
# W=init.HeNormal(gain=0.1),
# b=init.Constant(1.0 * (conv_layer_index!=0)),
b=init.Constant(0.),
nonlinearity=conv_nonlinearity,
num_filters=conv_filter,
filter_size=conv_kernel_size,
stride=conv_stride,
pad=conv_pad,
)
pool_mode = pool_modes[conv_layer_index]
if pool_mode is not None:
pool_kernel_size = pool_kernel_sizes
pool_stride = pool_strides
# Max-pooling layer of factor 2 in both dimensions:
filter_size_for_pooling = layers.get_output_shape(neural_network)[2:]
if numpy.any(filter_size_for_pooling < pool_kernel_size):
print("warning: filter size %s is smaller than pooling size %s, skip pooling layer" % (
layers.get_output_shape(neural_network), pool_kernel_size))
continue
neural_network = layers.Pool2DLayer(neural_network, pool_size=pool_kernel_size, stride=pool_stride,
mode=pool_mode)
#neural_network = layers.ReshapeLayer(neural_network, (-1, numpy.prod(layers.get_output_shape(neural_network)[1:])))
assert len(dense_dimensions) == len(dense_nonlinearities)
for dense_layer_index in range(len(dense_dimensions)):
input_layer_shape = layers.get_output_shape(neural_network)[1:]
previous_layer_shape = numpy.prod(input_layer_shape)
activation_probability = layers.sample_activation_probability(previous_layer_shape,
layer_activation_styles[dropout_layer_index],
layer_activation_parameters[
dropout_layer_index])
activation_probability = numpy.reshape(activation_probability, input_layer_shape)
neural_network = layer_activation_types[dropout_layer_index](neural_network,
activation_probability=activation_probability)
dropout_layer_index += 1
layer_shape = dense_dimensions[dense_layer_index]
layer_nonlinearity = dense_nonlinearities[dense_layer_index]
neural_network = layers.DenseLayer(neural_network,
layer_shape,
W=init.GlorotUniform(gain=init.GlorotUniformGain[layer_nonlinearity]),
# This is ONLY for CIFAR-10 dataset.
# W=init.HeNormal('relu'),
nonlinearity=layer_nonlinearity)
return neural_network
'''
| 49.295139
| 126
| 0.599775
|
acfe2c290cacc543b8927e74e725cb4c219469d2
| 13,473
|
py
|
Python
|
jsonpickle/util.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
jsonpickle/util.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
jsonpickle/util.py
|
EnjoyLifeFund/macHighSierra-py36-pkgs
|
5668b5785296b314ea1321057420bcd077dba9ea
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008 John Paulett (john -at- paulett.org)
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""Helper functions for pickling and unpickling. Most functions assist in
determining the type of an object.
"""
from __future__ import absolute_import, division, unicode_literals
import base64
import collections
import io
import operator
import time
import types
import inspect
from . import tags
from .compat import set, unicode, long, bytes, PY3
if not PY3:
import __builtin__
SEQUENCES = (list, set, tuple)
SEQUENCES_SET = set(SEQUENCES)
PRIMITIVES = set((unicode, bool, float, int, long))
def is_type(obj):
    """Return True when *obj* is itself a type (class), not an instance.

    >>> is_type(1)
    False
    >>> is_type(object)
    True
    """
    # "isinstance" rather than "type(obj) is type" so metaclass instances qualify
    if not PY3:
        return isinstance(obj, (type, types.ClassType))
    return isinstance(obj, type)
def has_method(obj, name):
    """Return True when *obj* (instance or class) exposes a usable method *name*.

    Walks the MRO to locate the original attribute so staticmethods,
    classmethods and bound instance methods can be told apart; plain
    functions merely stored on an instance do not count.
    """
    # false if attribute doesn't exist
    if not hasattr(obj, name):
        return False
    func = getattr(obj, name)
    # builtin descriptors like __getnewargs__
    if isinstance(func, types.BuiltinMethodType):
        return True
    # note that FunctionType has a different meaning in py2/py3
    if not isinstance(func, (types.MethodType, types.FunctionType)):
        return False
    # need to go through __dict__'s since in py3 methods are essentially descriptors
    base_type = obj if is_type(obj) else obj.__class__  # __class__ for old-style classes
    original = None
    for subtype in inspect.getmro(base_type):  # there is no .mro() for old-style classes
        original = vars(subtype).get(name)
        if original is not None:
            break
    # name not found in the mro
    if original is None:
        return False
    # static methods are always fine
    if isinstance(original, staticmethod):
        return True
    # at this point, the method has to be an instancemethod or a classmethod
    self_attr = '__self__' if PY3 else 'im_self'
    if not hasattr(func, self_attr):
        return False
    bound_to = getattr(func, self_attr)
    # class methods: check the binding target is an ancestor class
    if isinstance(original, classmethod):
        return issubclass(base_type, bound_to)
    # bound methods: obj must be an instance of the binding target's type
    return isinstance(obj, type(bound_to))
def is_object(obj):
    """Return True for object instances; False for types and functions.

    >>> is_object(1)
    True
    >>> is_object(lambda x: 1)
    False
    """
    if isinstance(obj, (type, types.FunctionType)):
        return False
    return isinstance(obj, object)
def is_primitive(obj):
    """Return True for None and the scalar primitive types
    (unicode string, bool, float, int, long).

    >>> is_primitive(3)
    True
    >>> is_primitive([4, 4])
    False
    """
    return obj is None or type(obj) in PRIMITIVES
def is_dictionary(obj):
    """Return True only for exact builtin ``dict`` instances (no subclasses).

    >>> is_dictionary({'key': 'value'})
    True
    """
    return dict is type(obj)
def is_sequence(obj):
    """Return True for exact list/set/tuple instances (no subclasses).

    >>> is_sequence([4])
    True
    """
    return type(obj) in SEQUENCES_SET
def is_list(obj):
    """Return True only for exact builtin ``list`` instances.

    >>> is_list([4])
    True
    """
    return list is type(obj)
def is_set(obj):
    """Return True only for exact builtin ``set`` instances.

    >>> is_set(set())
    True
    """
    return set is type(obj)
def is_bytes(obj):
    """Return True only for exact bytestring instances.

    >>> is_bytes(b'foo')
    True
    """
    return bytes is type(obj)
def is_unicode(obj):
    """Return True only for exact unicode string instances."""
    return unicode is type(obj)
def is_tuple(obj):
    """Return True only for exact builtin ``tuple`` instances.

    >>> is_tuple((1,))
    True
    """
    return tuple is type(obj)
def is_dictionary_subclass(obj):
    """Return True for instances of dict *subclasses*, excluding dict itself.

    >>> class Temp(dict): pass
    >>> is_dictionary_subclass(Temp())
    True
    """
    # TODO: add UserDict
    cls = getattr(obj, '__class__', None)
    if cls is None:
        return False
    return issubclass(cls, dict) and not is_dictionary(obj)
def is_sequence_subclass(obj):
    """Return True for subclasses (or duck-typed list-alikes) of
    list/set/tuple, excluding the exact builtin types themselves.

    >>> class Temp(list): pass
    >>> is_sequence_subclass(Temp())
    True
    """
    if not hasattr(obj, '__class__'):
        return False
    if is_sequence(obj):
        return False
    return issubclass(obj.__class__, SEQUENCES) or is_list_like(obj)
def is_noncomplex(obj):
    """Return True for special (weird) stdlib values that are more complex
    than primitives but are not full objects.  Currently only
    :class:`~time.struct_time`.
    """
    return type(obj) is time.struct_time
def is_function(obj):
    """Return True when *obj* is a function, method, lambda or builtin callable.

    >>> is_function(lambda x: 1)
    True
    >>> is_function(1)
    False
    """
    function_types = (types.FunctionType,
                      types.MethodType,
                      types.LambdaType,
                      types.BuiltinFunctionType,
                      types.BuiltinMethodType)
    if type(obj) in function_types:
        return True
    if not hasattr(obj, '__class__'):
        return False
    # fall back on the class identity for callables whose type we can't
    # enumerate directly (e.g. method-wrapper)
    module = translate_module_name(obj.__class__.__module__)
    name = obj.__class__.__name__
    return (module == '__builtin__' and
            name in ('function',
                     'builtin_function_or_method',
                     'instancemethod',
                     'method-wrapper'))
def is_module_function(obj):
    """Return True when *obj* is a named module-global function.

    >>> is_module_function(lambda: None)
    False
    """
    if not hasattr(obj, '__class__'):
        return False
    if not isinstance(obj, types.FunctionType):
        return False
    if not (hasattr(obj, '__module__') and hasattr(obj, '__name__')):
        return False
    return obj.__name__ != '<lambda>'
def is_module(obj):
    """Return True when *obj* is a module object.

    >>> import os
    >>> is_module(os)
    True
    """
    return isinstance(obj, types.ModuleType)
def is_picklable(name, value):
    """Return True when (*name*, *value*) may be pickled.

    Reserved tag names are never picklable; functions are picklable only
    when they are module-global (lambdas and bound callables are not).
    """
    if name in tags.RESERVED:
        return False
    if is_module_function(value):
        return True
    return not is_function(value)
def is_installed(module):
    """Return True when *module* can be imported from sys.path.

    >>> is_installed('sys')
    True
    """
    try:
        __import__(module)
    except ImportError:
        return False
    return True
def is_list_like(obj):
    """Duck-typed list check: indexable AND appendable."""
    return all(hasattr(obj, attr) for attr in ('__getitem__', 'append'))
def is_iterator(obj):
    """Return True for iterator objects, excluding file/IO streams.

    Fix: ``collections.Iterator`` (a deprecated alias of
    ``collections.abc.Iterator``) was removed in Python 3.10; import the ABC
    from ``collections.abc`` on Python 3 so this keeps working.
    """
    is_file = False
    if PY3:
        from collections.abc import Iterator
    else:
        Iterator = collections.Iterator
        is_file = isinstance(obj, __builtin__.file)
    return (isinstance(obj, Iterator) and
            not isinstance(obj, io.IOBase) and not is_file)
def is_collections(obj):
    """Return True when *obj*'s type is defined in the ``collections`` module.

    Fix: the original used a bare ``except:``, which also swallowed
    SystemExit/KeyboardInterrupt; narrow it to ``Exception``.
    """
    try:
        return type(obj).__module__ == 'collections'
    except Exception:
        return False
IteratorType = type(iter(''))
def is_reducible(obj):
    """
    Returns false if of a type which have special casing, and should not have their
    __reduce__ methods used
    """
    # defaultdicts may contain functions which we cannot serialise
    if is_collections(obj) and not isinstance(obj, collections.defaultdict):
        return True
    # every family below has a dedicated serialisation path, so __reduce__
    # must NOT be consulted for any of them
    return (not
            (is_list(obj) or
             is_list_like(obj) or
             is_primitive(obj) or
             is_bytes(obj) or
             is_unicode(obj) or
             is_dictionary(obj) or
             is_sequence(obj) or
             is_set(obj) or
             is_tuple(obj) or
             is_dictionary_subclass(obj) or
             is_sequence_subclass(obj) or
             is_function(obj) or
             is_module(obj) or
             is_iterator(obj) or
             # NOTE(review): presumably guards exotic objects whose __slots__
             # attribute is exposed as an iterator -- confirm before relying on it
             type(getattr(obj, '__slots__', None)) is IteratorType or
             type(obj) is object or
             obj is object or
             # datetime classes have their own handlers
             (is_type(obj) and obj.__module__ == 'datetime')
             ))
def in_dict(obj, key, default=False):
    """Whether *key* is present in ``obj.__dict__``.

    Returns *default* when ``__dict__`` is absent (or empty, since an empty
    dict is falsy here).
    """
    attributes = getattr(obj, '__dict__', None)
    return (key in attributes) if attributes else default
def in_slots(obj, key, default=False):
    """Whether *key* is present in ``obj.__slots__``.

    Returns *default* when ``__slots__`` is absent (or empty/falsy).
    """
    slots = getattr(obj, '__slots__', None)
    return (key in slots) if slots else default
def has_reduce(obj):
    """
    Tests if __reduce__ or __reduce_ex__ exists in the object dict or
    in the class dicts of every class in the MRO *except object*.

    Returns a tuple of booleans (has_reduce, has_reduce_ex)
    """
    if not is_reducible(obj) or is_type(obj):
        return (False, False)
    # in this case, reduce works and is desired
    # notwithstanding depending on default object
    # reduce
    if is_noncomplex(obj):
        return (False, True)
    has_reduce = False
    has_reduce_ex = False
    REDUCE = '__reduce__'
    REDUCE_EX = '__reduce_ex__'
    # For object instance
    has_reduce = in_dict(obj, REDUCE) or in_slots(obj, REDUCE)
    has_reduce_ex = in_dict(obj, REDUCE_EX) or in_slots(obj, REDUCE_EX)
    # turn to the MRO
    for base in type(obj).__mro__:
        if is_reducible(base):
            has_reduce = has_reduce or in_dict(base, REDUCE)
            has_reduce_ex = has_reduce_ex or in_dict(base, REDUCE_EX)
        # early exit once both have been found
        if has_reduce and has_reduce_ex:
            return (has_reduce, has_reduce_ex)
    # for things that don't have a proper dict but can be getattred (rare, but includes some
    # builtins)
    cls = type(obj)
    object_reduce = getattr(object, REDUCE)
    object_reduce_ex = getattr(object, REDUCE_EX)
    if not has_reduce:
        # NOTE: may assign the attribute object itself (truthy) rather than a
        # bool -- callers rely only on truthiness
        has_reduce_cls = getattr(cls, REDUCE, False)
        if not has_reduce_cls is object_reduce:
            has_reduce = has_reduce_cls
    if not has_reduce_ex:
        has_reduce_ex_cls = getattr(cls, REDUCE_EX, False)
        if not has_reduce_ex_cls is object_reduce_ex:
            has_reduce_ex = has_reduce_ex_cls
    return (has_reduce, has_reduce_ex)
def translate_module_name(module):
    """Normalise builtin module names to the consistent Python 2 spelling.

    Serialised references use '__builtin__' so payloads load under both
    Python 2 and 3; the Python 2 'exceptions' module is folded into
    '__builtin__' as well, since the latter is a superset of it.
    See untranslate_module_name() for the reverse mapping.
    """
    if module == 'exceptions' or (PY3 and module == 'builtins'):
        return '__builtin__'
    return module
def untranslate_module_name(module):
    """Map serialised module names back to something importable here.

    Reverses translate_module_name(): on Python 3 both '__builtin__' and
    'exceptions' resolve to the 'builtins' module.
    """
    if PY3 and module in ('__builtin__', 'exceptions'):
        return 'builtins'
    return module
def importable_name(cls):
    """Return the dotted '<module>.<name>' path for *cls*, with the module
    name normalised via translate_module_name().

    >>> importable_name(type(25)) == '__builtin__.int'
    True
    >>> importable_name(AttributeError) == '__builtin__.AttributeError'
    True
    """
    return '%s.%s' % (translate_module_name(cls.__module__), cls.__name__)
def b64encode(data):
    """Base64-encode *data*, returning native text (str) on Python 3."""
    encoded = base64.b64encode(data)
    if PY3 and type(encoded) is bytes:
        encoded = encoded.decode('ascii')
    return encoded
def b64decode(payload):
    """Decode a base64 *payload*, accepting text input on Python 3."""
    raw = payload if not PY3 or type(payload) is bytes else bytes(payload, 'ascii')
    return base64.b64decode(raw)
def itemgetter(obj, getter=operator.itemgetter(0)):
    """Apply *getter* to *obj* and coerce the result to a unicode string.

    The default extracts element 0.  (The default is evaluated once at
    definition time, which is safe here because itemgetter objects are
    immutable and stateless.)
    """
    return unicode(getter(obj))
| 25.859885
| 92
| 0.634083
|
acfe2c63a73aa50afc31d572fed61d762d5c3c23
| 4,330
|
py
|
Python
|
benchmark/startQiskit3288.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit3288.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
benchmark/startQiskit3288.py
|
UCLA-SEAL/QDiff
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
[
"BSD-3-Clause"
] | null | null | null |
# qubit number=4
# total number=44
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
    """XOR two equal-length bit strings; the result string is reversed."""
    n = len(s)
    bits = [str(int(s[i]) ^ int(t[i])) for i in range(n)]
    return ''.join(reversed(bits))
def bitwise_dot(s: str, t: str) -> str:
    """Inner product of two bit strings modulo 2, returned as '0' or '1'."""
    total = sum(int(s[i]) * int(t[i]) for i in range(len(s)))
    return str(total % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
    """Build the bit-flip oracle O_f on n control qubits plus 1 target.

    For each input bitstring rep with f(rep) == "1", X gates map the control
    pattern to all-ones, a multi-controlled Toffoli flips the target, and the
    X gates are undone.  Cost is exponential in n (2**n patterns scanned).
    """
    # implement the oracle O_f
    # NOTE: use multi_control_toffoli_gate ('noancilla' mode)
    # https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
    # https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
    # https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
    controls = QuantumRegister(n, "ofc")
    target = QuantumRegister(1, "oft")
    oracle = QuantumCircuit(controls, target, name="Of")
    for i in range(2 ** n):
        rep = np.binary_repr(i, n)
        if f(rep) == "1":
            # flip zero-controls so the pattern activates the Toffoli
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
            oracle.mct(controls, target[0], None, mode='noancilla')
            # uncompute the X gates
            for j in range(n):
                if rep[j] == "0":
                    oracle.x(controls[j])
    # oracle.barrier()
    return oracle
def make_circuit(n: int, f) -> QuantumCircuit:
    """Assemble the benchmark circuit on n qubits and measure all of them.

    The gate sequence (and its ``# number=NN`` provenance comments) is
    auto-generated by the QDiff benchmark tooling -- do not reorder gates by
    hand; the oracle for f is appended in the middle of the sequence.
    """
    # circuit begin
    input_qubit = QuantumRegister(n, "qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    prog.h(input_qubit[3])  # number=19
    prog.cz(input_qubit[0], input_qubit[3])  # number=20
    prog.h(input_qubit[3])  # number=21
    prog.cx(input_qubit[0], input_qubit[3])  # number=23
    prog.x(input_qubit[3])  # number=24
    prog.cx(input_qubit[0], input_qubit[3])  # number=25
    prog.cx(input_qubit[0], input_qubit[3])  # number=17
    prog.rx(-0.48380526865282825, input_qubit[3])  # number=26
    prog.h(input_qubit[1])  # number=2
    prog.y(input_qubit[3])  # number=18
    prog.h(input_qubit[2])  # number=3
    prog.h(input_qubit[3])  # number=4
    prog.y(input_qubit[3])  # number=12
    prog.h(input_qubit[0])  # number=5
    # oracle acts on the first n-1 qubits with the last qubit as target
    oracle = build_oracle(n - 1, f)
    prog.append(oracle.to_gate(), [input_qubit[i] for i in range(n - 1)] + [input_qubit[n - 1]])
    prog.h(input_qubit[1])  # number=6
    prog.h(input_qubit[2])  # number=7
    prog.h(input_qubit[1])  # number=34
    prog.cz(input_qubit[0], input_qubit[1])  # number=35
    prog.h(input_qubit[1])  # number=36
    prog.cx(input_qubit[0], input_qubit[1])  # number=31
    prog.cx(input_qubit[0], input_qubit[1])  # number=38
    prog.cx(input_qubit[0], input_qubit[1])  # number=41
    prog.x(input_qubit[1])  # number=42
    prog.cx(input_qubit[0], input_qubit[1])  # number=43
    prog.cx(input_qubit[0], input_qubit[1])  # number=40
    prog.cx(input_qubit[0], input_qubit[1])  # number=33
    prog.cx(input_qubit[0], input_qubit[1])  # number=30
    prog.h(input_qubit[3])  # number=8
    prog.h(input_qubit[3])  # number=37
    prog.h(input_qubit[0])  # number=9
    prog.y(input_qubit[2])  # number=10
    prog.x(input_qubit[2])  # number=22
    prog.y(input_qubit[2])  # number=11
    prog.x(input_qubit[0])  # number=13
    prog.x(input_qubit[0])  # number=14
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    a = "111"
    b = "0"
    # hidden function for the oracle: f(x) = (a . x) XOR b
    f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
    prog = make_circuit(4, f)
    backend = BasicAer.get_backend('qasm_simulator')

    sample_shot = 8000
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)

    # Fix: use a context manager so the output file is closed even if one of
    # the print calls raises (the old explicit close() was not exception-safe)
    with open("../data/startQiskit3288.csv", "w") as writefile:
        print(info, file=writefile)
        print("results end", file=writefile)
        print(circuit1.__len__(), file=writefile)
        print(circuit1, file=writefile)
| 35.203252
| 140
| 0.652425
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.