hexsha
stringlengths 40
40
| size
int64 1
1.03M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
239
| max_stars_repo_name
stringlengths 5
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
239
| max_issues_repo_name
stringlengths 5
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
239
| max_forks_repo_name
stringlengths 5
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.03M
| avg_line_length
float64 1
958k
| max_line_length
int64 1
1.03M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acfdae6d28062786239d941a733a2e4bf3fe0e36
| 3,101
|
py
|
Python
|
src/tests/t_renew.py
|
Bhanuprakash-ch/kerberos
|
bb3c878d5034210c656a97562065612611c5a6d2
|
[
"Apache-2.0"
] | 2
|
2018-01-09T18:23:08.000Z
|
2018-07-24T23:14:15.000Z
|
src/tests/t_renew.py
|
Bhanuprakash-ch/kerberos
|
bb3c878d5034210c656a97562065612611c5a6d2
|
[
"Apache-2.0"
] | null | null | null |
src/tests/t_renew.py
|
Bhanuprakash-ch/kerberos
|
bb3c878d5034210c656a97562065612611c5a6d2
|
[
"Apache-2.0"
] | 3
|
2017-03-21T18:34:02.000Z
|
2020-01-22T19:11:53.000Z
|
#!/usr/bin/python
from k5test import *
# Raise the realm's lifetime caps so the renewable-lifetime cases below
# (up to 20 hours) are limited by principal settings, not realm config.
conf = {'realms': {'$realm': {'max_life': '20h', 'max_renewable_life': '20h'}}}
realm = K5Realm(create_host=False, get_creds=False, kdc_conf=conf)
def test(testname, life, rlife, expect_renewable, env=None):
    """kinit with the given lifetime (and optional renewable-lifetime)
    flags, then verify that tickets were obtained and that they are
    renewable exactly when expect_renewable says so."""
    global realm
    kinit_flags = ['-l', life]
    if rlife is not None:
        kinit_flags += ['-r', rlife]
    realm.kinit(realm.user_princ, password('user'), flags=kinit_flags, env=env)
    out = realm.run([klist])
    if ('Default principal: %s\n' % realm.user_princ) not in out:
        fail('%s: did not get tickets' % testname)
    got_renewable = 'renew until' in out
    if got_renewable != expect_renewable:
        if expect_renewable:
            fail('%s: tickets unexpectedly non-renewable' % testname)
        else:
            fail('%s: tickets unexpectedly renewable' % testname)
# Get renewable tickets.
test('simple', '1h', '2h', True)
# Renew twice, to test that renewed tickets are renewable.
realm.kinit(realm.user_princ, flags=['-R'])
realm.kinit(realm.user_princ, flags=['-R'])
realm.klist(realm.user_princ)
# Make sure we can't renew non-renewable tickets.
test('non-renewable', '1h', '1h', False)
# kinit -R against a non-renewable TGT must fail (KDC_ERR_BADOPTION).
out = realm.kinit(realm.user_princ, flags=['-R'], expected_code=1)
if "KDC can't fulfill requested option" not in out:
    fail('expected error not seen renewing non-renewable ticket')
# Test that -allow_renewable on the client principal works.
realm.run_kadminl('modprinc -allow_renewable user')
test('disallowed client', '1h', '2h', False)
realm.run_kadminl('modprinc +allow_renewable user')
# Test that -allow_renewable on the server principal works.
realm.run_kadminl('modprinc -allow_renewable %s' % realm.krbtgt_princ)
test('disallowed server', '1h', '2h', False)
realm.run_kadminl('modprinc +allow_renewable %s' % realm.krbtgt_princ)
# Test that non-renewable tickets are issued if renew_till < till.
test('short', '2h', '1h', False)
# Test that renewable tickets are issued if till > max life by
# default, but not if we configure away the RENEWABLE-OK option.
no_opts_conf = {'libdefaults': {'kdc_default_options': '0'}}
no_opts = realm.special_env('no_opts', False, krb5_conf=no_opts_conf)
realm.run_kadminl('modprinc -maxlife "10 hours" user')
test('long', '15h', None, True)
test('long noopts', '15h', None, False, env=no_opts)
# Restore the principal max life for the remaining cases.
realm.run_kadminl('modprinc -maxlife "20 hours" user')
# Test maximum renewable life on the client principal.
realm.run_kadminl('modprinc -maxrenewlife "5 hours" user')
test('maxrenewlife client yes', '4h', '5h', True)
test('maxrenewlife client no', '6h', '10h', False)
# Test maximum renewable life on the server principal.
realm.run_kadminl('modprinc -maxrenewlife "3 hours" %s' % realm.krbtgt_princ)
test('maxrenewlife server yes', '2h', '3h', True)
test('maxrenewlife server no', '4h', '8h', False)
# Test realm maximum life.
realm.run_kadminl('modprinc -maxrenewlife "40 hours" user')
realm.run_kadminl('modprinc -maxrenewlife "40 hours" %s' % realm.krbtgt_princ)
test('maxrenewlife realm yes', '10h', '20h', True)
test('maxrenewlife realm no', '21h', '40h', False)
success('Renewing credentials')
| 41.346667
| 79
| 0.716221
|
acfdaf805993832142d78251f2fc2ce01b6da5df
| 1,677
|
py
|
Python
|
algorithms/ror13_add_sub1.py
|
everybody-lies/hashdb
|
5539a3b3db48a9c36ec2d1c460b0057c5e13ce3f
|
[
"Apache-2.0"
] | 95
|
2021-09-17T02:55:07.000Z
|
2022-03-29T10:54:40.000Z
|
algorithms/ror13_add_sub1.py
|
everybody-lies/hashdb
|
5539a3b3db48a9c36ec2d1c460b0057c5e13ce3f
|
[
"Apache-2.0"
] | 7
|
2021-10-13T20:18:21.000Z
|
2022-03-09T23:33:42.000Z
|
algorithms/ror13_add_sub1.py
|
everybody-lies/hashdb
|
5539a3b3db48a9c36ec2d1c460b0057c5e13ce3f
|
[
"Apache-2.0"
] | 11
|
2021-09-25T00:07:41.000Z
|
2022-03-22T17:26:37.000Z
|
#!/usr/bin/env python
########################################################################
# Copyright 2012 Mandiant
# Copyright 2014 FireEye
#
# Mandiant licenses this file to you under the Apache License, Version
# 2.0 (the "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# Reference:
# https://github.com/mandiant/flare-ida/blob/master/shellcode_hashes/make_sc_hash_db.py
#
########################################################################
# Plugin metadata consumed by hashdb: human-readable description, the type
# of the produced value, and a known-good test vector.
DESCRIPTION = "ROR 13 and ADD and SUB 1"
TYPE = 'unsigned_int'
TEST_1 = 2879724915

# Word width (bits) -> all-ones mask for each supported word size.
ROTATE_BITMASK = {
    8: 0xff,
    16: 0xffff,
    32: 0xffffffff,
    64: 0xffffffffffffffff,
}


def ror(inVal, numShifts, dataSize=32):
    """Emulate the ROR (rotate right) instruction on a dataSize-bit word."""
    # Rotating by zero is the identity; return before any validation,
    # matching the original behavior.
    if numShifts == 0:
        return inVal
    if not 0 <= numShifts <= dataSize:
        raise ValueError('Bad numShifts')
    if dataSize not in ROTATE_BITMASK:
        raise ValueError('Bad dataSize')
    rotated = (inVal >> numShifts) | (inVal << (dataSize - numShifts))
    return rotated & ROTATE_BITMASK[dataSize]


def hash(data):  # shadows the builtin, but the plugin API requires this name
    """ROR13-and-ADD hash over the byte values of ``data``, minus one,
    truncated to 32 bits."""
    acc = 0
    for byte in data:
        acc = ror(acc, 0xd, 32)
        acc += byte
    return (acc - 1) & 0xffffffff
| 31.055556
| 87
| 0.628503
|
acfdafa7fb16ec5f5fe30f62b2a44efc8d7a8605
| 265
|
py
|
Python
|
ELAB03/03-03.py
|
tawanchaiii/01204111_63
|
edf1174f287f5174d93729d9b5c940c74d3b6553
|
[
"WTFPL"
] | null | null | null |
ELAB03/03-03.py
|
tawanchaiii/01204111_63
|
edf1174f287f5174d93729d9b5c940c74d3b6553
|
[
"WTFPL"
] | null | null | null |
ELAB03/03-03.py
|
tawanchaiii/01204111_63
|
edf1174f287f5174d93729d9b5c940c74d3b6553
|
[
"WTFPL"
] | null | null | null |
prime = list()
for i in range(0,10000000):
prime.append(True)
for i in range(2,1000):
for j in range(2,1000):
prime[i*j] = False
n = int(input("N: "))
while True:
if(prime[n]and prime[n+2] ):
print(f"({n}, {n+2})")
break
n=n+1
| 16.5625
| 31
| 0.535849
|
acfdb0cf2b3947bda82b7f5ee751eb3022dc939d
| 22,544
|
py
|
Python
|
pgmpy/models/DynamicBayesianNetwork.py
|
NunoEdgarGFlowHub/pgmpy
|
ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce
|
[
"MIT"
] | 1
|
2016-08-27T18:30:57.000Z
|
2016-08-27T18:30:57.000Z
|
pgmpy/models/DynamicBayesianNetwork.py
|
NunoEdgarGFlowHub/pgmpy
|
ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce
|
[
"MIT"
] | null | null | null |
pgmpy/models/DynamicBayesianNetwork.py
|
NunoEdgarGFlowHub/pgmpy
|
ac0ecc8f5bdd14999c386c6b00a3ce77407b83ce
|
[
"MIT"
] | 1
|
2016-08-27T18:31:00.000Z
|
2016-08-27T18:31:00.000Z
|
from itertools import combinations
from collections import defaultdict
import numpy as np
import networkx as nx
from pgmpy.factors import TabularCPD
from pgmpy.base import DirectedGraph, UndirectedGraph
class DynamicBayesianNetwork(DirectedGraph):
    def __init__(self, ebunch=None):
        """
        Base class for Dynamic Bayesian Networks.

        A time-variant extension of the static Bayesian model represented as
        a 2-TBN (two-slice temporal Bayesian network): every variable exists
        once per time slice (0 and 1), nodes are added by bare name, and
        edges are given as ((name, slice), (name, slice)) pairs.

        Parameters
        ----------
        ebunch: edge list (or any NetworkX graph object) used to initialize
            the network; if None (default) an empty network is created.

        Examples
        --------
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> dbn = DBN()
        >>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
        ...                     (('G', 0), ('L', 0)), (('D', 0), ('D', 1))])
        >>> dbn.add_node('S')

        Public Methods
        --------------
        add_node, add_nodes_from, add_edge, add_edges_from, add_cpds,
        get_cpds, remove_cpds, check_model, initialize_initial_state,
        get_intra_edges, get_inter_edges, get_interface_nodes,
        get_slice_nodes, moralize
        """
        super(DynamicBayesianNetwork, self).__init__()
        # CPD storage and cached cardinalities, filled by add_cpds().
        self.cpds = []
        self.cardinalities = defaultdict(int)
        if ebunch:
            self.add_edges_from(ebunch)
def add_node(self, node, **attr):
"""
Adds a single node to the Network
Parameters
----------
node: node
A node can be any hashable Python object.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_node('A')
['A']
"""
super(DynamicBayesianNetwork, self).add_node((node, 0), **attr)
def add_nodes_from(self, nodes, **attr):
"""
Add multiple nodes to the Network.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, etc.).
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['A', 'B', 'C'])
"""
for node in nodes:
self.add_node(node)
def nodes(self):
"""
Returns the list of nodes present in the network
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['A', 'B', 'C'])
>>> dbn.nodes()
['B', 'A', 'C']
"""
return list(set([node for node, timeslice in
super(DynamicBayesianNetwork, self).nodes()]))
    def add_edge(self, start, end, **kwargs):
        """
        Add an edge between two nodes.

        The nodes will be automatically added if they are not present in the
        network.

        Parameters
        ----------
        start: tuple
            Both the start and end nodes should specify the time slice as
            (node_name, time_slice). Here, node_name can be any hashable
            python object while the time_slice is an integer value,
            which denotes the time slice that the node belongs to.
        end: tuple
            Both the start and end nodes should specify the time slice as
            (node_name, time_slice). Here, node_name can be any hashable
            python object while the time_slice is an integer value,
            which denotes the time slice that the node belongs to.

        Examples
        --------
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> model = DBN()
        >>> model.add_nodes_from(['D', 'I'])
        >>> model.add_edge(('D',0), ('I',0))
        >>> model.edges()
        [(('D', 1), ('I', 1)), (('D', 0), ('I', 0))]
        """
        try:
            # Validate that both endpoints are (node, int_time_slice) pairs.
            if len(start) != 2 or len(end) !=2:
                raise ValueError('Nodes must be of type (node, time_slice).')
            elif not isinstance(start[1], int) or not isinstance(end[1], int):
                raise ValueError('Nodes must be of type (node, time_slice).')
            elif start[1] == end[1]:
                # Intra-slice edge: normalize both endpoints to slice 0.
                start = (start[0], 0)
                end = (end[0], 0)
            elif start[1] == end[1] - 1:
                # Inter-slice edge: normalize to the canonical 0 -> 1 form.
                start = (start[0], 0)
                end = (end[0], 1)
            elif start[1] > end[1]:
                raise NotImplementedError('Edges in backward direction are not allowed.')
            elif start[1] != end[1]:
                raise ValueError("Edges over multiple time slices is not currently supported")
        except TypeError:
            # len()/indexing failed: the endpoints were not tuple-like.
            raise ValueError('Nodes must be of type (node, time_slice).')
        if start == end:
            raise ValueError('Self Loops are not allowed')
        elif start in super(DynamicBayesianNetwork, self).nodes() and end \
                in super(DynamicBayesianNetwork, self).nodes() and \
                nx.has_path(self, end, start):
            # A path end -> start already exists, so start -> end would close a cycle.
            raise ValueError(
                'Loops are not allowed. Adding the edge from ({start} --> {end}) forms a loop.'.format(
                    start=str(start), end=str(end)))
        super(DynamicBayesianNetwork, self).add_edge(start, end, **kwargs)
        if start[1] == end[1]:
            # Mirror intra-slice edges into the other slice so the 2-TBN stays symmetric.
            super(DynamicBayesianNetwork, self).add_edge((start[0], 1 - start[1]), (end[0], 1 - end[1]))
def add_edges_from(self, ebunch, **kwargs):
"""
Add all the edges in ebunch.
If nodes referred in the ebunch are not already present, they
will be automatically added. Node names can be any hashable python object.
Parameters
----------
ebunch : list, array-like
List of edges to add. Each edge must be of the form of
((start, time_slice), (end, time_slice)).
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0))])
>>> dbn.nodes()
['G', 'I', 'D']
>>> dbn.edges()
[(('D', 1), ('G', 1)),
(('I', 1), ('G', 1)),
(('D', 0), ('G', 0)),
(('I', 0), ('G', 0))]
"""
for edge in ebunch:
self.add_edge(edge[0], edge[1])
def get_intra_edges(self, time_slice=0):
"""
Returns the intra slice edges present in the 2-TBN.
Parameter
---------
time_slice: int (whole number)
The time slice for which to get intra edges. The timeslice
should be a positive value or zero.
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)),
... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)),
... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))])
>>> dbn.get_intra_edges()
[(('D', 0), ('G', 0)), (('G', 0), ('L', 0)), (('I', 0), ('G', 0))
"""
if not isinstance(time_slice, int) or time_slice < 0:
raise ValueError("The timeslice should be a positive value greater than or equal to zero")
return [tuple((x[0], time_slice) for x in edge) for edge in self.edges() if edge[0][1] == edge[1][1] == 0]
def get_inter_edges(self):
"""
Returns the inter-slice edges present in the 2-TBN.
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D', 0), ('G', 0)), (('I', 0), ('G', 0)),
... (('G', 0), ('L', 0)), (('D', 0), ('D', 1)),
... (('I', 0), ('I', 1)), (('G', 0), ('G', 1)),
... (('G', 0), ('L', 1)), (('L', 0), ('L', 1))])
>>> dbn.get_inter_edges()
[(('D', 0), ('D', 1)),
(('G', 0), ('G', 1)),
(('G', 0), ('L', 1)),
(('I', 0), ('I', 1)),
(('L', 0), ('L', 1))]
"""
return [edge for edge in self.edges() if edge[0][1] != edge[1][1]]
def get_interface_nodes(self, time_slice=0):
"""
Returns the nodes in the first timeslice whose children are present in the first timeslice.
Parameter
---------
time_slice:int
The timeslice should be a positive value greater than or equal to zero
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('G',0),('L',0)),(('D',0),('D',1))])
>>> dbn.get_interface_nodes()
[('D', 0)]
"""
if not isinstance(time_slice, int) or time_slice < 0:
raise ValueError("The timeslice should be a positive value greater than or equal to zero")
return [(edge[0][0], time_slice) for edge in self.get_inter_edges()]
def get_slice_nodes(self, time_slice=0):
"""
Returns the nodes present in a particular timeslice
Parameter
---------
time_slice:int
The timeslice should be a positive value greater than or equal to zero
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN()
>>> dbn.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
>>> dbn.add_edges_from([(('D', 0),('G', 0)),(('I', 0),('G', 0)),(('G', 0),('L', 0)),(('D', 0),('D', 1))])
>>> dbn.get_slice_nodes()
"""
if not isinstance(time_slice, int) or time_slice < 0:
raise ValueError("The timeslice should be a positive value greater than or equal to zero")
return [(node, time_slice) for node in self.nodes()]
def add_cpds(self, *cpds):
"""
This method adds the cpds to the dynamic bayesian network.
Note that while adding variables and the evidence in cpd,
they have to be of the following form
(node_name, time_slice)
Here, node_name is the node that is inserted
while the time_slice is an integer value, which denotes
the index of the time_slice that the node belongs to.
Parameter
---------
cpds : list, set, tuple (array-like)
List of CPDs which are to be associated with the model. Each CPD
should be an instance of `TabularCPD`.
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.factors import TabularCPD
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D', 0),('G', 0)),(('I', 0),('G', 0)),(('D', 0),('D', 1)),(('I', 0),('I', 1))])
>>> grade_cpd = TabularCPD(('G', 0), 3, [[0.3, 0.05, 0.9, 0.5],
... [0.4, 0.25, 0.8, 0.03],
... [0.3, 0.7, 0.02, 0.2]],
... evidence=[('I', 0),('D', 0)],
... evidence_card=[2, 2])
>>> d_i_cpd = TabularCPD(('D',1), 2, [[0.6, 0.3],
... [0.4, 0.7]],
... evidence=[('D',0)],
... evidence_card=2)
>>> diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
>>> intel_cpd = TabularCPD(('I', 0), 2, [[0.7, 0.3]])
>>> i_i_cpd = TabularCPD(('I', 1), 2, [[0.5, 0.4],
... [0.5, 0.6]],
... evidence=[('I', 0)],
... evidence_card=2)
>>> dbn.add_cpds(grade_cpd, d_i_cpd, diff_cpd, intel_cpd, i_i_cpd)
>>> dbn.get_cpds()
[<TabularCPD representing P(('G', 0):3 | ('I', 0):2, ('D', 0):2) at 0x7ff7f27b0cf8>,
<TabularCPD representing P(('D', 1):2 | ('D', 0):2) at 0x7ff810b9c2e8>,
<TabularCPD representing P(('D', 0):2) at 0x7ff7f27e6f98>,
<TabularCPD representing P(('I', 0):2) at 0x7ff7f27e6ba8>,
<TabularCPD representing P(('I', 1):2 | ('I', 0):2) at 0x7ff7f27e6668>]
"""
for cpd in cpds:
if not isinstance(cpd, TabularCPD):
raise ValueError('cpd should be an instance of TabularCPD')
if set(cpd.variables) - set(cpd.variables).intersection(set(
super(DynamicBayesianNetwork, self).nodes())):
raise ValueError('CPD defined on variable not in the model', cpd)
self.cpds.extend(cpds)
def get_cpds(self, node=None, time_slice=0):
"""
Returns the CPDs that have been associated with the network.
Parameter
---------
node: tuple (node_name, time_slice)
The node should be in the following form (node_name, time_slice).
Here, node_name is the node that is inserted while the time_slice is
an integer value, which denotes the index of the time_slice that the
node belongs to.
time_slice: int
The time_slice should be a positive integer greater than or equal to zero.
Examples:
-------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.factors import TabularCPD
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))])
>>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5],
... [0.4,0.25,0.8,0.03],
... [0.3,0.7,0.02,0.2]], [('I', 0),('D', 0)],[2,2])
>>> dbn.add_cpds(grade_cpd)
>>> dbn.get_cpds()
"""
# TODO: fix bugs in this
if node:
if node not in super(DynamicBayesianNetwork, self).nodes():
raise ValueError('Node not present in the model.')
else:
for cpd in self.cpds:
if cpd.variable == node:
return cpd
else:
return [cpd for cpd in self.cpds if set(list(cpd.variables)).issubset(self.get_slice_nodes(time_slice))]
def remove_cpds(self, *cpds):
"""
Removes the cpds that are provided in the argument.
Parameters
----------
*cpds : list, set, tuple (array-like)
List of CPDs which are to be associated with the model. Each CPD
should be an instance of `TabularCPD`.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> from pgmpy.factors import TabularCPD
>>> dbn = DBN()
>>> dbn.add_edges_from([(('D',0),('G',0)),(('I',0),('G',0)),(('D',0),('D',1)),(('I',0),('I',1))])
>>> grade_cpd = TabularCPD(('G',0), 3, [[0.3,0.05,0.9,0.5],
... [0.4,0.25,0.8,0.03],
... [0.3,0.7,0.02,0.2]], [('I', 0),('D', 0)],[2,2])
>>> dbn.add_cpds(grade_cpd)
>>> dbn.get_cpds()
[<TabularCPD representing P(('G', 0):3 | ('I', 0):2, ('D', 0):2) at 0x3348ab0>]
>>> dbn.remove_cpds(grade_cpd)
>>> dbn.get_cpds()
[]
"""
for cpd in cpds:
if isinstance(cpd, (tuple, list)):
cpd = self.get_cpds(cpd)
self.cpds.remove(cpd)
def check_model(self):
"""
Check the model for various errors. This method checks for the following
errors.
* Checks if the sum of the probabilities in each associated CPD for each
state is equal to 1 (tol=0.01).
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
boolean: True if everything seems to be order. Otherwise raises error
according to the problem.
"""
for node in super(DynamicBayesianNetwork, self).nodes():
cpd = self.get_cpds(node=node)
if isinstance(cpd, TabularCPD):
evidence = cpd.evidence
parents = self.get_parents(node)
if set(evidence if evidence else []) != set(parents if parents else []):
raise ValueError("CPD associated with {node} doesn't have "
"proper parents associated with it.".format(node=node))
if not np.allclose(cpd.to_factor().marginalize([node], inplace=False).values.flatten('C'),
np.ones(np.product(cpd.evidence_card)),
atol=0.01):
raise ValueError('Sum of probabilities of states for node {node}'
' is not equal to 1'.format(node=node))
return True
    def initialize_initial_state(self):
        """
        This method will automatically re-adjust the cpds and the edges added
        to the bayesian network. If an edge that is added as an intra time
        slice edge in the 0th timeslice, this method will automatically add
        it in the 1st timeslice. It will also add the cpds. However, to call
        this method, one needs to add cpds as well as the edges in the
        bayesian network of the whole skeleton including the 0th and the 1st
        timeslice.

        Examples
        --------
        >>> from pgmpy.models import DynamicBayesianNetwork as DBN
        >>> from pgmpy.factors import TabularCPD
        >>> student = DBN()
        >>> student.add_nodes_from(['D', 'G', 'I', 'S', 'L'])
        >>> student.add_edges_from([(('D', 0),('G', 0)),(('I', 0),('G', 0)),(('D', 0),('D', 1)),(('I', 0),('I', 1))])
        >>> grade_cpd = TabularCPD(('G', 0), 3, [[0.3, 0.05, 0.9, 0.5],
        ...                                      [0.4, 0.25, 0.8, 0.03],
        ...                                      [0.3, 0.7, 0.02, 0.2]],
        ...                        evidence=[('I', 0),('D', 0)],
        ...                        evidence_card=[2, 2])
        >>> d_i_cpd = TabularCPD(('D', 1), 2, [[0.6, 0.3],
        ...                                    [0.4, 0.7]],
        ...                      evidence=[('D', 0)],
        ...                      evidence_card=2)
        >>> diff_cpd = TabularCPD(('D', 0), 2, [[0.6, 0.4]])
        >>> intel_cpd = TabularCPD(('I',0), 2, [[0.7, 0.3]])
        >>> i_i_cpd = TabularCPD(('I', 1), 2, [[0.5, 0.4],
        ...                                    [0.5, 0.6]],
        ...                      evidence=[('I', 0)],
        ...                      evidence_card=2)
        >>> student.add_cpds(grade_cpd, d_i_cpd, diff_cpd, intel_cpd, i_i_cpd)
        >>> student.initialize_initial_state()
        """
        # NOTE(review): self.cpds grows while it is being iterated (add_cpds
        # extends the same list); the newly appended mirror CPDs are visited
        # too, but the `any` guard below skips them because their counterpart
        # already exists.
        for cpd in self.cpds:
            # The same variable in the opposite time slice (0 <-> 1).
            temp_var = (cpd.variable[0], 1 - cpd.variable[1])
            parents = self.get_parents(temp_var)
            if not any(x.variable == temp_var for x in self.cpds):
                # Only replicate when all parents live in one time slice
                # (vacuously true when there are no parents).
                if all(x[1] == parents[0][1] for x in parents):
                    if parents:
                        new_cpd = TabularCPD(temp_var, cpd.variable_card,
                                             cpd.values.reshape(cpd.variable_card, np.prod(cpd.evidence_card)),
                                             parents, cpd.evidence_card)
                    else:
                        # Marginal CPD: split the flat values per state.
                        new_cpd = TabularCPD(temp_var, cpd.variable_card, np.split(cpd.values, cpd.variable_card))
                    self.add_cpds(new_cpd)
        self.check_model()
def moralize(self):
"""
Removes all the immoralities in the Network and creates a moral
graph (UndirectedGraph).
A v-structure X->Z<-Y is an immorality if there is no directed edge
between X and Y.
Examples
--------
>>> from pgmpy.models import DynamicBayesianNetwork as DBN
>>> dbn = DBN([(('D',0), ('G',0)), (('I',0), ('G',0))])
>>> moral_graph = dbn.moralize()
>>> moral_graph.edges()
[(('G', 0), ('I', 0)),
(('G', 0), ('D', 0)),
(('D', 1), ('I', 1)),
(('D', 1), ('G', 1)),
(('I', 0), ('D', 0)),
(('G', 1), ('I', 1))]
"""
moral_graph = self.to_undirected()
for node in super(DynamicBayesianNetwork, self).nodes():
moral_graph.add_edges_from(combinations(
self.get_parents(node), 2))
return moral_graph
| 41.365138
| 117
| 0.490596
|
acfdb1416a982403235c499bafe29755d8a82462
| 1,246
|
py
|
Python
|
Content/Middleware/Oracle Weblogic/WorkloadManager/src/weblogic/utils.py
|
saikirangurijal/cloudcentersuite
|
5fd1907fdd1c32a8a575e2671b6a9ed9f68f2875
|
[
"Apache-2.0"
] | 8
|
2018-12-19T00:37:59.000Z
|
2020-07-16T15:05:40.000Z
|
Content/Middleware/Oracle Weblogic/WorkloadManager/src/weblogic/utils.py
|
saikirangurijal/cloudcentersuite
|
5fd1907fdd1c32a8a575e2671b6a9ed9f68f2875
|
[
"Apache-2.0"
] | 2
|
2019-03-26T17:53:20.000Z
|
2019-11-26T15:26:00.000Z
|
Content/Middleware/Oracle Weblogic/WorkloadManager/src/weblogic/utils.py
|
saikirangurijal/cloudcentersuite
|
5fd1907fdd1c32a8a575e2671b6a9ed9f68f2875
|
[
"Apache-2.0"
] | 9
|
2019-01-09T06:55:48.000Z
|
2019-11-27T17:55:03.000Z
|
import sys
import os
from os.path import exists
# Directory containing the script currently being executed.
def get_script_path():
    """Return the directory of the running script (from sys.argv[0])."""
    script_file = os.path.realpath(sys.argv[0])
    return os.path.dirname(script_file)
# Parse Properties File
def parse_file(path):
    """Parse a key=value properties file into a dict.

    Returns an empty dict when the file is missing or unreadable. Lines
    without '=' are skipped. Values keep everything after the FIRST '='
    (the original split on every '=' and silently truncated values that
    contained one). Syntax is Python 2.6+/3 compatible.
    """
    _dict = {}
    if exists(path):
        try:
            # 'r' instead of 'r+': this function only reads, so it should
            # not require write permission; the with-block guarantees the
            # handle is closed (the original leaked it).
            with open(path, 'r') as fo:
                for line in fo:
                    if "=" in line:
                        key, value = line.rstrip().split('=', 1)
                        _dict[key] = value
        except Exception as e:
            # Best-effort like the original: report and return what we have.
            print(e)
    return _dict
# Get All nodes as list
def get_nodes():
    """Build [{'name': ..., 'ip': ...}, ...] for every node of the current
    application tier from CliQr-injected environment variables.

    Exits with status 127 when cliqrAppTierName is unset or the tier's
    HOSTNAME/IP variables are missing or inconsistent, matching the
    original behavior. Syntax is Python 2.6+/3 compatible.
    """
    nodes = []
    try:
        app_tier_name = os.environ.get("cliqrAppTierName", False)
        print(app_tier_name)
        if not app_tier_name:
            # SystemExit is not an Exception subclass, so this propagates
            # past the handler below.
            sys.exit(127)
        names = str(os.environ['CliqrTier_' + app_tier_name + '_HOSTNAME']).split(',')
        ips = str(os.environ['CliqrTier_' + app_tier_name + '_IP']).split(',')
        if len(names) != len(ips):
            # The original raised IndexError here; keep the exit-127 path.
            raise ValueError("HOSTNAME/IP count mismatch")
        for name, ip in zip(names, ips):
            nodes.append({"name": name, "ip": ip})
    except Exception as err:
        print(err)
        sys.exit(127)
    return nodes
| 25.428571
| 86
| 0.517657
|
acfdb14ffb6b454a1b81998d2394c692295aab8c
| 16,531
|
py
|
Python
|
pysnmp-with-texts/IPX-RIP-PRIVATE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 8
|
2019-05-09T17:04:00.000Z
|
2021-06-09T06:50:51.000Z
|
pysnmp-with-texts/IPX-RIP-PRIVATE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 4
|
2019-05-31T16:42:59.000Z
|
2020-01-31T21:57:17.000Z
|
pysnmp-with-texts/IPX-RIP-PRIVATE-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module IPX-RIP-PRIVATE-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/IPX-RIP-PRIVATE-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:57:00 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE(review): pysmi-generated loader code -- `mibBuilder` is injected into
# this module's namespace by the pysnmp MIB loader at execution time, not
# imported here.
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ConstraintsUnion, ValueSizeConstraint, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ConstraintsUnion", "ValueSizeConstraint", "ConstraintsIntersection", "ValueRangeConstraint")
cjnProtocol, = mibBuilder.importSymbols("Cajun-ROOT", "cjnProtocol")
cjnIpxIfIndex, = mibBuilder.importSymbols("IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB", "cjnIpxIfIndex")
NetNumber, = mibBuilder.importSymbols("IPX-PRIVATE-MIB", "NetNumber")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter32, Integer32, Counter64, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, IpAddress, Bits, NotificationType, TimeTicks, iso, Gauge32, ModuleIdentity, MibIdentifier = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "Integer32", "Counter64", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "IpAddress", "Bits", "NotificationType", "TimeTicks", "iso", "Gauge32", "ModuleIdentity", "MibIdentifier")
RowStatus, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "DisplayString", "TextualConvention")
# Module identity for the Cajun IPX RIP private MIB subtree
# (enterprise OID 1.3.6.1.4.1.1751.2.43.2.20).
cjnIpxRip = ModuleIdentity((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20))
if mibBuilder.loadTexts: cjnIpxRip.setLastUpdated('9904010000Z')
if mibBuilder.loadTexts: cjnIpxRip.setOrganization("Lucent's Concord Technology Center (CTC)")
if mibBuilder.loadTexts: cjnIpxRip.setContactInfo('Marc Cochran -- mcochran@lucent.com')
if mibBuilder.loadTexts: cjnIpxRip.setDescription('Cajun IPX RIP Private MIB')
class FilterPrec(Integer32):
    """Integer32 restricted to the range 0..9999; used as the RIP filter
    precedence index (see cjnIpxRipFilterPrec below)."""
    subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 9999)
cjnIpxRipGlobalGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 1))
cjnIpxRipEnabled = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enabled", 1), ("disabled", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpxRipEnabled.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipEnabled.setDescription('Enable / Disable IPX RIP on this system.')
cjnIpxRipFilterGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2))
cjnIpxRipFilterTable = MibTable((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2, 1), )
if mibBuilder.loadTexts: cjnIpxRipFilterTable.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipFilterTable.setDescription('A list of Cajun IPX RIP filters.')
cjnIpxRipFilterEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2, 1, 1), ).setIndexNames((0, "IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB", "cjnIpxIfIndex"), (0, "IPX-RIP-PRIVATE-MIB", "cjnIpxRipFilterPrec"))
if mibBuilder.loadTexts: cjnIpxRipFilterEntry.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipFilterEntry.setDescription('A Cajun IPX RIP filter instance.')
cjnIpxRipFilterPrec = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2, 1, 1, 1), FilterPrec()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxRipFilterPrec.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipFilterPrec.setDescription('The precedence of this RIP filter. The precedence is relative to other RIP filters on the same interface.')
cjnIpxRipFilterRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2, 1, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipFilterRowStatus.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipFilterRowStatus.setDescription('The status of this row, by which new entries may be created, or old entries deleted from this table.')
cjnIpxRipFilterNetStart = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2, 1, 1, 3), NetNumber()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipFilterNetStart.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipFilterNetStart.setDescription('The first IPX network number in the range which this filter matches.')
cjnIpxRipFilterNetEnd = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2, 1, 1, 4), NetNumber()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipFilterNetEnd.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipFilterNetEnd.setDescription('The last IPX network number in the range which this filter matches.')
cjnIpxRipFilterDirection = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("inbound", 1), ("outbound", 2), ("both", 3)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipFilterDirection.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipFilterDirection.setDescription('The direction of IPX RIP packets to which this filter applies. Inbound applies the filter only to RIP packets received on the interface. Outbound applies the filter only to RIP packets sent on the interface. Both applies the filter to RIP packets sent and received on the interface.')
cjnIpxRipFilterAction = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("filter", 1), ("allow", 2)))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipFilterAction.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipFilterAction.setDescription('The action to take if this filter matches an IPX RIP entry. Filter causes the RIP entry to be ignored in received RIP packets or suppressed in sent RIP packets. Allow causes the RIP entry to be accepted in received RIP packets or advertised in sent RIP packets.')
cjnIpxRipFilterTicks = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2, 1, 1, 7), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipFilterTicks.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipFilterTicks.setDescription('Used to override the delay, in ticks, to reach the IPX network specified in the RIP entry.')
cjnIpxRipFilterHops = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 2, 1, 1, 8), Integer32()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipFilterHops.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipFilterHops.setDescription('Used to override the hops to reach the IPX network specified in the RIP entry.')
cjnIpxRipIfGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3))
cjnIpxRipIfTable = MibTable((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3, 1), )
if mibBuilder.loadTexts: cjnIpxRipIfTable.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfTable.setDescription('A list of Cajun IPX RIP interface entries.')
cjnIpxRipIfEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3, 1, 1), ).setIndexNames((0, "IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB", "cjnIpxIfIndex"))
if mibBuilder.loadTexts: cjnIpxRipIfEntry.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfEntry.setDescription('A Cajun IPX RIP interface instance.')
cjnIpxRipIfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3, 1, 1, 1), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipIfRowStatus.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfRowStatus.setDescription('The status of this row, by which new entries may be created, or old entries deleted from this table.')
cjnIpxRipIfInterpacketGap = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3, 1, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipIfInterpacketGap.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfInterpacketGap.setDescription('If set to enable(1), IPX RIP packets from periodic advertisements are sent with an interpacket gap of 55 milliseconds. If set to disable(2), no interpacket gap is used.')
cjnIpxRipIfUseMaximumPacketSize = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3, 1, 1, 3), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipIfUseMaximumPacketSize.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfUseMaximumPacketSize.setDescription('If set to enable(1), IPX RIP packets will contain as many entries as will fit in the maximum packet size allowable on the interface given the configured encapsulation type. If set to disable(2), IPX RIP packets will contain at most 50 entries.')
cjnIpxRipIfUpdateInterval = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3, 1, 1, 4), Integer32().clone(60)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipIfUpdateInterval.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfUpdateInterval.setDescription('The RIP periodic update interval, in seconds.')
cjnIpxRipIfAgeMultiplier = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3, 1, 1, 5), Integer32().clone(3)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipIfAgeMultiplier.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfAgeMultiplier.setDescription('The holding multiplier for information received in RIP updates. RIP information will be kept for the number of seconds indicated by the cjnIpxRipIfUpdateInterval multiplied by the cjnIpxRipIfAgeMultiplier.')
cjnIpxRipIfTriggeredUpdates = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('enable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipIfTriggeredUpdates.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfTriggeredUpdates.setDescription('Specified whether or not RIP updates are immediately sent on the interface in response to changes in the routing table.')
cjnIpxRipIfAdvertiseDefaultRouteOnly = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3, 1, 1, 7), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2))).clone('disable')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipIfAdvertiseDefaultRouteOnly.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfAdvertiseDefaultRouteOnly.setDescription('Specifies whether or not ONLY the default route (FFFFFFFE) is advertised in RIP updates sent on the interface.')
cjnIpxRipIfMode = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 3, 1, 1, 8), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3))).clone(namedValues=NamedValues(("talk", 1), ("listen", 2), ("both", 3))).clone('both')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: cjnIpxRipIfMode.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfMode.setDescription('The handling of RIP packets on the interface. If set to talk(1), RIP packets may be sent on the interface but not received. If set to listen(2), RIP packets may be received but not sent. If set to both(3), RIP packets may be sent and received.')
cjnIpxRipIfStatGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 4))
cjnIpxRipIfStatTable = MibTable((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 4, 1), )
if mibBuilder.loadTexts: cjnIpxRipIfStatTable.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfStatTable.setDescription('A list of Cajun IPX RIP interface statistics entries.')
cjnIpxRipIfStatEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 4, 1, 1), ).setIndexNames((0, "IPX-INTERFACE-MANAGEMENT-PRIVATE-MIB", "cjnIpxIfIndex"))
if mibBuilder.loadTexts: cjnIpxRipIfStatEntry.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfStatEntry.setDescription('A Cajun IPX RIP interface statistics instance.')
cjnIpxRipIfStatTriggeredUpdatesSent = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 4, 1, 1, 1), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxRipIfStatTriggeredUpdatesSent.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfStatTriggeredUpdatesSent.setDescription('The number of RIP triggered updates sent on the interface.')
cjnIpxRipIfStatPeriodicUpdatesSent = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 4, 1, 1, 2), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxRipIfStatPeriodicUpdatesSent.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfStatPeriodicUpdatesSent.setDescription('The number of periodic RIP updates sent on the interface.')
cjnIpxRipIfStatUpdatesReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 4, 1, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxRipIfStatUpdatesReceived.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfStatUpdatesReceived.setDescription('The number of RIP updates received on the interface.')
cjnIpxRipIfStatRequestsReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 4, 1, 1, 4), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxRipIfStatRequestsReceived.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfStatRequestsReceived.setDescription('The number of RIP requests received on the interface.')
cjnIpxRipIfStatBadPacketsReceived = MibTableColumn((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 4, 1, 1, 5), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cjnIpxRipIfStatBadPacketsReceived.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfStatBadPacketsReceived.setDescription('The number of incorrectly formatted RIP packets received on the interface.')
cjnIpxRipIfStatsReset = MibScalar((1, 3, 6, 1, 4, 1, 1751, 2, 43, 2, 20, 4, 1, 1, 6), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("enable", 1), ("disable", 2)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cjnIpxRipIfStatsReset.setStatus('current')
if mibBuilder.loadTexts: cjnIpxRipIfStatsReset.setDescription('When set to the value enable (1) all IPX RIP statistics for this interface are reset to zero after which the value of this MIB object returns to disable(2).')
mibBuilder.exportSymbols("IPX-RIP-PRIVATE-MIB", cjnIpxRipIfStatEntry=cjnIpxRipIfStatEntry, cjnIpxRipGlobalGroup=cjnIpxRipGlobalGroup, cjnIpxRipFilterTicks=cjnIpxRipFilterTicks, cjnIpxRipIfAgeMultiplier=cjnIpxRipIfAgeMultiplier, cjnIpxRipEnabled=cjnIpxRipEnabled, cjnIpxRipFilterPrec=cjnIpxRipFilterPrec, cjnIpxRipFilterGroup=cjnIpxRipFilterGroup, cjnIpxRipIfStatRequestsReceived=cjnIpxRipIfStatRequestsReceived, cjnIpxRipFilterNetEnd=cjnIpxRipFilterNetEnd, cjnIpxRipIfStatGroup=cjnIpxRipIfStatGroup, cjnIpxRipIfEntry=cjnIpxRipIfEntry, cjnIpxRipIfInterpacketGap=cjnIpxRipIfInterpacketGap, cjnIpxRipFilterRowStatus=cjnIpxRipFilterRowStatus, cjnIpxRipFilterTable=cjnIpxRipFilterTable, cjnIpxRipIfAdvertiseDefaultRouteOnly=cjnIpxRipIfAdvertiseDefaultRouteOnly, cjnIpxRipIfUpdateInterval=cjnIpxRipIfUpdateInterval, cjnIpxRipFilterEntry=cjnIpxRipFilterEntry, cjnIpxRipIfStatTriggeredUpdatesSent=cjnIpxRipIfStatTriggeredUpdatesSent, cjnIpxRipFilterHops=cjnIpxRipFilterHops, cjnIpxRipIfMode=cjnIpxRipIfMode, cjnIpxRipIfStatPeriodicUpdatesSent=cjnIpxRipIfStatPeriodicUpdatesSent, FilterPrec=FilterPrec, PYSNMP_MODULE_ID=cjnIpxRip, cjnIpxRipFilterAction=cjnIpxRipFilterAction, cjnIpxRip=cjnIpxRip, cjnIpxRipIfUseMaximumPacketSize=cjnIpxRipIfUseMaximumPacketSize, cjnIpxRipIfStatUpdatesReceived=cjnIpxRipIfStatUpdatesReceived, cjnIpxRipIfTriggeredUpdates=cjnIpxRipIfTriggeredUpdates, cjnIpxRipIfTable=cjnIpxRipIfTable, cjnIpxRipIfStatTable=cjnIpxRipIfStatTable, cjnIpxRipFilterNetStart=cjnIpxRipFilterNetStart, cjnIpxRipIfStatBadPacketsReceived=cjnIpxRipIfStatBadPacketsReceived, cjnIpxRipFilterDirection=cjnIpxRipFilterDirection, cjnIpxRipIfGroup=cjnIpxRipIfGroup, cjnIpxRipIfStatsReset=cjnIpxRipIfStatsReset, cjnIpxRipIfRowStatus=cjnIpxRipIfRowStatus)
| 141.290598
| 1,742
| 0.788156
|
acfdb167142d4b60b476801c5c824005c83c40c9
| 7,093
|
py
|
Python
|
classifier.py
|
guybartal/Binha
|
ca08d4d19f83b03dd2ba005a80967631c76cbb29
|
[
"MIT"
] | null | null | null |
classifier.py
|
guybartal/Binha
|
ca08d4d19f83b03dd2ba005a80967631c76cbb29
|
[
"MIT"
] | null | null | null |
classifier.py
|
guybartal/Binha
|
ca08d4d19f83b03dd2ba005a80967631c76cbb29
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# -------------------------------------------------------------
"""
Skeleton code showing how to load and run the TensorFlow SavedModel export package from Lobe.
"""
import argparse
from publisher import Publisher
import os
import json
import tensorflow as tf
from PIL import Image
import numpy as np
import cv2
import time
MODEL_DIR = os.path.join(os.path.dirname(__file__), "..") # default assume that our export is in this file's parent directory
def gstreamer_pipeline(
    capture_width=1280,
    capture_height=720,
    display_width=1280,
    display_height=720,
    framerate=60,
    flip_method=0,
):
    """Build the GStreamer launch string for an NVIDIA CSI camera.

    The pipeline captures NV12 frames from ``nvarguscamerasrc``, optionally
    flips/rotates them with ``nvvidconv``, and converts to BGR for OpenCV's
    ``appsink`` consumer.

    Args:
        capture_width/capture_height: sensor capture resolution.
        display_width/display_height: resolution delivered to the appsink.
        framerate: capture frame rate (fraction numerator, denominator is 1).
        flip_method: nvvidconv flip-method code (0 = no flip).

    Returns:
        The pipeline description string for ``cv2.VideoCapture(..., cv2.CAP_GSTREAMER)``.
    """
    return (
        "nvarguscamerasrc ! "
        "video/x-raw(memory:NVMM), "
        f"width=(int){capture_width}, height=(int){capture_height}, "
        f"format=(string)NV12, framerate=(fraction){framerate}/1 ! "
        f"nvvidconv flip-method={flip_method} ! "
        f"video/x-raw, width=(int){display_width}, height=(int){display_height}, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
    )
class Model(object):
    """Wrapper around a Lobe TensorFlow SavedModel export.

    Loads the export's ``signature.json`` to discover input/output tensor
    names, lazily creates a TF1-style session, and runs single-image
    predictions.  Also owns a ``Publisher`` used to broadcast results.
    """

    def __init__(self, model_dir=MODEL_DIR):
        """Validate the export directory and read its signature file.

        Args:
            model_dir: directory containing the SavedModel and signature.json.

        Raises:
            ValueError: if the directory does not exist.
        """
        # make sure our exported SavedModel folder exists
        model_path = os.path.realpath(model_dir)
        if not os.path.exists(model_path):
            raise ValueError(f"Exported model folder doesn't exist {model_dir}")
        self.model_path = model_path
        # load our signature json file, this shows us the model inputs and outputs
        # you should open this file and take a look at the inputs/outputs to see their data types, shapes, and names
        with open(os.path.join(model_path, "signature.json"), "r") as f:
            self.signature = json.load(f)
        self.inputs = self.signature.get("inputs")
        self.outputs = self.signature.get("outputs")
        # placeholder for the tensorflow session (created lazily in load())
        self.session = None
        self.publisher = Publisher()

    def load(self):
        """(Re)create the TF session and load the SavedModel into it."""
        self.cleanup()
        # BUG FIX: use the tf.compat.v1 namespace consistently.  tf.GPUOptions
        # and tf.ConfigProto were removed from the top-level API in
        # TensorFlow 2.x, while the rest of this class already uses
        # tf.compat.v1 — mixing the two breaks under TF2.
        # Cap GPU memory so the model shares the device with other processes.
        gpu_options = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)
        self.session = tf.compat.v1.Session(
            graph=tf.Graph(),
            config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
        # load our model into the session
        tf.compat.v1.saved_model.loader.load(sess=self.session, tags=self.signature.get("tags"), export_dir=self.model_path)

    def predict(self, image: Image.Image):
        """Run the model on one PIL image and return a dict of outputs.

        The image is center-cropped to a square, resized to the model's input
        resolution, and scaled to [0, 1] floats before inference.

        Args:
            image: RGB PIL image of any size.

        Returns:
            Mapping from signature output name to the (batch-0) value.

        Raises:
            ValueError: if the signature has no "Image" input.
        """
        # load the model if we don't have a session
        if self.session is None:
            self.load()
        # get the image width and height
        width, height = image.size
        # center crop image (you can substitute any other method to make a square image, such as just resizing or padding edges with 0)
        if width != height:
            square_size = min(width, height)
            left = (width - square_size) / 2
            top = (height - square_size) / 2
            right = (width + square_size) / 2
            bottom = (height + square_size) / 2
            # Crop the center of the image
            image = image.crop((left, top, right, bottom))
        # now the image is square, resize it to be the right shape for the model input
        if "Image" not in self.inputs:
            raise ValueError("Couldn't find Image in model inputs - please report issue to Lobe!")
        input_width, input_height = self.inputs["Image"]["shape"][1:3]
        if image.width != input_width or image.height != input_height:
            image = image.resize((input_width, input_height))
        # make 0-1 float instead of 0-255 int (that PIL Image loads by default)
        image = np.asarray(image) / 255.0
        # create the feed dictionary that is the input to the model
        # first, add our image to the dictionary (comes from our signature.json file)
        feed_dict = {self.inputs["Image"]["name"]: [image]}
        # list the outputs we want from the model -- these come from our signature.json file
        # since we are using dictionaries that could have different orders, make tuples of (key, name) to keep track for putting
        # the results back together in a dictionary
        fetches = [(key, output["name"]) for key, output in self.outputs.items()]
        # run the model! there will be as many outputs from session.run as you have in the fetches list
        outputs = self.session.run(fetches=[name for _, name in fetches], feed_dict=feed_dict)
        # do a bit of postprocessing
        results = {}
        # since we actually ran on a batch of size 1, index out the items from the returned numpy arrays
        for i, (key, _) in enumerate(fetches):
            val = outputs[i].tolist()[0]
            if isinstance(val, bytes):
                val = val.decode()
            results[key] = val
        return results

    def publish(self, msg):
        """Forward *msg* to the configured Publisher."""
        self.publisher.publish(msg)

    def cleanup(self):
        """Close the tensorflow session, if one exists."""
        if self.session is not None:
            self.session.close()
            self.session = None

    def __del__(self):
        self.cleanup()
if __name__ == "__main__":
    # Command-line entry point: grab frames from the CSI camera, classify each
    # one, publish the prediction, and periodically save an annotated frame.
    parser = argparse.ArgumentParser(description="Predict labels from csi camera.")
    parser.add_argument("--model_dir", type=str, dest='model_dir', default='/data/models/tf/hands', help="your model directory.")
    # BUG FIX: help text was copy-pasted from --model_dir.
    parser.add_argument("--output_dir", type=str, dest='output_dir', default='/data/output', help="directory where annotated frames are written.")
    args = parser.parse_args()
    model = Model(args.model_dir)
    model.publish({"Prediction": "loading"})
    print("Loading Model")
    model.load()
    print("Starting Video Capture")
    # For a USB camera use: cv2.VideoCapture(0)
    print(gstreamer_pipeline(flip_method=0))
    videoCapture = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    i = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
    timeA = time.time()
    try:
        while True:
            ret, image = videoCapture.read()
            # BUG FIX: the original never checked ret, so a failed grab crashed
            # cv2.cvtColor with a None frame.
            if not ret:
                print("Frame capture failed; stopping.")
                break
            # OpenCV delivers BGR; the model expects RGB.
            image_RGB = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            image_pil = Image.fromarray(image_RGB)
            outputs = model.predict(image_pil)
            print(f"Predicted: {outputs}")
            model.publish(outputs)
            # Save one annotated frame every 30 iterations.
            if i % 30 == 0:
                cv2.putText(image, str(outputs), (20, 20), font, .5, (255, 255, 255), 2, cv2.LINE_AA)
                file_name = f'{args.output_dir}/frame{i}_{outputs["Prediction"]}.jpg'
                print(f'storing file {file_name}')
                cv2.imwrite(file_name, image)
            i += 1
            timeB = time.time()
            # BUG FIX: time.time() differences are seconds, not microseconds —
            # the original label "Microsec" was wrong.
            print("Elapsed Time Per Frame: {:.1f} ms".format((timeB - timeA) * 1000))
            timeA = timeB
    finally:
        # Release the camera even if the loop exits via an exception.
        videoCapture.release()
| 41.479532
| 136
| 0.610743
|
acfdb18bfe00e2a5f8635bee4d3f21064a4edb59
| 7,801
|
py
|
Python
|
.history/src/data/data_20191028100503.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
.history/src/data/data_20191028100503.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
.history/src/data/data_20191028100503.py
|
bkraft4257/kaggle_titanic
|
f29ea1773773109a867278c001dbd21a9f7b21dd
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
from typing import Union
from pathlib import Path
from nameparser import HumanName
class ExtractData:
    """Load Kaggle Titanic training data from a CSV file into a DataFrame."""

    def __init__(self, filename: Union[str, Path], age_bins=None, drop_columns=None):
        """Read the CSV at *filename* and store the raw frame on self.Xy_raw.

        Arguments:
            filename {[str]} -- Filename of CSV data file containing data.
            drop_columns -- Columns in dataframe that should be dropped.
        """
        # Default columns that downstream cleaning is expected to drop.
        self.drop_columns = (
            ["age", "cabin", "name", "ticket"] if drop_columns is None else drop_columns
        )
        self.filename = filename
        self.all_label_columns = ["survived"]
        self.all_feature_columns = [
            "pclass",
            "name",
            "sex",
            "age",
            "sibsp",
            "parch",
            "ticket",
            "fare",
            "cabin",
            "embarked",
        ]
        self.Xy_raw = None
        self.extract_raw()

    def extract_raw(self):
        """
        Extracts data from a CSV file.
        Returns:
            pd.DataFrame -- [description]
        """
        frame = pd.read_csv(self.filename)
        # Normalize headers to snake_case so downstream code can rely on them.
        frame.columns = frame.columns.str.lower().str.replace(" ", "_")
        # "age" is renamed because an estimated "age" column is derived later.
        frame = frame.rename(columns={"age": "age_known"})
        frame["pclass"] = frame["pclass"].astype("category")
        self.Xy_raw = frame.set_index("passengerid")
class TransformData:
    """
    TransformData takes the raw extracted data, cleans it, and creates new
    features before returning a new dataframe.

    The training and test data contain the following:
    * 1 Lady. She was traveling with a sibling and no husband. Set title to Miss
    * 2 Mlle and 1 Mme. All 3 were 24 years old and travelling alone. Retitled as Miss.
    * 1 Sir. Male 49 years old. Travelling with a sibling.
    * Revs were all males.
    * 8 Drs. (7 male, 1 female) changed to Mr. and Mrs. respectively.
    """

    # Canonical honorific mapping.  "Dr." maps to NaN on purpose: the NaN is
    # later back-filled from the passenger's sex (see extract_title).
    title_translator = {
        "Mlle.": "Miss.",
        "Mme.": "Miss.",
        "Sir.": "Mr.",
        "Ms.": "Mrs.",
        "Rev.": "Mr.",
        "Col.": "Mr.",
        "Capt.": "Mr.",
        "Lady.": "Miss.",
        "the Countess. of": "Mrs.",
        "Dr.": np.nan,
    }

    def __init__(
        self,
        raw_data,
        adult_age_threshold_min=13,
        age_bins=None,
        fare_mode=None,
        embarked_mode=None,
        Xy_age_estimate=None,
        drop_columns=None,
    ):
        """Build the transformed frame self.Xy from an ExtractData instance.

        Arguments:
            raw_data -- ExtractData instance exposing .Xy_raw.
            adult_age_threshold_min -- age below which a passenger is a child.
            age_bins -- bin edges for the age_bin feature (default decades).
            fare_mode / embarked_mode -- imputation values; computed from the
                data when None (pass the training values for a holdout set).
            Xy_age_estimate -- precomputed per-group age estimates, or None.
            drop_columns -- Columns that clean() should drop from the frame.
        """
        if age_bins is None:
            age_bins = [0, 10, 20, 30, 40, 50, 60, np.inf]
        self.raw = raw_data
        self.adult_age_threshold_min = adult_age_threshold_min
        self.drop_columns = drop_columns
        self.Xy_age_estimate = Xy_age_estimate
        self.age_bins = age_bins
        # Work on a copy so the raw frame stays pristine.
        self.Xy = self.raw.Xy_raw.copy()
        if fare_mode is None:
            fare_mode = self.Xy["fare"].mode()[0]
        if embarked_mode is None:
            embarked_mode = self.Xy["embarked"].mode()[0]
        self.fare_mode = fare_mode
        self.embarked_mode = embarked_mode
        # Feature pipeline.  Order matters: titles are needed by estimate_age,
        # and ages are needed by the bin/child features.
        self.impute_missing_fare()
        self.impute_missing_embarked()
        self.extract_title()
        self.extract_last_name()
        self.extract_cabin_number()
        self.extract_cabin_prefix()
        self.calc_family_size()
        self.estimate_age()
        self.calc_age_bins()
        self.calc_is_child()
        self.calc_is_travelling_alone()
        self.Xy = self.Xy.sort_index()

    def calc_family_size(self):
        """Create feature family size, which is the number of people
        (including self) that are traveling together."""
        self.Xy["family_size"] = self.Xy.sibsp + self.Xy.parch + 1

    def calc_is_travelling_alone(self):
        """Create Boolean feature: True when the passenger travels alone."""
        self.Xy["is_travelling_alone"] = self.Xy["family_size"] == 1

    def calc_is_child(self):
        """Create Boolean feature: True when age < adult_age_threshold_min."""
        self.Xy["is_child"] = self.Xy.age < self.adult_age_threshold_min

    def extract_cabin_number(self):
        """Extract the trailing digits of the ticket as cabin_number."""
        # Raw string avoids the invalid-escape DeprecationWarning of "(\d+)$".
        self.Xy["cabin_number"] = self.Xy.ticket.str.extract(r"(\d+)$")

    def extract_cabin_prefix(self):
        """Extract the leading (pre-space) portion of the ticket as cabin_prefix."""
        self.Xy["cabin_prefix"] = self.Xy.ticket.str.extract(r"^(.+) ")

    def extract_title(self):
        """Extract title from the name using nameparser.

        If the title is empty it is filled with Mr or Mrs depending on sex.
        This is adequate for the train and holdout data sets (the empty title
        only occurs for passenger 1306 in the holdout set); a more robust
        approach would also consider age.
        """
        title = (
            self.Xy.name.apply(lambda x: HumanName(x).title)
            .replace(self.title_translator)
            .replace({r"\.": ""}, regex=True)   # strip trailing periods
            .replace({"": np.nan})
            .fillna(self.Xy["sex"])             # back-fill Dr./empty from sex
            .replace({"female": "Mrs", "male": "Mr"})
        )
        self.Xy["title"] = title

    def extract_last_name(self):
        """Extracts last name from name feature using nameparser."""
        self.Xy["last_name"] = self.Xy.name.apply(lambda x: HumanName(x).last)

    def calc_age_bins(self):
        """Bucket age into self.age_bins as the age_bin feature."""
        self.Xy["age_bin"] = pd.cut(self.Xy.age, bins=self.age_bins)

    def clean(self):
        """Drop self.drop_columns from the working frame.

        Arguments:
            in_raw_df {pd.DataFrame} -- Dataframe containing all columns and
            rows of the Kaggle Titanic training data set.
        """
        # BUG FIX: the original dropped from self.Xy_raw, an attribute that
        # does not exist on TransformData (the raw frame lives on
        # self.raw.Xy_raw), so clean() always raised AttributeError.
        self.Xy = self.Xy.drop(self.drop_columns, axis=1)

    def estimate_age(self, groupby_columns=None):
        """Estimate age of passengers whose age is unknown, per group.

        Keyword Arguments:
            groupby_columns {list} -- columns defining the estimation groups
            (default: ["sex", "title"]).
        """
        # Avoid a mutable default argument; behavior is unchanged.
        if groupby_columns is None:
            groupby_columns = ["sex", "title"]
        if self.Xy_age_estimate is None:
            self.Xy_age_estimate = (
                self.Xy.groupby(groupby_columns).age_known.mean().to_frame().round(1)
            )
        self.Xy_age_estimate = self.Xy_age_estimate.rename(
            columns={"age_known": "age_estimate"}
        )
        out_df = (
            self.Xy.reset_index()
            .merge(self.Xy_age_estimate, on=groupby_columns)
            .set_index("passengerid")
        )
        # Known ages win; estimates only fill the gaps.
        out_df["age"] = out_df["age_known"].fillna(out_df["age_estimate"])
        self.Xy = out_df

    def impute_missing_fare(self):
        """Impute missing fares with the most frequent fare.

        Could be improved by also conditioning on party size and pclass.
        """
        self.Xy["fare"] = self.Xy["fare"].fillna(self.fare_mode)

    def impute_missing_embarked(self):
        """Impute missing embarkation ports with the most frequent port."""
        self.Xy["embarked"] = self.Xy["embarked"].fillna(self.embarked_mode)
| 32.777311
| 116
| 0.590822
|
acfdb30e2376af410b7c02630f9effff66200fbd
| 7,380
|
py
|
Python
|
mmdet/core/anchor/anchor_target.py
|
LiGangszu/PedestrianDetection-HGPD
|
3874e331c8afe4cc20fc49de7ebdbe77db277c98
|
[
"Apache-2.0"
] | 9
|
2021-04-02T12:21:38.000Z
|
2021-08-19T07:55:19.000Z
|
mmdet/core/anchor/anchor_target.py
|
LiGangszu/PedestrianDetection-HGPD
|
3874e331c8afe4cc20fc49de7ebdbe77db277c98
|
[
"Apache-2.0"
] | 1
|
2021-05-02T18:34:06.000Z
|
2021-05-12T04:04:57.000Z
|
mmdet/core/anchor/anchor_target.py
|
LiGangszu/PedestrianDetection-HGPD
|
3874e331c8afe4cc20fc49de7ebdbe77db277c98
|
[
"Apache-2.0"
] | 2
|
2021-04-28T09:27:45.000Z
|
2021-06-07T12:02:01.000Z
|
import torch
from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner
from ..utils import multi_apply
import pdb
def anchor_target(anchor_list,
                  valid_flag_list,
                  gt_bboxes_list,
                  img_metas,
                  target_means,
                  target_stds,
                  cfg,
                  gt_bboxes_ignore_list=None,
                  gt_labels_list=None,
                  label_channels=1,
                  sampling=True,
                  unmap_outputs=True):
    """Compute regression and classification targets for anchors.

    Args:
        anchor_list (list[list]): Multi level anchors of each image.
        valid_flag_list (list[list]): Multi level valid flags of each image.
        gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
        img_metas (list[dict]): Meta info of each image.
        target_means (Iterable): Mean value of regression targets.
        target_stds (Iterable): Std value of regression targets.
        cfg (dict): RPN train configs.
        gt_bboxes_ignore_list (list | None): per-image ignored gt boxes.
        gt_labels_list (list | None): per-image gt labels (None for RPN).
        label_channels (int): channels of the classification target.
        sampling (bool): whether to sample pos/neg anchors.
        unmap_outputs (bool): map targets back to the full anchor set.

    Returns:
        tuple: (labels_list, label_weights_list, bbox_targets_list,
            bbox_weights_list, num_total_pos, num_total_neg), or None when an
            image has no valid anchors.
    """
    num_imgs = len(img_metas)
    assert len(anchor_list) == len(valid_flag_list) == num_imgs
    # anchor number of multi levels (assumed identical across images)
    num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]
    # concat all level anchors and flags to a single tensor per image
    for i in range(num_imgs):
        assert len(anchor_list[i]) == len(valid_flag_list[i])
        anchor_list[i] = torch.cat(anchor_list[i])
        valid_flag_list[i] = torch.cat(valid_flag_list[i])
    # compute targets for each image; None entries are immutable, so sharing
    # one value via list multiplication is safe here
    if gt_bboxes_ignore_list is None:
        gt_bboxes_ignore_list = [None] * num_imgs
    if gt_labels_list is None:
        gt_labels_list = [None] * num_imgs
    (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,
     pos_inds_list, neg_inds_list) = multi_apply(
         anchor_target_single,
         anchor_list,
         valid_flag_list,
         gt_bboxes_list,
         gt_bboxes_ignore_list,
         gt_labels_list,
         img_metas,
         target_means=target_means,
         target_stds=target_stds,
         cfg=cfg,
         label_channels=label_channels,
         sampling=sampling,
         unmap_outputs=unmap_outputs)
    # no valid anchors in some image -> abort (generator avoids a temp list)
    if any(labels is None for labels in all_labels):
        return None
    # sampled anchors of all images; clamp to 1 to avoid division by zero in
    # downstream loss averaging
    num_total_pos = sum(max(inds.numel(), 1) for inds in pos_inds_list)
    num_total_neg = sum(max(inds.numel(), 1) for inds in neg_inds_list)
    # split targets to a list w.r.t. multiple levels
    labels_list = images_to_levels(all_labels, num_level_anchors)
    label_weights_list = images_to_levels(all_label_weights, num_level_anchors)
    bbox_targets_list = images_to_levels(all_bbox_targets, num_level_anchors)
    bbox_weights_list = images_to_levels(all_bbox_weights, num_level_anchors)
    return (labels_list, label_weights_list, bbox_targets_list,
            bbox_weights_list, num_total_pos, num_total_neg)
def images_to_levels(target, num_level_anchors):
    """Convert targets by image to targets by feature level.

    [target_img0, target_img1] -> [target_level0, target_level1, ...]
    """
    stacked = torch.stack(target, 0)
    level_targets = []
    begin = 0
    for count in num_level_anchors:
        # slice out this level's anchors across all images; squeeze(0) drops
        # the image dimension when there is only a single image
        level_targets.append(stacked[:, begin:begin + count].squeeze(0))
        begin += count
    return level_targets
def anchor_target_single(flat_anchors,
                         valid_flags,
                         gt_bboxes,
                         gt_bboxes_ignore,
                         gt_labels,
                         img_meta,
                         target_means,
                         target_stds,
                         cfg,
                         label_channels=1,
                         sampling=True,
                         unmap_outputs=True):
    """Compute classification/regression targets for the anchors of ONE image.

    Returns a 6-tuple (labels, label_weights, bbox_targets, bbox_weights,
    pos_inds, neg_inds), or (None,)*6 when no anchor lies inside the image.
    """
    # keep only anchors inside the image (plus cfg.allowed_border margin)
    inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
                                       img_meta['img_shape'][:2],
                                       cfg.allowed_border)
    if not inside_flags.any():
        return (None, ) * 6
    # assign gt and sample anchors
    anchors = flat_anchors[inside_flags.type(torch.bool), :]
    if sampling:
        # combined assign + pos/neg sampling per cfg
        assign_result, sampling_result = assign_and_sample(
            anchors, gt_bboxes, gt_bboxes_ignore, None, cfg)
    else:
        # assignment only; PseudoSampler keeps every assigned anchor
        bbox_assigner = build_assigner(cfg.assigner)
        assign_result = bbox_assigner.assign(anchors, gt_bboxes,
                                             gt_bboxes_ignore, gt_labels)
        bbox_sampler = PseudoSampler()
        sampling_result = bbox_sampler.sample(assign_result, anchors,
                                              gt_bboxes)
    num_valid_anchors = anchors.shape[0]
    # targets/weights default to zero; only sampled anchors get filled in
    bbox_targets = torch.zeros_like(anchors)
    bbox_weights = torch.zeros_like(anchors)
    labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long)
    label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
    pos_inds = sampling_result.pos_inds
    neg_inds = sampling_result.neg_inds
    if len(pos_inds) > 0:
        # encode positive boxes as normalized deltas w.r.t. their matched gt
        pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes,
                                      sampling_result.pos_gt_bboxes,
                                      target_means, target_stds)
        bbox_targets[pos_inds, :] = pos_bbox_targets
        bbox_weights[pos_inds, :] = 1.0
        if gt_labels is None:
            # RPN case: binary objectness label
            labels[pos_inds] = 1
        else:
            labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]
        # cfg.pos_weight <= 0 means "use the default weight of 1"
        if cfg.pos_weight <= 0:
            label_weights[pos_inds] = 1.0
        else:
            label_weights[pos_inds] = cfg.pos_weight
    if len(neg_inds) > 0:
        label_weights[neg_inds] = 1.0
    # map up to original set of anchors
    if unmap_outputs:
        num_total_anchors = flat_anchors.size(0)
        labels = unmap(labels, num_total_anchors, inside_flags)
        label_weights = unmap(label_weights, num_total_anchors, inside_flags)
        bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
        bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)
    return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,
            neg_inds)
def anchor_inside_flags(flat_anchors,
                        valid_flags,
                        img_shape,
                        allowed_border=0):
    """Flag anchors lying inside the image expanded by ``allowed_border``.

    A negative ``allowed_border`` disables the border check entirely and
    simply returns ``valid_flags``.
    """
    img_h, img_w = img_shape[:2]
    if allowed_border < 0:
        # Border check disabled: every valid anchor counts as inside.
        return valid_flags
    x1, y1, x2, y2 = (flat_anchors[:, k] for k in range(4))
    inside_flags = valid_flags \
        & (x1 >= -allowed_border).type(torch.uint8) \
        & (y1 >= -allowed_border).type(torch.uint8) \
        & (x2 < img_w + allowed_border).type(torch.uint8) \
        & (y2 < img_h + allowed_border).type(torch.uint8)
    return inside_flags
def unmap(data, count, inds, fill=0):
    """Scatter `data` back into a fresh tensor with `count` rows.

    Entries of `data` land at the positions where `inds` is set; every
    other position receives `fill`.
    """
    mask = inds.type(torch.bool)
    if data.dim() == 1:
        full = data.new_full((count, ), fill)
        full[mask] = data
    else:
        full = data.new_full((count, ) + data.size()[1:], fill)
        full[mask, :] = data
    return full
| 38.842105
| 79
| 0.616802
|
acfdb4510a793803e1496df55b890ddca807bd7a
| 9,848
|
py
|
Python
|
sdk/python/pulumi_azure/databasemigration/project.py
|
suresh198526/pulumi-azure
|
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/databasemigration/project.py
|
suresh198526/pulumi-azure
|
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure/databasemigration/project.py
|
suresh198526/pulumi-azure
|
bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['Project']
class Project(pulumi.CustomResource):
    # NOTE: generated by the Pulumi Terraform Bridge (see file header); edits
    # here are normally overwritten on regeneration.
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 service_name: Optional[pulumi.Input[str]] = None,
                 source_platform: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 target_platform: Optional[pulumi.Input[str]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Manage a Azure Database Migration Project.

        > **NOTE:** Destroying a Database Migration Project will leave any outstanding tasks untouched. This is to avoid unexpectedly deleting any tasks managed outside of this provider.

        ## Import

        Database Migration Projects can be imported using the `resource id`, e.g.

        ```sh
         $ pulumi import azure:databasemigration/project:Project example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example-rg/providers/Microsoft.DataMigration/services/example-dms/projects/project1
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specify the name of the database migration project. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: Name of the resource group in which to create the database migration project. Changing this forces a new resource to be created.
        :param pulumi.Input[str] service_name: Name of the database migration service where resource belongs to. Changing this forces a new resource to be created.
        :param pulumi.Input[str] source_platform: The platform type of the migration source. Currently only support: `SQL`(on-premises SQL Server). Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assigned to the resource.
        :param pulumi.Input[str] target_platform: The platform type of the migration target. Currently only support: `SQLDB`(Azure SQL Database). Changing this forces a new resource to be created.
        """
        # __name__ / __opts__ are deprecated positional aliases kept only for
        # backward compatibility with older generated SDKs.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # No opts.id: register a new resource; with an id, __props__ comes
        # pre-built from get() and the engine looks up the existing resource.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['location'] = location
            __props__['name'] = name
            # The following four properties are required by the provider.
            if resource_group_name is None:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__['resource_group_name'] = resource_group_name
            if service_name is None:
                raise TypeError("Missing required property 'service_name'")
            __props__['service_name'] = service_name
            if source_platform is None:
                raise TypeError("Missing required property 'source_platform'")
            __props__['source_platform'] = source_platform
            __props__['tags'] = tags
            if target_platform is None:
                raise TypeError("Missing required property 'target_platform'")
            __props__['target_platform'] = target_platform
        super(Project, __self__).__init__(
            'azure:databasemigration/project:Project',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            location: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None,
            service_name: Optional[pulumi.Input[str]] = None,
            source_platform: Optional[pulumi.Input[str]] = None,
            tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            target_platform: Optional[pulumi.Input[str]] = None) -> 'Project':
        """
        Get an existing Project resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specify the name of the database migration project. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: Name of the resource group in which to create the database migration project. Changing this forces a new resource to be created.
        :param pulumi.Input[str] service_name: Name of the database migration service where resource belongs to. Changing this forces a new resource to be created.
        :param pulumi.Input[str] source_platform: The platform type of the migration source. Currently only support: `SQL`(on-premises SQL Server). Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assigned to the resource.
        :param pulumi.Input[str] target_platform: The platform type of the migration target. Currently only support: `SQLDB`(Azure SQL Database). Changing this forces a new resource to be created.
        """
        # Merging the id into opts signals __init__ to look up existing state
        # instead of creating a new resource.
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["location"] = location
        __props__["name"] = name
        __props__["resource_group_name"] = resource_group_name
        __props__["service_name"] = service_name
        __props__["source_platform"] = source_platform
        __props__["tags"] = tags
        __props__["target_platform"] = target_platform
        return Project(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def location(self) -> pulumi.Output[str]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Specify the name of the database migration project. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        Name of the resource group in which to create the database migration project. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> pulumi.Output[str]:
        """
        Name of the database migration service where resource belongs to. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "service_name")
    @property
    @pulumi.getter(name="sourcePlatform")
    def source_platform(self) -> pulumi.Output[str]:
        """
        The platform type of the migration source. Currently only support: `SQL`(on-premises SQL Server). Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "source_platform")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        A mapping of tags to assigned to the resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter(name="targetPlatform")
    def target_platform(self) -> pulumi.Output[str]:
        """
        The platform type of the migration target. Currently only support: `SQLDB`(Azure SQL Database). Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "target_platform")
    def translate_output_property(self, prop):
        # Map provider camelCase property names to snake_case attributes.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
    def translate_input_property(self, prop):
        # Map snake_case attributes back to provider camelCase names.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 51.831579
| 223
| 0.67171
|
acfdb46da4f5f788ad6a1a8749bd7ea3878b09d0
| 1,085
|
py
|
Python
|
pyburstlib/wallet_api/utils.py
|
beatsbears/pyburstlib
|
27722f7f3bb0bc5739110f6c99435d13fa54a1e0
|
[
"MIT"
] | 7
|
2018-03-24T17:26:27.000Z
|
2020-06-09T10:38:44.000Z
|
pyburstlib/wallet_api/utils.py
|
MrPilotMan/pyburstlib
|
27722f7f3bb0bc5739110f6c99435d13fa54a1e0
|
[
"MIT"
] | 2
|
2019-09-09T17:06:43.000Z
|
2021-06-01T21:20:08.000Z
|
pyburstlib/wallet_api/utils.py
|
MrPilotMan/pyburstlib
|
27722f7f3bb0bc5739110f6c99435d13fa54a1e0
|
[
"MIT"
] | 4
|
2018-04-10T12:50:55.000Z
|
2022-03-14T20:30:01.000Z
|
'''
pyburstlib
:author: drownedcoast
:date: 4-26-2018
'''
from json import loads
from pyburstlib.wallet_api.base import BaseApi
from pyburstlib.wallet_api.models.utils import *
from pyburstlib.constants import BASE_WALLET_PATH
class UtilsApi(BaseApi):
    '''Wrapper around the wallet's utility ("requestType") endpoints.'''
    def rs_convert(self, account_id=None):
        '''
        Converts from numeric id to account address (rs format).

        :param account_id: Numeric ID for the account (required)
        :type account_id: str
        :returns: An instance of :class:`AccountRS`
        '''
        response = self._client.post(uri=BASE_WALLET_PATH,
                                     params={'requestType': 'rsConvert',
                                             'account': account_id})
        return AccountRS.from_json(response.text)
    def long_convert(self, id=None):
        '''
        Issues the wallet's ``longConvert`` request for the given id.
        Presumably converts an unsigned numeric id to its signed long
        representation -- verify against the BURST wallet API docs.

        :param id: Numeric ID to convert (required)
        :type id: str
        :returns: An instance of :class:`AccountLong`
        '''
        response = self._client.post(uri=BASE_WALLET_PATH,
                                     params={'requestType': 'longConvert',
                                             'id': id})
        return AccountLong.from_json(response.text)
| 31
| 72
| 0.584332
|
acfdb54b25040463e71a95ddffabb3fa61d75efb
| 158
|
py
|
Python
|
src/keyboard/views.py
|
myth/overflow
|
269f950b6584b327832deb9f9309c2eea527612b
|
[
"MIT"
] | 4
|
2018-08-21T05:33:40.000Z
|
2019-05-06T09:03:06.000Z
|
src/keyboard/views.py
|
myth/overflow
|
269f950b6584b327832deb9f9309c2eea527612b
|
[
"MIT"
] | 1
|
2020-08-09T10:33:58.000Z
|
2020-08-09T10:33:58.000Z
|
src/keyboard/views.py
|
myth/overflow
|
269f950b6584b327832deb9f9309c2eea527612b
|
[
"MIT"
] | 1
|
2019-05-06T13:33:06.000Z
|
2019-05-06T13:33:06.000Z
|
"""
Keyboard views
"""
from django.views.generic.base import TemplateView
class KeyboardIndexView(TemplateView):
    """Render the keyboard app's static index template."""
    template_name = 'keyboard/index.html'
| 15.8
| 50
| 0.765823
|
acfdb5555d92d85b2ccec052f6f0ba7fae7ec838
| 9,320
|
py
|
Python
|
diofant/integrals/rationaltools.py
|
diofant/omg
|
72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/integrals/rationaltools.py
|
diofant/omg
|
72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2
|
[
"BSD-3-Clause"
] | null | null | null |
diofant/integrals/rationaltools.py
|
diofant/omg
|
72fd45f832240d1ded6f0a411e97bb9f7aa9f1d2
|
[
"BSD-3-Clause"
] | null | null | null |
"""This module implements tools for integrating rational functions."""
from ..core import Dummy, I, Integer, Lambda, Symbol, symbols, sympify
from ..domains import ZZ
from ..functions import atan, log
from ..polys import Poly, RootSum, cancel, resultant, roots
from ..simplify import collect
from ..solvers import solve
def ratint(f, x, **flags):
    """Performs indefinite integration of rational functions.

    Given a field `K` and a rational function `f = p/q`,
    where `p` and `q` are polynomials in `K[x]`,
    returns a function `g` such that `f = g'`.

    >>> ratint(36/(x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2), x)
    (12*x + 6)/(x**2 - 1) + 4*log(x - 2) - 4*log(x + 1)

    References
    ==========

    * :cite:`Bronstein2005integration`, pp. 35-70

    See Also
    ========

    diofant.integrals.integrals.Integral.doit
    ratint_logpart
    ratint_ratpart
    """
    # Accept either an expression or an already split (numerator, denominator).
    if type(f) is not tuple:
        p, q = f.as_numer_denom()
    else:
        p, q = f
    # Work over a field so cancellation and division below are exact.
    p, q = p.as_poly(x, composite=False, field=True), q.as_poly(x, composite=False, field=True)
    coeff, p, q = p.cancel(q)
    # Split off the polynomial part, which integrates term by term.
    poly, p = p.div(q)
    result = poly.integrate(x).as_expr()
    if p.is_zero:
        return coeff*result
    # Horowitz-Ostrogradsky: rational part g plus a remainder h = P/Q whose
    # denominator is square-free.
    g, h = ratint_ratpart(p, q, x)
    P, Q = h.as_numer_denom()
    P = P.as_poly(x)
    Q = Q.as_poly(x)
    q, r = P.div(Q)
    result += g + q.integrate(x).as_expr()
    if not r.is_zero:
        # Dummy variable representing the roots in the logarithmic part.
        symbol = flags.get('symbol', 't')
        if not isinstance(symbol, Symbol):
            t = Dummy(symbol)
        else:
            t = symbol.as_dummy()
        L = ratint_logpart(r, Q, x, t)
        # Decide whether the answer may be written with real functions only
        # (log/atan) or must keep complex logarithms.
        ereal = flags.get('extended_real')
        if ereal is None:
            if type(f) is not tuple:
                atoms = f.atoms()
            else:
                p, q = f
                atoms = p.atoms() | q.atoms()
            # Any non-real free symbol forces the complex form.
            for elt in atoms - {x}:
                if not elt.is_extended_real:
                    ereal = False
                    break
            else:
                ereal = True
        eps = Integer(0)
        if not ereal:
            for h, q in L:
                _, h = h.primitive()
                eps += RootSum(
                    q, Lambda(t, t*log(h.as_expr())), quadratic=True)
        else:
            for h, q in L:
                _, h = h.primitive()
                # Try to rewrite the complex logs as real logs/arctangents;
                # fall back to a RootSum when that is not possible.
                R = log_to_real(h, q, x, t)
                if R is not None:
                    eps += R
                else:
                    eps += RootSum(
                        q, Lambda(t, t*log(h.as_expr())), quadratic=True)
        result += eps
    return coeff*result
def ratint_ratpart(f, g, x):
    """Horowitz-Ostrogradsky algorithm.

    Given a field K and polynomials f and g in K[x], such that f and g
    are coprime and deg(f) < deg(g), returns fractions A and B in K(x),
    such that f/g = A' + B and B has square-free denominator.

    Examples
    ========

    >>> ratint_ratpart(1, x + 1, x)
    (0, 1/(x + 1))
    >>> ratint_ratpart(1, x**2 + y**2, x)
    (0, 1/(x**2 + y**2))
    >>> ratint_ratpart(36, x**5 - 2*x**4 - 2*x**3 + 4*x**2 + x - 2, x)
    ((12*x + 6)/(x**2 - 1), 12/(x**2 - x - 2))

    See Also
    ========

    ratint
    ratint_logpart
    """
    f = sympify(f).as_poly(x)
    g = sympify(g).as_poly(x)
    # u = gcd(g, g'); v = g/u is the square-free part of g.
    u, v, _ = g.cofactors(g.diff(x))
    n = u.degree()
    m = v.degree()
    # Undetermined coefficients: A is the numerator of the rational part
    # (over u), B the numerator of the log part (over v).
    A_coeffs = [Dummy('a' + str(n - i)) for i in range(n)]
    B_coeffs = [Dummy('b' + str(m - i)) for i in range(m)]
    C_coeffs = A_coeffs + B_coeffs
    A = Poly(A_coeffs, x, domain=ZZ.inject(*C_coeffs))
    B = Poly(B_coeffs, x, domain=ZZ.inject(*C_coeffs))
    # H == 0 encodes f/g = (A/u)' + B/v with denominators cleared; its
    # coefficients give a linear system in A_coeffs and B_coeffs.
    H = f - A.diff(x)*v + A*(u.diff(x)*v).quo(u) - B*u
    result = solve(H.coeffs(), C_coeffs)[0]
    A = A.as_expr().subs(result)
    B = B.as_expr().subs(result)
    rat_part = cancel(A/u.as_expr(), x)
    log_part = cancel(B/v.as_expr(), x)
    return rat_part, log_part
def ratint_logpart(f, g, x, t=None):
    r"""Lazard-Rioboo-Trager algorithm.

    Given a field K and polynomials f and g in K[x], such that f and g
    are coprime, deg(f) < deg(g) and g is square-free, returns a list
    of tuples (s_i, q_i) of polynomials, for i = 1..n, such that s_i
    in K[t, x] and q_i in K[t], and::

        d  f   d   ___       ___
        -- - = --  \  `      \  `    a log(s_i(a, x))
        dx g   dx  /__,      /__,
                  i=1..n  a | q_i(a) = 0

    Examples
    ========

    >>> ratint_logpart(1, x**2 + x + 1, x)
    [(Poly(x + 3*_t/2 + 1/2, x, domain='QQ[_t]'),
      Poly(3*_t**2 + 1, _t, domain='ZZ'))]
    >>> ratint_logpart(12, x**2 - x - 2, x)
    [(Poly(x - 3*_t/8 - 1/2, x, domain='QQ[_t]'),
      Poly(_t**2 - 16, _t, domain='ZZ'))]

    See Also
    ========

    ratint
    ratint_ratpart
    """
    f, g = sympify(f).as_poly(x), sympify(g).as_poly(x)
    t = t or Dummy('t')
    # The residues are the roots in t of res_x(g, f - t*g').
    a, b = g, f - g.diff(x)*t.as_poly(x)
    res, R = resultant(a, b, includePRS=True)
    res = res.as_poly(t, composite=False)
    assert res, f"BUG: resultant({a}, {b}) can't be zero"
    # Index the subresultant PRS elements by their degree in x.
    R_map, H = {}, []
    for r in R:
        R_map[r.degree()] = r
    def _include_sign(c, sqf):
        # Fold a negative content into the first square-free factor so the
        # factor list preserves the overall sign.
        if c.is_negative:
            h, k = sqf[0]
            sqf[0] = h*c, k
    C, res_sqf = res.sqf_list()
    _include_sign(C, res_sqf)
    for q, i in res_sqf:
        _, q = q.primitive()
        if g.degree() == i:
            # Degenerate case: the log argument is g itself.
            H.append((g, q))
        else:
            # Take the PRS element of matching degree and strip the spurious
            # factors contributed by its leading coefficient.
            h = R_map[i]
            h_lc = h.LC().as_poly(t, field=True)
            c, h_lc_sqf = h_lc.sqf_list()
            _include_sign(c, h_lc_sqf)
            for a, j in h_lc_sqf:
                h = h.quo((a.gcd(q)**j).as_poly(x))
            # Reduce the remaining coefficients modulo q via the inverse of
            # the leading coefficient.
            inv, coeffs = h_lc.invert(q), [Integer(1)]
            for coeff in h.coeffs()[1:]:
                T = (inv*coeff).rem(q)
                coeffs.append(T.as_expr())
            h = Poly(dict(zip(h.monoms(), coeffs)), x)
            H.append((h, q))
    return H
def log_to_atan(f, g):
    """Convert complex logarithms to real arctangents.

    Given a real field K and polynomials f and g in K[x], with g != 0,
    returns a sum h of arctangents of polynomials in K[x], such that:

        dh/dx = d/dx (I*log((f + I*g)/(f - I*g)))

    See Also
    ========

    log_to_real
    """
    # Ensure deg(f) >= deg(g); the reduction below relies on it.
    if f.degree() < g.degree():
        f, g = -g, f
    f, g = f.to_field(), g.to_field()
    quotient, remainder = f.div(g)
    if remainder.is_zero:
        # g divides f exactly, so a single arctangent suffices.
        return 2*atan(quotient.as_expr())
    # Extended Euclid step, then recurse on the smaller pair (s, t).
    s, t, h = g.gcdex(-f)
    u = (f*s + g*t).quo(h)
    return 2*atan(u.as_expr()) + log_to_atan(s, t)
def log_to_real(h, q, x, t):
    r"""Convert complex logarithms to real functions.

    Given real field K and polynomials h in K[t,x] and q in K[t],
    returns real function f such that::

        df   d   ___
        -- = --  \  `  a log(h(a, x))
        dx   dx  /__,
               a | q(a) = 0

    Returns None when some roots cannot be determined explicitly.

    Examples
    ========

    >>> log_to_real((x + 3*y/2 + Rational(1, 2)).as_poly(x),
    ...             (3*y**2 + 1).as_poly(y), x, y)
    2*sqrt(3)*atan(2*sqrt(3)*x/3 + sqrt(3)/3)/3
    >>> log_to_real((x**2 - 1).as_poly(), (-2*y + 1).as_poly(y), x, y)
    log(x**2 - 1)/2

    See Also
    ========

    log_to_atan
    """
    u, v = symbols('u,v', cls=Dummy)
    # Substitute t = u + I*v and separate real/imaginary parts:
    # h -> a + I*b and q -> c + I*d.
    H = h.as_expr().subs({t: u + I*v}).expand()
    Q = q.as_expr().subs({t: u + I*v}).expand()
    H_map = collect(H, I, evaluate=False)
    Q_map = collect(Q, I, evaluate=False)
    a, b = H_map.get(Integer(1), Integer(0)), H_map.get(I, Integer(0))
    c, d = Q_map.get(Integer(1), Integer(0)), Q_map.get(I, Integer(0))
    # Eliminate v: the real parts u of roots of q are roots of R(u).
    R = resultant(c, d, v).as_poly(u)
    R_u_all = roots(R)
    R_q_all = roots(q)
    # Give up (return None) unless every root can be found explicitly.
    if sum(R_u_all.values()) < R.degree() or sum(R_q_all.values()) < q.degree():
        return
    R_u = {k: v for k, v in R_u_all.items() if k.is_extended_real}
    R_q = {k: v for k, v in R_q_all.items() if k.is_extended_real}
    result = Integer(0)
    for r_u in R_u:
        C = c.subs({u: r_u}).as_poly(v, extension=False)
        R_v_all = roots(C)
        if sum(R_v_all.values()) < C.degree():
            return
        R_v = {k: v for k, v in R_v_all.items() if k.is_extended_real is not False}
        R_v_paired = []  # take one from each pair of conjugate roots
        for r_v in R_v:
            if all(_ not in R_v_paired for _ in [+r_v, -r_v]):
                if r_v.could_extract_minus_sign():
                    R_v_paired.append(-r_v)
        for r_v in R_v_paired:
            # Keep only pairs (r_u, r_v) that really are roots of q,
            # i.e. also satisfy the imaginary equation d == 0.
            D = d.subs({u: r_u, v: r_v})
            if D.cancel().evalf(2, chop=True) != 0:
                continue
            A = a.subs({u: r_u, v: r_v}).as_poly(x, extension=False)
            B = b.subs({u: r_u, v: r_v}).as_poly(x, extension=False)
            # (a + I*b)(a - I*b) = a**2 + b**2 gives a real log argument.
            AB = (A**2 + B**2).as_expr()
            result += r_u*log(AB) + r_v*log_to_atan(A, B)
    # Purely real roots of q contribute ordinary real logarithms.
    for r in R_q:
        result += r*log(h.as_expr().subs({t: r}))
    return result
| 25.326087
| 95
| 0.492167
|
acfdb5d85535e8be75c8c63268254c423e869ac8
| 124
|
py
|
Python
|
setup.py
|
krasm/python-onapsdk
|
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
|
[
"Apache-2.0"
] | 4
|
2020-06-13T04:51:27.000Z
|
2021-01-06T15:00:51.000Z
|
setup.py
|
krasm/python-onapsdk
|
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
|
[
"Apache-2.0"
] | 10
|
2021-09-20T15:42:47.000Z
|
2021-09-23T12:49:51.000Z
|
setup.py
|
krasm/python-onapsdk
|
87cd3017fc542a8afd3be51fbd89934ed87ed3a7
|
[
"Apache-2.0"
] | 8
|
2020-08-28T10:56:02.000Z
|
2022-02-11T17:06:03.000Z
|
#!/usr/bin/env python3
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
from setuptools import setup
# No arguments: metadata presumably comes from setup.cfg/pyproject.toml in
# the package root -- confirm those files exist alongside this script.
setup()
| 15.5
| 37
| 0.685484
|
acfdb5d88d7820ee6112e95eef13369efcbd638b
| 59,504
|
py
|
Python
|
pybandit/optproblems/continuous.py
|
chunjenpeng/pyBandit
|
df14bf0cc263d8fa0ad0a539e94327ac35e33d1c
|
[
"MIT"
] | 1
|
2018-07-12T08:30:44.000Z
|
2018-07-12T08:30:44.000Z
|
pybandit/optproblems/continuous.py
|
PENGChunJen/pyBandit
|
df14bf0cc263d8fa0ad0a539e94327ac35e33d1c
|
[
"MIT"
] | null | null | null |
pybandit/optproblems/continuous.py
|
PENGChunJen/pyBandit
|
df14bf0cc263d8fa0ad0a539e94327ac35e33d1c
|
[
"MIT"
] | null | null | null |
"""
This module contains miscellaneous test problems with continuous/real-valued
search space. The problems are mostly from the early days of research on
optimization.
"""
import math
import random
import itertools
from optproblems.base import TestProblem, BoundConstraintsChecker
from optproblems.base import Individual
TWO_PI = 2.0 * math.pi
HALF_PI = math.pi / 2.0
class SequenceChecker:
    """A pre-processor raising exceptions if the phenome has the wrong length.

    .. note:: This class makes use of the decorator design pattern for
        potential chaining of pre-processors, see
        https://en.wikipedia.org/wiki/Decorator_pattern

    """
    def __init__(self, num_variables, data_type=None, previous_preprocessor=None):
        """Constructor.

        Parameters
        ----------
        num_variables : int
            The expected number of variables.
        data_type : class, optional
            If given, it is tested if all elements belong to this type.
        previous_preprocessor : callable, optional
            Another callable that processes the phenome before this one
            does.

        """
        self.num_variables = num_variables
        self.data_type = data_type
        self.previous_preprocessor = previous_preprocessor

    def __call__(self, phenome):
        """Check the phenome and raise exception if necessary.

        Returns
        -------
        phenome : the (possibly chain-preprocessed) phenome, unchanged by
            this checker itself.

        Raises
        ------
        ValueError
            If the length is wrong.
        TypeError
            If an element has the wrong data type.

        """
        if self.previous_preprocessor is not None:
            phenome = self.previous_preprocessor(phenome)
        # Explicit raises instead of `assert`: validation must also happen
        # when Python runs with optimization (-O), which strips asserts.
        if len(phenome) != self.num_variables:
            raise ValueError("wrong number of variables: expected "
                             + str(self.num_variables)
                             + ", got " + str(len(phenome)))
        data_type = self.data_type
        if data_type is not None:
            for phene in phenome:
                if not isinstance(phene, data_type):
                    raise TypeError("phene " + repr(phene)
                                    + " is not an instance of "
                                    + str(data_type))
        return phenome
class Shekel(TestProblem):
    """Shekel's family of test problems.
    As defined in [Dixon1978]_. The problems have four variables with lower
    and upper bounds of 0 and 10, respectively.
    """
    def __init__(self, num_optima, phenome_preprocessor=None, **kwargs):
        """Constructor.
        Parameters
        ----------
        num_optima : int
            The number of local optima. Must be between 1 and 10.
        kwargs
            Arbitrary keyword arguments, passed through to the constructor
            of the super class.
        """
        assert num_optima > 0 and num_optima <= 10
        self.num_optima = num_optima
        self.num_variables = 4
        self._min_bounds = [0.0] * self.num_variables
        self._max_bounds = [10.0] * self.num_variables
        bounds = (self.min_bounds, self.max_bounds)
        # The checker enforces the box constraints before each evaluation.
        self.bound_constraints_checker = BoundConstraintsChecker(bounds, phenome_preprocessor)
        TestProblem.__init__(self,
                             self.objective_function,
                             num_objectives=1,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        # Rows of `a` are the approximate optimum locations; `c[i]` shapes
        # the basin of optimum i. Only the first `num_optima` rows are used.
        self.a = [[4.0, 4.0, 4.0, 4.0],
                  [1.0, 1.0, 1.0, 1.0],
                  [8.0, 8.0, 8.0, 8.0],
                  [6.0, 6.0, 6.0, 6.0],
                  [3.0, 7.0, 3.0, 7.0],
                  [2.0, 9.0, 2.0, 9.0],
                  [5.0, 5.0, 3.0, 3.0],
                  [8.0, 1.0, 8.0, 1.0],
                  [6.0, 2.0, 6.0, 2.0],
                  [7.0, 3.6, 7.0, 3.6]][:num_optima]
        self.c = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5][:num_optima]
        self.is_deterministic = True
        self.do_maximize = False
    @property
    def min_bounds(self):
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # Keep the bound-constraints checker in sync with the new bounds.
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        # Keep the bound-constraints checker in sync with the new bounds.
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def objective_function(self, phenome):
        """Negative sum of inverse squared distances to the optima."""
        num_variables = self.num_variables
        assert num_variables == len(phenome)
        a = self.a
        ret = 0.0
        for i in range(self.num_optima):
            diff_vector = [phenome[j] - a[i][j] for j in range(num_variables)]
            sum_of_squares = sum(diff ** 2 for diff in diff_vector)
            ret -= 1.0 / (sum_of_squares + self.c[i])
        return ret
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum."""
        # Evaluate every local optimum and keep those attaining the minimum.
        local_optima = self.get_locally_optimal_solutions()
        objective_values = [self.objective_function(opt.phenome) for opt in local_optima]
        minimum = float("inf")
        opt_solutions = []
        for obj_value, individual in zip(objective_values, local_optima):
            if obj_value == minimum:
                opt_solutions.append(individual)
            elif obj_value < minimum:
                opt_solutions = [individual]
                minimum = obj_value
        return opt_solutions
    def get_locally_optimal_solutions(self, max_number=None):
        """Return locally optimal solutions.
        Parameters
        ----------
        max_number : int, optional
            Potentially restrict the number of optima.
        Returns
        -------
        optima : list of Individual
        """
        approx_optima = self.a
        # Fixed seed makes the random refinement below deterministic.
        rand_gen = random.Random()
        rand_gen.seed(2)
        optima = []
        for approx_opt in approx_optima:
            # Refine each approximate optimum by a short random local search.
            current_opt = approx_opt
            current_opt_objective = self.objective_function(current_opt)
            for _ in range(10000):
                lower = [x - 0.0001 for x in current_opt]
                upper = [x + 0.0001 for x in current_opt]
                rand_point = []
                for low, high in zip(lower, upper):
                    next_value = rand_gen.uniform(low, high)
                    next_value += rand_gen.uniform(low, high)
                    # Mean of two uniforms: proposals cluster near the center.
                    rand_point.append(next_value * 0.5)
                is_feasible = True
                for i in range(len(rand_point)):
                    is_feasible &= rand_point[i] >= self.min_bounds[i]
                    is_feasible &= rand_point[i] <= self.max_bounds[i]
                if is_feasible:
                    obj_value = self.objective_function(rand_point)
                    if obj_value < current_opt_objective:
                        current_opt_objective = obj_value
                        current_opt = rand_point
            optima.append(Individual(list(current_opt)))
        if max_number is not None:
            optima = optima[:max_number]
        return optima
class Hartman3(TestProblem):
    """A 3-D instance of Hartman's family.
    The principle for defining problems of this family was presented in
    [Hartman1972]_. The numbers for this instance can be found in
    [Dixon1978]_. The search space is the unit hypercube.
    References
    ----------
    .. [Hartman1972] Hartman, James K. (1972). Some Experiments in Global
        Optimization. Technical report NP5 55HH72051A, Naval Postgraduate
        School, Monterey, California.
    """
    def __init__(self, phenome_preprocessor=None, **kwargs):
        self.num_variables = 3
        self._min_bounds = [0.0] * self.num_variables
        self._max_bounds = [1.0] * self.num_variables
        bounds = (self.min_bounds, self.max_bounds)
        # The checker enforces the box constraints before each evaluation.
        self.bound_constraints_checker = BoundConstraintsChecker(bounds, phenome_preprocessor)
        TestProblem.__init__(self, self.objective_function,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
        # Gaussian bump parameters from Dixon & Szego: widths `a`, centers
        # `p` (rows), and weights `c`.
        self.a = [[3.0, 10.0, 30.0],
                  [0.1, 10.0, 35.0],
                  [3.0, 10.0, 30.0],
                  [0.1, 10.0, 35.0]]
        self.p = [[0.3689, 0.1170, 0.2673],
                  [0.4699, 0.4387, 0.7470],
                  [0.1091, 0.8732, 0.5547],
                  [0.03815, 0.5743, 0.8828]]
        self.c = [1.0, 1.2, 3.0, 3.2]
    @property
    def min_bounds(self):
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # Keep the bound-constraints checker in sync with the new bounds.
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        # Keep the bound-constraints checker in sync with the new bounds.
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def objective_function(self, phenome):
        """Negative weighted sum of four Gaussian-like bumps."""
        num_variables = self.num_variables
        assert len(phenome) == num_variables
        ret = 0.0
        p = self.p
        a = self.a
        for i in range(4):
            temp_sum = sum(a[i][j] * (phenome[j] - p[i][j]) ** 2 for j in range(num_variables))
            ret -= self.c[i] * math.exp(-temp_sum)
        return ret
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum."""
        # Evaluate every local optimum and keep those attaining the minimum.
        local_optima = self.get_locally_optimal_solutions()
        objective_values = [self.objective_function(opt.phenome) for opt in local_optima]
        minimum = float("inf")
        opt_solutions = []
        for obj_value, individual in zip(objective_values, local_optima):
            if obj_value == minimum:
                opt_solutions.append(individual)
            elif obj_value < minimum:
                opt_solutions = [individual]
                minimum = obj_value
        return opt_solutions
    def get_locally_optimal_solutions(self, max_number=None):
        """Return locally optimal solutions.
        According to [Toern1999]_, this problem has four local optima.
        However, only three could be found experimentally.
        Parameters
        ----------
        max_number : int, optional
            Potentially restrict the number of optima.
        Returns
        -------
        optima : list of Individual
        References
        ----------
        .. [Toern1999] A. Toern; M.M. Ali; S. Viitanen (1999). Stochastic
            Global Optimization: Problem Classes and Solution Techniques.
            Journal of Global Optimization, vol. 14, pp. 437-447.
        """
        # Numerically determined optimum locations (hard-coded).
        optima = []
        phenomes = [[0.36872272, 0.11756162, 0.26757374],
                    [0.10933749, 0.86052422, 0.56412316],
                    [0.11461436, 0.55564884, 0.85254695]]
        for phenome in phenomes:
            optima.append(Individual(phenome))
        if max_number is not None:
            optima = optima[:max_number]
        return optima
class Hartman6(TestProblem):
    """A 6-D instance of Hartman's family.
    The principle for defining problems of this family was presented in
    [Hartman1972]_. The numbers for this instance can be found in
    [Dixon1978]_. The search space is the unit hypercube.
    """
    def __init__(self, phenome_preprocessor=None, **kwargs):
        self.num_variables = 6
        self._min_bounds = [0.0] * self.num_variables
        self._max_bounds = [1.0] * self.num_variables
        bounds = (self.min_bounds, self.max_bounds)
        # The checker enforces the box constraints before each evaluation.
        self.bound_constraints_checker = BoundConstraintsChecker(bounds, phenome_preprocessor)
        TestProblem.__init__(self, self.objective_function,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
        # Gaussian bump parameters from Dixon & Szego: widths `a`, centers
        # `p` (rows), and weights `c`.
        self.a = [[10.00, 3.00, 17.00, 3.50, 1.70, 8.00],
                  [0.05, 10.00, 17.00, 0.10, 8.00, 14.00],
                  [3.00, 3.50, 1.70, 10.00, 17.00, 8.00],
                  [17.00, 8.00, 0.05, 10.00, 0.10, 14.00]]
        self.p = [[0.1312, 0.1696, 0.5569, 0.0124, 0.8283, 0.5886],
                  [0.2329, 0.4135, 0.8307, 0.3736, 0.1004, 0.9991],
                  [0.2348, 0.1451, 0.3522, 0.2883, 0.3047, 0.665],
                  [0.4047, 0.8828, 0.8732, 0.5743, 0.1091, 0.0381]]
        self.c = [1.0, 1.2, 3.0, 3.2]
    @property
    def min_bounds(self):
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # Keep the bound-constraints checker in sync with the new bounds.
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        # Keep the bound-constraints checker in sync with the new bounds.
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def objective_function(self, phenome):
        """Negative weighted sum of four Gaussian-like bumps."""
        num_variables = self.num_variables
        assert num_variables == len(phenome)
        ret = 0.0
        p = self.p
        a = self.a
        for i in range(4):
            temp_sum = sum(a[i][j] * (phenome[j] - p[i][j]) ** 2 for j in range(num_variables))
            ret -= self.c[i] * math.exp(-temp_sum)
        return ret
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum."""
        # Evaluate every local optimum and keep those attaining the minimum.
        local_optima = self.get_locally_optimal_solutions()
        objective_values = [self.objective_function(opt.phenome) for opt in local_optima]
        minimum = float("inf")
        opt_solutions = []
        for obj_value, individual in zip(objective_values, local_optima):
            if obj_value == minimum:
                opt_solutions.append(individual)
            elif obj_value < minimum:
                opt_solutions = [individual]
                minimum = obj_value
        return opt_solutions
    def get_locally_optimal_solutions(self, max_number=None):
        """Return locally optimal solutions.
        According to [Toern1999]_, this problem has four local optima.
        However, only two could be found experimentally.
        Parameters
        ----------
        max_number : int, optional
            Potentially restrict the number of optima.
        Returns
        -------
        optima : list of Individual
        """
        # Numerically determined optimum locations (hard-coded).
        optima = []
        phenomes = [[0.20168951, 0.15001068, 0.47687397, 0.27533242, 0.31165161, 0.65730053],
                    [0.40465312, 0.88244492, 0.84610156, 0.57398968, 0.13892656, 0.03849589]]
        for phenome in phenomes:
            optima.append(Individual(phenome))
        if max_number is not None:
            optima = optima[:max_number]
        return optima
def branin(phenome):
    """The bare-bones Branin ('RCOS') function of two variables."""
    x, y = phenome[0], phenome[1]
    pi = math.pi
    # Squared parabola term plus a cosine modulation in x.
    parabola = y - 5.1 / (4.0 * pi * pi) * x ** 2 + 5.0 / pi * x - 6.0
    value = parabola ** 2
    value += 10.0 * (1.0 - 1.0 / (8.0 * pi)) * math.cos(x) + 10.0
    return value
class Branin(TestProblem):
    """Branin's test problem 'RCOS'.

    The search space is :math:`[-5, 10] \\times [0, 15]`. Every optimum
    is a global optimum.

    """
    def __init__(self, phenome_preprocessor=None, **kwargs):
        """Constructor.

        Parameters
        ----------
        phenome_preprocessor : callable, optional
            A callable potentially applying transformations or checks to
            the phenome.
        kwargs
            Arbitrary keyword arguments, passed through to the constructor
            of the super class.

        """
        self.num_variables = 2
        self._min_bounds = [-5.0, 0.0]
        self._max_bounds = [10.0, 15.0]
        bounds = (self.min_bounds, self.max_bounds)
        self.bound_constraints_checker = BoundConstraintsChecker(bounds, phenome_preprocessor)
        TestProblem.__init__(self, branin,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    @property
    def min_bounds(self):
        """Lower bounds of the search space."""
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # keep the constraints checker consistent with the new bounds
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        """Upper bounds of the search space."""
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def get_optimal_solutions(self, max_number=None):
        """Return the three global optima."""
        phenomes = [[-math.pi, 12.275], [math.pi, 2.275], [9.424778, 2.475]]
        optima = [Individual(phenome) for phenome in phenomes]
        if max_number is not None:
            optima = optima[:max_number]
        return optima
    get_locally_optimal_solutions = get_optimal_solutions
def goldstein_price(phenome):
    """The bare-bones Goldstein-Price function."""
    x, y = phenome[0], phenome[1]
    # product of two quartic factors
    poly1 = 19.0 - 14.0 * x + 3.0 * x ** 2 - 14.0 * y + 6.0 * x * y + 3.0 * y ** 2
    factor1 = 1.0 + (x + y + 1.0) ** 2 * poly1
    poly2 = 18.0 - 32.0 * x + 12.0 * x ** 2 + 48.0 * y - 36.0 * x * y + 27.0 * y ** 2
    factor2 = 30.0 + (2.0 * x - 3.0 * y) ** 2 * poly2
    return factor1 * factor2
class GoldsteinPrice(TestProblem):
    """A test problem by Goldstein and Price.

    The search space is :math:`[-2, 2] \\times [-2, 2]`.

    """
    def __init__(self, phenome_preprocessor=None, **kwargs):
        self.num_variables = 2
        self._min_bounds = [-2.0] * self.num_variables
        self._max_bounds = [2.0] * self.num_variables
        feasible_region = (self.min_bounds, self.max_bounds)
        self.bound_constraints_checker = BoundConstraintsChecker(feasible_region, phenome_preprocessor)
        TestProblem.__init__(self, goldstein_price,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    @property
    def min_bounds(self):
        """Lower bounds of the search space."""
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # propagate to the constraints checker as well
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        """Upper bounds of the search space."""
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def get_optimal_solutions(self, max_number=None):
        """Return the single global optimum."""
        return [Individual([0.0, -1.0])]
    def get_locally_optimal_solutions(self, max_number=None):
        """Return the four locally optimal solutions.

        Parameters
        ----------
        max_number : int, optional
            Potentially restrict the number of optima.

        Returns
        -------
        optima : list of Individual

        """
        known_optima = [[0.0, -1.0], [-0.6, -0.4], [1.2, 0.8], [1.8, 0.2]]
        optima = [Individual(point) for point in known_optima]
        if max_number is not None:
            optima = optima[:max_number]
        return optima
class DixonSzegoe(list):
    """The test problem collection of Dixon and Szegoe for global optimization.

    This class inherits from :class:`list` and fills itself with the seven
    problems Shekel5, Shekel7, Shekel10, Hartman3, Hartman6, Branin, and
    GoldsteinPrice. The arguments to the constructor are passed through to
    the problem classes.

    References
    ----------
    .. [Dixon1978] L.C.W. Dixon and G.P. Szegoe, The global optimization
        problem: an introduction, pp. 1-15 in: in L.C.W. Dixon and G.P.
        Szegoe (eds.), Towards Global Optimisation 2, North-Holland,
        Amsterdam 1978.

    """
    def __init__(self, **kwargs):
        # Instantiate the seven benchmark problems in their canonical order.
        problems = [Shekel(5, name="Shekel5", **kwargs),
                    Shekel(7, name="Shekel7", **kwargs),
                    Shekel(10, name="Shekel10", **kwargs),
                    Hartman3(**kwargs),
                    Hartman6(**kwargs),
                    Branin(**kwargs),
                    GoldsteinPrice(**kwargs)]
        list.__init__(self, problems)
def ackley(phenome):
    """The bare-bones Ackley function."""
    n = len(phenome)
    square_sum = 0.0
    cosine_sum = 0.0
    for x in phenome:
        square_sum += x ** 2
        cosine_sum += math.cos(TWO_PI * x)
    # exponential of the mean magnitude plus exponential of the mean cosine
    value = -20.0 * math.exp(-0.2 * math.sqrt(1.0 / n * square_sum))
    value += -math.exp(1.0 / n * cosine_sum) + 20.0 + math.e
    return value
class Ackley(TestProblem):
    """Ackley's test problem.

    This problem comes without pre-defined bound constraints.

    """
    def __init__(self, num_variables=10, phenome_preprocessor=None, **kwargs):
        self.num_variables = num_variables
        checker = SequenceChecker(num_variables,
                                  previous_preprocessor=phenome_preprocessor)
        TestProblem.__init__(self, ackley,
                             phenome_preprocessor=checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum (the origin)."""
        return [Individual([0.0] * self.num_variables)]
def doublesum(phenome):
    """Schwefel's problem 1.2 (sum of squared prefix sums)."""
    total = 0.0
    prefix_sum = 0.0
    for component in phenome:
        prefix_sum += component
        total += prefix_sum ** 2
    return total
class DoubleSum(TestProblem):
    """Schwefel's double-sum problem."""
    def __init__(self, num_variables=30, phenome_preprocessor=None, **kwargs):
        self.num_variables = num_variables
        checker = SequenceChecker(num_variables,
                                  previous_preprocessor=phenome_preprocessor)
        TestProblem.__init__(self, doublesum,
                             phenome_preprocessor=checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum (the origin)."""
        return [Individual([0.0] * self.num_variables)]
class EllipsoidFunction:
    """A configurable Ellipsoid function.

    The basic one-dimensional formula reads ``a ** exponent * x ** 2``,
    where the exponent of variable ``i`` (zero-based) is
    ``i / (num_variables - 1)``; `a` thus controls the conditioning.

    """
    def __init__(self, a=1.0e6):
        """Constructor.

        Parameters
        ----------
        a : float, optional
            Controls the conditioning of the quadratic form.

        """
        self.a = a
    def __call__(self, phenome):
        """Evaluate the function.

        A single variable is treated with exponent zero; previously the
        formula divided by ``num_variables - 1 == 0`` and raised a
        ZeroDivisionError for one-dimensional input.

        """
        num_variables = len(phenome)
        # guard against division by zero for 0- or 1-dimensional input
        denominator = max(num_variables - 1, 1)
        result = 0.0
        for i, x in enumerate(phenome):
            result += self.a ** (float(i) / denominator) * x ** 2
        return result
class Ellipsoid(TestProblem):
    """A configurable ellipsoidal test problem.

    This problem comes without pre-defined bound constraints.

    """
    def __init__(self, num_variables=30, a=1.0e6, phenome_preprocessor=None, **kwargs):
        self.num_variables = num_variables
        checker = SequenceChecker(num_variables,
                                  previous_preprocessor=phenome_preprocessor)
        TestProblem.__init__(self, EllipsoidFunction(a),
                             phenome_preprocessor=checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum (the origin)."""
        return [Individual([0.0] * self.num_variables)]
class FletcherPowell(TestProblem):
    """Fletcher and Powell's test problem.
    Each decision variable is restricted to :math:`[-\\pi, \\pi]` and the
    search space is periodic.
    References
    ----------
    .. [Fletcher1963] R. Fletcher and M. J. D. Powell (1963). A Rapidly
        Convergent Descent Method for Minimization. The Computer Journal
        6(2): 163-168, https://dx.doi.org/10.1093/comjnl/6.2.163
    """
    def __init__(self, num_variables=10,
                 rand_gen=None,
                 phenome_preprocessor=None,
                 **kwargs):
        """Constructor.
        Parameters
        ----------
        num_variables : int, optional
            The number of decision variables.
        rand_gen : random.Random, optional
            A generator for random numbers. If omitted, the global instance
            of the module :mod:`random` is used.
        phenome_preprocessor : callable, optional
            A callable potentially applying transformations or checks to
            the phenome.
        kwargs
            Arbitrary keyword arguments, passed through to the constructor
            of the super class.
        """
        if rand_gen is None:
            rand_gen = random
        self.num_variables = num_variables
        self._min_bounds = [-math.pi] * num_variables
        self._max_bounds = [math.pi] * num_variables
        bounds = (self.min_bounds, self.max_bounds)
        self.bound_constraints_checker = BoundConstraintsChecker(bounds, phenome_preprocessor)
        TestProblem.__init__(self, self.objective_function,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
        # alpha holds randomly drawn angles (the known global optimum,
        # see get_optimal_solutions); a and b are random integer
        # coefficient matrices of the trigonometric sums.
        self.alpha = [rand_gen.uniform(-math.pi, math.pi) for _ in range(num_variables)]
        self.a = [[rand_gen.randint(-100, 100) for _ in range(num_variables)] for _ in range(num_variables)]
        self.b = [[rand_gen.randint(-100, 100) for _ in range(num_variables)] for _ in range(num_variables)]
        self.init_vector_e()
    def init_vector_e(self):
        """Precompute the target vector e from alpha, a, and b.

        e[i] = sum_j a[i][j] * sin(alpha[j]) + b[i][j] * cos(alpha[j]),
        so the objective function is zero exactly at phenome == alpha.

        """
        self.e = [0.0] * self.num_variables
        for i in range(self.num_variables):
            self.e[i] = 0.0
            for j in range(self.num_variables):
                self.e[i] += self.a[i][j] * math.sin(self.alpha[j])
                self.e[i] += self.b[i][j] * math.cos(self.alpha[j])
    @property
    def min_bounds(self):
        """Lower bounds of the search space."""
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # keep the constraints checker consistent with the new bounds
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        """Upper bounds of the search space."""
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum."""
        return [Individual(list(self.alpha))]
    def objective_function(self, phenome):
        """Sum of squared deviations from the precomputed target vector e."""
        # shortcuts & initialization
        a = self.a
        b = self.b
        e = self.e
        sin = math.sin
        cos = math.cos
        ret = 0.0
        lhs = [0.0] * self.num_variables
        # calculate
        for i in range(self.num_variables):
            for j in range(self.num_variables):
                lhs[i] += a[i][j] * sin(phenome[j]) + b[i][j] * cos(phenome[j])
            ret += (e[i] - lhs[i]) ** 2
        return ret
def griewank(phenome):
    """The bare-bones Griewank function."""
    square_term = 0.0
    cosine_product = 1.0
    for i, x in enumerate(phenome):
        square_term += x ** 2 / 4000.0
        cosine_product *= math.cos(x / math.sqrt(i + 1.0))
    return square_term - cosine_product + 1.0
class Griewank(TestProblem):
    """Griewank's test problem.

    No bound constraints are pre-defined for this problem. A possible
    choice is :math:`[-600, 600]` for each variable.

    """
    def __init__(self, num_variables=10, phenome_preprocessor=None, **kwargs):
        self.num_variables = num_variables
        checker = SequenceChecker(num_variables,
                                  previous_preprocessor=phenome_preprocessor)
        TestProblem.__init__(self, griewank,
                             phenome_preprocessor=checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum (the origin)."""
        return [Individual([0.0] * self.num_variables)]
def himmelblau(phenome):
    """The bare-bones Himmelblau function."""
    first, second = phenome[0], phenome[1]
    term_a = first ** 2 + second - 11.0
    term_b = first + second ** 2 - 7.0
    return term_a ** 2 + term_b ** 2
class Himmelblau(TestProblem):
    """Himmelblau's test problem.

    No bound constraints are pre-defined for this problem. Possible
    choices including all the optima are :math:`[-4, 4] \\times [-4, 4]`
    or larger rectangles.

    References
    ----------
    .. [Himmelblau1972] David M. Himmelblau, Applied Nonlinear Programming,
        McGraw Hill, 1972

    """
    def __init__(self, phenome_preprocessor=None, **kwargs):
        self.num_variables = 2
        checker = SequenceChecker(self.num_variables,
                                  previous_preprocessor=phenome_preprocessor)
        TestProblem.__init__(self, himmelblau,
                             phenome_preprocessor=checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    def get_optimal_solutions(self, max_number=None):
        """Return the four optimal solutions.

        All local optima are global optima in this problem.

        Parameters
        ----------
        max_number : int, optional
            Potentially restrict the number of optima.

        Returns
        -------
        optima : list of Individual

        """
        known_optima = [[3.0, 2.0],
                        [-3.779310253377747, -3.283185991286170],
                        [-2.805118086952745, 3.131312518250573],
                        [3.584428340330492, -1.848126526964404]]
        optima = [Individual(point) for point in known_optima]
        if max_number is not None:
            # at least one optimum is always returned
            optima = optima[:max(1, max_number)]
        return optima
    get_locally_optimal_solutions = get_optimal_solutions
class LunacekTwoSpheres(TestProblem):
    """Lunacek's two spheres.
    References
    ----------
    .. [Lunacek2008] M. Lunacek, D. Whitley, and A. Sutton (2008). The
        Impact of Global Structure on Search. In: Parallel Problem Solving
        from Nature, Lecture Notes in Computer Science, vol. 5199,
        pp. 498-507, Springer.
    """
    def __init__(self, num_variables=10,
                 depth=0.0,
                 size=1.0,
                 phenome_preprocessor=None,
                 **kwargs):
        """Constructor.
        Parameters
        ----------
        num_variables : int, optional
            Number of decision variables of the problem.
        depth : float, optional
            Depth parameter of the worse basin.
        size : float, optional
            Size parameter of the worse basin.
        phenome_preprocessor : callable, optional
            A callable potentially applying transformations or checks to
            the phenome.
        kwargs
            Arbitrary keyword arguments, passed through to the constructor
            of the super class.
        """
        self.depth = depth
        self.size = size
        # offset1/offset2 are the centers of the two sphere basins;
        # offset2 is chosen depending on depth and size (see
        # objective_function for how the second basin is scaled).
        self.offset1 = 2.5
        self.offset2 = -math.sqrt((self.offset1 ** 2 - depth) / size)
        self.num_variables = num_variables
        self._min_bounds = [-5.0] * num_variables
        self._max_bounds = [5.0] * num_variables
        bounds = (self.min_bounds, self.max_bounds)
        self.bound_constraints_checker = BoundConstraintsChecker(bounds, phenome_preprocessor)
        TestProblem.__init__(self, self.objective_function,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    @property
    def min_bounds(self):
        """Lower bounds of the search space."""
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # keep the constraints checker consistent with the new bounds
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        """Upper bounds of the search space."""
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def objective_function(self, phenome):
        """Return the smaller of the two (shifted, scaled) sphere values."""
        # shortcuts
        shifted1 = [phene - self.offset1 for phene in phenome]
        shifted2 = [phene - self.offset2 for phene in phenome]
        value1 = sphere(shifted1)
        value2 = self.depth * self.num_variables + self.size * sphere(shifted2)
        return min(value1, value2)
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum.

        For depth == 0 both basin centers attain the same value; for
        depth < 0 only the second basin's center is globally optimal.

        """
        optima = []
        phenomes = [[self.offset1] * self.num_variables]
        if self.depth == 0.0:
            phenomes.append([self.offset2] * self.num_variables)
        elif self.depth < 0.0:
            phenomes = [[self.offset2] * self.num_variables]
        for phenome in phenomes:
            optima.append(Individual(phenome))
        if max_number is not None:
            optima = optima[:max(1, max_number)]
        return optima
    def get_locally_optimal_solutions(self, max_number=None):
        """Return the locally optimal solutions.
        Parameters
        ----------
        max_number : int, optional
            Potentially restrict the number of optima.
        Returns
        -------
        optima : list of Individual
        """
        optima = []
        phenomes = [[self.offset1] * self.num_variables,
                    [self.offset2] * self.num_variables]
        for phenome in phenomes:
            optima.append(Individual(phenome))
        if max_number is not None:
            optima = optima[:max(1, max_number)]
        return optima
class LunacekTwoRastrigins(LunacekTwoSpheres):
    """Lunacek's two Rastrigin functions."""
    def __init__(self, num_variables=10,
                 depth=0.0,
                 size=1.0,
                 a=10.0,
                 omega=TWO_PI,
                 **kwargs):
        """Constructor.
        Parameters
        ----------
        num_variables : int, optional
            Number of decision variables of the problem.
        depth : float, optional
            Depth parameter of the worse basin.
        size : float, optional
            Size parameter of the worse basin.
        a : float, optional
            Amplitude of the cosine term of the rastrigin function.
        omega : float, optional
            Controls the period length of the cosine term of the rastrigin
            function.
        kwargs
            Arbitrary keyword arguments, passed through to the constructor
            of the super class.
        """
        LunacekTwoSpheres.__init__(self, num_variables,
                                   depth,
                                   size,
                                   **kwargs)
        self.a = a
        self.omega = omega
    def objective_function(self, phenome):
        """Two-spheres value plus a Rastrigin term centered at offset1."""
        # shortcuts
        a = self.a
        omega = self.omega
        # calculate
        sphere_obj_value = LunacekTwoSpheres.objective_function(self, phenome)
        shifted1 = [phene - self.offset1 for phene in phenome]
        rastrigin_part = a * len(shifted1)
        for x in shifted1:
            rastrigin_part -= a * math.cos(omega * x)
        return sphere_obj_value + rastrigin_part
    def get_locally_optimal_solutions(self, max_number=None):
        """Not available; the local optima of this problem are unknown."""
        raise NotImplementedError("Locally optimal solutions are unknown.")
class ModifiedRastriginFunction:
    """A function similar to the Rastrigin function.

    The basic one-dimensional formula reads
    ``2.0 * k * x ** 2 + 10.0 * cos(omega * x)``. Further information can
    be found in [Saha2010]_.

    """
    def __init__(self, num_variables, omegas, k_values):
        self.num_variables = num_variables
        # default frequency is one full period per unit interval
        if omegas is None:
            omegas = [TWO_PI] * num_variables
        self.omegas = omegas
        self.k_values = k_values
    def __call__(self, phenome):
        """Evaluate the function."""
        result = 10.0 * self.num_variables
        for i, x in enumerate(phenome):
            result += 10.0 * math.cos(self.omegas[i] * x) + 2.0 * self.k_values[i] * x ** 2
        return result
class ModifiedRastrigin(TestProblem):
    """A test problem similar to the Rastrigin problem.
    The modification consists of a configurable number of local optima per
    dimension, so that the total number of local optima becomes less
    dependent on the dimension. The problem was defined in [Saha2010]_.
    There are three pre-defined instances with 4, 8, and 16 variables,
    respectively, which can be obtained from the
    :func:`create_instance <optproblems.real.ModifiedRastrigin.create_instance>`
    method. The search space is the unit hypercube.
    References
    ----------
    .. [Saha2010] Amit Saha, Kalyanmoy Deb (2010). A Bi-criterion Approach
        to Multimodal Optimization: Self-adaptive Approach. In: Simulated
        Evolution and Learning, vol. 6457 of Lecture Notes in Computer
        Science, pp. 95-104, Springer
    """
    # Known locally optimal positions in one dimension, indexed by k
    # (the number of optima in that dimension).
    opt_x_for_k = [[],
                   [0.494984],
                   [0.24874, 0.74622],
                   [0.16611, 0.49832, 0.83053],
                   [0.12468, 0.37405, 0.62342, 0.87279]]
    def __init__(self, num_variables=16,
                 k_values=None,
                 phenome_preprocessor=None,
                 **kwargs):
        """Constructor.
        Parameters
        ----------
        num_variables : int, optional
            The number of decision variables.
        k_values : list of int, optional
            Number of optima per dimension. Defaults to 1 in every
            dimension.
        phenome_preprocessor : callable, optional
            A callable potentially applying transformations or checks to
            the phenome.
        kwargs
            Arbitrary keyword arguments, passed through to the constructor
            of the super class.
        """
        if k_values is None:
            k_values = [1] * num_variables
        self.k_values = k_values
        # frequency per dimension scales with the requested optima count
        omegas = [TWO_PI * k for k in k_values]
        self._min_bounds = [0.0] * num_variables
        self._max_bounds = [1.0] * num_variables
        self.num_variables = num_variables
        bounds = (self.min_bounds, self.max_bounds)
        self.bound_constraints_checker = BoundConstraintsChecker(bounds, phenome_preprocessor)
        obj_function = ModifiedRastriginFunction(num_variables, omegas, k_values)
        TestProblem.__init__(self, obj_function,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    @property
    def min_bounds(self):
        """Lower bounds of the search space."""
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # keep the constraints checker consistent with the new bounds
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        """Upper bounds of the search space."""
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    @staticmethod
    def create_instance(num_variables, **kwargs):
        """Factory method for pre-defined modified Rastrigin problems.
        Parameters
        ----------
        num_variables : int
            Must be 4, 8, or 16.
        kwargs
            Arbitrary keyword arguments, passed through to the constructor.
        Returns
        -------
        problem : ModifiedRastrigin instance
        """
        if num_variables == 4:
            k_values = [2, 2, 3, 4]
        elif num_variables == 8:
            k_values = [1, 2, 1, 2, 1, 3, 1, 4]
        elif num_variables == 16:
            k_values = [1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 3, 1, 1, 1, 4]
        else:
            # NOTE(review): a ValueError would be more specific here
            raise Exception("There is no predefined instance for " + str(num_variables) + " variables")
        return ModifiedRastrigin(num_variables, k_values, **kwargs)
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum."""
        local_optima = self.get_locally_optimal_solutions()
        objective_values = [self.objective_function(opt.phenome) for opt in local_optima]
        minimum = float("inf")
        opt_solutions = []
        # keep only the local optima attaining the smallest objective value
        for obj_value, individual in zip(objective_values, local_optima):
            if obj_value == minimum:
                opt_solutions.append(individual)
            elif obj_value < minimum:
                opt_solutions = [individual]
                minimum = obj_value
        if max_number is not None:
            opt_solutions = opt_solutions[:max(1, max_number)]
        return opt_solutions
    def get_locally_optimal_solutions(self, max_number=None):
        """Return locally optimal solutions.
        Parameters
        ----------
        max_number : int, optional
            Potentially restrict the number of optima.
        Returns
        -------
        optima : list of Individual
        """
        positions = []
        for i in range(self.num_variables):
            positions.append(self.opt_x_for_k[self.k_values[i]])
        optima = []
        if max_number is None:
            max_number = float("inf")
        # build cross product of all dimensions
        for position in itertools.product(*positions):
            optima.append(Individual(list(position)))
            if len(optima) >= max_number:
                break
        return optima
class RastriginFunction:
    """A configurable Rastrigin function.

    The basic one-dimensional formula reads ``x ** 2 - a * cos(omega * x)``.

    """
    def __init__(self, a=10.0, omega=TWO_PI):
        self.a = a
        self.omega = omega
    def __call__(self, phenome):
        """Evaluate the function."""
        amplitude = self.a
        frequency = self.omega
        result = amplitude * len(phenome)
        for x in phenome:
            result += x ** 2 - amplitude * math.cos(frequency * x)
        return result
class Rastrigin(TestProblem):
    """A configurable Rastrigin test problem.

    No bound constraints are pre-defined, but :math:`[-5, 5]` for every
    variable is a typical choice.

    """
    def __init__(self, num_variables=10,
                 a=10.0,
                 omega=TWO_PI,
                 phenome_preprocessor=None,
                 **kwargs):
        self.num_variables = num_variables
        checker = SequenceChecker(num_variables,
                                  previous_preprocessor=phenome_preprocessor)
        TestProblem.__init__(self, RastriginFunction(a, omega),
                             phenome_preprocessor=checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum (the origin)."""
        return [Individual([0.0] * self.num_variables)]
def rosenbrock(phenome):
    """The bare-bones Rosenbrock function."""
    total = 0.0
    # one banana-valley term per pair of adjacent variables
    for current, following in zip(phenome, phenome[1:]):
        total += 100.0 * (current ** 2 - following) ** 2 + (current - 1.0) ** 2
    return total
class Rosenbrock(TestProblem):
    """Rosenbrock's test problem.

    This problem comes without pre-defined bound constraints.

    """
    def __init__(self, num_variables=10, phenome_preprocessor=None, **kwargs):
        self.num_variables = num_variables
        checker = SequenceChecker(num_variables,
                                  previous_preprocessor=phenome_preprocessor)
        TestProblem.__init__(self, rosenbrock,
                             phenome_preprocessor=checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum (all variables equal to one)."""
        return [Individual([1.0] * self.num_variables)]
def schaffer6(phenome):
    """The bare-bones Schaffer function 6."""
    radius_squared = phenome[0] ** 2 + phenome[1] ** 2
    numerator = math.sin(math.sqrt(radius_squared)) ** 2 - 0.5
    denominator = (1.0 + 0.001 * radius_squared) ** 2
    return numerator / denominator + 0.5
class Schaffer6(TestProblem):
    """Schaffer's test problem 6.

    This problem is radially symmetric and thus does not possess a
    discrete set of local optima. It was defined for two dimensions in
    [Schaffer1989]_. The global optimum is the origin and the search
    space is :math:`[-100, 100] \\times [-100, 100]`.

    References
    ----------
    .. [Schaffer1989] Schaffer, J. David; Caruana, Richard A.; Eshelman,
        Larry J.; Das, Rajarshi (1989). A study of control parameters
        affecting online performance of genetic algorithms for function
        optimization. In: Proceedings of the third international
        conference on genetic algorithms, pp. 51-60, Morgan Kaufmann.

    """
    def __init__(self, phenome_preprocessor=None, **kwargs):
        self.num_variables = 2
        self._min_bounds = [-100.0, -100.0]
        self._max_bounds = [100.0, 100.0]
        feasible_region = (self.min_bounds, self.max_bounds)
        self.bound_constraints_checker = BoundConstraintsChecker(feasible_region, phenome_preprocessor)
        TestProblem.__init__(self, schaffer6,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    @property
    def min_bounds(self):
        """Lower bounds of the search space."""
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # propagate to the constraints checker as well
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        """Upper bounds of the search space."""
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum (the origin)."""
        return [Individual([0.0] * self.num_variables)]
def schaffer7(phenome):
    """The bare-bones Schaffer function 7."""
    radius_squared = phenome[0] ** 2 + phenome[1] ** 2
    amplitude = radius_squared ** 0.25
    modulation = math.sin(50.0 * radius_squared ** 0.1) ** 2 + 1.0
    return amplitude * modulation
class Schaffer7(TestProblem):
    """Schaffer's test problem 7.

    This problem is radially symmetric and thus does not possess a
    discrete set of local optima. It was defined for two dimensions in
    [Schaffer1989]_. The global optimum is the origin and the search
    space is :math:`[-100, 100] \\times [-100, 100]`.

    """
    def __init__(self, phenome_preprocessor=None, **kwargs):
        self.num_variables = 2
        self._min_bounds = [-100.0, -100.0]
        self._max_bounds = [100.0, 100.0]
        feasible_region = (self.min_bounds, self.max_bounds)
        self.bound_constraints_checker = BoundConstraintsChecker(feasible_region, phenome_preprocessor)
        TestProblem.__init__(self, schaffer7,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    @property
    def min_bounds(self):
        """Lower bounds of the search space."""
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # propagate to the constraints checker as well
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        """Upper bounds of the search space."""
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum (the origin)."""
        return [Individual([0.0] * self.num_variables)]
def schwefel(phenome):
    """The bare-bones Schwefel function."""
    result = 0.0
    for x in phenome:
        result -= x * math.sin(math.sqrt(abs(x)))
    return result
class Schwefel(TestProblem):
    """Schwefel's test problem.
    Note that bound constraints are required for the global optimum to
    exist. :math:`[-500, 500]` for each variable is the default here.
    Then the problem has :math:`7^n` local optima.
    """
    def __init__(self, num_variables=10, phenome_preprocessor=None, **kwargs):
        """Constructor.

        Parameters
        ----------
        num_variables : int, optional
            The number of decision variables.
        phenome_preprocessor : callable, optional
            A callable potentially applying transformations or checks to
            the phenome.
        kwargs
            Arbitrary keyword arguments, passed through to the constructor
            of the super class.

        """
        self.num_variables = num_variables
        self._min_bounds = [-500.0] * num_variables
        self._max_bounds = [500.0] * num_variables
        bounds = (self.min_bounds, self.max_bounds)
        self.bound_constraints_checker = BoundConstraintsChecker(bounds, phenome_preprocessor)
        TestProblem.__init__(self, schwefel,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    @property
    def min_bounds(self):
        """Lower bounds of the search space."""
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # keep the constraints checker consistent with the new bounds
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        """Upper bounds of the search space."""
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum."""
        return [Individual([420.96874635998199] * self.num_variables)]
    def get_locally_optimal_solutions(self, max_number=None):
        """Return the locally optimal solutions.
        Parameters
        ----------
        max_number : int, optional
            Potentially restrict the number of optima.
        Returns
        -------
        optima : list of Individual
        """
        # shortcuts
        sqrt = math.sqrt
        sin = math.sin
        cos = math.cos
        copysign = math.copysign
        min_bounds = self.min_bounds
        max_bounds = self.max_bounds
        # First and second derivative of g(x) = -x * sin(sqrt(x)) for
        # x > 0; the one-dimensional minima are roots of the first
        # derivative and are located below with Newton's method.
        def first_derivative(x):
            sqrt_x = sqrt(x)
            return -sin(sqrt_x) - 0.5 * sqrt_x * cos(sqrt_x)
        def second_derivative(x):
            sqrt_x = sqrt(x)
            return 0.25 * sin(sqrt_x) - (0.75 * cos(sqrt_x)) / sqrt_x
        assert len(min_bounds) == len(max_bounds)
        min_bound = min(min_bounds)
        max_bound = max(max_bounds)
        # index range of the zeros of sin(sqrt(|x|)) inside the bounds
        max_root_pos = int(copysign(sqrt(abs(max_bound)) / math.pi, max_bound))
        min_root_pos = int(copysign(sqrt(abs(min_bound)) / math.pi, min_bound))
        minima_positions = []
        for pos in range(min_root_pos, max_root_pos):
            if pos % 2 == 0:
                # initial estimate (center between two zeros)
                old_x = ((pos * math.pi) ** 2 + ((pos + 1.0) * math.pi) ** 2) * 0.5
                new_x = old_x - first_derivative(old_x) / second_derivative(old_x)
                # newton's method
                counter = 0
                while abs(new_x - old_x) > 1e-12 and counter < 20:
                    old_x = new_x
                    new_x = old_x - first_derivative(old_x) / second_derivative(old_x)
                    counter += 1
                minima_positions.append(copysign(new_x, pos))
        # filter feasible positions in each dimension
        positions_in_dimensions = []
        for dim in range(self.num_variables):
            positions_in_this_dim = []
            for pos in minima_positions:
                if pos >= min_bounds[dim] and pos <= max_bounds[dim]:
                    positions_in_this_dim.append(pos)
            positions_in_dimensions.append(positions_in_this_dim)
        optima = []
        if max_number is None:
            max_number = float("inf")
        # build cross product of all dimensions
        for position in itertools.product(*positions_in_dimensions):
            optima.append(Individual(list(position)))
            if len(optima) >= max_number:
                break
        return optima
def six_hump_camelback(phenome):
    """The bare-bones six-hump camelback function."""
    x, y = phenome[0], phenome[1]
    x_part = (4.0 - 2.1 * x ** 2 + (x ** 4) / 3.0) * x ** 2
    y_part = (-4.0 + 4.0 * y ** 2) * y ** 2
    return 4.0 * (x_part + x * y + y_part)
class SixHumpCamelback(TestProblem):
    """The so-called six-hump camelback test problem.

    No bound constraints are pre-defined for this problem. Possible
    choices including all the optima are
    :math:`[-1.9, 1.9] \\times [-1.1, 1.1]` or larger rectangles.

    """
    def __init__(self, phenome_preprocessor=None, **kwargs):
        self.num_variables = 2
        checker = SequenceChecker(self.num_variables,
                                  previous_preprocessor=phenome_preprocessor)
        TestProblem.__init__(self, six_hump_camelback,
                             phenome_preprocessor=checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    def get_optimal_solutions(self, max_number=None):
        """Return the two global optima."""
        global_optima = [[0.089842007286237896, -0.71265640548186626],
                         [-0.089842007286237896, 0.71265640548186626]]
        optima = [Individual(point) for point in global_optima]
        if max_number is not None:
            # at least one optimum is always returned
            optima = optima[:max(1, max_number)]
        return optima
    def get_locally_optimal_solutions(self, max_number=None):
        """Return the locally optimal solutions.

        Parameters
        ----------
        max_number : int, optional
            Potentially restrict the number of optima.

        Returns
        -------
        optima : list of Individual

        """
        additional = [[-1.7036067132900241, 0.7960835697790869],
                      [1.7036067132900241, -0.7960835697790869],
                      [-1.6071047491815618, -0.56865145239564607],
                      [1.6071047607120053, 0.56865145738534051]]
        # the two global optima are local optima as well
        optima = self.get_optimal_solutions()
        optima.extend(Individual(point) for point in additional)
        if max_number is not None:
            optima = optima[:max(1, max_number)]
        return optima
def sphere(phenome):
    """The bare-bones sphere function (sum of squares)."""
    result = 0.0
    for x in phenome:
        result += x * x
    return result
class Sphere(TestProblem):
    """The sphere problem.

    Possibly the most simple unimodal problem; no bound constraints are
    pre-defined.

    """
    def __init__(self, num_variables=10, phenome_preprocessor=None, **kwargs):
        self.num_variables = num_variables
        checker = SequenceChecker(num_variables,
                                  previous_preprocessor=phenome_preprocessor)
        TestProblem.__init__(self, sphere,
                             phenome_preprocessor=checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum (the origin)."""
        return [Individual([0.0] * self.num_variables)]
    get_locally_optimal_solutions = get_optimal_solutions
def vincent(phenome):
    """The bare-bones Vincent function (requires strictly positive input)."""
    total = 0.0
    for x in phenome:
        total += math.sin(10.0 * math.log(x))
    return -total / len(phenome)
class Vincent(TestProblem):
    """Vincent's test problem.

    All variables have lower and upper bounds of 0.25 and 10, respectively.
    The problem has :math:`6^n` global optima.

    """
    def __init__(self, num_variables=5, phenome_preprocessor=None, **kwargs):
        self.num_variables = num_variables
        self._min_bounds = [0.25] * num_variables
        self._max_bounds = [10.0] * num_variables
        bounds = (self.min_bounds, self.max_bounds)
        # kept as an attribute so the bound setters below can keep the
        # checker in sync when the bounds are replaced later
        self.bound_constraints_checker = BoundConstraintsChecker(bounds, phenome_preprocessor)
        TestProblem.__init__(self, vincent,
                             phenome_preprocessor=self.bound_constraints_checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    @property
    def min_bounds(self):
        """Lower bounds of the search space (one value per variable)."""
        return self._min_bounds
    @min_bounds.setter
    def min_bounds(self, bounds):
        # propagate new bounds to the constraint checker as well
        self._min_bounds = bounds
        self.bound_constraints_checker.min_bounds = bounds
    @property
    def max_bounds(self):
        """Upper bounds of the search space (one value per variable)."""
        return self._max_bounds
    @max_bounds.setter
    def max_bounds(self, bounds):
        # propagate new bounds to the constraint checker as well
        self._max_bounds = bounds
        self.bound_constraints_checker.max_bounds = bounds
    def get_optimal_solutions(self, max_number=None):
        """Return the optimal solutions.

        All local optima are global optima in this problem.

        Parameters
        ----------
        max_number : int, optional
            Potentially restrict the number of optima.

        Returns
        -------
        optima : list of Individual

        """
        # shortcuts
        ceil = math.ceil
        floor = math.floor
        log = math.log
        min_bounds = self.min_bounds
        max_bounds = self.max_bounds
        # find out how many minima there are in the feasible space
        # first transform limits to "log"-space
        transformed_min_bounds = [10.0 * log(min_bound) for min_bound in min_bounds]
        transformed_max_bounds = [10.0 * log(max_bound) for max_bound in max_bounds]
        # find the multiples of 2*pi that are closest to the bounds
        min_counters = []
        for transformed_min_bound in transformed_min_bounds:
            min_counters.append(ceil((transformed_min_bound - HALF_PI) / TWO_PI))
        max_counters = []
        for transformed_max_bound in transformed_max_bounds:
            max_counters.append(floor((transformed_max_bound + HALF_PI) / TWO_PI))
        # optima are at every multiple in between
        ranges = []
        for min_counter, max_counter in zip(min_counters, max_counters):
            ranges.append(list(range(int(min_counter), int(max_counter) + 1)))
        optima = []
        if max_number is None:
            max_number = float("inf")
        # build cross product of all dimensions
        for position in itertools.product(*ranges):
            opt = Individual()
            # carry out inverse transformation: x = exp((2*pi*k + pi/2) / 10)
            # puts sin(10 * log(x)) at its maximum, i.e. at a minimum of f
            opt.phenome = [math.exp((TWO_PI * pos + HALF_PI) / 10.0) for pos in position]
            optima.append(opt)
            if len(optima) >= max_number:
                break
        return optima
    # every local optimum is a global one in this problem
    get_locally_optimal_solutions = get_optimal_solutions
class WeierstrassFunction:
    """A configurable Weierstrass function."""
    def __init__(self, a=0.5, b=3.0, k_max=20):
        # a: amplitude decay, b: frequency growth, k_max: number of terms - 1
        self.a = a
        self.b = b
        self.k_max = k_max
    def __call__(self, phenome):
        """Evaluate the function."""
        a = self.a
        b = self.b
        k_max = self.k_max
        cos = math.cos
        n = len(phenome)
        # double sum over variables and frequencies
        sum1 = 0.0
        for i in range(n):
            for k in range(k_max + 1):
                sum1 += a ** k * cos(TWO_PI * b ** k * (phenome[i] + 0.5))
        # constant correction term, applied once per variable
        sum2 = 0.0
        for k in range(k_max + 1):
            sum2 += a ** k * cos(TWO_PI * b ** k * 0.5)
        return sum1 - n * sum2
class Weierstrass(TestProblem):
    """Weierstrass' test problem.

    No bound constraints are pre-defined for this problem.

    """
    def __init__(self, num_variables=10,
                 a=0.5,
                 b=3.0,
                 k_max=20,
                 phenome_preprocessor=None,
                 **kwargs):
        self.num_variables = num_variables
        # verify that candidate solutions are sequences of the right length
        checker = SequenceChecker(num_variables,
                                  previous_preprocessor=phenome_preprocessor)
        TestProblem.__init__(self, WeierstrassFunction(a, b, k_max),
                             phenome_preprocessor=checker,
                             **kwargs)
        self.is_deterministic = True
        self.do_maximize = False
    def get_optimal_solutions(self, max_number=None):
        """Return the global optimum (the origin)."""
        return [Individual([0.0] * self.num_variables)]
| 31.871452
| 108
| 0.596212
|
acfdb60ab0b9bc3d005ce834d055c35ee8134f10
| 6,267
|
py
|
Python
|
tests/test_masked_dice_loss.py
|
dyollb/MONAI
|
9084c452c48095c82c71d4391b3684006e5a3c56
|
[
"Apache-2.0"
] | 2,971
|
2019-10-16T23:53:16.000Z
|
2022-03-31T20:58:24.000Z
|
tests/test_masked_dice_loss.py
|
dyollb/MONAI
|
9084c452c48095c82c71d4391b3684006e5a3c56
|
[
"Apache-2.0"
] | 2,851
|
2020-01-10T16:23:44.000Z
|
2022-03-31T22:14:53.000Z
|
tests/test_masked_dice_loss.py
|
dyollb/MONAI
|
9084c452c48095c82c71d4391b3684006e5a3c56
|
[
"Apache-2.0"
] | 614
|
2020-01-14T19:18:01.000Z
|
2022-03-31T14:06:14.000Z
|
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.losses import MaskedDiceLoss
# Each case is: (MaskedDiceLoss constructor kwargs, forward() kwargs,
# expected loss value).  Cases without a "mask" entry exercise the
# unmasked code path.
TEST_CASES = [
    [  # shape: (1, 1, 2, 2), (1, 1, 2, 2)
        {"include_background": True, "sigmoid": True, "smooth_nr": 1e-6, "smooth_dr": 1e-6},
        {
            "input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]),
            "target": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]]),
            "mask": torch.tensor([[[[0.0, 0.0], [1.0, 1.0]]]]),
        },
        0.500,
    ],
    [  # shape: (2, 1, 2, 2), (2, 1, 2, 2)
        {"include_background": True, "sigmoid": True, "smooth_nr": 1e-4, "smooth_dr": 1e-4},
        {
            "input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]], [[[1.0, -1.0], [-1.0, 1.0]]]]),
            "target": torch.tensor([[[[1.0, 1.0], [1.0, 1.0]]], [[[1.0, 0.0], [1.0, 0.0]]]]),
            "mask": torch.tensor([[[[1.0, 1.0], [1.0, 1.0]]], [[[1.0, 1.0], [0.0, 0.0]]]]),
        },
        0.422969,
    ],
    [  # shape: (2, 2, 3), (2, 1, 3)
        {"include_background": False, "to_onehot_y": True, "smooth_nr": 0, "smooth_dr": 0},
        {
            "input": torch.tensor([[[1.0, 1.0, 0.0], [0.0, 0.0, 1.0]], [[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]]]),
            "target": torch.tensor([[[0.0, 0.0, 1.0]], [[0.0, 1.0, 0.0]]]),
            "mask": torch.tensor([[[1.0, 1.0, 1.0]], [[0.0, 1.0, 0.0]]]),
        },
        0.0,
    ],
    [  # shape: (2, 2, 3), (2, 1, 3)
        {"include_background": True, "to_onehot_y": True, "sigmoid": True, "smooth_nr": 1e-4, "smooth_dr": 1e-4},
        {
            "input": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]),
            "target": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]),
            "mask": torch.tensor([[[1.0, 1.0, 0.0]]]),
        },
        0.47033,
    ],
    [  # shape: (2, 2, 3), (2, 1, 3)
        {
            "include_background": True,
            "to_onehot_y": True,
            "sigmoid": True,
            "reduction": "none",
            "smooth_nr": 1e-4,
            "smooth_dr": 1e-4,
        },
        {
            "input": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]),
            "target": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]),
        },
        [[0.296529, 0.415136], [0.599976, 0.428559]],
    ],
    [  # shape: (2, 2, 3), (2, 1, 3)
        {"include_background": True, "to_onehot_y": True, "softmax": True, "smooth_nr": 1e-4, "smooth_dr": 1e-4},
        {
            "input": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]),
            "target": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]),
        },
        0.383713,
    ],
    [  # shape: (2, 2, 3), (2, 1, 3)
        {
            "include_background": True,
            "to_onehot_y": True,
            "softmax": True,
            "reduction": "sum",
            "smooth_nr": 1e-4,
            "smooth_dr": 1e-4,
        },
        {
            "input": torch.tensor([[[-1.0, 0.0, 1.0], [1.0, 0.0, -1.0]], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]]),
            "target": torch.tensor([[[1.0, 0.0, 0.0]], [[1.0, 1.0, 0.0]]]),
        },
        1.534853,
    ],
    [  # shape: (1, 1, 2, 2), (1, 1, 2, 2)
        {"include_background": True, "sigmoid": True, "smooth_nr": 1e-6, "smooth_dr": 1e-6},
        {"input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), "target": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])},
        0.307576,
    ],
    [  # shape: (1, 1, 2, 2), (1, 1, 2, 2)
        {"include_background": True, "sigmoid": True, "squared_pred": True, "smooth_nr": 1e-5, "smooth_dr": 1e-5},
        {"input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), "target": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])},
        0.178337,
    ],
    [  # shape: (1, 1, 2, 2), (1, 1, 2, 2)
        {"include_background": True, "sigmoid": True, "jaccard": True, "smooth_nr": 1e-5, "smooth_dr": 1e-5},
        {"input": torch.tensor([[[[1.0, -1.0], [-1.0, 1.0]]]]), "target": torch.tensor([[[[1.0, 0.0], [1.0, 1.0]]]])},
        0.470451,
    ],
]
class TestDiceLoss(unittest.TestCase):
    @parameterized.expand(TEST_CASES)
    def test_shape(self, input_param, input_data, expected_val):
        # compute the masked loss and compare against the precomputed value
        loss_fn = MaskedDiceLoss(**input_param)
        result = loss_fn.forward(**input_data)
        np.testing.assert_allclose(result.detach().cpu().numpy(), expected_val, rtol=1e-5)
    def test_ill_shape(self):
        # mismatched input/target shapes must be rejected
        loss_fn = MaskedDiceLoss()
        with self.assertRaisesRegex(AssertionError, ""):
            loss_fn.forward(torch.ones((1, 2, 3)), torch.ones((4, 5, 6)))
    def test_ill_opts(self):
        # sigmoid and softmax are mutually exclusive
        with self.assertRaisesRegex(ValueError, ""):
            MaskedDiceLoss(sigmoid=True, softmax=True)
        ones_input = torch.ones((1, 1, 3))
        ones_target = torch.ones((1, 1, 3))
        # unsupported reduction modes must be rejected
        with self.assertRaisesRegex(ValueError, ""):
            MaskedDiceLoss(reduction="unknown")(ones_input, ones_target)
        with self.assertRaisesRegex(ValueError, ""):
            MaskedDiceLoss(reduction=None)(ones_input, ones_target)
    def test_input_warnings(self):
        # single-channel inputs make these options no-ops; a warning is due
        ones_input = torch.ones((1, 1, 3))
        ones_target = torch.ones((1, 1, 3))
        for options in ({"include_background": False},
                        {"softmax": True},
                        {"to_onehot_y": True}):
            with self.assertWarns(Warning):
                MaskedDiceLoss(**options).forward(ones_input, ones_target)
# Run the test suite when this module is executed directly.
if __name__ == "__main__":
    unittest.main()
| 41.78
| 118
| 0.505345
|
acfdb706ff8cf0b5d0922009950ed7e76e33acb1
| 811
|
py
|
Python
|
ex065.py
|
isabellahenriques/Python_Estudos
|
a6e8b829d01a7c9fa34223e096f6389c81f2085c
|
[
"MIT"
] | null | null | null |
ex065.py
|
isabellahenriques/Python_Estudos
|
a6e8b829d01a7c9fa34223e096f6389c81f2085c
|
[
"MIT"
] | null | null | null |
ex065.py
|
isabellahenriques/Python_Estudos
|
a6e8b829d01a7c9fa34223e096f6389c81f2085c
|
[
"MIT"
] | null | null | null |
'''Read several integers from the keyboard.  At the end of execution, show
the average of all values and which were the largest and smallest values
read.  The program asks the user whether or not to keep entering values.'''
resp = "S"
soma = quant = media = maior = menor = 0
while resp == "S":
    # NOTE: a non-numeric entry still raises ValueError, as in the original
    numero = int(input("Digite um número: "))
    soma = soma + numero
    quant = quant + 1
    if quant == 1:
        # the first value initializes both extremes
        maior = menor = numero
    else:
        if numero > maior:
            maior = numero
        if numero < menor:
            menor = numero
    # Keep asking until a non-empty answer is given: the original code did
    # resposta[0] unconditionally and crashed with IndexError when the user
    # just pressed Enter.
    resposta = input("Quer continuar? [S/N] ").upper().strip()
    while not resposta:
        resposta = input("Quer continuar? [S/N] ").upper().strip()
    resp = resposta[0]
# quant is always >= 1 here because the loop runs at least once
media = soma / quant
print("Você digitou {} números e a média foi {}".format(quant, media))
print("O maior foi {} e o menor foi {}".format(maior, menor))
| 36.863636
| 87
| 0.636252
|
acfdb70e5a5b6f2e0381705fbba3b362f49ee0b3
| 31,497
|
py
|
Python
|
swift/container/sync.py
|
JMD110/swift
|
58ddca8fa5ccb99447f7dcc0745cc619449a5513
|
[
"Apache-2.0"
] | 1
|
2022-03-07T06:11:06.000Z
|
2022-03-07T06:11:06.000Z
|
swift/container/sync.py
|
JMD110/swift
|
58ddca8fa5ccb99447f7dcc0745cc619449a5513
|
[
"Apache-2.0"
] | null | null | null |
swift/container/sync.py
|
JMD110/swift
|
58ddca8fa5ccb99447f7dcc0745cc619449a5513
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import errno
import os
import uuid
from swift import gettext_ as _
from time import ctime, time
from random import choice, random
from struct import unpack_from
from eventlet import sleep, Timeout
from six.moves.urllib.parse import urlparse
import swift.common.db
from swift.common.db import DatabaseConnectionError
from swift.container.backend import ContainerBroker
from swift.container.sync_store import ContainerSyncStore
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.internal_client import (
delete_object, put_object, head_object,
InternalClient, UnexpectedResponse)
from swift.common.exceptions import ClientException
from swift.common.ring import Ring
from swift.common.ring.utils import is_local_device
from swift.common.swob import normalize_etag
from swift.common.utils import (
clean_content_type, config_true_value,
FileLikeIter, get_logger, hash_path, quote, validate_sync_to,
whataremyips, Timestamp, decode_timestamps)
from swift.common.daemon import Daemon
from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND, HTTP_CONFLICT
from swift.common.wsgi import ConfigString
from swift.common.middleware.versioned_writes.object_versioning import (
SYSMETA_VERSIONS_CONT, SYSMETA_VERSIONS_SYMLINK)
# The default internal client config body is to support upgrades without
# requiring deployment of the new /etc/swift/internal-client.conf
ic_conf_body = """
[DEFAULT]
# swift_dir = /etc/swift
# user = swift
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =
[pipeline:main]
pipeline = catch_errors proxy-logging cache symlink proxy-server
[app:proxy-server]
use = egg:swift#proxy
account_autocreate = true
# See proxy-server.conf-sample for options
[filter:symlink]
use = egg:swift#symlink
# See proxy-server.conf-sample for options
[filter:cache]
use = egg:swift#memcache
# See proxy-server.conf-sample for options
[filter:proxy-logging]
use = egg:swift#proxy_logging
[filter:catch_errors]
use = egg:swift#catch_errors
# See proxy-server.conf-sample for options
""".lstrip()
class ContainerSync(Daemon):
"""
Daemon to sync syncable containers.
This is done by scanning the local devices for container databases and
checking for x-container-sync-to and x-container-sync-key metadata values.
If they exist, newer rows since the last sync will trigger PUTs or DELETEs
to the other container.
The actual syncing is slightly more complicated to make use of the three
(or number-of-replicas) main nodes for a container without each trying to
do the exact same work but also without missing work if one node happens to
be down.
Two sync points are kept per container database. All rows between the two
sync points trigger updates. Any rows newer than both sync points cause
updates depending on the node's position for the container (primary nodes
do one third, etc. depending on the replica count of course). After a sync
run, the first sync point is set to the newest ROWID known and the second
sync point is set to newest ROWID for which all updates have been sent.
An example may help. Assume replica count is 3 and perfectly matching
ROWIDs starting at 1.
First sync run, database has 6 rows:
* SyncPoint1 starts as -1.
* SyncPoint2 starts as -1.
* No rows between points, so no "all updates" rows.
* Six rows newer than SyncPoint1, so a third of the rows are sent
by node 1, another third by node 2, remaining third by node 3.
* SyncPoint1 is set as 6 (the newest ROWID known).
* SyncPoint2 is left as -1 since no "all updates" rows were synced.
Next sync run, database has 12 rows:
* SyncPoint1 starts as 6.
* SyncPoint2 starts as -1.
* The rows between -1 and 6 all trigger updates (most of which
should short-circuit on the remote end as having already been
done).
* Six more rows newer than SyncPoint1, so a third of the rows are
sent by node 1, another third by node 2, remaining third by node
3.
* SyncPoint1 is set as 12 (the newest ROWID known).
* SyncPoint2 is set as 6 (the newest "all updates" ROWID).
In this way, under normal circumstances each node sends its share of
updates each run and just sends a batch of older updates to ensure nothing
was missed.
:param conf: The dict of configuration values from the [container-sync]
section of the container-server.conf
:param container_ring: If None, the <swift_dir>/container.ring.gz will be
loaded. This is overridden by unit tests.
"""
    def __init__(self, conf, container_ring=None, logger=None):
        """
        :param conf: configuration dict from the [container-sync] section
        :param container_ring: optional pre-loaded container ring (used by
                               unit tests); loaded from swift_dir otherwise
        :param logger: optional logger; one is created from conf if omitted
        """
        #: The dict of configuration values from the [container-sync] section
        #: of the container-server.conf.
        self.conf = conf
        #: Logger to use for container-sync log lines.
        self.logger = logger or get_logger(conf, log_route='container-sync')
        #: Path to the local device mount points.
        self.devices = conf.get('devices', '/srv/node')
        #: Indicates whether mount points should be verified as actual mount
        #: points (normally true, false for tests and SAIO).
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        #: Minimum time between full scans. This is to keep the daemon from
        #: running wild on near empty systems.
        self.interval = int(conf.get('interval', 300))
        #: Maximum amount of time to spend syncing a container before moving on
        #: to the next one. If a container sync hasn't finished in this time,
        #: it'll just be resumed next scan.
        self.container_time = int(conf.get('container_time', 60))
        #: ContainerSyncCluster instance for validating sync-to values.
        self.realms_conf = ContainerSyncRealms(
            os.path.join(
                conf.get('swift_dir', '/etc/swift'),
                'container-sync-realms.conf'),
            self.logger)
        #: The list of hosts we're allowed to send syncs to. This can be
        #: overridden by data in self.realms_conf
        self.allowed_sync_hosts = [
            h.strip()
            for h in conf.get('allowed_sync_hosts', '127.0.0.1').split(',')
            if h.strip()]
        #: Optional HTTP proxies (comma separated) used for outgoing requests.
        self.http_proxies = [
            a.strip()
            for a in conf.get('sync_proxy', '').split(',')
            if a.strip()]
        #: ContainerSyncStore instance for iterating over synced containers
        self.sync_store = ContainerSyncStore(self.devices,
                                             self.logger,
                                             self.mount_check)
        #: Number of containers with sync turned on that were successfully
        #: synced.
        self.container_syncs = 0
        #: Number of successful DELETEs triggered.
        self.container_deletes = 0
        #: Number of successful PUTs triggered.
        self.container_puts = 0
        #: Number of containers whose sync has been turned off, but
        #: are not yet cleared from the sync store.
        self.container_skips = 0
        #: Number of containers that had a failure of some type.
        self.container_failures = 0
        #: Per container stats. These are collected per container.
        #: puts - the number of puts that were done for the container
        #: deletes - the number of deletes that were done for the container
        #: bytes - the total number of bytes transferred per the container
        self.container_stats = collections.defaultdict(int)
        self.container_stats.clear()
        #: Time of last stats report.
        self.reported = time()
        self.swift_dir = conf.get('swift_dir', '/etc/swift')
        #: swift.common.ring.Ring for locating containers.
        self.container_ring = container_ring or Ring(self.swift_dir,
                                                     ring_name='container')
        bind_ip = conf.get('bind_ip', '0.0.0.0')
        self._myips = whataremyips(bind_ip)
        self._myport = int(conf.get('bind_port', 6201))
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))
        self.conn_timeout = float(conf.get('conn_timeout', 5))
        request_tries = int(conf.get('request_tries') or 3)
        # fall back to the built-in internal client config (ic_conf_body)
        # when no explicit config path was provided, to support upgrades
        internal_client_conf_path = conf.get('internal_client_conf_path')
        if not internal_client_conf_path:
            self.logger.warning(
                _('Configuration option internal_client_conf_path not '
                  'defined. Using default configuration, See '
                  'internal-client.conf-sample for options'))
            internal_client_conf = ConfigString(ic_conf_body)
        else:
            internal_client_conf = internal_client_conf_path
        try:
            self.swift = InternalClient(
                internal_client_conf, 'Swift Container Sync', request_tries)
        except (OSError, IOError) as err:
            # only a missing config file is converted to SystemExit;
            # any other I/O failure is re-raised as-is
            if err.errno != errno.ENOENT and \
                    not str(err).endswith(' not found'):
                raise
            raise SystemExit(
                _('Unable to load internal client from config: '
                  '%(conf)r (%(error)s)')
                % {'conf': internal_client_conf_path, 'error': err})
def run_forever(self, *args, **kwargs):
"""
Runs container sync scans until stopped.
"""
sleep(random() * self.interval)
while True:
begin = time()
for path in self.sync_store.synced_containers_generator():
self.container_stats.clear()
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
elapsed = time() - begin
if elapsed < self.interval:
sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""
Runs a single container sync scan.
"""
self.logger.info(_('Begin container sync "once" mode'))
begin = time()
for path in self.sync_store.synced_containers_generator():
self.container_sync(path)
if time() - self.reported >= 3600: # once an hour
self.report()
self.report()
elapsed = time() - begin
self.logger.info(
_('Container sync "once" mode completed: %.02fs'), elapsed)
def report(self):
"""
Writes a report of the stats to the logger and resets the stats for the
next report.
"""
self.logger.info(
_('Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s '
'puts], %(skip)s skipped, %(fail)s failed'),
{'time': ctime(self.reported),
'sync': self.container_syncs,
'delete': self.container_deletes,
'put': self.container_puts,
'skip': self.container_skips,
'fail': self.container_failures})
self.reported = time()
self.container_syncs = 0
self.container_deletes = 0
self.container_puts = 0
self.container_skips = 0
self.container_failures = 0
    def container_report(self, start, end, sync_point1, sync_point2, info,
                         max_row):
        """Log a per-container summary of the sync pass that just ran."""
        self.logger.info(_('Container sync report: %(container)s, '
                           'time window start: %(start)s, '
                           'time window end: %(end)s, '
                           'puts: %(puts)s, '
                           'posts: %(posts)s, '
                           'deletes: %(deletes)s, '
                           'bytes: %(bytes)s, '
                           'sync_point1: %(point1)s, '
                           'sync_point2: %(point2)s, '
                           'total_rows: %(total)s'),
                         {'container': '%s/%s' % (info['account'],
                                                  info['container']),
                          'start': start,
                          'end': end,
                          'puts': self.container_stats['puts'],
                          # POST propagation is not implemented, always 0
                          'posts': 0,
                          'deletes': self.container_stats['deletes'],
                          'bytes': self.container_stats['bytes'],
                          'point1': sync_point1,
                          'point2': sync_point2,
                          'total': max_row})
    def container_sync(self, path):
        """
        Checks the given path for a container database, determines if syncing
        is turned on for that database and, if so, sends any updates to the
        other container.

        :param path: the path to a container db
        """
        broker = None
        try:
            broker = ContainerBroker(path, logger=self.logger)
            # The path we pass to the ContainerBroker is a real path of
            # a container DB. If we get here, however, it means that this
            # path is linked from the sync_containers dir. In rare cases
            # of race or processes failures the link can be stale and
            # the get_info below will raise a DB doesn't exist exception
            # In this case we remove the stale link and raise an error
            # since in most cases the db should be there.
            try:
                info = broker.get_info()
            except DatabaseConnectionError as db_err:
                if str(db_err).endswith("DB doesn't exist"):
                    self.sync_store.remove_synced_container(broker)
                raise
            # x is the partition number; it is not needed here
            x, nodes = self.container_ring.get_nodes(info['account'],
                                                     info['container'])
            # find this node's position (ordinal) among the replicas;
            # if the container is not local to this node, do nothing
            for ordinal, node in enumerate(nodes):
                if is_local_device(self._myips, self._myport,
                                   node['ip'], node['port']):
                    break
            else:
                return
            if broker.metadata.get(SYSMETA_VERSIONS_CONT):
                self.container_skips += 1
                self.logger.increment('skips')
                self.logger.warning('Skipping container %s/%s with '
                                    'object versioning configured' % (
                                        info['account'], info['container']))
                return
            if not broker.is_deleted():
                sync_to = None
                user_key = None
                sync_point1 = info['x_container_sync_point1']
                sync_point2 = info['x_container_sync_point2']
                for key, (value, timestamp) in broker.metadata.items():
                    if key.lower() == 'x-container-sync-to':
                        sync_to = value
                    elif key.lower() == 'x-container-sync-key':
                        user_key = value
                # sync is only enabled when both metadata values are present
                if not sync_to or not user_key:
                    self.container_skips += 1
                    self.logger.increment('skips')
                    return
                err, sync_to, realm, realm_key = validate_sync_to(
                    sync_to, self.allowed_sync_hosts, self.realms_conf)
                if err:
                    self.logger.info(
                        _('ERROR %(db_file)s: %(validate_sync_to_err)s'),
                        {'db_file': str(broker),
                         'validate_sync_to_err': err})
                    self.container_failures += 1
                    self.logger.increment('failures')
                    return
                start_at = time()
                stop_at = start_at + self.container_time
                next_sync_point = None
                sync_stage_time = start_at
                try:
                    # Stage 1: rows between sync_point2 and sync_point1 were
                    # already attempted by some node; retry them all here.
                    while time() < stop_at and sync_point2 < sync_point1:
                        rows = broker.get_items_since(sync_point2, 1)
                        if not rows:
                            break
                        row = rows[0]
                        if row['ROWID'] > sync_point1:
                            break
                        # This node will only initially sync out one third
                        # of the objects (if 3 replicas, 1/4 if 4, etc.)
                        # and will skip problematic rows as needed in case of
                        # faults.
                        # This section will attempt to sync previously skipped
                        # rows in case the previous attempts by any of the
                        # nodes didn't succeed.
                        if not self.container_sync_row(
                                row, sync_to, user_key, broker, info, realm,
                                realm_key):
                            if not next_sync_point:
                                next_sync_point = sync_point2
                        sync_point2 = row['ROWID']
                        broker.set_x_container_sync_points(None, sync_point2)
                    if next_sync_point:
                        broker.set_x_container_sync_points(None,
                                                           next_sync_point)
                    else:
                        next_sync_point = sync_point2
                    sync_stage_time = time()
                    # Stage 2: rows newer than sync_point1; this node only
                    # handles its share, chosen by hashing the object name.
                    while sync_stage_time < stop_at:
                        rows = broker.get_items_since(sync_point1, 1)
                        if not rows:
                            break
                        row = rows[0]
                        key = hash_path(info['account'], info['container'],
                                        row['name'], raw_digest=True)
                        # This node will only initially sync out one third of
                        # the objects (if 3 replicas, 1/4 if 4, etc.).
                        # It'll come back around to the section above
                        # and attempt to sync previously skipped rows in case
                        # the other nodes didn't succeed or in case it failed
                        # to do so the first time.
                        if unpack_from('>I', key)[0] % \
                                len(nodes) == ordinal:
                            self.container_sync_row(
                                row, sync_to, user_key, broker, info, realm,
                                realm_key)
                        sync_point1 = row['ROWID']
                        broker.set_x_container_sync_points(sync_point1, None)
                        sync_stage_time = time()
                    self.container_syncs += 1
                    self.logger.increment('syncs')
                finally:
                    self.container_report(start_at, sync_stage_time,
                                          sync_point1,
                                          next_sync_point,
                                          info, broker.get_max_row())
        except (Exception, Timeout):
            self.container_failures += 1
            self.logger.increment('failures')
            self.logger.exception(_('ERROR Syncing %s'),
                                  broker if broker else path)
def _update_sync_to_headers(self, name, sync_to, user_key,
realm, realm_key, method, headers):
"""
Updates container sync headers
:param name: The name of the object
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:param method: HTTP method to create sig with
:param headers: headers to update with container sync headers
"""
if realm and realm_key:
nonce = uuid.uuid4().hex
path = urlparse(sync_to).path + '/' + quote(name)
sig = self.realms_conf.get_sig(method, path,
headers.get('x-timestamp', 0),
nonce, realm_key,
user_key)
headers['x-container-sync-auth'] = '%s %s %s' % (realm,
nonce,
sig)
else:
headers['x-container-sync-key'] = user_key
def _object_in_remote_container(self, name, sync_to, user_key,
realm, realm_key, timestamp):
"""
Performs head object on remote to eliminate extra remote put and
local get object calls
:param name: The name of the object in the updated row in the local
database triggering the sync update.
:param sync_to: The URL to the remote container.
:param user_key: The X-Container-Sync-Key to use when sending requests
to the other container.
:param realm: The realm from self.realms_conf, if there is one.
If None, fallback to using the older allowed_sync_hosts
way of syncing.
:param realm_key: The realm key from self.realms_conf, if there
is one. If None, fallback to using the older
allowed_sync_hosts way of syncing.
:param timestamp: last modified date of local object
:returns: True if object already exists in remote
"""
headers = {'x-timestamp': timestamp.internal}
self._update_sync_to_headers(name, sync_to, user_key, realm,
realm_key, 'HEAD', headers)
try:
metadata, _ = head_object(sync_to, name=name,
headers=headers,
proxy=self.select_http_proxy(),
logger=self.logger,
retries=0)
remote_ts = Timestamp(metadata.get('x-timestamp', 0))
self.logger.debug("remote obj timestamp %s local obj %s" %
(timestamp.internal, remote_ts.internal))
if timestamp <= remote_ts:
return True
# Object in remote should be updated
return False
except ClientException as http_err:
# Object not in remote
if http_err.http_status == 404:
return False
raise http_err
    def container_sync_row(self, row, sync_to, user_key, broker, info,
                           realm, realm_key):
        """
        Sends the update the row indicates to the sync_to container.
        Update can be either delete or put.

        :param row: The updated row in the local database triggering the sync
                    update.
        :param sync_to: The URL to the remote container.
        :param user_key: The X-Container-Sync-Key to use when sending requests
                         to the other container.
        :param broker: The local container database broker.
        :param info: The get_info result from the local container database
                     broker.
        :param realm: The realm from self.realms_conf, if there is one.
                      If None, fallback to using the older allowed_sync_hosts
                      way of syncing.
        :param realm_key: The realm key from self.realms_conf, if there
                          is one. If None, fallback to using the older
                          allowed_sync_hosts way of syncing.
        :returns: True on success
        """
        try:
            start_time = time()
            # extract last modified time from the created_at value
            ts_data, ts_ctype, ts_meta = decode_timestamps(
                row['created_at'])
            if row['deleted']:
                # when sync'ing a deleted object, use ts_data - this is the
                # timestamp of the source tombstone
                try:
                    headers = {'x-timestamp': ts_data.internal}
                    self._update_sync_to_headers(row['name'], sync_to,
                                                 user_key, realm, realm_key,
                                                 'DELETE', headers)
                    delete_object(sync_to, name=row['name'], headers=headers,
                                  proxy=self.select_http_proxy(),
                                  logger=self.logger,
                                  timeout=self.conn_timeout)
                except ClientException as err:
                    # a remote copy that is already gone (or newer) still
                    # counts as a successful delete
                    if err.http_status not in (
                            HTTP_NOT_FOUND, HTTP_CONFLICT):
                        raise
                self.container_deletes += 1
                self.container_stats['deletes'] += 1
                self.logger.increment('deletes')
                self.logger.timing_since('deletes.timing', start_time)
            else:
                # when sync'ing a live object, use ts_meta - this is the time
                # at which the source object was last modified by a PUT or POST
                if self._object_in_remote_container(row['name'],
                                                    sync_to, user_key, realm,
                                                    realm_key, ts_meta):
                    return True
                exc = None
                # look up for the newest one; the symlink=get query-string has
                # no effect unless symlinks are enabled in the internal client
                # in which case it ensures that symlink objects retain their
                # symlink property when sync'd.
                headers_out = {'X-Newest': True,
                               'X-Backend-Storage-Policy-Index':
                               str(info['storage_policy_index'])}
                try:
                    source_obj_status, headers, body = \
                        self.swift.get_object(info['account'],
                                              info['container'], row['name'],
                                              headers=headers_out,
                                              acceptable_statuses=(2, 4),
                                              params={'symlink': 'get'})
                except (Exception, UnexpectedResponse, Timeout) as err:
                    headers = {}
                    body = None
                    exc = err
                # skip object_versioning links; this is in case the container
                # metadata is out of date
                if headers.get(SYSMETA_VERSIONS_SYMLINK):
                    self.logger.info(
                        'Skipping versioning symlink %s/%s/%s ' % (
                            info['account'], info['container'],
                            row['name']))
                    return True
                timestamp = Timestamp(headers.get('x-timestamp', 0))
                # the local GET must return a copy at least as new as the
                # row that triggered this sync; otherwise fail the row
                if timestamp < ts_meta:
                    if exc:
                        raise exc
                    raise Exception(
                        _('Unknown exception trying to GET: '
                          '%(account)r %(container)r %(object)r'),
                        {'account': info['account'],
                         'container': info['container'],
                         'object': row['name']})
                for key in ('date', 'last-modified'):
                    if key in headers:
                        del headers[key]
                if 'etag' in headers:
                    headers['etag'] = normalize_etag(headers['etag'])
                if 'content-type' in headers:
                    headers['content-type'] = clean_content_type(
                        headers['content-type'])
                self._update_sync_to_headers(row['name'], sync_to, user_key,
                                             realm, realm_key, 'PUT', headers)
                put_object(sync_to, name=row['name'], headers=headers,
                           contents=FileLikeIter(body),
                           proxy=self.select_http_proxy(), logger=self.logger,
                           timeout=self.conn_timeout)
                self.container_puts += 1
                self.container_stats['puts'] += 1
                self.container_stats['bytes'] += row['size']
                self.logger.increment('puts')
                self.logger.timing_since('puts.timing', start_time)
        except ClientException as err:
            if err.http_status == HTTP_UNAUTHORIZED:
                self.logger.info(
                    _('Unauth %(sync_from)r => %(sync_to)r'),
                    {'sync_from': '%s/%s' %
                     (quote(info['account']), quote(info['container'])),
                     'sync_to': sync_to})
            elif err.http_status == HTTP_NOT_FOUND:
                self.logger.info(
                    _('Not found %(sync_from)r => %(sync_to)r \
                    - object %(obj_name)r'),
                    {'sync_from': '%s/%s' %
                     (quote(info['account']), quote(info['container'])),
                     'sync_to': sync_to, 'obj_name': row['name']})
            else:
                self.logger.exception(
                    _('ERROR Syncing %(db_file)s %(row)s'),
                    {'db_file': str(broker), 'row': row})
            self.container_failures += 1
            self.logger.increment('failures')
            return False
        except (Exception, Timeout):
            self.logger.exception(
                _('ERROR Syncing %(db_file)s %(row)s'),
                {'db_file': str(broker), 'row': row})
            self.container_failures += 1
            self.logger.increment('failures')
            return False
        return True
def select_http_proxy(self):
return choice(self.http_proxies) if self.http_proxies else None
| 46.455752
| 79
| 0.546623
|
acfdb97758e08f0566c58cfb1794553570ef0a54
| 3,624
|
py
|
Python
|
python/test/lib/util_test.py
|
andreatulimiero/scion
|
80446907061356863c03db7ec8b9b3b41944c01e
|
[
"Apache-2.0"
] | 1
|
2021-05-27T12:40:48.000Z
|
2021-05-27T12:40:48.000Z
|
python/test/lib/util_test.py
|
andreatulimiero/scion
|
80446907061356863c03db7ec8b9b3b41944c01e
|
[
"Apache-2.0"
] | 1
|
2019-06-26T06:38:40.000Z
|
2019-06-26T06:38:40.000Z
|
python/test/lib/util_test.py
|
andreatulimiero/scion
|
80446907061356863c03db7ec8b9b3b41944c01e
|
[
"Apache-2.0"
] | 1
|
2020-07-06T02:50:04.000Z
|
2020-07-06T02:50:04.000Z
|
# Copyright 2015 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`lib_util_test` --- lib.util unit tests
=====================================================
"""
# Stdlib
import builtins
from unittest.mock import patch, mock_open
# External packages
import nose
import nose.tools as ntools
import yaml
# SCION
from python.lib.errors import (
SCIONIOError,
SCIONYAMLError,
)
from python.lib.util import (
load_yaml_file,
write_file,
)
class TestWriteFile(object):
"""
Unit tests for lib.util.write_file
"""
@patch("lib.util.os.rename", autospec=True)
@patch.object(builtins, 'open', mock_open())
@patch("lib.util.os.makedirs", autospec=True)
@patch("lib.util.os.path.dirname", autospec=True)
def test_basic(self, dirname, makedirs, rename):
dirname.return_value = "Dir_Name"
# Call
write_file("File_Path", "Text")
# Tests
dirname.assert_called_once_with("File_Path")
makedirs.assert_called_once_with("Dir_Name", exist_ok=True)
builtins.open.assert_called_once_with("File_Path.new", 'w')
builtins.open.return_value.write.assert_called_once_with("Text")
rename.assert_called_once_with("File_Path.new", "File_Path")
@patch("lib.util.os.makedirs", autospec=True)
def test_mkdir_error(self, mkdir):
mkdir.side_effect = FileNotFoundError
# Call
ntools.assert_raises(SCIONIOError, write_file, "File_Path", "Text")
@patch.object(builtins, 'open', mock_open())
@patch("lib.util.os.makedirs", autospec=True)
def test_file_error(self, mkdir):
builtins.open.side_effect = PermissionError
# Call
ntools.assert_raises(SCIONIOError, write_file, "File_Path", "Text")
@patch("lib.util.os.rename", autospec=True)
@patch.object(builtins, 'open', mock_open())
@patch("lib.util.os.makedirs", autospec=True)
def test_rename_error(self, mkdir, rename):
rename.side_effect = PermissionError
# Call
ntools.assert_raises(SCIONIOError, write_file, "File_Path", "Text")
class Loader(object):
"""
Helper class for load_yaml_file tests.
"""
@patch.object(builtins, 'open', mock_open())
def _file_error(self, target):
builtins.open.side_effect = IsADirectoryError
ntools.assert_raises(SCIONIOError, target, "File_Path")
@patch.object(builtins, 'open', mock_open())
def _check_loader_error(self, target, loader_path, excp, expected):
with patch(loader_path, autospec=True) as loader:
loader.side_effect = excp
ntools.assert_raises(expected, target, "File_Path")
class TestLoadYAMLFile(Loader):
"""
Unit tests for lib.util.load_yaml_file
"""
def test_file_error(self):
self._file_error(load_yaml_file)
def test_json_error(self):
for excp in (yaml.scanner.ScannerError, ):
yield (
self._check_loader_error, load_yaml_file, "lib.util.yaml.load",
excp, SCIONYAMLError,
)
if __name__ == "__main__":
nose.run(defaultTest=__name__)
| 32.357143
| 79
| 0.675497
|
acfdb97dd009179b73a4e960e2fef7f44691c52d
| 2,726
|
py
|
Python
|
CUB-experiments/utils.py
|
ashleylqx/AIB
|
77e418cac52f0ca5f2a7c54927468a7bd75a8fc9
|
[
"MIT"
] | 5
|
2021-05-23T13:05:45.000Z
|
2022-02-13T21:40:59.000Z
|
CUB-experiments/utils.py
|
ashleylqx/AIB
|
77e418cac52f0ca5f2a7c54927468a7bd75a8fc9
|
[
"MIT"
] | null | null | null |
CUB-experiments/utils.py
|
ashleylqx/AIB
|
77e418cac52f0ca5f2a7c54927468a7bd75a8fc9
|
[
"MIT"
] | 3
|
2021-08-11T03:23:31.000Z
|
2021-11-17T01:48:52.000Z
|
import torch
from torch import nn
from torch.autograd import Variable
import cv2
import numpy as np
from config import *
def str2bool(v):
    """
    Parse a human-friendly boolean string ('yes'/'no', 'true'/'false', ...).

    codes from : https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse

    Raises argparse.ArgumentTypeError for unrecognized values so it can be
    used directly as an argparse ``type=`` callback.
    """
    # Local import: `argparse` is not imported at module level in this file,
    # so the original error branch raised NameError instead of the intended
    # ArgumentTypeError.
    import argparse
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')
def cuda(tensor, is_cuda):
    """Move *tensor* to the GPU when *is_cuda* is truthy, else return it unchanged."""
    return tensor.cuda() if is_cuda else tensor
class Weight_EMA_Update(object):
    """
    Maintains an exponential moving average (EMA) copy of a model's weights.

    The tracked model is seeded from ``initial_state_dict``; each call to
    :meth:`update` blends the tracked weights toward a new state dict with
    factor ``decay`` (new EMA = decay * old + (1 - decay) * new).
    """

    def __init__(self, model, initial_state_dict, decay=0.999):
        self.model = model
        self.model.load_state_dict(initial_state_dict, strict=True)
        self.decay = decay

    def update(self, new_state_dict):
        """Blend *new_state_dict* into the tracked model's weights."""
        ema_state = self.model.state_dict()
        blend = self.decay
        for name in ema_state:
            ema_state[name] = blend * ema_state[name] + (1 - blend) * new_state_dict[name]
        self.model.load_state_dict(ema_state)
def postprocess_prediction(prediction, size=None):
    """
    Postprocess a saliency map by normalizing, blurring and resizing it.

    args:
        prediction: numpy array with raw saliency values
        size: original (H, W) of the image; defaults to MNIST_RESIZE
    returns:
        numpy array with the saliency map scaled to the 0-255 range
        (final dtype is float on the non-degenerate path — TODO confirm
        callers expect float rather than uint8)
    """
    # Debug output of the raw value range.
    print('max %.4f min %.4f'%(np.max(prediction), np.min(prediction))) # l1 norm is much larger than l2? but maps are similar
    # Shift so the minimum is zero before scaling.
    prediction = prediction - np.min(prediction)
    # prediction = prediction - np.mean(prediction)
    # prediction[prediction<0] = 0
    # print('max %.4f min %.4f'%(np.max(prediction), np.min(prediction))) # l1 norm is much larger than l2? but maps are similar
    # Scale to 0-255 uint8; guard against an all-zero map (division by zero).
    if np.max(prediction) != 0:
        saliency_map = (prediction/np.max(prediction) * 255).astype(np.uint8)
    else:
        saliency_map = prediction.astype(np.uint8)
    if size is None:
        size = MNIST_RESIZE
    # resize back to original size (blur first, then bicubic resize)
    saliency_map = cv2.GaussianBlur(saliency_map, (7, 7), 0)
    saliency_map = cv2.resize(saliency_map, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)
    # saliency_map = cv2.resize(saliency_map, (size[1], size[0]), interpolation=cv2.INTER_CUBIC)
    # saliency_map = cv2.GaussianBlur(saliency_map, (7, 7), 0)
    # clip again
    # saliency_map = np.clip(saliency_map, 0, 255)
    # Re-normalize after interpolation, which may have changed the max.
    if np.max(saliency_map)!=0:
        saliency_map = saliency_map.astype('float') / np.max(saliency_map) * 255.
    else:
        print('Zero saliency map.')
    return saliency_map
| 32.070588
| 128
| 0.66361
|
acfdb9a748b2454779e4007a096bb260a412477c
| 3,292
|
py
|
Python
|
mysite/mysite/settings.py
|
tailongnguyen/cryptopokemon
|
801e977d50cf2d18dfb592f6ece91ee6f7eec83e
|
[
"MIT"
] | 4
|
2018-04-20T08:17:16.000Z
|
2019-01-02T04:54:36.000Z
|
mysite/mysite/settings.py
|
tailongnguyen/ethereum-auction-blockchain
|
801e977d50cf2d18dfb592f6ece91ee6f7eec83e
|
[
"MIT"
] | null | null | null |
mysite/mysite/settings.py
|
tailongnguyen/ethereum-auction-blockchain
|
801e977d50cf2d18dfb592f6ece91ee6f7eec83e
|
[
"MIT"
] | null | null | null |
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'n!$@^200fe3w#x#&a36=7f**o$nqvy9k170*d8p#+&ha9+c%ql'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'cryptopokemon'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Bangkok'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = "/home/tailongnguyen/Study/Technology Workshops/cryptopokemon/mysite/media/"
| 26.336
| 91
| 0.69593
|
acfdb9bc2b1fc3469e6aa6bacca40d92bae5f15e
| 1,809
|
py
|
Python
|
tests/quick/se/01.hello-2T-smt/test.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 135
|
2016-10-21T03:31:49.000Z
|
2022-03-25T01:22:20.000Z
|
tests/quick/se/01.hello-2T-smt/test.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 35
|
2017-03-10T17:57:46.000Z
|
2022-02-18T17:34:16.000Z
|
tests/quick/se/01.hello-2T-smt/test.py
|
mandaltj/gem5_chips
|
b9c0c602241ffda7851c1afb32fa01f295bb98fd
|
[
"BSD-3-Clause"
] | 48
|
2016-12-08T12:03:13.000Z
|
2022-02-16T09:16:13.000Z
|
# Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Korey Sewell
# Two 'hello' workloads run as SMT threads on one CPU. Distinct PIDs let the
# syscall-emulation layer tell the threads apart; process2 is declared a
# child of process1 via ppid.
process1 = Process(cmd = 'hello', executable = binpath('hello'), pid = 100)
process2 = Process(cmd = 'hello', executable = binpath('hello'),
                   pid = 101, ppid = 100)
root.system.cpu[0].workload = [process1, process2]
| 53.205882
| 75
| 0.772803
|
acfdba0f37d364daafb1a6ac1fcac3894390409e
| 305
|
py
|
Python
|
parameter.py
|
cihan53/CRNN-Keras
|
7a7c099ab530f009f1bdbf6a844f24750ef2c7ab
|
[
"MIT"
] | null | null | null |
parameter.py
|
cihan53/CRNN-Keras
|
7a7c099ab530f009f1bdbf6a844f24750ef2c7ab
|
[
"MIT"
] | null | null | null |
parameter.py
|
cihan53/CRNN-Keras
|
7a7c099ab530f009f1bdbf6a844f24750ef2c7ab
|
[
"MIT"
] | null | null | null |
# Character set the CRNN can emit; one output class per character.
CHAR_VECTOR = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789 "
letters = [letter for letter in CHAR_VECTOR]
num_classes = len(letters) + 1  # +1 for the CTC "blank" label
# img_w, img_h = 128, 64
img_w, img_h = 320, 320  # network input width / height in pixels
# Network parameters
batch_size = 30  # training batch size
val_batch_size = 16  # validation batch size
downsample_factor = 4  # presumably the CNN's time-axis downsampling — TODO confirm
max_text_len = 8  # maximum label length per sample
| 20.333333
| 79
| 0.777049
|
acfdbaea720a47a8546224ba089f4a6f9843712d
| 798
|
py
|
Python
|
tests/accounts/test_admin.py
|
pennlabs/platform-dev
|
062fa8b23f78f51b8ec5201507ca35dbc5a1f567
|
[
"MIT"
] | 1
|
2020-02-20T09:06:43.000Z
|
2020-02-20T09:06:43.000Z
|
tests/accounts/test_admin.py
|
pennlabs/platform-dev
|
062fa8b23f78f51b8ec5201507ca35dbc5a1f567
|
[
"MIT"
] | 11
|
2020-03-08T22:20:21.000Z
|
2021-09-22T18:39:57.000Z
|
tests/accounts/test_admin.py
|
pennlabs/platform-dev
|
062fa8b23f78f51b8ec5201507ca35dbc5a1f567
|
[
"MIT"
] | null | null | null |
from django.contrib.admin.sites import AdminSite
from django.test import TestCase
from accounts.admin import StudentAdmin
from accounts.models import Student, User
class StudentAdminTestCase(TestCase):
    """Verify that StudentAdmin's computed list columns mirror the related User."""

    def setUp(self):
        self.user = User.objects.create(
            pennid=1, username="user", first_name="First", last_name="Last"
        )
        self.student = Student.objects.create(user=self.user)
        self.sa = StudentAdmin(Student, AdminSite())

    def test_username(self):
        expected = self.user.username
        self.assertEqual(self.sa.username(self.student), expected)

    def test_first_name(self):
        expected = self.user.first_name
        self.assertEqual(self.sa.first_name(self.student), expected)

    def test_last_name(self):
        expected = self.user.last_name
        self.assertEqual(self.sa.last_name(self.student), expected)
| 33.25
| 80
| 0.715539
|
acfdbb6511ee133d0a582d458e133dd49c74a54f
| 4,161
|
py
|
Python
|
src/ansiblelint/config.py
|
JensHeinrich/ansible-lint
|
532918d5b0d285ba0c47eb4f14be940b5d465d5a
|
[
"MIT"
] | null | null | null |
src/ansiblelint/config.py
|
JensHeinrich/ansible-lint
|
532918d5b0d285ba0c47eb4f14be940b5d465d5a
|
[
"MIT"
] | null | null | null |
src/ansiblelint/config.py
|
JensHeinrich/ansible-lint
|
532918d5b0d285ba0c47eb4f14be940b5d465d5a
|
[
"MIT"
] | null | null | null |
"""Store configuration options as a singleton."""
import os
import re
import subprocess
import sys
from argparse import Namespace
from functools import lru_cache
from typing import Dict, List, Optional, Tuple
from packaging.version import Version
from ansiblelint.constants import ANSIBLE_MISSING_RC
DEFAULT_KINDS = [
# Do not sort this list, order matters.
{"requirements": "requirements.yml"}, # v2 and v1
{"requirements": "**/meta/requirements.yml"}, # v1 only
{"reno": "releasenotes/*/*.{yaml,yml}"}, # reno release notes
{"playbook": "**/playbooks/*.{yml,yaml}"},
{"playbook": "**/*playbook*.{yml,yaml}"},
{"role": "**/roles/*/"},
{"tasks": "**/tasks/**/*.{yaml,yml}"},
{"handlers": "**/handlers/*.{yaml,yml}"},
{"vars": "**/{host_vars,group_vars,vars,defaults}/**/*.{yaml,yml}"},
{"meta": "**/meta/main.{yaml,yml}"},
{"yaml": ".config/molecule/config.{yaml,yml}"}, # molecule global config
{
"requirements": "**/molecule/*/{collections,requirements}.{yaml,yml}"
}, # molecule old collection requirements (v1), ansible 2.8 only
{"yaml": "**/molecule/*/{base,molecule}.{yaml,yml}"}, # molecule config
{"playbook": "**/molecule/*/*.{yaml,yml}"}, # molecule playbooks
{"yaml": "**/*.{yaml,yml}"},
{"yaml": "**/.*.{yaml,yml}"},
]
options = Namespace(
colored=True,
cwd=".",
display_relative_path=True,
exclude_paths=[],
lintables=[],
listrules=False,
listtags=False,
parseable=False,
parseable_severity=False,
quiet=False,
rulesdirs=[],
skip_list=[],
tags=[],
verbosity=False,
warn_list=[],
kinds=DEFAULT_KINDS,
mock_modules=[],
mock_roles=[],
loop_var_prefix=None,
offline=False,
project_dir=None,
extra_vars=None,
skip_action_validation=True,
)
# Used to store detected tag deprecations
used_old_tags: Dict[str, str] = {}
# Used to store collection list paths (with mock paths if needed)
collection_list: List[str] = []
@lru_cache()
def ansible_collections_path() -> str:
    """Return the name of the collections-path env var for this Ansible.

    Ansible still honors the deprecated plural name when it is set, so any
    variable already present in the environment wins.
    """
    for candidate in ("ANSIBLE_COLLECTIONS_PATHS", "ANSIBLE_COLLECTIONS_PATH"):
        if candidate in os.environ:
            return candidate
    # Renamed to the singular form in 2.10:
    # https://github.com/ansible/ansible/pull/70007
    if ansible_version() >= ansible_version("2.10.0.dev0"):
        return "ANSIBLE_COLLECTIONS_PATH"
    return "ANSIBLE_COLLECTIONS_PATHS"
def parse_ansible_version(stdout: str) -> Tuple[str, Optional[str]]:
    """Parse output of 'ansible --version'.

    Returns a ``(version, error)`` tuple: on success ``error`` is None; on
    failure ``version`` is the empty string and ``error`` holds a message.
    """
    # ansible-core 2.11+: 'ansible [core 2.11.3]'
    match = re.match(r"^ansible \[(?:core|base) ([^\]]+)\]", stdout)
    if match:
        return match.group(1), None
    # ansible-base 2.10 and Ansible 2.9: 'ansible 2.x.y'
    match = re.match(r"^ansible ([^\s]+)", stdout)
    if match:
        return match.group(1), None
    # Fixed grammar in the message (was "Unable parse ...").
    return "", "FATAL: Unable to parse ansible cli version: %s" % stdout
@lru_cache()
def ansible_version(version: str = "") -> Version:
    """Return current Version object for Ansible.

    If version is not mentioned, it returns current version as detected
    by running ``ansible --version`` (cached for the life of the process).
    When version argument is mentioned, it return converts the version string
    to Version object in order to make it usable in comparisons.

    Exits the process with ANSIBLE_MISSING_RC when the ansible executable
    is missing or its output cannot be parsed.
    """
    if not version:
        # Shell out to the ansible CLI to detect the installed version.
        proc = subprocess.run(
            ["ansible", "--version"],
            universal_newlines=True,
            check=False,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        if proc.returncode == 0:
            version, error = parse_ansible_version(proc.stdout)
            if error is not None:
                # Unparseable output is fatal — we cannot reason about features.
                print(error)
                sys.exit(ANSIBLE_MISSING_RC)
        else:
            print(
                "Unable to find a working copy of ansible executable.",
                proc,
            )
            sys.exit(ANSIBLE_MISSING_RC)
    return Version(version)
# Seed collection_list from the environment at import time when a collections
# path variable is already set; later environment changes are not reflected.
if ansible_collections_path() in os.environ:
    collection_list = os.environ[ansible_collections_path()].split(':')
| 32.507813
| 77
| 0.628455
|
acfdbc634d297fb5b32a92bc4c637a70004787ea
| 15,948
|
py
|
Python
|
peeringdb_server/signals.py
|
tbaschak/peeringdb
|
20d89d53d8e1d807383fa84d74601e37ba4dc9d4
|
[
"BSD-2-Clause"
] | null | null | null |
peeringdb_server/signals.py
|
tbaschak/peeringdb
|
20d89d53d8e1d807383fa84d74601e37ba4dc9d4
|
[
"BSD-2-Clause"
] | null | null | null |
peeringdb_server/signals.py
|
tbaschak/peeringdb
|
20d89d53d8e1d807383fa84d74601e37ba4dc9d4
|
[
"BSD-2-Clause"
] | null | null | null |
import django.urls
from django.db.models.signals import post_save, pre_delete, pre_save
from django.contrib.contenttypes.models import ContentType
from django_namespace_perms.models import Group, GroupPermission
from django_namespace_perms.constants import PERM_CRUD, PERM_READ
from django.template import loader
from django.conf import settings
from django.dispatch import receiver
from allauth.account.signals import user_signed_up
from corsheaders.signals import check_request_enabled
from django_peeringdb.models.abstract import AddressModel
from peeringdb_server.inet import RdapLookup, RdapNotFoundError, RdapException
from peeringdb_server.deskpro import (
ticket_queue,
ticket_queue_asnauto_affil,
ticket_queue_asnauto_create,
)
from peeringdb_server.models import (
QUEUE_ENABLED,
QUEUE_NOTIFY,
UserOrgAffiliationRequest,
is_suggested,
VerificationQueueItem,
Organization,
InternetExchange,
Facility,
Network,
NetworkContact,
)
import peeringdb_server.settings as pdb_settings
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import override
def addressmodel_save(sender, instance=None, **kwargs):
    """
    pre_save handler: flag address model objects for geocode re-sync when
    any address field (other than latitude/longitude) has changed.
    """
    if not instance.id:
        # New, unsaved object — nothing stored to compare against.
        return
    stored = sender.objects.get(id=instance.id)
    for field in AddressModel._meta.get_fields():
        if field.name in ["latitude", "longitude"]:
            continue
        if getattr(instance, field.name) != getattr(stored, field.name):
            # An address component differs from the stored row; mark the
            # instance so its geocode information gets refreshed.
            instance.geocode_status = False


pre_save.connect(addressmodel_save, sender=Facility)
def org_save(sender, **kwargs):
    """
    post_save handler for Organization.

    we want to create a user group for an organization when that
    organization is created: a general member group with read permissions
    and an admin group with CRUD permissions on the org's namespaces.
    Also drops pending affiliation requests when the org is soft-deleted.
    """
    inst = kwargs.get("instance")
    ix_namespace = InternetExchange.nsp_namespace_from_id(inst.id, "*")
    # make the general member group for the org
    try:
        group = Group.objects.get(name=inst.group_name)
    except Group.DoesNotExist:
        # Group missing — create it and grant read access to the org's
        # namespace, private contacts and private IX-F member list URL.
        group = Group(name=inst.group_name)
        group.save()
        perm = GroupPermission(
            group=group, namespace=inst.nsp_namespace, permissions=PERM_READ
        )
        perm.save()
        GroupPermission(
            group=group,
            namespace=NetworkContact.nsp_namespace_from_id(inst.id, "*", "private"),
            permissions=PERM_READ,
        ).save()
        GroupPermission(
            group=group,
            namespace=f"{ix_namespace}.ixf_ixp_member_list_url.private",
            permissions=PERM_READ,
        ).save()
    # make the admin group for the org
    try:
        group = Group.objects.get(name=inst.admin_group_name)
    except Group.DoesNotExist:
        # Admin group gets full CRUD on the same namespaces plus the
        # org's management namespace.
        group = Group(name=inst.admin_group_name)
        group.save()
        perm = GroupPermission(
            group=group, namespace=inst.nsp_namespace, permissions=PERM_CRUD
        )
        perm.save()
        GroupPermission(
            group=group, namespace=inst.nsp_namespace_manage, permissions=PERM_CRUD
        ).save()
        GroupPermission(
            group=group,
            namespace=NetworkContact.nsp_namespace_from_id(inst.id, "*", "private"),
            permissions=PERM_CRUD,
        ).save()
        GroupPermission(
            group=group,
            namespace=f"{ix_namespace}.ixf_ixp_member_list_url.private",
            permissions=PERM_CRUD,
        ).save()
    # Soft-deleted orgs should not keep pending affiliation requests around.
    if inst.status == "deleted":
        for ar in inst.affiliation_requests.all():
            ar.delete()


post_save.connect(org_save, sender=Organization)
def org_delete(sender, instance, **kwargs):
    """
    pre_delete handler: when an organization is HARD deleted, also remove
    its usergroups and any affiliation requests that reference it.
    """
    for group_attr in ("usergroup", "admin_usergroup"):
        try:
            getattr(instance, group_attr).delete()
        except Group.DoesNotExist:
            # Group was never created or already removed — nothing to do.
            pass
    for affiliation_request in instance.affiliation_requests.all():
        affiliation_request.delete()


pre_delete.connect(org_delete, sender=Organization)
@receiver(user_signed_up, dispatch_uid="allauth.user_signed_up")
def new_user_to_guests(request, user, sociallogin=None, **kwargs):
    """
    Signal handler for oauth signups: with AUTO_VERIFY_USERS enabled the
    new account is marked verified right away (email confirmation still
    applies separately); otherwise the user stays unverified (guest).
    """
    if pdb_settings.AUTO_VERIFY_USERS:
        user.set_verified()
        return
    user.set_unverified()
# USER TO ORGANIZATION AFFILIATION
def uoar_creation(sender, instance, created=False, **kwargs):
    """
    post_save handler for UserOrgAffiliationRequest.

    When a user to organization affiliation request is created
    we want to notify the appropriate management entity

    We also want to attempt to derive the targeted organization
    from the ASN the user provided
    """
    if created:
        if instance.asn and not instance.org_id:
            network = Network.objects.filter(asn=instance.asn).first()
            if network:
                # network with targeted asn found, set org
                instance.org = network.org
                instance.status = "pending"
                instance.save()
        if instance.org_id and instance.org.admin_usergroup.user_set.count() > 0:
            # check that user is not already a member of that org
            if instance.user.groups.filter(name=instance.org.usergroup.name).exists():
                instance.approve()
                return
            # organization exists already and has admins, notify organization
            # admins
            for user in instance.org.admin_usergroup.user_set.all():
                with override(user.locale):
                    user.email_user(
                        _(
                            "User %(u_name)s wishes to be affiliated to your Organization"
                        )
                        % {"u_name": instance.user.full_name},
                        loader.get_template(
                            "email/notify-org-admin-user-affil.txt"
                        ).render(
                            {
                                "user": instance.user,
                                "org": instance.org,
                                "org_management_url": "%s/org/%d#users"
                                % (settings.BASE_URL, instance.org.id),
                            }
                        ),
                    )
        else:
            request_type = "be affiliated to"
            rdap_data = {"emails": []}
            org_created = False
            net_created = False
            rdap_lookup = None
            if instance.asn and not instance.org_id:
                # ASN specified in request, but no network found
                # Lookup RDAP information
                try:
                    rdap_lookup = rdap = RdapLookup().get_asn(instance.asn)
                    # result unused; presumably accessing .emails forces
                    # RdapException to surface here — TODO confirm intent
                    ok = rdap_lookup.emails
                except RdapException as inst:
                    instance.deny()
                    raise
                # create organization
                instance.org, org_created = Organization.create_from_rdap(
                    rdap, instance.asn, instance.org_name
                )
                instance.save()
                # create network
                net, net_created = Network.create_from_rdap(
                    rdap, instance.asn, instance.org
                )
                # if affiliate auto appove is on, auto approve at this point
                if pdb_settings.AUTO_APPROVE_AFFILIATION:
                    instance.approve()
                    return
                ticket_queue_asnauto_create(
                    instance.user,
                    instance.org,
                    net,
                    rdap,
                    net.asn,
                    org_created=org_created,
                    net_created=net_created,
                )
                # if user's relationship to network can be validated now
                # we can approve the ownership request right away
                if instance.user.validate_rdap_relationship(rdap):
                    instance.approve()
                    ticket_queue_asnauto_affil(instance.user, instance.org, net, rdap)
                    return
            if instance.org:
                # organization has been set on affiliation request
                entity_name = instance.org.name
                if not instance.org.owned:
                    # organization is currently not owned
                    request_type = "request ownership of"
                    # if affiliate auto appove is on, auto approve at this point
                    if pdb_settings.AUTO_APPROVE_AFFILIATION:
                        instance.approve()
                        return
                    # if user's relationship to the org can be validated by
                    # checking the rdap information of the org's networks
                    # we can approve the affiliation (ownership) request right away
                    for asn, rdap in list(instance.org.rdap_collect.items()):
                        rdap_data["emails"].extend(rdap.emails)
                        if instance.user.validate_rdap_relationship(rdap):
                            ticket_queue_asnauto_affil(
                                instance.user,
                                instance.org,
                                Network.objects.get(asn=asn),
                                rdap,
                            )
                            instance.approve()
                            return
            else:
                entity_name = instance.org_name
                if pdb_settings.AUTO_APPROVE_AFFILIATION:
                    org = Organization.objects.create(
                        name=instance.org_name, status="ok"
                    )
                    instance.org = org
                    instance.approve()
                    return
            # organization has no owners and RDAP information could not verify the user's relationship to the organization, notify pdb staff for review
            ticket_queue(
                "User %s wishes to %s %s"
                % (instance.user.username, request_type, entity_name),
                loader.get_template("email/notify-pdb-admin-user-affil.txt").render(
                    {
                        "user": instance.user,
                        "instance": instance,
                        "base_url": settings.BASE_URL,
                        "org_add_url": "%s%s"
                        % (
                            settings.BASE_URL,
                            django.urls.reverse(
                                "admin:peeringdb_server_organization_add"
                            ),
                        ),
                        "net_add_url": "%s%s"
                        % (
                            settings.BASE_URL,
                            django.urls.reverse("admin:peeringdb_server_network_add"),
                        ),
                        "review_url": "%s%s"
                        % (
                            settings.BASE_URL,
                            django.urls.reverse(
                                "admin:peeringdb_server_user_change",
                                args=(instance.user.id,),
                            ),
                        ),
                        "approve_url": "%s%s"
                        % (
                            settings.BASE_URL,
                            django.urls.reverse(
                                "admin:peeringdb_server_userorgaffiliationrequest_actions",
                                args=(instance.id, "approve_and_notify"),
                            ),
                        ),
                        "emails": list(set(rdap_data["emails"])),
                        "rdap_lookup": rdap_lookup,
                    }
                ),
                instance.user,
            )
    elif instance.status == "approved" and instance.org_id:
        # uoar was not created, and status is now approved, call approve
        # to finalize
        instance.approve()


post_save.connect(uoar_creation, sender=UserOrgAffiliationRequest)
# VERIFICATION QUEUE
# Handlers below are only registered when the verification queue is enabled.
if getattr(settings, "DISABLE_VERIFICATION_QUEUE", False) is False:

    def verification_queue_update(sender, instance, **kwargs):
        """post_save handler: keep a VerificationQueueItem in sync with the
        instance's status — present while 'pending', removed otherwise."""
        if instance.status == "pending":
            try:
                VerificationQueueItem.objects.get(
                    content_type=ContentType.objects.get_for_model(sender),
                    object_id=instance.id,
                )
            except VerificationQueueItem.DoesNotExist:
                # Not queued yet — add it.
                q = VerificationQueueItem(item=instance)
                q.save()
        else:
            try:
                q = VerificationQueueItem.objects.get(
                    content_type=ContentType.objects.get_for_model(sender),
                    object_id=instance.id,
                )
                q.delete()
            except VerificationQueueItem.DoesNotExist:
                pass

    def verification_queue_delete(sender, instance, **kwargs):
        """pre_delete handler: drop the queue item for a deleted instance."""
        try:
            q = VerificationQueueItem.objects.get(
                content_type=ContentType.objects.get_for_model(sender),
                object_id=instance.id,
            )
            q.delete()
        except VerificationQueueItem.DoesNotExist:
            pass

    def verification_queue_notify(sender, instance, **kwargs):
        """post_save handler: open a deskpro ticket for a new queue item,
        at most once per item."""
        # notification was already sent
        if instance.notified:
            return
        # we dont sent notifications unless requesting user has been identified
        if not instance.user_id:
            return
        item = instance.item
        user = instance.user
        if type(item) in QUEUE_NOTIFY and not getattr(
            settings, "DISABLE_VERIFICATION_QUEUE_EMAILS", False
        ):
            if type(item) == Network:
                rdap = RdapLookup().get_asn(item.asn)
            else:
                rdap = None
            title = f"{instance.content_type} - {item}"
            if is_suggested(item):
                title = f"[SUGGEST] {title}"
            ticket_queue(
                title,
                loader.get_template("email/notify-pdb-admin-vq.txt").render(
                    {
                        "entity_type_name": str(instance.content_type),
                        "suggested": is_suggested(item),
                        "item": item,
                        "user": user,
                        "rdap": rdap,
                        "edit_url": "%s%s"
                        % (settings.BASE_URL, instance.item_admin_url),
                    }
                ),
                instance.user,
            )
        # Mark as notified even when no ticket was created, so we never retry.
        instance.notified = True
        instance.save()

    post_save.connect(verification_queue_notify, sender=VerificationQueueItem)
    for model in QUEUE_ENABLED:
        post_save.connect(verification_queue_update, sender=model)
        pre_delete.connect(verification_queue_delete, sender=model)
def cors_allow_api_get_to_everyone(sender, request, **kwargs):
    """
    corsheaders `check_request_enabled` handler: permit cross-origin
    requests only for read-only access (GET/OPTIONS) to the /api endpoints.
    """
    # FIXME: path name to look for should come from config
    targets_api = request.path == "/api" or request.path.startswith("/api/")
    return targets_api and request.method in ("GET", "OPTIONS")
check_request_enabled.connect(cors_allow_api_get_to_everyone)
| 34.820961
| 151
| 0.547091
|
acfdbcad0559d0fbe5f9354b117acf2da879ad80
| 9,048
|
py
|
Python
|
pylxd/deprecated/image.py
|
AdamIsrael/pylxd
|
d5d47a4d1185b4956e997d70e09d649ea73ba26b
|
[
"Apache-2.0"
] | null | null | null |
pylxd/deprecated/image.py
|
AdamIsrael/pylxd
|
d5d47a4d1185b4956e997d70e09d649ea73ba26b
|
[
"Apache-2.0"
] | 1
|
2018-04-21T16:31:29.000Z
|
2018-04-21T16:31:29.000Z
|
pylxd/deprecated/image.py
|
AdamIsrael/pylxd
|
d5d47a4d1185b4956e997d70e09d649ea73ba26b
|
[
"Apache-2.0"
] | 1
|
2021-08-16T15:00:35.000Z
|
2021-08-16T15:00:35.000Z
|
# Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import datetime
import json
from six.moves import urllib
from pylxd.deprecated import base
from pylxd.deprecated import connection
from pylxd.deprecated import exceptions
image_architecture = {
0: 'Unknown',
1: 'i686',
2: 'x86_64',
3: 'armv7l',
4: 'aarch64',
5: 'ppc',
6: 'ppc64',
7: 'ppc64le'
}
class LXDImage(base.LXDBase):
    def __init__(self, conn=None):
        # Reuse the caller's connection when provided; otherwise open a new one.
        self.connection = conn or connection.LXDConnection()
# list images
def image_list(self):
try:
(state, data) = self.connection.get_object('GET', '/1.0/images')
return [image.split('/1.0/images/')[-1]
for image in data['metadata']]
except Exception as e:
print("Unable to fetch image info - {}".format(e))
raise
def image_defined(self, image):
try:
(state, data) = self.connection.get_object('GET', '/1.0/images/%s'
% image)
except exceptions.APIError as ex:
if ex.status_code == 404:
return False
else:
raise
else:
return True
def image_list_by_key(self, params):
try:
(state, data) = self.connection.get_object(
'GET', '/1.0/images', urllib.parse.urlencode(params))
return [image.split('/1.0/images/')[-1]
for image in data['metadata']]
except Exception as e:
print("Unable to fetch image info - {}".format(e))
raise
# image info
def image_info(self, image):
try:
(state, data) = self.connection.get_object('GET', '/1.0/images/%s'
% image)
image = {
'image_upload_date': self.get_image_date(image,
data.get('metadata'),
'uploaded_at'),
'image_created_date': self.get_image_date(image,
data.get('metadata'),
'created_at'),
'image_expires_date': self.get_image_date(image,
data.get('metadata'),
'expires_at'),
'image_public': self.get_image_permission(
image,
data.get('metadata')),
'image_size': '%sMB' % self.get_image_size(
image,
data.get('metadata')),
'image_fingerprint': self.get_image_fingerprint(
image,
data.get('metadata')),
'image_architecture': self.get_image_architecture(
image,
data.get('metadata')),
}
return image
except Exception as e:
print("Unable to fetch image info - {}".format(e))
raise
def get_image_date(self, image, data, key):
try:
if data is None:
(state, data) = self.connection.get_object(
'GET', '/1.0/images/%s' % image)
data = data.get('metadata')
if data[key] != 0:
return datetime.datetime.fromtimestamp(
data[key]).strftime('%Y-%m-%d %H:%M:%S')
else:
return 'Unknown'
except Exception as e:
print("Unable to fetch image info - {}".format(e))
raise
def get_image_permission(self, image, data):
try:
if data is None:
(state, data) = self.connection.get_object(
'GET', '/1.0/images/%s' % image)
data = data.get('metadata')
return True if data['public'] == 1 else False
except Exception as e:
print("Unable to fetch image info - {}".format(e))
raise
def get_image_size(self, image, data):
try:
if data is None:
(state, data) = self.connection.get_object(
'GET', '/1.0/images/%s' % image)
data = data.get('metadata')
image_size = data['size']
if image_size <= 0:
raise exceptions.ImageInvalidSize()
return image_size // 1024 ** 2
except Exception as e:
print("Unable to fetch image info - {}".format(e))
raise
def get_image_fingerprint(self, image, data):
try:
if data is None:
(state, data) = self.connection.get_object(
'GET', '/1.0/images/%s' % image)
data = data.get('metadata')
return data['fingerprint']
except Exception as e:
print("Unable to fetch image info - {}".format(e))
raise
def get_image_architecture(self, image, data):
try:
if data is None:
(state, data) = self.connection.get_object(
'GET', '/1.0/images/%s' % image)
data = data.get('metadata')
return image_architecture[data['architecture']]
except Exception as e:
print("Unable to fetch image info - {}".format(e))
raise
# image operations
def image_upload(self, path=None, data=None, headers={}):
data = data or open(path, 'rb').read()
try:
return self.connection.get_object('POST', '/1.0/images',
data, headers)
except Exception as e:
print("Unable to upload image - {}".format(e))
raise
def image_delete(self, image):
try:
return self.connection.get_status('DELETE', '/1.0/images/%s'
% image)
except Exception as e:
print("Unable to delete image - {}".format(e))
raise
def image_export(self, image):
try:
return self.connection.get_raw('GET', '/1.0/images/%s/export'
% image)
except Exception as e:
print("Unable to export image - {}".format(e))
raise
def image_update(self, image, data):
try:
return self.connection.get_status('PUT', '/1.0/images/%s' % image,
json.dumps(data))
except Exception as e:
print("Unable to update image - {}".format(e))
raise
def image_rename(self, image, data):
try:
return self.connection.get_status('POST', '/1.0/images/%s' % image,
json.dumps(data))
except Exception as e:
print("Unable to rename image - {}".format(e))
raise
class LXDAlias(base.LXDBase):
    """Operations on image aliases under ``/1.0/images/aliases``."""

    def alias_list(self):
        """Return the names of all defined image aliases."""
        (state, data) = self.connection.get_object(
            'GET', '/1.0/images/aliases')
        prefix = '/1.0/images/aliases/'
        return [alias.split(prefix)[-1] for alias in data['metadata']]

    def alias_defined(self, alias):
        """Return the status of an existence check for *alias*."""
        return self.connection.get_status(
            'GET', '/1.0/images/aliases/%s' % alias)

    def alias_show(self, alias):
        """Return the full API object for *alias*."""
        return self.connection.get_object(
            'GET', '/1.0/images/aliases/%s' % alias)

    def alias_update(self, alias, data):
        """Update *alias* with the JSON-serializable *data* (PUT)."""
        url = '/1.0/images/aliases/%s' % alias
        return self.connection.get_status('PUT', url, json.dumps(data))

    def alias_rename(self, alias, data):
        """Rename *alias* according to *data* (POST)."""
        url = '/1.0/images/aliases/%s' % alias
        return self.connection.get_status('POST', url, json.dumps(data))

    def alias_create(self, data):
        """Create a new alias described by *data*."""
        return self.connection.get_status(
            'POST', '/1.0/images/aliases', json.dumps(data))

    def alias_delete(self, alias):
        """Delete *alias*."""
        return self.connection.get_status(
            'DELETE', '/1.0/images/aliases/%s' % alias)
| 36.930612
| 79
| 0.492374
|
acfdbd0312f9465259c1dc8c90cde08657a080a4
| 1,124
|
py
|
Python
|
lib/revitronui/charts.py
|
revitron/revitron-ui
|
167af8a2e843b83963b2bcd8cd1d009efffb83ce
|
[
"MIT"
] | 6
|
2020-05-17T09:14:28.000Z
|
2022-02-18T04:01:45.000Z
|
lib/revitronui/charts.py
|
revitron/revitron-ui
|
167af8a2e843b83963b2bcd8cd1d009efffb83ce
|
[
"MIT"
] | 3
|
2020-10-09T23:24:12.000Z
|
2020-11-16T12:30:17.000Z
|
lib/revitronui/charts.py
|
revitron/revitron-ui
|
167af8a2e843b83963b2bcd8cd1d009efffb83ce
|
[
"MIT"
] | 4
|
2020-10-08T16:30:03.000Z
|
2021-12-17T10:29:37.000Z
|
from pyrevit import script
class LineChart:
    """Wrapper around pyRevit's output charts.

    Builds a chart from ``data`` (y values) and ``labels`` (x axis) with an
    optional ``title``. Subclasses override ``make()`` (chart type) and
    ``hasBackground`` (per-point colors).
    """
    def __init__(self, data, labels, title=None):
        # Imported lazily to avoid a circular import with the revitronui
        # package that defines this module.
        import revitronui
        self.output = script.get_output()
        self.chart = self.make()
        self.chart.data.labels = labels
        dataset = self.chart.data.new_dataset(title)
        dataset.data = data
        if self.hasBackground:
            # One palette color per data point (doughnut/pie variants).
            palette = revitronui.Palette(len(data))
            dataset.backgroundColor = palette.get()
        else:
            # Semi-transparent dark blue (#2c3e50) for line/bar charts.
            dataset.set_color(0x2c, 0x3e, 0x50, 0.5)
        if title:
            self.chart.options.title = {
                'display': True,
                'text': title,
                'fontSize': 18,
                'fontColor': '#2c3e50',
                'fontStyle': 'bold'
            }
    @property
    def hasBackground(self):
        # Overridden by subclasses that want per-point background colors.
        return False
    def make(self):
        # Factory hook - subclasses return a different pyRevit chart type.
        return self.output.make_line_chart()
    def draw(self):
        # Render the chart into the pyRevit output window.
        self.chart.draw()
    def get(self):
        # Expose the underlying chart object for further customization.
        return self.chart
class BarChart(LineChart):
    """Vertical bar chart variant of LineChart."""
    def make(self):
        return self.output.make_bar_chart()
class DoughnutChart(LineChart):
    """Doughnut chart variant; uses a palette color per data point."""
    @property
    def hasBackground(self):
        return True
    def make(self):
        return self.output.make_doughnut_chart()
class PieChart(DoughnutChart):
    """Pie chart variant; inherits per-point palette colors."""
    def make(self):
        return self.output.make_pie_chart()
| 18.733333
| 46
| 0.69306
|
acfdbe5fc41809c08c87adaec96c9f952ccce275
| 916
|
py
|
Python
|
Lista2/ex2.py
|
brunocozendey/Pythonplayground
|
41257c5010274f7964b3f72a2d00513ddf8ad3c1
|
[
"MIT"
] | null | null | null |
Lista2/ex2.py
|
brunocozendey/Pythonplayground
|
41257c5010274f7964b3f72a2d00513ddf8ad3c1
|
[
"MIT"
] | null | null | null |
Lista2/ex2.py
|
brunocozendey/Pythonplayground
|
41257c5010274f7964b3f72a2d00513ddf8ad3c1
|
[
"MIT"
] | null | null | null |
# -*- coding: cp1252 -*-
'''
O programa lê uma string (com várias palavras) e verifique se ela é um
palíndromo. Um palíndromo é uma cadeia que pode ser lida de trás para frente ou frente para
trás e possui exatamente o mesmo valor. Exemplo: SUBI NO ONIBUS
Criado por: Bruno Cozendey
Criado em: 17/05/2018
'''
def ler():
    """Prompt for a phrase and return it with all spaces removed (Python 2)."""
    while True:
        try:
            str1 = str(raw_input('Digite uma frase para verificar se é um palíndromo: \n'))
            break
        except:
            # NOTE(review): bare except hides real errors; narrow it if this
            # code is ever ported to Python 3.
            print 'Ooops algo ocorreu de errado!'
    return str1.replace(' ','')
def inverte(str1):
    """Return *str1* reversed.

    Replaces the manual index loop with slice notation; behavior is
    unchanged, including for the empty string and single characters.
    """
    return str1[::-1]
def compara(str1,str1_inv):
    """Print whether *str1* equals *str1_inv*, ignoring case (Python 2)."""
    if str1.lower() == str1_inv.lower():
        print 'É um palíndromo!'
    else:
        print 'Não é palíndromo!'
#Main
# Read a phrase, reverse it, and report whether it is a palindrome.
str1 = ler()
str1_inv = inverte(str1)
compara(str1,str1_inv)
| 24.756757
| 91
| 0.631004
|
acfdbf796603aed4f0000b6cfb5d5c3885362694
| 7,260
|
py
|
Python
|
SynthText_Chinese/gen.py
|
shijieS/Scene-Text-Understanding
|
247df9a664f2c6c2c2e34fc14eddbb175142c53f
|
[
"OML"
] | 380
|
2017-10-19T01:36:27.000Z
|
2022-03-14T07:32:17.000Z
|
SynthText_Chinese/gen.py
|
Wanjpeng/Scene-Text-Understanding
|
247df9a664f2c6c2c2e34fc14eddbb175142c53f
|
[
"OML"
] | null | null | null |
SynthText_Chinese/gen.py
|
Wanjpeng/Scene-Text-Understanding
|
247df9a664f2c6c2c2e34fc14eddbb175142c53f
|
[
"OML"
] | 118
|
2017-11-23T02:37:53.000Z
|
2021-05-10T05:12:16.000Z
|
# -*- coding: utf-8 -*-
# Author: Ankush Gupta
# Date: 2015
"""
Entry-point for generating synthetic text images, as described in:
@InProceedings{Gupta16,
author = "Gupta, A. and Vedaldi, A. and Zisserman, A.",
title = "Synthetic Data for Text Localisation in Natural Images",
booktitle = "IEEE Conference on Computer Vision and Pattern Recognition",
year = "2016",
}
"""
import numpy as np
import h5py
import os, sys, traceback
import os.path as osp
from synthgen import *
from common import *
import wget, tarfile
import cv2 as cv
import time
## Define some configuration variables:
NUM_IMG = -1 # no. of images to use for generation (-1 to use all available):
INSTANCE_PER_IMAGE = 1 # no. of times to use the same image
SECS_PER_IMG = 5 #max time per image in seconds
# path to the data-file, containing image, depth and segmentation:
DATA_PATH = 'data'
DB_FNAME = osp.join(DATA_PATH,'dset.h5')
# url of the data (google-drive public file):
DATA_URL = 'http://www.robots.ox.ac.uk/~ankush/data.tar.gz'
OUT_FILE = 'results/SynthText_cartoon_viz.h5'
def get_data():
  """
  Download the image,depth and segmentation data:
  Returns, the h5 database.
  """
  if not osp.exists(DB_FNAME):
    # First run: fetch and unpack the dataset tarball next to this script.
    try:
      colorprint(Color.BLUE,'\tdownloading data (56 M) from: '+DATA_URL,bold=True)
      print
      sys.stdout.flush()
      out_fname = 'data.tar.gz'
      wget.download(DATA_URL,out=out_fname)
      tar = tarfile.open(out_fname)
      tar.extractall()
      tar.close()
      os.remove(out_fname)
      colorprint(Color.BLUE,'\n\tdata saved at:'+DB_FNAME,bold=True)
      sys.stdout.flush()
    except:
      # NOTE(review): bare except also swallows KeyboardInterrupt; Python 2
      # style kept as-is.
      print colorize(Color.RED,'Data not found and have problems downloading.',bold=True)
      sys.stdout.flush()
      sys.exit(-1)
  # open the h5 file and return:
  return h5py.File(DB_FNAME,'r')
def add_res_to_db(imgname,res,db):
  """
  Add the synthetically generated text image instance
  and other metadata to the dataset.
  """
  # NOTE(review): Python 2 only (print statements, `unicode`, `xrange`).
  ninstance = len(res)
  for i in xrange(ninstance):
    print colorize(Color.GREEN,'added into the db %s '%res[i]['txt'])
    # One dataset per rendered instance, named "<image>_<instance index>".
    dname = "%s_%d"%(imgname, i)
    db['data'].create_dataset(dname,data=res[i]['img'])
    db['data'][dname].attrs['charBB'] = res[i]['charBB']
    db['data'][dname].attrs['wordBB'] = res[i]['wordBB']
    print 'type of res[i][\'txt\'] ',type(res[i]['txt'])
    #db['data'][dname].attrs['txt'] = res[i]['txt']
    # Variable-length unicode dtype so non-ASCII (Chinese) text round-trips.
    db['data'][dname].attrs.create('txt', res[i]['txt'], dtype=h5py.special_dtype(vlen=unicode))
    print 'type of db ',type(db['data'][dname].attrs['txt'])
    print colorize(Color.GREEN,'successfully added')
    print res[i]['txt']
    print res[i]['img'].shape
    print 'charBB',res[i]['charBB'].shape
    print 'charBB',res[i]['charBB']
    print 'wordBB',res[i]['wordBB'].shape
    print 'wordBB',res[i]['wordBB']
    '''
    img = Image.fromarray(res[i]['img'])
    hsv_img=np.array(rgb2hsv(img))
    print 'hsv_img_shape',hsv_img.shape
    print 'hsv_img',hsv_img
    H=hsv_img[:,:,2]
    print 'H_channel',H.shape,H
    #img = Image.fromarray(db['data'][dname][:])
    '''
def rgb2hsv(image):
  """Convert a PIL RGB image to HSV color space."""
  return image.convert('HSV')
def rgb2gray(image):
  """Convert an RGB image (PIL image or array-like) to a float grayscale
  array using the ITU-R BT.601 luma coefficients."""
  pixels = np.array(image)
  # Weighted channel sum; evaluation order matches the classic formula
  # 0.2989*R + 0.5870*G + 0.1140*B exactly.
  luma = 0.2989 * pixels[:,:,0]
  luma = luma + 0.5870 * pixels[:,:,1]
  luma = luma + 0.1140 * pixels[:,:,2]
  return luma
def main(viz=False):
  """Render synthetic text onto every image in the input database.

  Reads image/depth/segmentation from the h5 dataset, renders up to
  INSTANCE_PER_IMAGE text instances per image (retrying up to 5 times when
  placement fails) and stores results in OUT_FILE.
  NOTE(review): Python 2 only (print statements, xrange, raw_input).
  """
  # open databases:
  print colorize(Color.BLUE,'getting data..',bold=True)
  db = get_data()
  print colorize(Color.BLUE,'\t-> done',bold=True)
  # open the output h5 file:
  out_db = h5py.File(OUT_FILE,'w')
  out_db.create_group('/data')
  print colorize(Color.GREEN,'Storing the output in: '+OUT_FILE, bold=True)
  # get the names of the image files in the dataset:
  imnames = sorted(db['image'].keys())
  N = len(imnames)
  global NUM_IMG
  if NUM_IMG < 0:
    NUM_IMG = N
  start_idx,end_idx = 0,min(NUM_IMG, N)
  RV3 = RendererV3(DATA_PATH,max_time=SECS_PER_IMG)
  for i in xrange(start_idx,end_idx):
    t1=time.time()
    imname = imnames[i]
    try:
      # get the image:
      img = Image.fromarray(db['image'][imname][:])
      # get the pre-computed depth:
      # there are 2 estimates of depth (represented as 2 "channels")
      # here we are using the second one (in some cases it might be
      # useful to use the other one):
      img_resize=img.resize(db['depth'][imname].shape)
      depth = db['depth'][imname][:].T
      print 'depth shape,img shape',depth.shape,np.array(img).shape
      print 'depth info',depth
      print 'depth max min',np.max(depth),np.min(depth)
      #depth = depth[:,:,1]
      #modify the depth with HSV H_channel
      #img_resize=img.resize(depth.shape)
      hsv_img=np.array(rgb2hsv(img_resize))
      print 'hsv_img_shape',hsv_img.shape
      #print 'hsv_img',hsv_img
      H=hsv_img[:,:,2]
      H=H.T
      H=H.astype('float32')
      print 'H_channel',H.shape,H
      print 'H_max min',np.max(H),np.min(H)
      print 'scale',np.max(depth)/np.max(H)
      #depth= (np.max(depth)/np.max(H))*H
      #depth= H
      #print np.isnan(H).any()
      #print np.isinf(H).any()
      #print np.isnan(depth).any()
      #print np.isinf(depth).any()
      print 'depth shape',depth.shape
      #print 'depth info',depth
      print 'depth max min',np.max(depth),np.min(depth)
      gray=np.array(rgb2gray(img_resize))
      #print 'gray',gray.shape,gray
      # NOTE(review): the real depth map is replaced here by a grayscale
      # proxy rescaled to the depth range.
      depth= (np.max(depth)/np.max(gray))*gray.astype('float32')
      #add more blur
      #mean blur
      kernel = np.ones((5,5),np.float32)/25
      gray = cv2.filter2D(gray,-1,kernel)
      #print 'gray',gray.shape,gray
      # get segmentation:
      seg = db['seg'][imname][:].astype('float32')
      area = db['seg'][imname].attrs['area']
      label = db['seg'][imname].attrs['label']
      print 'seg info',seg.shape,area.shape,label.shape
      # re-size uniformly:
      sz = depth.shape[:2][::-1]
      img = np.array(img.resize(sz,Image.ANTIALIAS))
      seg = np.array(Image.fromarray(seg).resize(sz,Image.NEAREST))
      print colorize(Color.RED,'%d of %d'%(i,end_idx-1), bold=True)
      res = RV3.render_text(img,depth,seg,area,label,
                            ninstance=INSTANCE_PER_IMAGE,viz=viz)
      t2=time.time()
      # Retry rendering up to 5 times when no text could be placed.
      for ct in range(5):
        if len(res) > 0:
          # non-empty : successful in placing text:
          add_res_to_db(imname,res,out_db)
          break
        else:
          res = RV3.render_text(img,depth,seg,area,label,
                                ninstance=INSTANCE_PER_IMAGE,viz=viz)
      # visualize the output:
      print 'time consume in each pic',t2-t1
      if viz:
        if 'q' in raw_input(colorize(Color.RED,'continue? (enter to continue, q to exit): ',True)):
          break
    except:
      # Best-effort batch job: log the failure and move on to the next image.
      traceback.print_exc()
      print colorize(Color.GREEN,'>>>> CONTINUING....', bold=True)
      continue
  db.close()
  out_db.close()
if __name__=='__main__':
  # CLI entry point: only the --viz flag is exposed.
  import argparse
  parser = argparse.ArgumentParser(description='Genereate Synthetic Scene-Text Images')
  parser.add_argument('--viz',action='store_true',dest='viz',default=False,help='flag for turning on visualizations')
  args = parser.parse_args()
  main(args.viz)
| 32.410714
| 117
| 0.623278
|
acfdbf7a49ae6861b6d5a893fb3c5286d04c25cc
| 5,387
|
py
|
Python
|
zerver/lib/storage.py
|
measo3/2018-2-OSS-L5
|
15af7b91489b6cab794c5bd5af5948b3cc059f85
|
[
"Apache-2.0"
] | 3
|
2018-12-04T01:44:43.000Z
|
2019-05-13T06:16:21.000Z
|
zerver/lib/storage.py
|
hcxiong/zulip
|
bf22eefedebd50b25f32b22988217c13a89b65d1
|
[
"Apache-2.0"
] | 58
|
2018-11-27T15:18:54.000Z
|
2018-12-09T13:43:07.000Z
|
zerver/lib/storage.py
|
hcxiong/zulip
|
bf22eefedebd50b25f32b22988217c13a89b65d1
|
[
"Apache-2.0"
] | 9
|
2019-11-04T18:59:29.000Z
|
2022-03-22T17:46:37.000Z
|
# Useful reading is https://zulip.readthedocs.io/en/latest/subsystems/front-end-build-process.html
import os
import shutil
from typing import Any, Dict, List, Optional, Tuple
from django.conf import settings
from django.contrib.staticfiles.storage import ManifestStaticFilesStorage
from pipeline.storage import PipelineMixin
from zerver.lib.str_utils import force_str
class AddHeaderMixin:
    """Storage mixin that prepends STATIC_HEADER_FILE to minified CSS.

    Only files under ``min/`` ending in ``.css`` are rewritten; all other
    paths pass through unprocessed.
    """
    def post_process(self, paths: Dict[str, Tuple['ZulipStorage', str]], dry_run: bool=False,
                     **kwargs: Any) -> List[Tuple[str, str, bool]]:
        if dry_run:
            return []
        with open(settings.STATIC_HEADER_FILE, 'rb') as header_file:
            header = header_file.read().decode(settings.FILE_CHARSET)
        # A dictionary of path to tuples of (old_path, new_path,
        # processed). The return value of this method is the values
        # of this dictionary
        ret_dict = {}
        for name in paths:
            storage, path = paths[name]
            if not path.startswith('min/') or not path.endswith('.css'):
                ret_dict[path] = (path, path, False)
                continue
            # Prepend the header
            with storage.open(path, 'rb') as orig_file:
                orig_contents = orig_file.read().decode(settings.FILE_CHARSET)
            storage.delete(path)
            with storage.open(path, 'w') as new_file:
                new_file.write(force_str(header + orig_contents, encoding=settings.FILE_CHARSET))
            ret_dict[path] = (path, path, True)
        # Cooperatively call the next post_process in the MRO, if any.
        super_class = super()
        if hasattr(super_class, 'post_process'):
            super_ret = super_class.post_process(paths, dry_run, **kwargs)  # type: ignore # https://github.com/python/mypy/issues/2956
        else:
            super_ret = []
        # Merge super class's return value with ours
        for val in super_ret:
            old_path, new_path, processed = val
            if processed:
                ret_dict[old_path] = val
        return list(ret_dict.values())
class RemoveUnminifiedFilesMixin:
    """Storage mixin that deletes unminified source trees (currently ``js/``)
    from STATIC_ROOT after collection, and drops their entries from *paths*
    so downstream post-processing skips them."""

    def post_process(self, paths: Dict[str, Tuple['ZulipStorage', str]], dry_run: bool=False,
                     **kwargs: Any) -> List[Tuple[str, str, bool]]:
        if dry_run:
            return []

        root = settings.STATIC_ROOT
        to_remove = ['js']
        for tree in to_remove:
            shutil.rmtree(os.path.join(root, tree))

        # str.startswith accepts a tuple of prefixes; this replaces the old
        # `is_valid = lambda p: all([...])` helper (PEP 8 discourages binding
        # a lambda to a name) with identical filtering behavior.
        removed_prefixes = tuple(to_remove)
        paths = {k: v for k, v in paths.items()
                 if not k.startswith(removed_prefixes)}

        # Cooperatively call the next post_process in the MRO, if any.
        super_class = super()
        if hasattr(super_class, 'post_process'):
            return super_class.post_process(paths, dry_run, **kwargs)  # type: ignore # https://github.com/python/mypy/issues/2956
        return []
class IgnoreBundlesManifestStaticFilesStorage(ManifestStaticFilesStorage):
    """ManifestStaticFilesStorage that skips hash-renaming for assets whose
    on-disk names must stay stable (webpack bundles, images, translation
    data, sounds, HTML)."""

    def hashed_name(self, name: str, content: Optional[str]=None, filename: Optional[str]=None) -> str:
        ext = os.path.splitext(name)[1]
        if (name.startswith("webpack-bundles") and
                ext in ['.js', '.css', '.map']):
            # Hack to avoid renaming already-hashnamed webpack bundles
            # when minifying; this was causing every bundle to have
            # two hashes appended to its name, one by webpack and one
            # here. We can't just skip processing of these bundles,
            # since we do need the Django storage to add these to the
            # manifest for django_webpack_loader to work. So, we just
            # use a no-op hash function for these already-hashed
            # assets.
            return name
        if ext in ['.png', '.gif', '.jpg', '.svg']:
            # Similarly, don't hash-rename image files; we only serve
            # the original file paths (not the hashed file paths), and
            # so the only effect of hash-renaming these is to increase
            # the size of release tarballs with duplicate copies of these.
            #
            # One could imagine a future world in which we instead
            # used the hashed paths for these; in that case, though,
            # we should instead be removing the non-hashed paths.
            return name
        # BUG FIX: os.path.splitext returns the extension *with* its leading
        # dot, so the old list ['json', 'po', ...] could never match and
        # these files were hash-renamed anyway.
        if ext in ['.json', '.po', '.mo', '.mp3', '.ogg', '.html']:
            # And same story for translation files, sound files, etc.
            return name
        return super().hashed_name(name, content, filename)
if settings.PRODUCTION:
    # This is a hack to use staticfiles.json from within the
    # deployment, rather than a directory under STATIC_ROOT. By doing
    # so, we can use a different copy of staticfiles.json for each
    # deployment, which ensures that we always use the correct static
    # assets for each deployment.
    ManifestStaticFilesStorage.manifest_name = os.path.join(settings.DEPLOY_ROOT,
                                                            "staticfiles.json")
    orig_path = ManifestStaticFilesStorage.path
    # Monkeypatch .path so the absolute manifest path above is returned
    # as-is instead of being resolved relative to STATIC_ROOT.
    def path(self: ManifestStaticFilesStorage, name: str) -> str:
        if name == ManifestStaticFilesStorage.manifest_name:
            return name
        return orig_path(self, name)
    ManifestStaticFilesStorage.path = path
class ZulipStorage(PipelineMixin,
                   AddHeaderMixin, RemoveUnminifiedFilesMixin,
                   IgnoreBundlesManifestStaticFilesStorage):
    """Composed static-files storage backend; the mixin order defines the
    post-processing pipeline, since each ``post_process`` calls ``super()``."""
    pass
| 41.438462
| 135
| 0.62558
|
acfdc0b455ac3dc5f0df27c91724f7cbd9f7f7af
| 4,568
|
py
|
Python
|
mac/google-cloud-sdk/lib/googlecloudsdk/command_lib/app/jarfile.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
mac/google-cloud-sdk/lib/googlecloudsdk/command_lib/app/jarfile.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 4
|
2020-07-21T12:51:46.000Z
|
2022-01-22T10:29:25.000Z
|
mac/google-cloud-sdk/lib/googlecloudsdk/command_lib/app/jarfile.py
|
bopopescu/cndw
|
ee432efef88a4351b355f3d6d5350defc7f4246b
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for handling Manifest file in a Java jar file.
Jar files are just zip files with a particular interpretation for certain files
in the zip under the META-INF/ directory. So we can read and write them using
the standard zipfile module.
The specification for jar files is at
http://docs.oracle.com/javase/7/docs/technotes/guides/jar/jar.html
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from __future__ import with_statement
import re
import zipfile
# Path of the manifest entry inside a jar, per the jar file specification.
_MANIFEST_NAME = 'META-INF/MANIFEST.MF'
class Error(Exception):
  """Base class for errors raised by this module."""
  pass
class InvalidJarError(Error):
  """Raised when a jar's manifest is malformed."""
  pass
class Manifest(object):
  """The parsed manifest from a jar file.

  Attributes:
    main_section: a dict representing the main (first) section of the manifest.
      Each key is a string that is an attribute, such as 'Manifest-Version', and
      the corresponding value is a string that is the value of the attribute,
      such as '1.0'.
    sections: a dict representing the other sections of the manifest. Each key
      is a string that is the value of the 'Name' attribute for the section,
      and the corresponding value is a dict like the main_section one, for the
      other attributes.
  """

  def __init__(self, main_section, sections):
    # Plain value object; no validation is performed here.
    self.main_section = main_section
    self.sections = sections
def ReadManifest(jar_file_name):
  """Read and parse the manifest out of the given jar.

  Args:
    jar_file_name: the name of the jar from which the manifest is to be read.

  Returns:
    A parsed Manifest object, or None if the jar has no manifest.

  Raises:
    IOError: if the jar does not exist or cannot be read.
  """
  with zipfile.ZipFile(jar_file_name) as jar:
    try:
      raw_manifest = jar.read(_MANIFEST_NAME)
    except KeyError:
      # The jar has no META-INF/MANIFEST.MF entry.
      return None
  return _ParseManifest(raw_manifest.decode('utf-8'), jar_file_name)
def _ParseManifest(manifest_string, jar_file_name):
  """Parse a Manifest object out of the given string.

  Args:
    manifest_string: a str or unicode that is the manifest contents.
    jar_file_name: a str that is the path of the jar, for use in exception
      messages.

  Returns:
    A Manifest object parsed out of the string.

  Raises:
    InvalidJarError: if the manifest is not well-formed.
  """
  # Normalize \r\n line endings, then split on runs of blank lines: the
  # first chunk is the main section, the rest are named per-entry sections.
  normalized = '\n'.join(manifest_string.splitlines()).rstrip('\n')
  chunks = re.split('\n{2,}', normalized)
  parsed = [_ParseManifestSection(chunk, jar_file_name) for chunk in chunks]
  main_section, entries = parsed[0], parsed[1:]
  sections = {}
  for entry in entries:
    name = entry.get('Name')
    if name is None:
      raise InvalidJarError('%s: Manifest entry has no Name attribute: %r' %
                            (jar_file_name, entry))
    sections[name] = entry
  return Manifest(main_section, sections)
def _ParseManifestSection(section, jar_file_name):
"""Parse a dict out of the given manifest section string.
Args:
section: a str or unicode that is the manifest section. It looks something
like this (without the >):
> Name: section-name
> Some-Attribute: some value
> Another-Attribute: another value
jar_file_name: a str that is the path of the jar, for use in exception
messages.
Returns:
A dict where the keys are the attributes (here, 'Name', 'Some-Attribute',
'Another-Attribute'), and the values are the corresponding attribute values.
Raises:
InvalidJarError: if the manifest section is not well-formed.
"""
# Join continuation lines.
section = section.replace('\n ', '').rstrip('\n')
if not section:
return {}
try:
return dict(line.split(': ', 1) for line in section.split('\n'))
except ValueError:
raise InvalidJarError('%s: Invalid manifest %r' % (jar_file_name, section))
| 31.944056
| 80
| 0.714974
|
acfdc0b81705fab3cf8bbbcb0f8973ac12aac435
| 236
|
py
|
Python
|
invenio_app_ils/internal_locations/loaders/jsonschemas/__init__.py
|
NRodriguezcuellar/invenio-app-ils
|
144a25a6c56330b214c6fd0b832220fa71f2e68a
|
[
"MIT"
] | 41
|
2018-09-04T13:00:46.000Z
|
2022-03-24T20:45:56.000Z
|
invenio_app_ils/internal_locations/loaders/jsonschemas/__init__.py
|
NRodriguezcuellar/invenio-app-ils
|
144a25a6c56330b214c6fd0b832220fa71f2e68a
|
[
"MIT"
] | 720
|
2017-03-10T08:02:41.000Z
|
2022-01-14T15:36:37.000Z
|
invenio_app_ils/internal_locations/loaders/jsonschemas/__init__.py
|
NRodriguezcuellar/invenio-app-ils
|
144a25a6c56330b214c6fd0b832220fa71f2e68a
|
[
"MIT"
] | 54
|
2017-03-09T16:05:29.000Z
|
2022-03-17T08:34:51.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# invenio-app-ils is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""ILS eitems JSON loader."""
| 26.222222
| 76
| 0.690678
|
acfdc0d7ee563b69f16d7aee54951bda28288608
| 8,060
|
py
|
Python
|
coderedcms/settings.py
|
fakegit/coderedcms
|
10dd10635bba9c2dcecede4b8e557b5a6ffd8b23
|
[
"BSD-3-Clause"
] | 526
|
2018-07-31T20:14:17.000Z
|
2022-03-23T08:08:29.000Z
|
coderedcms/settings.py
|
fakegit/coderedcms
|
10dd10635bba9c2dcecede4b8e557b5a6ffd8b23
|
[
"BSD-3-Clause"
] | 325
|
2018-08-01T13:53:55.000Z
|
2022-03-31T15:08:28.000Z
|
coderedcms/settings.py
|
fakegit/coderedcms
|
10dd10635bba9c2dcecede4b8e557b5a6ffd8b23
|
[
"BSD-3-Clause"
] | 153
|
2018-08-02T07:42:40.000Z
|
2022-03-24T23:54:59.000Z
|
import os
from django.conf import settings
from functools import lru_cache
# Resolve the project/base directories, falling back to this package's parent
# directories when the Django settings module does not define them.
# BUG FIX: getattr() needs a default here - without one, a *missing* setting
# raised AttributeError instead of triggering the fallback path.
PROJECT_DIR = getattr(settings, 'PROJECT_DIR', None) or os.path.dirname(
    os.path.dirname(os.path.abspath(__file__))
)
BASE_DIR = getattr(settings, 'BASE_DIR', None) or os.path.dirname(PROJECT_DIR)
# Default configuration for coderedcms. Any key here can be overridden by
# defining a CODERED_<KEY> attribute in the Django settings module; see
# get_config() below for the merge logic.
DEFAULTS = {
    'PROTECTED_MEDIA_URL': '/protected/',
    'PROTECTED_MEDIA_ROOT': os.path.join(BASE_DIR, 'protected'),
    'PROTECTED_MEDIA_UPLOAD_WHITELIST': [],
    'PROTECTED_MEDIA_UPLOAD_BLACKLIST': ['.sh', '.exe', '.bat', '.ps1', '.app', '.jar', '.py', '.php', '.pl', '.rb'], # noqa
    # Bootstrap button/carousel/grid options offered in the page editor.
    'FRONTEND_BTN_SIZE_DEFAULT': '',
    'FRONTEND_BTN_SIZE_CHOICES': (
        ('btn-sm', 'Small'),
        ('', 'Default'),
        ('btn-lg', 'Large'),
    ),
    'FRONTEND_BTN_STYLE_DEFAULT': 'btn-primary',
    'FRONTEND_BTN_STYLE_CHOICES': (
        ('btn-primary', 'Primary'),
        ('btn-secondary', 'Secondary'),
        ('btn-success', 'Success'),
        ('btn-danger', 'Danger'),
        ('btn-warning', 'Warning'),
        ('btn-info', 'Info'),
        ('btn-link', 'Link'),
        ('btn-light', 'Light'),
        ('btn-dark', 'Dark'),
        ('btn-outline-primary', 'Outline Primary'),
        ('btn-outline-secondary', 'Outline Secondary'),
        ('btn-outline-success', 'Outline Success'),
        ('btn-outline-danger', 'Outline Danger'),
        ('btn-outline-warning', 'Outline Warning'),
        ('btn-outline-info', 'Outline Info'),
        ('btn-outline-light', 'Outline Light'),
        ('btn-outline-dark', 'Outline Dark'),
    ),
    'FRONTEND_CAROUSEL_FX_DEFAULT': '',
    'FRONTEND_CAROUSEL_FX_CHOICES': (
        ('', 'Slide'),
        ('carousel-fade', 'Fade'),
    ),
    'FRONTEND_COL_SIZE_DEFAULT': '',
    'FRONTEND_COL_SIZE_CHOICES': (
        ('', 'Automatically size'),
        ('12', 'Full row'),
        ('6', 'Half - 1/2 column'),
        ('4', 'Thirds - 1/3 column'),
        ('8', 'Thirds - 2/3 column'),
        ('3', 'Quarters - 1/4 column'),
        ('9', 'Quarters - 3/4 column'),
        ('2', 'Sixths - 1/6 column'),
        ('10', 'Sixths - 5/6 column'),
        ('1', 'Twelfths - 1/12 column'),
        ('5', 'Twelfths - 5/12 column'),
        ('7', 'Twelfths - 7/12 column'),
        ('11', 'Twelfths - 11/12 column'),
    ),
    'FRONTEND_COL_BREAK_DEFAULT': 'md',
    'FRONTEND_COL_BREAK_CHOICES': (
        ('', 'Always expanded'),
        ('sm', 'sm - Expand on small screens (phone, 576px) and larger'),
        ('md', 'md - Expand on medium screens (tablet, 768px) and larger'),
        ('lg', 'lg - Expand on large screens (laptop, 992px) and larger'),
        ('xl', 'xl - Expand on extra large screens (wide monitor, 1200px)'),
    ),
    'FRONTEND_NAVBAR_FORMAT_DEFAULT': '',
    'FRONTEND_NAVBAR_FORMAT_CHOICES': (
        ('', 'Default Bootstrap Navbar'),
        ('codered-navbar-center', 'Centered logo at top'),
    ),
    'FRONTEND_NAVBAR_COLOR_SCHEME_DEFAULT': 'navbar-light',
    'FRONTEND_NAVBAR_COLOR_SCHEME_CHOICES': (
        ('navbar-light', 'Light - for use with a light-colored navbar'),
        ('navbar-dark', 'Dark - for use with a dark-colored navbar'),
    ),
    'FRONTEND_NAVBAR_CLASS_DEFAULT': 'bg-light',
    'FRONTEND_NAVBAR_COLLAPSE_MODE_DEFAULT': 'navbar-expand-lg',
    'FRONTEND_NAVBAR_COLLAPSE_MODE_CHOICES': (
        ('', 'Never show menu - Always collapse menu behind a button'),
        ('navbar-expand-sm', 'sm - Show on small screens (phone size) and larger'),
        ('navbar-expand-md', 'md - Show on medium screens (tablet size) and larger'),
        ('navbar-expand-lg', 'lg - Show on large screens (laptop size) and larger'),
        ('navbar-expand-xl', 'xl - Show on extra large screens (desktop, wide monitor)'),
    ),
    'FRONTEND_THEME_HELP': "Change the color palette of your site with a Bootstrap theme. Powered by Bootswatch https://bootswatch.com/.", # noqa
    'FRONTEND_THEME_DEFAULT': '',
    'FRONTEND_THEME_CHOICES': (
        ('', 'Default - Classic Bootstrap'),
        ('cerulean', 'Cerulean - A calm blue sky'),
        ('cosmo', 'Cosmo - An ode to Metro'),
        ('cyborg', 'Cyborg - Jet black and electric blue'),
        ('darkly', 'Darkly - Flatly in night mode'),
        ('flatly', 'Flatly - Flat and modern'),
        ('journal', 'Journal - Crisp like a new sheet of paper'),
        ('litera', 'Litera - The medium is the message'),
        ('lumen', 'Lumen - Light and shadow'),
        ('lux', 'Lux - A touch of class'),
        ('materia', 'Materia - Material is the metaphor'),
        ('minty', 'Minty - A fresh feel'),
        ('pulse', 'Pulse - A trace of purple'),
        ('sandstone', 'Sandstone - A touch of warmth'),
        ('simplex', 'Simplex - Mini and minimalist'),
        ('sketchy', 'Sketchy - A hand-drawn look for mockups and mirth'),
        ('slate', 'Slate - Shades of gunmetal gray'),
        ('solar', 'Solar - A dark spin on Solarized'),
        ('spacelab', 'Spacelab - Silvery and sleek'),
        ('superhero', 'Superhero - The brave and the blue'),
        ('united', 'United - Ubuntu orange and unique font'),
        ('yeti', 'Yeti - A friendly foundation'),
    ),
    # Per-block-type template choices shown in the editor UI.
    'FRONTEND_TEMPLATES_BLOCKS': {
        'cardblock': (
            ('coderedcms/blocks/card_block.html', 'Card'),
            ('coderedcms/blocks/card_head.html', 'Card with header'),
            ('coderedcms/blocks/card_foot.html', 'Card with footer'),
            ('coderedcms/blocks/card_head_foot.html', 'Card with header and footer'),
            ('coderedcms/blocks/card_blurb.html', 'Blurb - rounded image and no border'),
            ('coderedcms/blocks/card_img.html', 'Cover image - use image as background'),
        ),
        'cardgridblock': (
            ('coderedcms/blocks/cardgrid_group.html', 'Card group - attached cards of equal size'),
            ('coderedcms/blocks/cardgrid_deck.html', 'Card deck - separate cards of equal size'),
            ('coderedcms/blocks/cardgrid_columns.html', 'Card masonry - fluid brick pattern'),
        ),
        'pagelistblock': (
            ('coderedcms/blocks/pagelist_block.html', 'General, simple list'),
            ('coderedcms/blocks/pagelist_list_group.html', 'General, list group navigation panel'),
            ('coderedcms/blocks/pagelist_article_media.html', 'Article, media format'),
            ('coderedcms/blocks/pagelist_article_card_group.html',
             'Article, card group - attached cards of equal size'),
            ('coderedcms/blocks/pagelist_article_card_deck.html',
             'Article, card deck - separate cards of equal size'),
            ('coderedcms/blocks/pagelist_article_card_columns.html',
             'Article, card masonry - fluid brick pattern'),
        ),
        'pagepreviewblock': (
            ('coderedcms/blocks/pagepreview_card.html', 'Card'),
            ('coderedcms/blocks/pagepreview_form.html', 'Form inputs'),
        ),
        # templates that are available for all block types
        '*': (
            ('', 'Default'),
        ),
    },
    'FRONTEND_TEMPLATES_PAGES': {
        # templates that are available for all page types
        '*': (
            ('', 'Default'),
            ('coderedcms/pages/web_page.html', 'Web page showing title and cover image'),
            ('coderedcms/pages/web_page_notitle.html', 'Web page without title and cover image'),
            ('coderedcms/pages/home_page.html', 'Home page without title and cover image'),
            ('coderedcms/pages/base.html', 'Blank page - no navbar or footer'),
        ),
    },
    # Site-wide announcement banner (disabled when BANNER is None).
    'BANNER': None,
    'BANNER_BACKGROUND': '#f00',
    'BANNER_TEXT_COLOR': '#fff',
}
@lru_cache()
def get_config():
    """Return the effective CodeRed settings dict.

    Starts from ``DEFAULTS`` and overrides each key for which a Django setting
    named ``CODERED_<KEY>`` exists. The result is lru_cached, so settings
    changed after the first call are not picked up.
    """
    _missing = object()
    merged = dict(DEFAULTS)
    for key in merged:
        override = getattr(settings, 'CODERED_%s' % key, _missing)
        if override is not _missing:
            merged[key] = override
    return merged
# Resolve the effective settings once at import time (get_config is lru_cached,
# so every later import shares the same dict).
cr_settings = get_config()
# Prefer django-bootstrap4 when installed, falling back to django-bootstrap3.
try:
    import bootstrap4.bootstrap as bootstrap
except ImportError:
    import bootstrap3.bootstrap as bootstrap
# Re-export the active bootstrap package's setting accessor.
get_bootstrap_setting = bootstrap.get_bootstrap_setting
| 40.913706
| 146
| 0.594169
|
acfdc0d88dd8536aef9dd8eb79765ae65aab0b5a
| 2,525
|
py
|
Python
|
utils/anchor.py
|
mshmoon/siamrpn-lightweight
|
f6527e34c9eaaeb45817b12babd78ee73b1c7525
|
[
"MIT"
] | 1
|
2020-11-20T09:34:45.000Z
|
2020-11-20T09:34:45.000Z
|
utils/anchor.py
|
mshmoon/siamrpn-lightweight
|
f6527e34c9eaaeb45817b12babd78ee73b1c7525
|
[
"MIT"
] | null | null | null |
utils/anchor.py
|
mshmoon/siamrpn-lightweight
|
f6527e34c9eaaeb45817b12babd78ee73b1c7525
|
[
"MIT"
] | null | null | null |
# Copyright (c) SenseTime. All Rights Reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import numpy as np
from utils.bbox import corner2center, center2corner
class Anchors:
    """Generates the base anchor boxes for one feature-map location and, on
    demand, the dense anchor grid for an entire feature map."""

    def __init__(self, stride, ratios, scales, image_center=0, size=0):
        # Anchor geometry configuration.
        self.stride = stride
        self.ratios = ratios
        self.scales = scales
        # Cached grid parameters; generate_all_anchors() no-ops when they match.
        self.image_center = image_center
        self.size = size
        self.anchor_num = len(self.scales) * len(self.ratios)
        self.anchors = None
        self.generate_anchors()

    def generate_anchors(self):
        """Fill ``self.anchors`` with origin-centered (x1, y1, x2, y2) boxes,
        one row per (ratio, scale) combination."""
        self.anchors = np.zeros((self.anchor_num, 4), dtype=np.float32)
        base_area = self.stride * self.stride
        row = 0
        for ratio in self.ratios:
            # Integer truncation is intentional and matches the anchor definition.
            base_w = int(math.sqrt(base_area * 1. / ratio))
            base_h = int(base_w * ratio)
            for scale in self.scales:
                w = base_w * scale
                h = base_h * scale
                self.anchors[row][:] = [-w * 0.5, -h * 0.5, w * 0.5, h * 0.5][:]
                row += 1

    def generate_all_anchors(self, im_c, size):
        """Build the dense anchor grid for a ``size`` x ``size`` feature map
        centered at image coordinate ``im_c``.

        Returns False (leaving state untouched) when the cached grid already
        matches, True after recomputing ``self.all_anchors``.
        """
        if self.image_center == im_c and self.size == size:
            return False
        self.image_center = im_c
        self.size = size

        # Shift the origin-centered anchors to the top-left grid cell.
        top_left = im_c - size // 2 * self.stride
        shifted = self.anchors + np.array([top_left] * 4, dtype=np.float32)
        x1, y1, x2, y2 = [shifted[:, col].reshape(self.anchor_num, 1, 1)
                          for col in range(4)]
        cx, cy, w, h = corner2center([x1, y1, x2, y2])

        # Replicate the centers across the whole grid via broadcasting.
        cx = cx + np.arange(0, size).reshape(1, 1, -1) * self.stride
        cy = cy + np.arange(0, size).reshape(1, -1, 1) * self.stride
        zero = np.zeros((self.anchor_num, size, size), dtype=np.float32)
        cx, cy, w, h = [t + zero for t in (cx, cy, w, h)]
        x1, y1, x2, y2 = center2corner([cx, cy, w, h])

        # Keep both the corner and the center parameterizations.
        self.all_anchors = (np.stack([x1, y1, x2, y2]).astype(np.float32),
                            np.stack([cx, cy, w, h]).astype(np.float32))
        return True
| 29.360465
| 74
| 0.547327
|
acfdc107cc7e0ff85668033c21e9c90c857e0849
| 70,942
|
py
|
Python
|
pytorch_lightning/core/lightning.py
|
lxww302/pytorch-lightning
|
4018237c309b7d9d6978da73132003615341e04a
|
[
"Apache-2.0"
] | null | null | null |
pytorch_lightning/core/lightning.py
|
lxww302/pytorch-lightning
|
4018237c309b7d9d6978da73132003615341e04a
|
[
"Apache-2.0"
] | 1
|
2020-11-09T21:07:07.000Z
|
2020-11-09T21:07:07.000Z
|
pytorch_lightning/core/lightning.py
|
zippeurfou/pytorch-lightning
|
4018237c309b7d9d6978da73132003615341e04a
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import collections
import copy
import inspect
import re
import types
from abc import ABC
from argparse import Namespace
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union, Mapping
import torch
from pytorch_lightning import _logger as log
from pytorch_lightning.core.grads import GradInformation
from pytorch_lightning.core.hooks import CheckpointHooks, DataHooks, ModelHooks
from pytorch_lightning.core.memory import ModelSummary
from pytorch_lightning.core.saving import ALLOWED_CONFIG_TYPES, PRIMITIVE_TYPES, ModelIO
from pytorch_lightning.core.step_result import Result
from pytorch_lightning.utilities import rank_zero_warn, AMPType
from pytorch_lightning.utilities.device_dtype_mixin import DeviceDtypeModuleMixin
from pytorch_lightning.utilities.xla_device_utils import XLADeviceUtils
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.parsing import (
AttributeDict,
collect_init_args,
get_init_args,
)
from pytorch_lightning.callbacks import Callback
from torch import ScriptModule, Tensor
from torch.nn import Module
from torch.optim.optimizer import Optimizer
# Detect TPU availability once at import time; torch_xla is only imported when a
# TPU device is actually present, so non-TPU installs never need the package.
TPU_AVAILABLE = XLADeviceUtils.tpu_device_exists()
if TPU_AVAILABLE:
    import torch_xla.core.xla_model as xm
class LightningModule(
ABC,
DeviceDtypeModuleMixin,
GradInformation,
ModelIO,
ModelHooks,
DataHooks,
CheckpointHooks,
Module,
):
# Below is for property support of JIT in PyTorch 1.7
# since none of them is important when using JIT, we are going to ignore them.
__jit_unused_properties__ = [
"datamodule",
"example_input_array",
"hparams",
"hparams_initial",
"on_gpu",
"current_epoch",
"global_step",
] + DeviceDtypeModuleMixin.__jit_unused_properties__
    def __init__(self, *args, **kwargs):
        """Initialize trainer/logger pointers, distributed-mode flags and the
        internal logging state; ``*args``/``**kwargs`` are forwarded to
        :class:`torch.nn.Module`."""
        super().__init__(*args, **kwargs)
        # see (https://github.com/pytorch/pytorch/blob/3e6bb5233f9ca2c5aa55d9cda22a7ee85439aa6e/
        # torch/nn/modules/module.py#L227)
        torch._C._log_api_usage_once(f"lightning.module.{self.__class__.__name__}")
        # NOTE(review): these two appear to be legacy state restored/consumed
        # elsewhere — confirm against the trainer before relying on them.
        self.exp_save_path = None
        self.loaded_optimizer_states_dict = {}
        #: Pointer to the trainer object
        self.trainer = None
        #: Pointer to the logger object
        self.logger = None
        #: True if using dp
        self.use_dp = False
        #: True if using ddp
        self.use_ddp = False
        #: True if using ddp2
        self.use_ddp2 = False
        # True if on tpu
        self.use_tpu = False
        #: True if using amp
        self.use_amp = False
        #: The precision used
        self.precision = 32
        # optionally can be set by user
        self._example_input_array = None
        self._datamodule = None
        # Internal state used by self.log() and manual optimization; managed by
        # the trainer while hooks run.
        self._results: Optional[Result] = None
        self._current_fx_name = ''
        self._running_manual_backward = False
        self._current_hook_fx_name = None
        self._current_dataloader_idx = None
def optimizers(self):
opts = self.trainer.optimizers
# single optimizer
if isinstance(opts, list) and len(opts) == 1 and isinstance(opts[0], Optimizer):
return opts[0]
# multiple opts
else:
return opts
    @property
    def example_input_array(self) -> Any:
        """User-settable example model input (``None`` until assigned via the setter)."""
        return self._example_input_array
@property
def current_epoch(self) -> int:
"""The current epoch"""
return self.trainer.current_epoch if self.trainer else 0
@property
def global_step(self) -> int:
"""Total training batches seen across all epochs"""
return self.trainer.global_step if self.trainer else 0
    @example_input_array.setter
    def example_input_array(self, example: Any) -> None:
        # Stored privately; read back through the ``example_input_array`` property.
        self._example_input_array = example
    @property
    def datamodule(self) -> Any:
        """Datamodule attached to this module (``None`` until assigned via the setter)."""
        return self._datamodule
    @datamodule.setter
    def datamodule(self, datamodule: Any) -> None:
        # Stored privately; read back through the ``datamodule`` property.
        self._datamodule = datamodule
@property
def on_gpu(self):
"""
True if your model is currently running on GPUs.
Useful to set flags around the LightningModule for different CPU vs GPU behavior.
"""
return self.device.type == "cuda"
def print(self, *args, **kwargs) -> None:
r"""
Prints only from process 0. Use this in any distributed mode to log only once.
Args:
*args: The thing to print. Will be passed to Python's built-in print function.
**kwargs: Will be passed to Python's built-in print function.
Example:
.. code-block:: python
def forward(self, x):
self.print(x, 'in forward')
"""
if self.trainer.is_global_zero:
print(*args, **kwargs)
def log(
self,
name: str,
value: Any,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
):
"""
Log a key, value
Example::
self.log('train_loss', loss)
The default behavior per hook is as follows
.. csv-table:: ``*`` also applies to the test loop
:header: "LightningMoule Hook", "on_step", "on_epoch", "prog_bar", "logger"
:widths: 20, 10, 10, 10, 10
"training_step", "T", "F", "F", "T"
"training_step_end", "T", "F", "F", "T"
"training_epoch_end", "F", "T", "F", "T"
"validation_step*", "F", "T", "F", "T"
"validation_step_end*", "F", "T", "F", "T"
"validation_epoch_end*", "F", "T", "F", "T"
Args:
name: key name
value: value name
prog_bar: if True logs to the progress bar
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs at the training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs at the val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across GPUs/TPUs
sync_dist_group: the ddp group
"""
if self._results is not None:
# in any epoch end can't log step metrics (only epoch metric)
if 'epoch_end' in self._current_fx_name and on_step:
m = f'on_step=True cannot be used on {self._current_fx_name} method'
raise MisconfigurationException(m)
if 'epoch_end' in self._current_fx_name and on_epoch is False:
m = f'on_epoch cannot be False when called from the {self._current_fx_name} method'
raise MisconfigurationException(m)
# add log_dict
# TODO: if logged twice fail with crash
# set the default depending on the fx_name
on_step = self.__auto_choose_log_on_step(on_step)
on_epoch = self.__auto_choose_log_on_epoch(on_epoch)
if self._current_hook_fx_name is not None:
self.trainer.logger_connector.check_logging_in_callbacks(
self._current_hook_fx_name,
on_step=on_step,
on_epoch=on_epoch
)
# make sure user doesn't introduce logic for multi-dataloaders
if "/dataloader_idx_" in name:
raise MisconfigurationException(
f"Logged key: {name} should not contain information about dataloader_idx.")
accelerator = self.trainer.accelerator_backend
self._results.log(
name,
value,
prog_bar,
logger,
on_step,
on_epoch,
reduce_fx,
tbptt_reduce_fx,
tbptt_pad_token,
enable_graph,
sync_dist,
sync_dist_op,
sync_dist_group,
accelerator.sync_tensor,
self._current_dataloader_idx,
)
def log_dict(
self,
dictionary: dict,
prog_bar: bool = False,
logger: bool = True,
on_step: Optional[bool] = None,
on_epoch: Optional[bool] = None,
reduce_fx: Callable = torch.mean,
tbptt_reduce_fx: Callable = torch.mean,
tbptt_pad_token: int = 0,
enable_graph: bool = False,
sync_dist: bool = False,
sync_dist_op: Union[Any, str] = 'mean',
sync_dist_group: Optional[Any] = None,
):
"""
Log a dictonary of values at once
Example::
values = {'loss': loss, 'acc': acc, ..., 'metric_n': metric_n}
self.log_dict(values)
Args:
dictionary: key value pairs (str, tensors)
prog_bar: if True logs to the progress base
logger: if True logs to the logger
on_step: if True logs at this step. None auto-logs for training_step but not validation/test_step
on_epoch: if True logs epoch accumulated metrics. None auto-logs for val/test step but not training_step
reduce_fx: reduction function over step values for end of epoch. Torch.mean by default
tbptt_reduce_fx: function to reduce on truncated back prop
tbptt_pad_token: token to use for padding
enable_graph: if True, will not auto detach the graph
sync_dist: if True, reduces the metric across GPUs/TPUs
sync_dist_op: the op to sync across GPUs/TPUs
sync_dist_group: the ddp group:
"""
for k, v in dictionary.items():
self.log(
name=k,
value=v,
prog_bar=prog_bar,
logger=logger,
on_step=on_step,
on_epoch=on_epoch,
reduce_fx=reduce_fx,
enable_graph=enable_graph,
sync_dist=sync_dist,
sync_dist_group=sync_dist_group,
sync_dist_op=sync_dist_op,
tbptt_pad_token=tbptt_pad_token,
tbptt_reduce_fx=tbptt_reduce_fx,
)
    def write_prediction(self, name, value, filename='predictions.pt'):
        """Record a single named prediction through the trainer's evaluation loop.

        NOTE(review): delegates to the private ``_add_prediction`` API; the
        actual handling of ``filename`` lives in the evaluation loop.
        """
        self.trainer.evaluation_loop.predictions._add_prediction(name, value, filename)
def write_prediction_dict(self, predictions_dict, filename='predictions.pt'):
for k, v in predictions_dict.items():
self.write_prediction(k, v, filename)
def __auto_choose_log_on_step(self, on_step):
if on_step is None:
if self._current_fx_name in {'training_step', 'training_step_end'}:
on_step = True
elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',
'evaluation_epoch_end', 'training_epoch_end'}:
on_step = False
else:
on_step = False
return on_step
def __auto_choose_log_on_epoch(self, on_epoch):
if on_epoch is None:
if self._current_fx_name in {'training_step', 'training_step_end'}:
on_epoch = False
elif self._current_fx_name in {'evaluation_step', 'evaluation_step_end',
'evaluation_epoch_end', 'training_epoch_end'}:
on_epoch = True
else:
on_epoch = True
return on_epoch
def forward(self, *args, **kwargs):
r"""
Same as :meth:`torch.nn.Module.forward()`, however in Lightning you want this to define
the operations you want to use for prediction (i.e.: on a server or as a feature extractor).
Normally you'd call ``self()`` from your :meth:`training_step` method.
This makes it easy to write a complex system for training with the outputs
you'd want in a prediction setting.
You may also find the :func:`~pytorch_lightning.core.decorators.auto_move_data` decorator useful
when using the module outside Lightning in a production setting.
Args:
*args: Whatever you decide to pass into the forward method.
**kwargs: Keyword arguments are also possible.
Return:
Predicted output
Examples:
.. code-block:: python
# example if we were using this model as a feature extractor
def forward(self, x):
feature_maps = self.convnet(x)
return feature_maps
def training_step(self, batch, batch_idx):
x, y = batch
feature_maps = self(x)
logits = self.classifier(feature_maps)
# ...
return loss
# splitting it this way allows model to be used a feature extractor
model = MyModelAbove()
inputs = server.get_request()
results = model(inputs)
server.write_results(results)
# -------------
# This is in stark contrast to torch.nn.Module where normally you would have this:
def forward(self, batch):
x, y = batch
feature_maps = self.convnet(x)
logits = self.classifier(feature_maps)
return logits
"""
return super().forward(*args, **kwargs)
def training_step(self, *args, **kwargs):
r"""
Here you compute and return the training loss and some additional metrics for e.g.
the progress bar or logger.
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): Integer displaying index of this batch
optimizer_idx (int): When using multiple optimizers, this argument will also be present.
hiddens(:class:`~torch.Tensor`): Passed in if
:paramref:`~pytorch_lightning.trainer.trainer.Trainer.truncated_bptt_steps` > 0.
Return:
Any of.
- :class:`~torch.Tensor` - The loss tensor
- `dict` - A dictionary. Can include any keys, but must include the key 'loss'
- `None` - Training will skip to the next batch
In this step you'd normally do the forward pass and calculate the loss for a batch.
You can also do fancier things like multiple forward passes or something model specific.
Example::
def training_step(self, batch, batch_idx):
x, y, z = batch
out = self.encoder(x)
loss = self.loss(out, x)
return loss
If you define multiple optimizers, this step will be called with an additional
``optimizer_idx`` parameter.
.. code-block:: python
# Multiple optimizers (e.g.: GANs)
def training_step(self, batch, batch_idx, optimizer_idx):
if optimizer_idx == 0:
# do training_step with encoder
if optimizer_idx == 1:
# do training_step with decoder
If you add truncated back propagation through time you will also get an additional
argument with the hidden states of the previous step.
.. code-block:: python
# Truncated back-propagation through time
def training_step(self, batch, batch_idx, hiddens):
# hiddens are the hidden states from the previous truncated backprop step
...
out, hiddens = self.lstm(data, hiddens)
...
return {'loss': loss, 'hiddens': hiddens}
Note:
The loss value shown in the progress bar is smoothed (averaged) over the last values,
so it differs from the actual loss returned in train/validation step.
"""
rank_zero_warn(
"`training_step` must be implemented to be used with the Lightning Trainer"
)
def training_step_end(self, *args, **kwargs):
"""
Use this when training with dp or ddp2 because :meth:`training_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [training_step(sub_batch) for sub_batch in sub_batches]
training_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in `training_step` for each batch part.
Return:
Anything
When using dp/ddp2 distributed backends, only a portion of the batch is inside the training_step:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
# softmax uses only a portion of the batch in the denomintaor
loss = self.softmax(out)
loss = nce_loss(loss)
return loss
If you wish to do something with all the parts of the batch, then use this method to do it:
.. code-block:: python
def training_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return {'pred': out}
def training_step_end(self, training_step_outputs):
gpu_0_pred = training_step_outputs[0]['pred']
gpu_1_pred = training_step_outputs[1]['pred']
gpu_n_pred = training_step_outputs[n]['pred']
# this softmax now uses the full batch
loss = nce_loss([gpu_0_pred, gpu_1_pred, gpu_n_pred])
return loss
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def training_epoch_end(self, outputs: List[Any]) -> None:
"""
Called at the end of the training epoch with the outputs of all training steps.
Use this in case you need to do something with all the outputs for every training_step.
.. code-block:: python
# the pseudocode for these calls
train_outs = []
for train_batch in train_data:
out = training_step(train_batch)
train_outs.append(out)
training_epoch_end(train_outs)
Args:
outputs: List of outputs you defined in :meth:`training_step`, or if there are
multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If this method is not overridden, this won't be called.
Example::
def training_epoch_end(self, training_step_outputs):
# do something with all training_step outputs
return result
With multiple dataloaders, ``outputs`` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each training step for that dataloader.
.. code-block:: python
def training_epoch_end(self, training_step_outputs):
for out in training_step_outputs:
# do something here
"""
def validation_step(self, *args, **kwargs):
r"""
Operates on a single batch of data from the validation set.
In this step you'd might generate examples or calculate anything of interest like accuracy.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(train_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple val datasets used)
Return:
Any of.
- Any object or value
- `None` - Validation will skip to the next batch
.. code-block:: python
# pseudocode of order
out = validation_step()
if defined('validation_step_end'):
out = validation_step_end(out)
out = validation_epoch_end(out)
.. code-block:: python
# if you have one val dataloader:
def validation_step(self, batch, batch_idx)
# if you have multiple val dataloaders:
def validation_step(self, batch, batch_idx, dataloader_idx)
Examples:
.. code-block:: python
# CASE 1: A single validation dataset
def validation_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
val_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'val_loss': loss, 'val_acc': val_acc})
If you pass in multiple val datasets, validation_step will have an additional argument.
.. code-block:: python
# CASE 2: multiple validation datasets
def validation_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to validate you don't need to implement this method.
Note:
When the :meth:`validation_step` is called, the model has been put in eval mode
and PyTorch gradients have been disabled. At the end of validation,
the model goes back to training mode and gradients are enabled.
"""
def validation_step_end(self, *args, **kwargs):
"""
Use this when validating with dp or ddp2 because :meth:`validation_step`
will operate on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [validation_step(sub_batch) for sub_batch in sub_batches]
validation_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`validation_step`
for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT validation_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
loss = self.softmax(out)
loss = nce_loss(loss)
self.log('val_loss', loss)
# --------------
# with validation_step_end to do softmax over the full batch
def validation_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
return out
def validation_epoch_end(self, val_step_outputs):
for out in val_step_outputs:
# do something with these
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def validation_epoch_end(
self, outputs: List[Any]
) -> None:
"""
Called at the end of the validation epoch with the outputs of all validation steps.
.. code-block:: python
# the pseudocode for these calls
val_outs = []
for val_batch in val_data:
out = validation_step(val_batch)
val_outs.append(out)
validation_epoch_end(val_outs)
Args:
outputs: List of outputs you defined in :meth:`validation_step`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader.
Return:
None
Note:
If you didn't define a :meth:`validation_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def validation_epoch_end(self, val_step_outputs):
for out in val_step_outputs:
# do something
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each validation step for that dataloader.
.. code-block:: python
def validation_epoch_end(self, outputs):
for dataloader_output_result in outputs:
dataloader_outs = dataloader_output_result.dataloader_i_outputs
self.log('final_metric', final_value)
"""
def test_step(self, *args, **kwargs):
r"""
Operates on a single batch of data from the test set.
In this step you'd normally generate examples or calculate anything of interest
such as accuracy.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]):
The output of your :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
batch_idx (int): The index of this batch.
dataloader_idx (int): The index of the dataloader that produced this batch
(only if multiple test datasets used).
Return:
Any of.
- Any object or value
- `None` - Testing will skip to the next batch
.. code-block:: python
# if you have one test dataloader:
def test_step(self, batch, batch_idx)
# if you have multiple test dataloaders:
def test_step(self, batch, batch_idx, dataloader_idx)
Examples:
.. code-block:: python
# CASE 1: A single test dataset
def test_step(self, batch, batch_idx):
x, y = batch
# implement your own
out = self(x)
loss = self.loss(out, y)
# log 6 example images
# or generated text... or whatever
sample_imgs = x[:6]
grid = torchvision.utils.make_grid(sample_imgs)
self.logger.experiment.add_image('example_images', grid, 0)
# calculate acc
labels_hat = torch.argmax(out, dim=1)
test_acc = torch.sum(y == labels_hat).item() / (len(y) * 1.0)
# log the outputs!
self.log_dict({'test_loss': loss, 'test_acc': test_acc})
If you pass in multiple validation datasets, :meth:`test_step` will have an additional
argument.
.. code-block:: python
# CASE 2: multiple test datasets
def test_step(self, batch, batch_idx, dataloader_idx):
# dataloader_idx tells you which dataset this is.
Note:
If you don't need to validate you don't need to implement this method.
Note:
When the :meth:`test_step` is called, the model has been put in eval mode and
PyTorch gradients have been disabled. At the end of the test epoch, the model goes back
to training mode and gradients are enabled.
"""
def test_step_end(self, *args, **kwargs):
"""
Use this when testing with dp or ddp2 because :meth:`test_step` will operate
on only part of the batch. However, this is still optional
and only needed for things like softmax or NCE loss.
Note:
If you later switch to ddp or some other mode, this will still be called
so that you don't have to change your code.
.. code-block:: python
# pseudocode
sub_batches = split_batches_for_dp(batch)
batch_parts_outputs = [test_step(sub_batch) for sub_batch in sub_batches]
test_step_end(batch_parts_outputs)
Args:
batch_parts_outputs: What you return in :meth:`test_step` for each batch part.
Return:
None or anything
.. code-block:: python
# WITHOUT test_step_end
# if used in DP or DDP2, this batch is 1/num_gpus large
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self(x)
loss = self.softmax(out)
self.log('test_loss', loss)
# --------------
# with test_step_end to do softmax over the full batch
def test_step(self, batch, batch_idx):
# batch is 1/num_gpus big
x, y = batch
out = self.encoder(x)
return out
def test_epoch_end(self, output_results):
# this out is now the full size of the batch
all_test_step_outs = output_results.out
loss = nce_loss(all_test_step_outs)
self.log('test_loss', loss)
See Also:
See the :ref:`multi_gpu` guide for more details.
"""
def test_epoch_end(
self, outputs: List[Any]
) -> None:
"""
Called at the end of a test epoch with the output of all test steps.
.. code-block:: python
# the pseudocode for these calls
test_outs = []
for test_batch in test_data:
out = test_step(test_batch)
test_outs.append(out)
test_epoch_end(test_outs)
Args:
outputs: List of outputs you defined in :meth:`test_step_end`, or if there
are multiple dataloaders, a list containing a list of outputs for each dataloader
Return:
None
Note:
If you didn't define a :meth:`test_step`, this won't be called.
Examples:
With a single dataloader:
.. code-block:: python
def test_epoch_end(self, outputs):
# do something with the outputs of all test batches
all_test_preds = test_step_outputs.predictions
some_result = calc_all_results(all_test_preds)
self.log(some_result)
With multiple dataloaders, `outputs` will be a list of lists. The outer list contains
one entry per dataloader, while the inner list contains the individual outputs of
each test step for that dataloader.
.. code-block:: python
def test_epoch_end(self, outputs):
final_value = 0
for dataloader_outputs in outputs:
for test_step_out in dataloader_outputs:
# do something
final_value += test_step_out
self.log('final_metric', final_value)
"""
def configure_optimizers(
self,
):
r"""
Choose what optimizers and learning-rate schedulers to use in your optimization.
Normally you'd need one. But in the case of GANs or similar you might have multiple.
Return:
Any of these 6 options.
- Single optimizer.
- List or Tuple - List of optimizers.
- Two lists - The first list has multiple optimizers, the second a list of LR schedulers (or lr_dict).
- Dictionary, with an 'optimizer' key, and (optionally) a 'lr_scheduler'
key which value is a single LR scheduler or lr_dict.
- Tuple of dictionaries as described, with an optional 'frequency' key.
- None - Fit will run without any optimizer.
Note:
The 'frequency' value is an int corresponding to the number of sequential batches
optimized with the specific optimizer. It should be given to none or to all of the optimizers.
There is a difference between passing multiple optimizers in a list,
and passing multiple optimizers in dictionaries with a frequency of 1:
In the former case, all optimizers will operate on the given batch in each optimization step.
In the latter, only one optimizer will operate on the given batch at every step.
The lr_dict is a dictionary which contains scheduler and its associated configuration.
It has five keys. The default configuration is shown below.
.. code-block:: python
{
'scheduler': lr_scheduler, # The LR schduler
'interval': 'epoch', # The unit of the scheduler's step size
'frequency': 1, # The frequency of the scheduler
'reduce_on_plateau': False, # For ReduceLROnPlateau scheduler
'monitor': 'val_loss', # Metric for ReduceLROnPlateau to monitor
'strict': True # Whether to crash the training if `monitor` is not found
}
If user only provides LR schedulers, then their configuration will set to default as shown above.
Examples:
.. code-block:: python
# most cases
def configure_optimizers(self):
opt = Adam(self.parameters(), lr=1e-3)
return opt
# multiple optimizer case (e.g.: GAN)
def configure_optimizers(self):
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
disriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
return generator_opt, disriminator_opt
# example with learning rate schedulers
def configure_optimizers(self):
generator_opt = Adam(self.model_gen.parameters(), lr=0.01)
disriminator_opt = Adam(self.model_disc.parameters(), lr=0.02)
discriminator_sched = CosineAnnealing(discriminator_opt, T_max=10)
return [generator_opt, disriminator_opt], [discriminator_sched]
# example with step-based learning rate schedulers
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
gen_sched = {'scheduler': ExponentialLR(gen_opt, 0.99),
'interval': 'step'} # called after each training step
dis_sched = CosineAnnealing(discriminator_opt, T_max=10) # called every epoch
return [gen_opt, dis_opt], [gen_sched, dis_sched]
# example with optimizer frequencies
# see training procedure in `Improved Training of Wasserstein GANs`, Algorithm 1
# https://arxiv.org/abs/1704.00028
def configure_optimizers(self):
gen_opt = Adam(self.model_gen.parameters(), lr=0.01)
dis_opt = Adam(self.model_disc.parameters(), lr=0.02)
n_critic = 5
return (
{'optimizer': dis_opt, 'frequency': n_critic},
{'optimizer': gen_opt, 'frequency': 1}
)
Note:
Some things to know:
- Lightning calls ``.backward()`` and ``.step()`` on each optimizer
and learning rate scheduler as needed.
- If you use 16-bit precision (``precision=16``), Lightning will automatically
handle the optimizers for you.
- If you use multiple optimizers, :meth:`training_step` will have an additional
``optimizer_idx`` parameter.
- If you use LBFGS Lightning handles the closure function automatically for you.
- If you use multiple optimizers, gradients will be calculated only
for the parameters of current optimizer at each training step.
- If you need to control how often those optimizers step or override the
default ``.step()`` schedule, override the :meth:`optimizer_step` hook.
- If you only want to call a learning rate scheduler every ``x`` step or epoch,
or want to monitor a custom metric, you can specify these in a lr_dict:
.. code-block:: python
{
'scheduler': lr_scheduler,
'interval': 'step', # or 'epoch'
'monitor': 'val_f1',
'frequency': x,
}
"""
rank_zero_warn(
"`configure_optimizers` must be implemented to be used with the Lightning Trainer"
)
def manual_backward(self, loss: Tensor, optimizer: Optimizer, *args, **kwargs) -> None:
"""
Call this directly from your training_step when doing optimizations manually.
By using this we can ensure that all the proper scaling when using 16-bit etc has been done for you
This function forwards all args to the .backward() call as well.
.. tip:: In manual mode we still automatically clip grads if Trainer(gradient_clip_val=x) is set
.. tip:: In manual mode we still automatically accumulate grad over batches if
Trainer(accumulate_grad_batches=x) is set and you use `model.manual_optimizer_step(optimizer)`
Example::
def training_step(...):
(opt_a, opt_b) = self.optimizers()
loss = ...
# automatically applies scaling, etc...
self.manual_backward(loss, opt_a)
self.manual_optimizer_step(opt_a)
"""
# make sure we're using manual opt
self._verify_is_manual_optimization('manual_backward')
# backward
self._running_manual_backward = True
self.trainer.train_loop.backward(loss, optimizer, -1, *args, **kwargs)
self._running_manual_backward = False
def manual_optimizer_step(self,
optimizer: Optimizer,
*args,
make_optimizer_step: Optional[bool] = None,
optimizer_closure: Optional[Callable] = None,
** kwargs) -> None:
"""
Call this directly from your training_step when doing optimizations manually.
By using this we can ensure that all the proper scaling when using 16-bit etc has been done for you
.. tip:: In manual mode we still automatically accumulate grad over batches if
Trainer(accumulate_grad_batches=x) is set.
Args:
optimizer: Optimizer used to perform `.step()` call
make_optimizer_step: Whether to force an optimizer step. When nothing is provided,
we will use `accumulate_grad_batches` for accumulation frequency by default.
However, one coud provide True and False based on its own scheduling.
c.f example 2 and 3
optimizer_closure: One could provide its own optimizer_closure. Set to None by default.
args: Any parameters provided to optimizer.step()
kwargs: Any parameters provided to optimizer.step()
Example::
def training_step(...):
(opt_a, opt_b) = self.optimizers()
loss = ...
# automatically applies scaling, etc...
self.manual_backward(loss, opt_a)
# This will use accumulate gradients for `accumulate_grad_batches` batches
# and then run opt_a.step()
self.manual_optimizer_step(opt_a)
Example::
def training_step(self, batch, batch_idx):
# using Boring Model
opt = self.optimizers() # only 1 optimizer
def compute_loss():
x = batch[0]
x = F.dropout(x, 0.1)
predictions = self(x)
predictions = F.dropout(predictions, 0.1)
loss = self.loss(None, predictions)
return loss
def optimizer_closure():
# emulate MC dropout training
num_backward = 1
losses = []
for backward_idx in range(num_backward + 1):
loss = compute_loss()
losses.append(loss)
retain_graph = num_backward!= backward_idx
self.manual_backward(loss, opt, retain_graph=retain_graph)
loss_mean = torch.stack(losses).mean()
loss_std = torch.stack(losses).std()
self.log("train_loss_mean", loss_mean, on_step=True, prog_bar=True, on_epoch=True)
self.log("train_loss_std", loss_std, on_step=True, prog_bar=True, on_epoch=True)
self.manual_optimizer_step(opt, optimizer_closure=optimizer_closure)
Example::
# Scenario for a gan.
def training_step(self, batch, batch_idx, optimizer_idx):
# emulate gans training
opt_gen, opt_dis = self.optimizers()
# Note: Be careful, don't log on the same key in self.log in both closure
# as they will be aggregated together on epoch_end
def gen_closure():
... forward and compute loss for generator
loss_gen = ...
self.log("loss_gen", loss_gen, on_step=True, on_epoch=True)
self.manual_backward(loss_gen, opt_gen)
def dis_closure():
... forward and compute loss for discriminator
loss_dis = ...
self.log("loss_dis", loss_dis, on_step=True, on_epoch=True)
self.manual_backward(loss_dis, opt_dis)
# this will accumulate gradients for 2 batches and then call opt_gen.step()
self.manual_optimizer_step(
opt_gen,
optimizer_closure=gen_closure,
make_optimizer_step=batch_idx % 2 == 0)
# update discriminator every 4 batches
# therefore, no gradient accumulation for discriminator
if batch_idx % 4 == 0 :
# Note: Set make_optimizer_step to True or it will use by default
# Trainer(accumulate_grad_batches=x)
self.manual_optimizer_step(
opt_dis,
optimizer_closure=dis_closure,
make_optimizer_step=True)
"""
# make sure we're using manual opt
self._verify_is_manual_optimization('manual_optimizer_step')
should_make_optimizer_step = not self.trainer.train_loop.should_accumulate()
make_optimizer_step = make_optimizer_step if make_optimizer_step is not None else should_make_optimizer_step
if make_optimizer_step:
# mock closure function as the user is responsible to call `manual_backward`
def do_nothing_optimizer_closure():
return
is_callable = isinstance(optimizer_closure, types.FunctionType)
optimizer_closure = optimizer_closure if is_callable else do_nothing_optimizer_closure
self.trainer.train_loop.optimizer_step(
optimizer,
None,
self.trainer.batch_idx,
optimizer_closure,
*args,
**kwargs,
)
# update will be called after every optimizer_step call
if self.trainer.amp_backend == AMPType.NATIVE:
self.trainer.scaler.update()
# perform zero grad
optimizer.zero_grad()
else:
# make sure to call optimizer_closure when accumulating
if isinstance(optimizer_closure, types.FunctionType):
optimizer_closure()
def backward(self, loss: Tensor, optimizer: Optimizer, optimizer_idx: int, *args, **kwargs) -> None:
"""
Override backward with your own implementation if you need to.
Args:
loss: Loss is already scaled by accumulated grads
optimizer: Current optimizer being used
optimizer_idx: Index of the current optimizer being used
Called to perform backward step.
Feel free to override as needed.
The loss passed in has already been scaled for accumulated gradients if requested.
Example::
def backward(self, loss, optimizer, optimizer_idx):
loss.backward()
"""
if self.trainer.train_loop.automatic_optimization or self._running_manual_backward:
loss.backward(*args, **kwargs)
def toggle_optimizer(self, optimizer: Optimizer, optimizer_idx: int):
"""
Makes sure only the gradients of the current optimizer's parameters are calculated
in the training step to prevent dangling gradients in multiple-optimizer setup.
.. note:: Only called when using multiple optimizers
Override for your own behavior
Args:
optimizer:
optimizer_idx:
"""
for param in self.parameters():
param.requires_grad = False
for group in optimizer.param_groups:
for param in group['params']:
param.requires_grad = True
def optimizer_step(
self,
*args,
epoch: int = None,
batch_idx: int = None,
optimizer: Optimizer = None,
optimizer_idx: int = None,
optimizer_closure: Optional[Callable] = None,
on_tpu: bool = None,
using_native_amp: bool = None,
using_lbfgs: bool = None,
**kwargs,
) -> None:
r"""
Override this method to adjust the default way the
:class:`~pytorch_lightning.trainer.trainer.Trainer` calls each optimizer.
By default, Lightning calls ``step()`` and ``zero_grad()`` as shown in the example
once per optimizer.
.. tip:: Consider using `manual_optimizer_step` instead of overriding this method as done previously.
Warning:
If you are overriding this method, make sure that you pass the ``optimizer_closure`` parameter
to ``optimizer.step()`` function as shown in the examples. This ensures that
``train_step_and_backward_closure`` is called within
:meth:`~pytorch_lightning.trainer.training_loop.TrainLoop.run_training_batch`.
Args:
epoch: Current epoch
batch_idx: Index of current batch
optimizer: A PyTorch optimizer
optimizer_idx: If you used multiple optimizers this indexes into that list.
optimizer_closure: closure for all optimizers
on_tpu: true if TPU backward is required
using_native_amp: True if using native amp
using_lbfgs: True if the matching optimizer is lbfgs
Examples:
.. code-block:: python
# DEFAULT
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
optimizer.step(closure=optimizer_closure)
# Alternating schedule for optimizer steps (i.e.: GANs)
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# update generator opt every 2 steps
if optimizer_idx == 0:
if batch_idx % 2 == 0 :
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
# update discriminator opt every 4 steps
if optimizer_idx == 1:
if batch_idx % 4 == 0 :
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
# ...
# add as many optimizers as you want
Here's another example showing how to use this for more advanced things such as
learning rate warm-up:
.. code-block:: python
# learning rate warm-up
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# warm up lr
if self.trainer.global_step < 500:
lr_scale = min(1., float(self.trainer.global_step + 1) / 500.)
for pg in optimizer.param_groups:
pg['lr'] = lr_scale * self.learning_rate
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
Note:
If you also override the :meth:`~pytorch_lightning.core.hooks.ModelHooks.on_before_zero_grad`
model hook don't forget to add the call to it before ``optimizer.zero_grad()`` yourself.
"""
if on_tpu:
xm.optimizer_step(optimizer, optimizer_args={'closure': optimizer_closure, **kwargs})
elif self.trainer.amp_backend == AMPType.NATIVE:
# native amp does not yet support closures.
# TODO: pass the closure to the step ASAP
optimizer_closure()
self.trainer.scaler.step(optimizer)
elif self.trainer.amp_backend == AMPType.APEX:
# apex amp does not yet support closures.
# TODO: pass the closure to the step ASAP
optimizer_closure()
optimizer.step(*args, **kwargs)
else:
optimizer.step(closure=optimizer_closure, *args, **kwargs)
    def optimizer_zero_grad(
        self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int
    ):
        """Clear gradients before the next backward pass.

        Override to customize when/how gradients are zeroed (e.g. only every
        N batches).

        Args:
            epoch: Current epoch.
            batch_idx: Index of the current batch.
            optimizer: The optimizer whose gradients are cleared.
            optimizer_idx: Index of the optimizer (multi-optimizer setups).
        """
        optimizer.zero_grad()
def tbptt_split_batch(self, batch: Tensor, split_size: int) -> list:
r"""
When using truncated backpropagation through time, each batch must be split along the
time dimension. Lightning handles this by default, but for custom behavior override
this function.
Args:
batch: Current batch
split_size: The size of the split
Return:
List of batch splits. Each split will be passed to :meth:`training_step` to enable truncated
back propagation through time. The default implementation splits root level Tensors and
Sequences at dim=1 (i.e. time dim). It assumes that each time dim is the same length.
Examples:
.. code-block:: python
def tbptt_split_batch(self, batch, split_size):
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t:t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t:t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
Note:
Called in the training loop after
:meth:`~pytorch_lightning.callbacks.base.Callback.on_batch_start`
if :paramref:`~pytorch_lightning.trainer.Trainer.truncated_bptt_steps` > 0.
Each returned batch split is passed separately to :meth:`training_step`.
"""
time_dims = [
len(x[0])
for x in batch
if isinstance(x, (torch.Tensor, collections.Sequence))
]
assert len(time_dims) >= 1, "Unable to determine batch time dimension"
assert all(
x == time_dims[0] for x in time_dims
), "Batch time dimension length is ambiguous"
splits = []
for t in range(0, time_dims[0], split_size):
batch_split = []
for i, x in enumerate(batch):
if isinstance(x, torch.Tensor):
split_x = x[:, t: t + split_size]
elif isinstance(x, collections.Sequence):
split_x = [None] * len(x)
for batch_idx in range(len(x)):
split_x[batch_idx] = x[batch_idx][t: t + split_size]
batch_split.append(split_x)
splits.append(batch_split)
return splits
def summarize(self, mode: str = ModelSummary.MODE_DEFAULT) -> ModelSummary:
model_summary = ModelSummary(self, mode=mode)
log.info("\n" + str(model_summary))
return model_summary
def freeze(self) -> None:
r"""
Freeze all params for inference.
Example:
.. code-block:: python
model = MyLightningModule(...)
model.freeze()
"""
for param in self.parameters():
param.requires_grad = False
self.eval()
def unfreeze(self) -> None:
"""
Unfreeze all parameters for training.
.. code-block:: python
model = MyLightningModule(...)
model.unfreeze()
"""
for param in self.parameters():
param.requires_grad = True
self.train()
def get_progress_bar_dict(self) -> Dict[str, Union[int, str]]:
r"""
Implement this to override the default items displayed in the progress bar.
By default it includes the average loss value, split index of BPTT (if used)
and the version of the experiment when using a logger.
.. code-block::
Epoch 1: 4%|▎ | 40/1095 [00:03<01:37, 10.84it/s, loss=4.501, v_num=10]
Here is an example how to override the defaults:
.. code-block:: python
def get_progress_bar_dict(self):
# don't show the version number
items = super().get_progress_bar_dict()
items.pop("v_num", None)
return items
Return:
Dictionary with the items to be displayed in the progress bar.
"""
# call .item() only once but store elements without graphs
running_train_loss = self.trainer.train_loop.running_loss.mean()
avg_training_loss = (
running_train_loss.cpu().item()
if running_train_loss is not None
else float("NaN")
)
tqdm_dict = {"loss": "{:.3f}".format(avg_training_loss)}
if self.trainer.truncated_bptt_steps is not None:
tqdm_dict["split_idx"] = self.trainer.split_idx
if self.trainer.logger is not None and self.trainer.logger.version is not None:
version = self.trainer.logger.version
# show last 4 places of long version strings
version = version[-4:] if isinstance(version, str) else version
tqdm_dict["v_num"] = version
return tqdm_dict
    def _verify_is_manual_optimization(self, fn_name):
        """Raise if ``fn_name`` is used while automatic optimization is on.

        Manual-optimization helpers (``manual_backward``,
        ``manual_optimizer_step``) are only valid with
        ``Trainer(automatic_optimization=False)``.
        """
        if self.trainer.train_loop.automatic_optimization:
            m = f'to use {fn_name}, please disable automatic optimization: Trainer(automatic_optimization=False)'
            raise MisconfigurationException(m)
@classmethod
def _auto_collect_arguments(cls, frame=None) -> Tuple[Dict, Dict]:
"""
Collect all module arguments in the current constructor and all child constructors.
The child constructors are all the ``__init__`` methods that reach the current class through
(chained) ``super().__init__()`` calls.
Args:
frame: instance frame
Returns:
self_arguments: arguments dictionary of the first instance
parents_arguments: arguments dictionary of the parent's instances
"""
if not frame:
frame = inspect.currentframe()
frame_args = collect_init_args(frame.f_back, [])
self_arguments = frame_args[-1]
# set hyper_parameters in child
self_arguments = self_arguments
parents_arguments = {}
# add all arguments from parents
for args in frame_args[:-1]:
parents_arguments.update(args)
return self_arguments, parents_arguments
    def save_hyperparameters(self, *args, frame=None) -> None:
        """Save all model arguments so they are restored with checkpoints.

        Args:
            args: single object of `dict`, `Namespace` or `OmegaConf`
                or string names or arguments from class `__init__`
            frame: optional stack frame to inspect; defaults to the caller's.

        >>> from collections import OrderedDict
        >>> class ManuallyArgsModel(LightningModule):
        ...     def __init__(self, arg1, arg2, arg3):
        ...         super().__init__()
        ...         # manually assign arguments
        ...         self.save_hyperparameters('arg1', 'arg3')
        ...     def forward(self, *args, **kwargs):
        ...         ...
        >>> model = ManuallyArgsModel(1, 'abc', 3.14)
        >>> model.hparams
        "arg1": 1
        "arg3": 3.14

        >>> class AutomaticArgsModel(LightningModule):
        ...     def __init__(self, arg1, arg2, arg3):
        ...         super().__init__()
        ...         # equivalent automatic
        ...         self.save_hyperparameters()
        ...     def forward(self, *args, **kwargs):
        ...         ...
        >>> model = AutomaticArgsModel(1, 'abc', 3.14)
        >>> model.hparams
        "arg1": 1
        "arg2": abc
        "arg3": 3.14

        >>> class SingleArgModel(LightningModule):
        ...     def __init__(self, params):
        ...         super().__init__()
        ...         # manually assign single argument
        ...         self.save_hyperparameters(params)
        ...     def forward(self, *args, **kwargs):
        ...         ...
        >>> model = SingleArgModel(Namespace(p1=1, p2='abc', p3=3.14))
        >>> model.hparams
        "p1": 1
        "p2": abc
        "p3": 3.14
        """
        if not frame:
            # inspect the caller's frame (the subclass __init__).
            frame = inspect.currentframe().f_back
        init_args = get_init_args(frame)
        assert init_args, "failed to inspect the self init"
        if not args:
            # no names given: take every constructor argument
            hp = init_args
            self._hparams_name = "kwargs" if hp else None
        else:
            # take only the arguments listed in `save_hyperparameters`
            isx_non_str = [i for i, arg in enumerate(args) if not isinstance(arg, str)]
            if len(isx_non_str) == 1:
                # single non-string object (dict/Namespace/OmegaConf) passed directly
                hp = args[isx_non_str[0]]
                # recover the constructor parameter name the object was bound to
                cand_names = [k for k, v in init_args.items() if v == hp]
                self._hparams_name = cand_names[0] if cand_names else None
            else:
                # string names: pick the matching constructor arguments
                hp = {arg: init_args[arg] for arg in args if isinstance(arg, str)}
                self._hparams_name = "kwargs"
        # `hparams` are expected here
        if hp:
            self._set_hparams(hp)
        # deep copy so later runtime mutations are not reflected in the snapshot
        self._hparams_initial = copy.deepcopy(self._hparams)
    def _set_hparams(self, hp: Union[dict, Namespace, str]) -> None:
        """Normalize and store hyperparameters on the module.

        Namespace -> dict -> AttributeDict; primitives and unsupported
        container types are rejected. Dict-like hparams are merged into an
        existing dict container, anything else replaces it.
        """
        if isinstance(hp, Namespace):
            hp = vars(hp)
        if isinstance(hp, dict):
            hp = AttributeDict(hp)
        elif isinstance(hp, PRIMITIVE_TYPES):
            raise ValueError(f"Primitives {PRIMITIVE_TYPES} are not allowed.")
        elif not isinstance(hp, ALLOWED_CONFIG_TYPES):
            raise ValueError(f"Unsupported config type of {type(hp)}.")
        # merge into an existing dict container, otherwise replace wholesale
        if isinstance(hp, dict) and isinstance(self.hparams, dict):
            self.hparams.update(hp)
        else:
            self._hparams = hp
def to_onnx(self, file_path: str, input_sample: Optional[Tensor] = None, **kwargs):
"""Saves the model in ONNX format
Args:
file_path: The path of the file the model should be saved to.
input_sample: A sample of an input tensor for tracing.
**kwargs: Will be passed to torch.onnx.export function.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
>>> with tempfile.NamedTemporaryFile(suffix='.onnx', delete=False) as tmpfile:
... model = SimpleModel()
... input_sample = torch.randn((1, 64))
... model.to_onnx(tmpfile.name, input_sample, export_params=True)
... os.path.isfile(tmpfile.name)
True
"""
if isinstance(input_sample, Tensor):
input_data = input_sample
elif self.example_input_array is not None:
input_data = self.example_input_array
else:
if input_sample is not None:
raise ValueError(
f"Received `input_sample` of type {type(input_sample)}. Expected type is `Tensor`"
)
else:
raise ValueError(
"Could not export to ONNX since neither `input_sample` nor"
" `model.example_input_array` attribute is set."
)
input_data = input_data.to(self.device)
if "example_outputs" not in kwargs:
self.eval()
with torch.no_grad():
kwargs["example_outputs"] = self(input_data)
torch.onnx.export(self, input_data, file_path, **kwargs)
def to_torchscript(
self, file_path: Optional[str] = None, method: Optional[str] = 'script',
example_inputs: Optional[Union[torch.Tensor, Tuple[torch.Tensor]]] = None, **kwargs
) -> Union[ScriptModule, Dict[str, ScriptModule]]:
"""
By default compiles the whole model to a :class:`~torch.jit.ScriptModule`.
If you want to use tracing, please provided the argument `method='trace'` and make sure that either the
example_inputs argument is provided, or the model has self.example_input_array set.
If you would like to customize the modules that are scripted you should override this method.
In case you want to return multiple modules, we recommend using a dictionary.
Args:
file_path: Path where to save the torchscript. Default: None (no file saved).
method: Whether to use TorchScript's script or trace method. Default: 'script'
example_inputs: Tensor to be used to do tracing when method is set to 'trace'.
Default: None (Use self.example_input_array)
**kwargs: Additional arguments that will be passed to the :func:`torch.jit.script` or
:func:`torch.jit.trace` function.
Note:
- Requires the implementation of the
:meth:`~pytorch_lightning.core.lightning.LightningModule.forward` method.
- The exported script will be set to evaluation mode.
- It is recommended that you install the latest supported version of PyTorch
to use this feature without limitations. See also the :mod:`torch.jit`
documentation for supported features.
Example:
>>> class SimpleModel(LightningModule):
... def __init__(self):
... super().__init__()
... self.l1 = torch.nn.Linear(in_features=64, out_features=4)
...
... def forward(self, x):
... return torch.relu(self.l1(x.view(x.size(0), -1)))
...
>>> model = SimpleModel()
>>> torch.jit.save(model.to_torchscript(), "model.pt") # doctest: +SKIP
>>> os.path.isfile("model.pt") # doctest: +SKIP
>>> torch.jit.save(model.to_torchscript(file_path="model_trace.pt", method='trace', # doctest: +SKIP
... example_inputs=torch.randn(1, 64))) # doctest: +SKIP
>>> os.path.isfile("model_trace.pt") # doctest: +SKIP
True
Return:
This LightningModule as a torchscript, regardless of whether file_path is
defined or not.
"""
mode = self.training
with torch.no_grad():
if method == 'script':
torchscript_module = torch.jit.script(self.eval(), **kwargs)
elif method == 'trace':
# if no example inputs are provided, try to see if model has example_input_array set
if example_inputs is None:
example_inputs = self.example_input_array
# automatically send example inputs to the right device and use trace
example_inputs = self.transfer_batch_to_device(example_inputs, device=self.device)
torchscript_module = torch.jit.trace(func=self.eval(), example_inputs=example_inputs, **kwargs)
else:
raise ValueError(f"The 'method' parameter only supports 'script' or 'trace', but value given was:"
f"{method}")
self.train(mode)
if file_path is not None:
torch.jit.save(torchscript_module, file_path)
return torchscript_module
@property
def hparams(self) -> Union[AttributeDict, dict, Namespace]:
if not hasattr(self, "_hparams"):
self._hparams = AttributeDict()
return self._hparams
@property
def hparams_initial(self) -> AttributeDict:
if not hasattr(self, "_hparams_initial"):
return AttributeDict()
# prevent any change
return copy.deepcopy(self._hparams_initial)
@hparams.setter
def hparams(self, hp: Union[dict, Namespace, Any]):
hparams_assignment_name = self.__get_hparams_assignment_variable()
self._hparams_name = hparams_assignment_name
self._set_hparams(hp)
# this resolves case when user does not uses `save_hyperparameters` and do hard assignement in init
if not hasattr(self, "_hparams_initial"):
self._hparams_initial = copy.deepcopy(self._hparams)
def __get_hparams_assignment_variable(self):
""""""
"""
looks at the code of the class to figure out what the user named self.hparams
this only happens when the user explicitly sets self.hparams
"""
try:
class_code = inspect.getsource(self.__class__)
lines = class_code.split("\n")
for line in lines:
line = re.sub(r"\s+", "", line, flags=re.UNICODE)
if ".hparams=" in line:
return line.split("=")[1]
except Exception as e:
return "hparams"
return None
| 38.744948
| 119
| 0.578289
|
acfdc263394d91adfc12ad45b583ea0b56c93451
| 2,530
|
py
|
Python
|
lib/sedna/common/utils.py
|
lidongen/sedna
|
fe54975c435e7c5a211f7d5960489d1d7f5a19ff
|
[
"Apache-2.0"
] | 1
|
2021-06-19T10:19:28.000Z
|
2021-06-19T10:19:28.000Z
|
lib/sedna/common/utils.py
|
TymonXie/sedna
|
ee71aedec864146dd245af740a8496c3d57ef758
|
[
"Apache-2.0"
] | null | null | null |
lib/sedna/common/utils.py
|
TymonXie/sedna
|
ee71aedec864146dd245af740a8496c3d57ef758
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import codecs
import logging
import os
import pickle
import shutil
LOG = logging.getLogger(__name__)
def clean_folder(folder):
    """Delete every file, symlink, and subdirectory inside ``folder``.

    The folder itself is kept. A missing folder is only logged, not created.
    Failures on individual entries are logged and skipped so one bad entry
    does not abort the cleanup.
    """
    if not os.path.exists(folder):
        LOG.info(f"folder={folder} is not exist.")
        return

    LOG.info(f"clean target dir, dir={folder}")
    for entry in os.listdir(folder):
        entry_path = os.path.join(folder, entry)
        try:
            if os.path.isfile(entry_path) or os.path.islink(entry_path):
                os.unlink(entry_path)
            elif os.path.isdir(entry_path):
                shutil.rmtree(entry_path)
        except Exception as e:
            LOG.error('Failed to delete %s. Reason: %s' % (entry_path, e))
def remove_path_prefix(org_str: str, prefix: str):
    """Strip ``prefix`` from the start of ``org_str``.

    Used to convert an in-container path to the corresponding host path. A
    trailing ``/`` on ``prefix`` is dropped first so the result keeps its
    leading separator. If ``org_str`` does not start with the prefix it is
    returned unchanged and the mismatch is logged.
    """
    normalized = prefix[:-1] if prefix.endswith('/') else prefix
    if not org_str.startswith(normalized):
        LOG.info(f"remove prefix failed, original str={org_str}, "
                 f"prefix={prefix}")
        return org_str
    return org_str.replace(normalized, '', 1)
def obj_to_pickle_string(x):
    """Serialize ``x`` with pickle and return it as a base64 text string."""
    raw = pickle.dumps(x)
    return codecs.encode(raw, "base64").decode()
def pickle_string_to_obj(s):
    """Inverse of ``obj_to_pickle_string``: decode base64 text and unpickle.

    NOTE: ``pickle.loads`` executes arbitrary code during deserialization —
    only ever feed this function trusted data.
    """
    raw = codecs.decode(s.encode(), "base64")
    return pickle.loads(raw)
def model_layer_flatten(weights):
    """Flatten each layer array in ``weights`` to one dimension.

    For example, shapes ``[(3, 3, 3, 64), (64,), (3, 3, 64, 32), ...]``
    become ``[(1728,), (64,), (18432,), ...]``.

    :param weights: iterable of n-d arrays, one per layer
    :return: list of 1-d arrays, in the same order
    """
    return [layer.reshape(-1) for layer in weights]
def model_layer_reshape(flatten_weights, shapes):
    """Restore flattened layer arrays to their original shapes.

    Inverse of ``model_layer_flatten``.

    :param flatten_weights: list of 1-d arrays
    :param shapes: target shape for each corresponding array
    :return: list of reshaped arrays, in the same order
    """
    return [
        flat.reshape(shapes[idx])
        for idx, flat in enumerate(flatten_weights)
    ]
| 32.435897
| 78
| 0.640711
|
acfdc428154a34ef537c3b24a5a9fac6639ba791
| 408
|
py
|
Python
|
packages/migrations/0003_auto_20210416_1007.py
|
dandeduck/package-tracking-web
|
f7cb3dffd6f7f6b7ced5b1106a049c79c192dfa5
|
[
"MIT"
] | 1
|
2021-02-11T22:16:51.000Z
|
2021-02-11T22:16:51.000Z
|
packages/migrations/0003_auto_20210416_1007.py
|
dandeduck/package-tracking-web
|
f7cb3dffd6f7f6b7ced5b1106a049c79c192dfa5
|
[
"MIT"
] | 54
|
2021-02-11T18:52:11.000Z
|
2021-06-13T13:45:01.000Z
|
packages/migrations/0003_auto_20210416_1007.py
|
dandeduck/package-tracking-web
|
f7cb3dffd6f7f6b7ced5b1106a049c79c192dfa5
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.12 on 2021-04-16 10:07
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the 'packages' app:
    # makes Address.street_number nullable (PositiveSmallIntegerField, null=True).
    dependencies = [
        ('packages', '0002_auto_20210321_0949'),
    ]
    operations = [
        migrations.AlterField(
            model_name='address',
            name='street_number',
            field=models.PositiveSmallIntegerField(null=True),
        ),
    ]
| 21.473684
| 62
| 0.620098
|
acfdc44d2168dcc389d35cb26ec15afdfaf9f370
| 10,050
|
py
|
Python
|
ensemble/control/tactical/gapcordinator.py
|
licit-lab/ensemble
|
7a78ef0d69610d4fcfc5e008f931ade15e35acbf
|
[
"Linux-OpenIB"
] | null | null | null |
ensemble/control/tactical/gapcordinator.py
|
licit-lab/ensemble
|
7a78ef0d69610d4fcfc5e008f931ade15e35acbf
|
[
"Linux-OpenIB"
] | null | null | null |
ensemble/control/tactical/gapcordinator.py
|
licit-lab/ensemble
|
7a78ef0d69610d4fcfc5e008f931ade15e35acbf
|
[
"Linux-OpenIB"
] | null | null | null |
"""
**Platoon Gap Coordinator**
This module details the implementation of the ``Front Gap`` and ``Rear Gap`` Coordinators existing in each one of the vehicles created when running a platoon. The coordinators have access to a centralized information center called ``Data Query`` to retrieve information in the vecinity of the vehicle.
"""
# ============================================================================
# STANDARD IMPORTS
# ============================================================================
from typing import Iterable
import pandas as pd
import networkx as nx
from itertools import groupby
from dataclasses import dataclass, asdict
from itertools import chain
# ============================================================================
# INTERNAL IMPORTS
# ============================================================================
from ensemble.component.vehiclelist import EMPTY_MESSAGE, VehicleList
from ensemble.logic.platoon_set import PlatoonSet
from ensemble.logic.subscriber import Subscriber
from ensemble.control.tactical.vehcoordinator import (
VehGapCoordinator,
MAXNDST,
PLT_TYP,
)
from ensemble.metaclass.controller import AbsController
from ensemble.tools.screen import log_in_terminal
# ============================================================================
# CLASS AND DEFINITIONS
# ============================================================================
EMPTY_MESSAGE = "\tNo platoons have been registered"
@dataclass
class GlobalGapCoordinator(Subscriber):
    def __init__(self, vehicle_registry: VehicleList):
        # Directed graph of gap coordinators: node = vehicle id (with a "vgc"
        # attribute), edge = follower -> leader.
        self._gcnet = nx.DiGraph()
        # Subscribe to the vehicle registry (publisher) for fleet updates.
        super().__init__(vehicle_registry)
        # Platoon sets — presumably keyed by platoon id; populated by
        # update_platoons (defined later in this file) — TODO confirm.
        self.platoon_sets = {}
        # Coordinators released from the graph (their vehicle left the registry).
        self.free_gcs = []
        self.update_platoons()
# =========================================================================
# PROTOCOLS
# =========================================================================
    def __hash__(self):
        # Identity follows the publisher (the subscribed vehicle registry).
        return hash(self._publisher)
    def __getitem__(self, index):
        # Look up the VehGapCoordinator stored on graph node `index` (a vehicle
        # id); returns None when the node carries no "vgc" attribute.
        result = self._gcnet.nodes()[index].get("vgc")
        return result
def pandas_print(self, columns: Iterable = []) -> pd.DataFrame:
"""Transforms vehicle list into a pandas for rendering purposes
Returns:
df (DataFrame): Returns a table with pandas data.
"""
veh_data = []
for _, vgc in self._gcnet.nodes(data=True):
data = vgc.get("vgc")
d = asdict(data)
d = dict(d, **asdict(data.ego))
d["platoonid"] = data.platoonid
d["distance"] = data.ego.distance
veh_data.append(d)
df = pd.DataFrame(veh_data)
if columns and not df.empty:
df = df[columns]
return df.set_index(["platoonid", "vehid"]) if not df.empty else df
def pretty_print(self, columns: list = []) -> str:
"""Summary of info"""
df = self.pandas_print(["platoonid", "vehid"] + columns)
return EMPTY_MESSAGE if df.empty else str(df)
    def __str__(self):
        # Before the graph exists, fall back to the canned "no platoons" text.
        if self._gcnet is None:
            return EMPTY_MESSAGE
        return str(self.pandas_print())
    def __repr__(self):
        # Mirrors __str__ but uses the DataFrame repr; same empty fallback.
        if self._gcnet is None:
            return EMPTY_MESSAGE
        return repr(self.pandas_print())
def __len__(self):
if self._gcnet is None:
return 0
return len(self._gcnet.nodes)
# =========================================================================
# METHODS
# =========================================================================
def update(self):
"""Follower method to add/release vehicle gapcoordinator"""
self.add_vehicle_gcs()
self.release_vehicle_gcs()
self.update_leaders()
def add_vehicle_gcs(self):
"""Add all gap coordinators w.r.t publisher"""
for veh, _ in self._publisher.iterate_links_distances():
vgc = VehGapCoordinator(veh)
self.add_gapcoordinator(vgc)
def release_vehicle_gcs(self):
"""Releases all gap coordinators w.r.t publihser"""
for vgc in self.iter_group_link(downtoup=True, group=True):
if (
vgc.ego.vehid
# not in self._publisher._request.get_vehicles_property("vehid")
not in [v.vehid for v in self._publisher]
):
self.release_gapcoordinator(vgc)
def vgcs(self):
"Existing vehicle gap coordinators"
return iter(
map(lambda x: x[1].get("vgc"), self._gcnet.nodes(data=True))
)
def add_gapcoordinator(self, vgc: VehGapCoordinator):
"""Adds a single gap coordinator to the list"""
if vgc not in self.vgcs() and vgc.ego.vehtype in PLT_TYP:
self._gcnet.add_node(vgc.ego.vehid, vgc=vgc)
self[vgc.ego.vehid].init_reference()
self.update_leader(vgc)
def release_gapcoordinator(self, vgc: VehGapCoordinator):
"""Releases a single gap coordinator from the node list"""
self._gcnet.remove_node(vgc.ego.vehid)
self.free_gcs.append(vgc)
def update_leader(self, vgc: VehGapCoordinator):
"""Add or creates leader for a specific gap coordinator"""
leader = self._publisher.get_leader(vgc.ego, distance=MAXNDST)
if (
leader is not None
and leader.vehtype in PLT_TYP
and vgc.ego.vehtype in PLT_TYP
):
self._gcnet.add_edge(vgc.ego.vehid, leader.vehid)
self[vgc.ego.vehid].leader = self[leader.vehid]
self[vgc.ego.vehid].leader_data = {"id": leader.vehid}
def update_leaders(self):
"""Updates leaders for all gap coordinators"""
for vgc in self.iter_group_link(downtoup=True, group=True):
self.update_leader(vgc)
def update_states(self):
"""Update platoon state according to current information"""
for vgc in self.iter_group_link(downtoup=True, group=True):
vgc.status = vgc.solve_state()
def iter_group_link(self, downtoup=True, group=False):
"""Iteratorator by link ordered from largest ttd towards smaller
Args:
downtoup (bool, optional): Downstream to upstream. Defaults to True.
group (bool, optional): Returns without grouping per platoon. Defaults to False.
Yields:
vgc (VehicleGapCoordinator): Vehicle gap coordinator or iterable.
"""
vtf = lambda x: x[1].get("vgc").ego.link
vgcs = sorted(
self._gcnet.nodes(data=True),
key=lambda x: x[1].get("vgc").ego.ttd,
reverse=downtoup,
)
for _, group_gc in groupby(vgcs, vtf):
if group:
for _, gc in group_gc:
yield gc.get("vgc")
else:
yield group_gc
def create_platoon_sets(self):
"""Create all platoons subsets"""
converter = lambda x: x[1].get("vgc")
for vgc in self.iter_group_link(downtoup=True, group=True):
if not vgc.platoon:
if vgc.leader.ego == vgc.ego or vgc.ego in PLT_TYP:
# Head
ps = PlatoonSet((vgc,))
self.platoon_sets[ps.platoonid] = ps
vgc.positionid = len(ps) - 1
else:
# Try join from behind
# Retrieve id of leader
lps = self.platoon_sets[vgc.leader.platoonid]
nwps = PlatoonSet((vgc,))
jps = lps + nwps
if isinstance(jps, tuple):
# This means back was refused
self.platoon_sets[jps[1].platoonid] = jps[1]
vgc.positionid = len(jps[1]) - 1
else:
self.platoon_sets[vgc.leader.platoonid] = jps
PlatoonSet.set_pid(
nwps.platoonid
) # Retrieves former id
vgc.positionid = len(jps) - 1
vgc.platoon = True
def update_platoons(self):
"""First iteration to fill the platoon registry based on the current
vehicle information.
"""
# The main idea to update the platoon_registry is the following:
# 1. Once the vehicle registry is updated, via a dispatch may update
# the list of gap coordinators.
# 2. When entering here gap coordinators should be available.
# 3. W
# 2. Merge gap coordinators:
# 2a. Iterate over gc per link
# 2b. Iterate from upstream towards downstream on gc (small with largest ttd)
# 2c. Consider the gc on the current link
# 2d. For ech gc find it's leader.
# 2d1. Create a platoon set for the vehicle with less ttd
# 2d1. Is my leader joinable?
# yes -> join current platoon set with my leader
# no -> return
self.update()
# Gap Coord (gc) Group by link (Vehicle in same link)
self.create_platoon_sets()
self.update_states()
@property
def nplatoons(self) -> int:
"""Return the number of created platoons"""
return len(self.platoon_sets.keys())
@property
def cacc(self):
"""Returns the operational controller object"""
return self._cacc
@cacc.setter
def cacc(self, control: AbsController):
"""A function just to attach the control of the system to the layer and initialize the references
Args:
control (AbsController): Callable, operational controller
"""
self._cacc = control
def apply_cacc(self, time: float):
"""This method intends to apply the cacc over all vehicles within the platoon at specific time step"""
for vgc in self.iter_group_link(downtoup=True, group=True):
vgc.evolve_control(self.cacc, time)
| 36.948529
| 305
| 0.553433
|
acfdc587e4eddd2a0926bb0d96aeecb97613a28b
| 2,538
|
py
|
Python
|
mypy_boto3_builder/cli_parser.py
|
pyto86pri/mypy_boto3_builder
|
e8132dc4632430e0abd4cd330af51a8b1c82028f
|
[
"MIT"
] | null | null | null |
mypy_boto3_builder/cli_parser.py
|
pyto86pri/mypy_boto3_builder
|
e8132dc4632430e0abd4cd330af51a8b1c82028f
|
[
"MIT"
] | null | null | null |
mypy_boto3_builder/cli_parser.py
|
pyto86pri/mypy_boto3_builder
|
e8132dc4632430e0abd4cd330af51a8b1c82028f
|
[
"MIT"
] | null | null | null |
"""
CLI parser.
"""
import argparse
from pathlib import Path
from typing import Sequence
import pkg_resources
from mypy_boto3_builder.service_name import ServiceName, ServiceNameCatalog
def get_absolute_path(path: str) -> Path:
    """
    Convert a path string into an absolute ``Path`` object.

    Arguments:
        path -- String containing path.

    Returns:
        Absolute path.
    """
    as_path = Path(path)
    return as_path.absolute()
def get_service_name(name: str) -> ServiceName:
    """
    Resolve a boto3 service name string to a ``ServiceName``.

    Looks the name up in ``ServiceNameCatalog`` first; if it is not
    registered there, a new entry is created for it instead.

    Arguments:
        name -- Service name.

    Returns:
        The matching or newly created ``ServiceName``.
    """
    try:
        found = ServiceNameCatalog.find(name)
    except ValueError:
        # Unknown to the catalog -- register it on the fly.
        return ServiceNameCatalog.create(name)
    return found
def parse_args(args: Sequence[str]) -> argparse.Namespace:
    """
    Main CLI parser for builder.

    Arguments:
        args -- Raw command-line arguments (without the program name).

    Returns:
        Parsed namespace, with the builder version attached as
        ``builder_version``.
    """
    # Resolve our own distribution version; fall back when not installed.
    try:
        builder_version = pkg_resources.get_distribution(
            "mypy-boto3-builder"
        ).version
    except pkg_resources.DistributionNotFound:
        builder_version = "0.0.0"

    parser = argparse.ArgumentParser(
        "mypy_boto3_builder", description="Builder for mypy-boto3."
    )
    parser.add_argument(
        "-d", "--debug", action="store_true", help="Show debug messages"
    )
    parser.add_argument(
        "-b",
        "--build-version",
        help="Set custom output version, otherwise boto3 version is used.",
    )
    parser.add_argument(
        "-v", "--version", action="version", version=builder_version
    )
    parser.add_argument(
        "--skip-master",
        action="store_true",
        help="Whether to skip master and stubs modules",
    )
    parser.add_argument(
        "--skip-services",
        action="store_true",
        help="Whether to skip service modules",
    )
    parser.add_argument(
        "--panic",
        action="store_true",
        help="Raise exception on logger warning and above",
    )
    parser.add_argument(
        "output_path",
        metavar="OUTPUT_PATH",
        help="Output path",
        type=get_absolute_path,
    )
    parser.add_argument(
        "-s",
        "--services",
        dest="service_names",
        nargs="*",
        metavar="SERVICE_NAME",
        help="List of AWS services, by default all services are used",
        type=get_service_name,
        default=[],
    )
    parser.add_argument(
        "--installed",
        action="store_true",
        help="Generate already installed packages for typings folder.",
    )

    result = parser.parse_args(args)
    result.builder_version = builder_version
    return result
| 25.897959
| 97
| 0.638298
|
acfdc5c507065fb72c0ddd5e8ac1ad81c3dee4d2
| 3,573
|
py
|
Python
|
get_pet_labels.py
|
embeaver/Dog-breed-classifier
|
954c6394d135e54c91c204669bb23da9383185cf
|
[
"MIT"
] | null | null | null |
get_pet_labels.py
|
embeaver/Dog-breed-classifier
|
954c6394d135e54c91c204669bb23da9383185cf
|
[
"MIT"
] | null | null | null |
get_pet_labels.py
|
embeaver/Dog-breed-classifier
|
954c6394d135e54c91c204669bb23da9383185cf
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/get_pet_labels.py
#
# PROGRAMMER: Erika Beaver
# DATE CREATED: 3/11/19
# REVISED DATE: 3/31/19
# PURPOSE: Create the function get_pet_labels that creates the pet labels from
# the image's filename. This function inputs:
# - The Image Folder as image_dir within get_pet_labels function and
# as in_arg.dir for the function call within the main function.
# This function creates and returns the results dictionary as results_dic
# within get_pet_labels function and as results within main.
# The results_dic dictionary has a 'key' that's the image filename and
# a 'value' that's a list. This list will contain the following item
# at index 0 : pet image label (string).
#
##
# Imports python modules
from os import listdir
# TODO 2: Define get_pet_labels function below please be certain to replace None
# in the return statement with results_dic dictionary that you create
# with this function
#
def get_pet_labels(image_dir):
    """
    Creates a dictionary of pet labels (results_dic) based upon the filenames
    of the image files. These pet image labels are used to check the accuracy
    of the labels that are returned by the classifier function, since the
    filenames of the images contain the true identity of the pet in the image.
    Labels are lower-cased with leading/trailing whitespace stripped.
    (ex. filename = 'Boston_terrier_02259.jpg' Pet label = 'boston terrier')

    Parameters:
     image_dir - The (full) path to the folder of images that are to be
                 classified by the classifier function (string)
    Returns:
      results_dic - Dictionary with 'key' as image filename and 'value' as a
      List. The list contains the following item:
         index 0 = pet image label (string)
    """
    # BUG FIX: list the directory that was actually requested; previously
    # the path was hard-coded to 'pet_images/', silently ignoring image_dir.
    filename_list = listdir(image_dir)

    results_dic = dict()

    for filename in filename_list:
        # Skip hidden files such as .DS_Store.
        if filename.startswith('.'):
            continue
        # Keep only the alphabetic words of the lower-cased filename,
        # e.g. 'Boston_terrier_02259.jpg' -> 'boston terrier'.
        words = filename.lower().split('_')
        pet_label = " ".join(word for word in words if word.isalpha()).strip()
        # Only add the filename if it is not already present (listdir never
        # yields duplicates, so the warning branch is defensive only).
        if filename not in results_dic:
            results_dic[filename] = [pet_label]
        else:
            print("** Warning: Key= ", filename,
                  "already exists in results_dic")

    return results_dic
| 47.64
| 105
| 0.642317
|
acfdc6b34873698aecc844a26aa9a88950865231
| 2,668
|
py
|
Python
|
chastewebservice.py
|
ModellingWebLab/fc-runner
|
24daeaf10ad8afc77c2d17606b7076317be94b0e
|
[
"BSD-3-Clause"
] | null | null | null |
chastewebservice.py
|
ModellingWebLab/fc-runner
|
24daeaf10ad8afc77c2d17606b7076317be94b0e
|
[
"BSD-3-Clause"
] | 6
|
2019-05-24T11:23:38.000Z
|
2020-10-06T09:45:41.000Z
|
chastewebservice.py
|
ModellingWebLab/fc-runner
|
24daeaf10ad8afc77c2d17606b7076317be94b0e
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
import cgi
import cgitb
import os
import sys
import fcws
temporaryDir = fcws.config['temp_dir']
debugPrefix = fcws.config['debug_log_file_prefix']
cgitb.enable(format='text', context=1, logdir=os.path.join(temporaryDir, debugPrefix + 'cgitb'))
def SendError(msg):
    """Emit a minimal HTML error page to stdout and terminate the script."""
    page = (
        "<html><head><title>ChastePermissionError</title></head>"
        "<body>%s</body></html>" % msg
    )
    print("Content-Type: text/html\n\n")
    print(page)
    sys.exit(0)
# Parse sent objects
form = cgi.FieldStorage()

# Every request must carry the shared-secret password from the config.
if 'password' not in form or form['password'].value != fcws.config['password']:
    SendError("Missing or incorrect password supplied.")

if 'cancelTask' in form:
    # Special action: cancel or revoke an experiment
    print("Content-Type: text/plain\n\n")
    fcws.CancelExperiment(form['cancelTask'].value)
elif 'getModelInterface' in form:
    # Special action: get the ontology interface for a model
    for field in ['callBack', 'signature']:
        if field not in form:
            SendError("Missing required field.")
    print("Content-Type: text/plain\n\n")
    # BUG FIX: the field is named 'getModelInterface' (lower-case 'g');
    # the previous 'GetModelInterface' lookup raised KeyError on every
    # model-interface request.
    fcws.GetModelInterface(
        form['callBack'].value, form['signature'].value, form['getModelInterface'].value)
elif 'getProtoInterface' in form:
    # Special action: get the ontology interface for a protocol
    for field in ['callBack', 'signature']:
        if field not in form:
            SendError("Missing required field.")
    print("Content-Type: text/plain\n\n")
    fcws.GetProtocolInterface(
        form['callBack'].value, form['signature'].value, form['getProtoInterface'].value)
else:
    # Standard action: schedule experiment
    for field in ['callBack', 'signature', 'model', 'protocol', 'user', 'isAdmin']:
        if field not in form:
            SendError("Missing required field.")
    print("Content-Type: text/plain\n\n")
    signature = form["signature"].value
    # Wrap the rest in a try so we alert the caller properly if an exception occurs
    try:
        callBack = form["callBack"].value
        modelUrl = form["model"].value
        protocolUrl = form["protocol"].value
        args = (callBack, signature, modelUrl, protocolUrl)
        kwargs = {
            'user': form['user'].value,
            'isAdmin': (form['isAdmin'].value == 'true'),
        }
        # Optional fitting-experiment inputs: only forwarded when both given.
        if 'dataset' in form and 'fittingSpec' in form:
            kwargs['datasetUrl'] = form['dataset'].value
            kwargs['fittingSpecUrl'] = form['fittingSpec'].value
        fcws.ScheduleExperiment(*args, **kwargs)
    except Exception as e:
        # BUG FIX: 'signature' is already the plain string extracted above;
        # calling .value on it raised AttributeError here, masking the
        # original error being reported.
        print(signature, "failed due to unexpected error:", e, "<br/>")
        print("Full internal details follow:<br/>")
        raise
| 35.573333
| 96
| 0.654798
|
acfdc78df7242d3584becdd2c05b1cc4d5e49461
| 6,087
|
py
|
Python
|
hw2/test/test.py
|
idoleat/P-Language-Compiler-CourseProject
|
57db735b349a0a3a30d78b927953e2d44b7c7d53
|
[
"MIT"
] | 7
|
2020-09-10T16:54:49.000Z
|
2022-03-15T12:39:23.000Z
|
hw2/test/test.py
|
idoleat/simple-P-compiler
|
57db735b349a0a3a30d78b927953e2d44b7c7d53
|
[
"MIT"
] | null | null | null |
hw2/test/test.py
|
idoleat/simple-P-compiler
|
57db735b349a0a3a30d78b927953e2d44b7c7d53
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
import subprocess
import os
import sys
import json
from argparse import ArgumentParser
class Grader:
    """Runs a parser executable over sample test cases and diffs its output
    against the provided sample solutions, accumulating a score.

    Per-case outputs, the total score (``score.txt``) and the unified diffs
    of failing cases (``diff.txt``) are written under ``self.output_dir``.
    """

    # Basic test cases: case id -> case base name (files in basic_case_dir).
    basic_case_dir = "./basic_cases"
    basic_cases = {
        1 : "decl",
        2 : "expr1",
        3 : "expr2",
        4 : "expr3",
        5: "function1",
        6: "function2",
        7: "relation",
        8: "simple",
        9: "statement",
        10: "whilefor1",
        11: "whilefor2"
    }
    # Index 0 is padding so that a case id indexes its score directly.
    basic_case_scores = [0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4]
    # Advanced test cases: case id -> case base name (files in advance_case_dir).
    advance_case_dir = "./advance_cases"
    advance_cases = {
        1 : "arrayErr",
        2 : "assignErr",
        3 : "compoundErr",
        4 : "conditionErr",
        5 : "declErr",
        6: "funcErr",
        7: "general1",
        8: "general2",
        9: "general3",
        10: "general4",
        11: "general5",
        12: "parentheses",
        13: "syntacticErr",
        14: "whileErr"
    }
    advance_case_scores = [0, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4]
    # Accumulated unified-diff text of all failing cases (class-level default;
    # rebound per instance in run()).
    diff_result = ""

    def __init__(self, parser):
        """parser -- path to the parser executable under test."""
        self.parser = parser
        self.output_dir = "result"
        if not os.path.exists(self.output_dir):
            os.makedirs(self.output_dir)

    def get_case_id_list(self, basic_id, advance_id):
        """Select which cases to run; id 0 means 'all cases' of that kind."""
        if basic_id == 0:
            self.basic_id_list = self.basic_cases.keys()
        else:
            if not basic_id in self.basic_cases:
                print("ERROR: Invalid basic case ID %d" % basic_id)
                exit(1)
            self.basic_id_list = [basic_id]
        if advance_id == 0:
            self.advance_id_list = self.advance_cases.keys()
        else:
            if not advance_id in self.advance_cases:
                print("ERROR: Invalid advance case ID %d" % advance_id)
                exit(1)
            self.advance_id_list = [advance_id]

    def gen_output(self, case_type, case_id):
        """Run the parser on one case and capture stdout+stderr to a file.

        case_type -- "basic" or "advance"; case_id -- key into the case dict.
        """
        if case_type == "basic":
            test_case = "%s/%s/%s.p" % (self.basic_case_dir, "test_cases", self.basic_cases[case_id])
            output_file = "%s/%s" % (self.output_dir, self.basic_cases[case_id])
        elif case_type == "advance":
            test_case = "%s/%s/%s.p" % (self.advance_case_dir, "test_cases", self.advance_cases[case_id])
            output_file = "%s/%s" % (self.output_dir, self.advance_cases[case_id])
        clist = [self.parser, test_case]
        cmd = " ".join(clist)
        try:
            # NOTE(review): shell=True with paths joined into a string —
            # fine for the fixed, trusted case paths used here.
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        except Exception as e:
            # NOTE(review): `Colors` is not defined anywhere in this file —
            # this error path would itself raise NameError; confirm.
            print(Colors.RED + "Call of '%s' failed: %s" % (" ".join(clist), e))
            exit(1)
        stdout = str(proc.stdout.read(), "utf-8")
        stderr = str(proc.stderr.read(), "utf-8")
        # NOTE(review): retcode is captured but unused here.
        retcode = proc.wait()
        # Output file holds stdout followed by stderr.
        with open(output_file, "w") as out:
            out.write(stdout)
            out.write(stderr)

    def test_sample_case(self, case_type, case_id):
        """Generate output for one case and diff it against the solution.

        Returns True when the output matches the sample solution exactly;
        on mismatch the unified diff is appended to self.diff_result.
        """
        self.gen_output(case_type, case_id)
        if case_type == "basic":
            output_file = "%s/%s" % (self.output_dir, self.basic_cases[case_id])
            solution = "%s/%s/%s" % (self.basic_case_dir, "sample_solutions", self.basic_cases[case_id])
        elif case_type == "advance":
            output_file = "%s/%s" % (self.output_dir, self.advance_cases[case_id])
            solution = "%s/%s/%s" % (self.advance_case_dir, "sample_solutions", self.advance_cases[case_id])
        clist = ["diff", "-u", output_file, solution, f'--label="your output:({output_file})"', f'--label="answer:({solution})"']
        cmd = " ".join(clist)
        try:
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, shell=True)
        except Exception as e:
            print("Call of '%s' failed: %s" % (cmd, e))
            return False
        output = str(proc.stdout.read(), "utf-8")
        retcode = proc.wait()
        # diff exits non-zero when the files differ.
        if retcode != 0:
            if case_type == "basic":
                self.diff_result += "{}\n".format(self.basic_cases[case_id])
            elif case_type == "advance":
                self.diff_result += "{}\n".format(self.advance_cases[case_id])
            self.diff_result += "{}\n".format(output)
        return retcode == 0

    def run(self):
        """Run all selected cases, print the scoreboard, and write
        score.txt / diff.txt under the output directory."""
        print("---\tCase\t\tPoints")
        total_score = 0
        max_score = 0
        # NOTE(review): opened without a context manager; closed at the end,
        # but an exception mid-run would leak the handle.
        diff = open("{}/{}".format(self.output_dir, "diff.txt"), 'w')
        self.diff_result = ""
        for b_id in self.basic_id_list:
            c_name = self.basic_cases[b_id]
            print("+++ TESTING basic case %s:" % c_name)
            ok = self.test_sample_case("basic", b_id)
            max_val = self.basic_case_scores[b_id]
            get_val = max_val if ok else 0
            print("---\t%s\t%d/%d" % (c_name, get_val, max_val))
            total_score += get_val
            max_score += max_val
        for a_id in self.advance_id_list:
            c_name = self.advance_cases[a_id]
            print("+++ TESTING advance case %s:" % c_name)
            ok = self.test_sample_case("advance", a_id)
            max_val = self.advance_case_scores[a_id]
            get_val = max_val if ok else 0
            print("---\t%s\t%d/%d" % (c_name, get_val, max_val))
            total_score += get_val
            max_score += max_val
        print("---\tTOTAL\t\t%d/%d" % (total_score, max_score))
        with open("{}/{}".format(self.output_dir, "score.txt"), "w") as result:
            result.write("---\tTOTAL\t\t%d/%d" % (total_score, max_score))
        diff.write(self.diff_result)
        diff.close()
def main():
    """Parse command-line options and run the grader over the chosen cases."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument(
        "--parser", help="parser to test", default="../src/parser")
    arg_parser.add_argument(
        "--basic_case_id", help="basic case's ID", type=int, default=0)
    arg_parser.add_argument(
        "--advance_case_id", help="advance case's ID", type=int, default=0)
    args = arg_parser.parse_args()

    grader = Grader(parser=args.parser)
    grader.get_case_id_list(args.basic_case_id, args.advance_case_id)
    grader.run()


if __name__ == "__main__":
    main()
| 34.005587
| 129
| 0.555282
|
acfdc78e6e550872e9731dc2b66887569da15f1e
| 5,008
|
py
|
Python
|
examples/stats.py
|
eduardomelgar/Adafruit_Python_SSD1306
|
a435263e26e4a69533347ed4579c60aeac611ef9
|
[
"MIT"
] | null | null | null |
examples/stats.py
|
eduardomelgar/Adafruit_Python_SSD1306
|
a435263e26e4a69533347ed4579c60aeac611ef9
|
[
"MIT"
] | null | null | null |
examples/stats.py
|
eduardomelgar/Adafruit_Python_SSD1306
|
a435263e26e4a69533347ed4579c60aeac611ef9
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2017 Adafruit Industries
# Author: Tony DiCola & James DeVito
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time
import Adafruit_GPIO.SPI as SPI
import Adafruit_SSD1306
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont
import subprocess
# Raspberry Pi pin configuration:
RST = None     # on the PiOLED this pin isn't used
# Note the following are only used with SPI:
DC = 23
SPI_PORT = 0
SPI_DEVICE = 0

# Beaglebone Black pin configuration:
# RST = 'P9_12'
# Note the following are only used with SPI:
# DC = 'P9_15'
# SPI_PORT = 1
# SPI_DEVICE = 0

# 128x32 display with hardware I2C:
disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST)

# 128x64 display with hardware I2C:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST)

# Note you can change the I2C address by passing an i2c_address parameter like:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, i2c_address=0x3C)

# Alternatively you can specify an explicit I2C bus number, for example
# with the 128x32 display you would use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, i2c_bus=2)

# 128x32 display with hardware SPI:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))

# 128x64 display with hardware SPI:
# disp = Adafruit_SSD1306.SSD1306_128_64(rst=RST, dc=DC, spi=SPI.SpiDev(SPI_PORT, SPI_DEVICE, max_speed_hz=8000000))

# Alternatively you can specify a software SPI implementation by providing
# digital GPIO pin numbers for all the required display pins. For example
# on a Raspberry Pi with the 128x32 display you might use:
# disp = Adafruit_SSD1306.SSD1306_128_32(rst=RST, dc=DC, sclk=18, din=25, cs=22)

# Initialize library.
disp.begin()

# Clear display.
disp.clear()
disp.display()

# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
width = disp.width
height = disp.height
image = Image.new('1', (width, height))

# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)

# Draw a black filled box to clear the image.
draw.rectangle((0,0,width,height), outline=0, fill=0)

# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
bottom = height-padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0

# Load default font.
# font = ImageFont.load_default()

# Alternatively load a TTF font. Make sure the .ttf font file is in the same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype('PixelOperator-Bold.ttf', 10)

# Main loop: refresh the system-stats display forever (Ctrl-C to stop).
while True:

    # Draw a black filled box to clear the image.
    draw.rectangle((0,0,width,height), outline=0, fill=0)

    # Shell scripts for system monitoring from here : https://unix.stackexchange.com/questions/119126/command-to-display-memory-usage-disk-usage-and-cpu-load
    # shell=True is acceptable here: the command strings are fixed literals,
    # no user input is interpolated.
    # cmd = "hostname -I |cut -f 2 -d ' '"
    cmd = "hostname -I | cut -d\' \' -f1"
    IP = subprocess.check_output(cmd, shell = True )
    cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
    CPU = subprocess.check_output(cmd, shell = True )
    cmd = "free -m | awk 'NR==2{printf \"Mem: %s of %sMB %.2f%%\", $3,$2,$3*100/$2 }'"
    MemUsage = subprocess.check_output(cmd, shell = True )
    cmd = "df -h | awk '$NF==\"/\"{printf \"Disk: %d of %d GB %s\", $3,$2,$5}'"
    Disk = subprocess.check_output(cmd, shell = True )
    # NOTE(review): vcgencmd exists only on Raspberry Pi OS; on other
    # systems check_output raises CalledProcessError here — confirm target.
    cmd = "vcgencmd measure_temp |cut -f 2 -d '='"
    temp = subprocess.check_output(cmd, shell = True )

    # Write the four stats lines (8-pixel row pitch on the 128x32 panel).
    draw.text((x, top), "IP: " + str(IP,'utf-8'), font=font, fill=255)
    draw.text((x, top+8), str(CPU,'utf-8') + " " + str(temp,'utf-8') , font=font, fill=255)
    draw.text((x, top+16), str(MemUsage,'utf-8'), font=font, fill=255)
    draw.text((x, top+25), str(Disk,'utf-8'), font=font, fill=255)

    # Display image.
    disp.image(image)
    disp.display()
    time.sleep(.1)
| 37.373134
| 157
| 0.717851
|
acfdc89cc3773cd0b0f371ae4927c4f88b54ba01
| 1,979
|
py
|
Python
|
source/tools/coco_tools.py
|
allenai/learning_from_interaction
|
a266bc16d682832aa854348fa557a30d86b84674
|
[
"Apache-2.0"
] | 11
|
2020-10-27T00:05:55.000Z
|
2021-08-25T08:42:34.000Z
|
source/tools/coco_tools.py
|
allenai/learning_from_interaction
|
a266bc16d682832aa854348fa557a30d86b84674
|
[
"Apache-2.0"
] | 1
|
2021-06-02T01:59:03.000Z
|
2021-06-02T01:59:03.000Z
|
source/tools/coco_tools.py
|
allenai/learning_from_interaction
|
a266bc16d682832aa854348fa557a30d86b84674
|
[
"Apache-2.0"
] | null | null | null |
import os
import json
from datetime import datetime
from pycocotools.mask import area, toBbox
from tools.logger import LOGGER
def save_coco_dataset(dataset_file, output_folder, classes=("light", "medium", "heavy"), force=False):
    """Convert an episode-style dataset json into COCO format and save it.

    The output file name is derived from ``dataset_file`` (suffix
    ``__coco_format.json``) inside ``output_folder``.  An existing output
    is reused unless ``force`` is set.  Returns the output file path.
    """

    def _to_coco(episodes):
        # Assemble the COCO skeleton, then fill images and annotations.
        coco = {
            "info": {
                "date_created": datetime.now().strftime("%Y%m%d%H%M%S"),
                "description": "Automatically generated COCO json file",
            },
            "categories": [
                {"id": it, "name": cl} for it, cl in enumerate(classes)
            ],
            "images": [],
            "annotations": [],
        }
        for episode in episodes:
            coco["images"].append({
                "id": episode["image_id"],
                "width": episode["width"],
                "height": episode["height"],
                "file_name": "",
            })
            for ann in episode["annotations"]:
                seg = ann["segmentation"]
                # Annotation ids are 1-based and global across all images.
                coco["annotations"].append({
                    "id": len(coco["annotations"]) + 1,
                    "image_id": episode["image_id"],
                    "bbox": list(toBbox(seg)),
                    "area": float(area(seg)),
                    "iscrowd": 0,
                    "category_id": ann["category_id"],
                    "segmentation": seg,
                })
        return coco

    out_name = os.path.join(
        output_folder,
        os.path.basename(dataset_file).replace(".json", "__coco_format.json"),
    )
    if os.path.exists(out_name) and not force:
        LOGGER.info("skipping conversion; {} already exists".format(out_name))
        return out_name

    with open(dataset_file, "r") as f:
        episodes = json.load(f)
    coco_dict = _to_coco(episodes)
    with open(out_name, "w") as f:
        json.dump(coco_dict, f)
    LOGGER.info("COCO gt annotations saved to {}".format(out_name))
    return out_name
| 32.442623
| 102
| 0.557858
|
acfdc92023a5be10d1a32ab474b946b9900dd605
| 5,288
|
py
|
Python
|
WatchDogs_Visualisation/oldApps/tweet-map/venv2/lib/python3.7/site-packages/dash_html_components/Font.py
|
tnreddy09/WatchDogs_StockMarketAnalysis
|
0c72430da633785fcb14e40d8b007c86081d515d
|
[
"Apache-2.0"
] | 4
|
2020-02-05T11:26:47.000Z
|
2021-05-26T07:48:46.000Z
|
WatchDogs_Visualisation/oldApps/tweet-map/venv2/lib/python3.7/site-packages/dash_html_components/Font.py
|
prashanth-thipparthi/WatchDogs_StockMarketAnalysis
|
0c72430da633785fcb14e40d8b007c86081d515d
|
[
"Apache-2.0"
] | null | null | null |
WatchDogs_Visualisation/oldApps/tweet-map/venv2/lib/python3.7/site-packages/dash_html_components/Font.py
|
prashanth-thipparthi/WatchDogs_StockMarketAnalysis
|
0c72430da633785fcb14e40d8b007c86081d515d
|
[
"Apache-2.0"
] | null | null | null |
# AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class Font(Component):
"""A Font component.
Keyword arguments:
- children (a list of or a singular dash component, string or number; optional): The children of this component
- id (string; optional): The ID of this component, used to identify dash components
in callbacks. The ID needs to be unique across all of the
components in an app.
- n_clicks (optional): An integer that represents the number of times
that this element has been clicked on.
- n_clicks_timestamp (optional): An integer that represents the time (in ms since 1970)
at which n_clicks changed. This can be used to tell
which button was changed most recently.
- key (string; optional): A unique identifier for the component, used to improve
performance by React.js while rendering components
See https://reactjs.org/docs/lists-and-keys.html for more info
- role (string; optional): The ARIA role attribute
- data-* (string; optional): A wildcard data attribute
- aria-* (string; optional): A wildcard aria attribute
- accessKey (string; optional): Defines a keyboard shortcut to activate or add focus to the element.
- className (string; optional): Often used with CSS to style elements with common properties.
- contentEditable (string; optional): Indicates whether the element's content is editable.
- contextMenu (string; optional): Defines the ID of a <menu> element which will serve as the element's context menu.
- dir (string; optional): Defines the text direction. Allowed values are ltr (Left-To-Right) or rtl (Right-To-Left)
- draggable (string; optional): Defines whether the element can be dragged.
- hidden (string; optional): Prevents rendering of given element, while keeping child elements, e.g. script elements, active.
- lang (string; optional): Defines the language used in the element.
- spellCheck (string; optional): Indicates whether spell checking is allowed for the element.
- style (dict; optional): Defines CSS styles which will override styles previously set.
- tabIndex (string; optional): Overrides the browser's default tab order and follows the one specified instead.
- title (string; optional): Text to be displayed in a tooltip when hovering over the element.
Available events: 'click'"""
@_explicitize_args
def __init__(self, children=None, id=Component.UNDEFINED, n_clicks=Component.UNDEFINED, n_clicks_timestamp=Component.UNDEFINED, key=Component.UNDEFINED, role=Component.UNDEFINED, accessKey=Component.UNDEFINED, className=Component.UNDEFINED, contentEditable=Component.UNDEFINED, contextMenu=Component.UNDEFINED, dir=Component.UNDEFINED, draggable=Component.UNDEFINED, hidden=Component.UNDEFINED, lang=Component.UNDEFINED, spellCheck=Component.UNDEFINED, style=Component.UNDEFINED, tabIndex=Component.UNDEFINED, title=Component.UNDEFINED, **kwargs):
self._prop_names = ['children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'data-*', 'aria-*', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title']
self._type = 'Font'
self._namespace = 'dash_html_components'
self._valid_wildcard_attributes = ['data-', 'aria-']
self.available_events = ['click']
self.available_properties = ['children', 'id', 'n_clicks', 'n_clicks_timestamp', 'key', 'role', 'data-*', 'aria-*', 'accessKey', 'className', 'contentEditable', 'contextMenu', 'dir', 'draggable', 'hidden', 'lang', 'spellCheck', 'style', 'tabIndex', 'title']
self.available_wildcard_properties = ['data-', 'aria-']
_explicit_args = kwargs.pop('_explicit_args')
_locals = locals()
_locals.update(kwargs) # For wildcard attrs
args = {k: _locals[k] for k in _explicit_args if k != 'children'}
for k in []:
if k not in args:
raise TypeError(
'Required argument `' + k + '` was not specified.')
super(Font, self).__init__(children=children, **args)
def __repr__(self):
    # Auto-generated repr.  If any prop other than `children` (the first
    # entry of _prop_names), or any wildcard data-*/aria-* attribute, is set,
    # render every set prop as `name=value`; otherwise render only the
    # `children` value.
    if(any(getattr(self, c, None) is not None
            for c in self._prop_names
            if c is not self._prop_names[0])
            or any(getattr(self, c, None) is not None
                   for c in self.__dict__.keys()
                   if any(c.startswith(wc_attr)
                          for wc_attr in self._valid_wildcard_attributes))):
        # Regular declared props that have a value.
        props_string = ', '.join([c+'='+repr(getattr(self, c, None))
                                  for c in self._prop_names
                                  if getattr(self, c, None) is not None])
        # Wildcard (data-*/aria-*) attributes that have a value.
        wilds_string = ', '.join([c+'='+repr(getattr(self, c, None))
                                  for c in self.__dict__.keys()
                                  if any([c.startswith(wc_attr)
                                          for wc_attr in
                                          self._valid_wildcard_attributes])])
        return ('Font(' + props_string +
                (', ' + wilds_string if wilds_string != '' else '') + ')')
    else:
        return (
            'Font(' +
            repr(getattr(self, self._prop_names[0], None)) + ')')
| 63.710843
| 551
| 0.662821
|
acfdca5bf9bbc5cf022a98dd19c8aefa7bcb3551
| 9,982
|
py
|
Python
|
scripts/convert_bio_model_to_long.py
|
wonjininfo/lf_tmp
|
438e987bbaae20456cfef46969ed97526c5d5369
|
[
"Apache-2.0"
] | null | null | null |
scripts/convert_bio_model_to_long.py
|
wonjininfo/lf_tmp
|
438e987bbaae20456cfef46969ed97526c5d5369
|
[
"Apache-2.0"
] | null | null | null |
scripts/convert_bio_model_to_long.py
|
wonjininfo/lf_tmp
|
438e987bbaae20456cfef46969ed97526c5d5369
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import math
from dataclasses import dataclass, field
# from transformers import RobertaForMaskedLM, RobertaTokenizerFast
from transformers import BertForMaskedLM, BertTokenizerFast # BertTokenizerFast
from transformers import AutoTokenizer, AutoModel
from transformers import RobertaForMaskedLM, RobertaTokenizerFast
from transformers import TextDataset, DataCollatorForLanguageModeling, Trainer
from transformers import TrainingArguments, HfArgumentParser
from transformers.modeling_longformer import LongformerSelfAttention
import torch
import pdb
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class BertLongSelfAttention(LongformerSelfAttention):
    """Adapter exposing BERT's self-attention call signature on top of
    LongformerSelfAttention so it can be dropped into a BERT encoder layer."""

    def forward(self, hidden_states, attention_mask=None, head_mask=None,
                encoder_hidden_states=None, encoder_attention_mask=None,
                output_attentions=False):
        # head_mask and the encoder_* arguments exist only for signature
        # compatibility with BertSelfAttention; they are not forwarded.
        parent = super()
        return parent.forward(
            hidden_states,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
        )
class BertLongForMaskedLM(BertForMaskedLM):
    """BertForMaskedLM whose encoder layers all use sliding-window
    (Longformer-style) self-attention."""

    def __init__(self, config):
        super().__init__(config)
        # Swap each stock `modeling_bert.BertSelfAttention` for the
        # long-attention adapter, preserving the layer index.
        for layer_id, encoder_layer in enumerate(self.bert.encoder.layer):
            encoder_layer.attention.self = BertLongSelfAttention(
                config, layer_id=layer_id
            )
def create_long_model(save_model_to, attention_window, max_pos):
    """Convert PubMedBERT into a long-sequence model.

    Loads the pretrained PubMedBERT MLM checkpoint, extends its position
    embedding table to ``max_pos`` rows (initialized by tiling the original
    table), replaces every BertSelfAttention with a LongformerSelfAttention
    using window size ``attention_window``, saves model + tokenizer to
    ``save_model_to`` and returns ``(model, tokenizer)``.
    """
    model = BertForMaskedLM.from_pretrained("microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract")
    config = model.config
    tokenizer = BertTokenizerFast.from_pretrained("microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract", model_max_length=max_pos)
    #tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base', model_max_length=max_pos)
    #pdb.set_trace()
    # extend position embeddings
    tokenizer.model_max_length = max_pos
    tokenizer.init_kwargs['model_max_length'] = max_pos
    current_max_pos, embed_size = model.bert.embeddings.position_embeddings.weight.shape
    #max_pos += 2 # NOTE: RoBERTa has positions 0,1 reserved, so embedding size is max position + 2
    config.max_position_embeddings = max_pos
    assert max_pos > current_max_pos
    # allocate a larger position embedding matrix
    new_pos_embed = model.bert.embeddings.position_embeddings.weight.new_empty(max_pos, embed_size)
    # Re-register position_ids so the buffer length matches the new table.
    model.bert.embeddings.register_buffer("position_ids",torch.arange(config.max_position_embeddings).expand((1, -1)),)
    # copy position embeddings over and over to initialize the new position embeddings
    # NOTE(review): the slice assignment assumes max_pos is an exact multiple
    # of current_max_pos (512); otherwise the final copy's shapes mismatch —
    # confirm callers only pass multiples of 512.
    k = 0
    step = current_max_pos
    while k < max_pos - 1:
        new_pos_embed[k:(k + step)] = model.bert.embeddings.position_embeddings.weight
        k += step
    model.bert.embeddings.position_embeddings.weight.data = new_pos_embed
    # replace the `modeling_bert.BertSelfAttention` object with `LongformerSelfAttention`
    config.attention_window = [attention_window] * config.num_hidden_layers
    for i, layer in enumerate(model.bert.encoder.layer):
        longformer_self_attn = LongformerSelfAttention(config, layer_id=i)
        longformer_self_attn.query = layer.attention.self.query
        longformer_self_attn.key = layer.attention.self.key
        longformer_self_attn.value = layer.attention.self.value
        # Global-attention projections start out as copies of the local ones.
        longformer_self_attn.query_global = layer.attention.self.query
        longformer_self_attn.key_global = layer.attention.self.key
        longformer_self_attn.value_global = layer.attention.self.value
        layer.attention.self = longformer_self_attn
    logger.info(f'saving model to {save_model_to}')
    model.save_pretrained(save_model_to)
    tokenizer.save_pretrained(save_model_to)
    #pdb.set_trace()
    return model, tokenizer
def copy_proj_layers(model):
    """Copy each encoder layer's local Q/K/V projections into its global
    projections (MLM pretraining never updates the global ones) and return
    the same model object."""
    for encoder_layer in model.bert.encoder.layer:
        attn = encoder_layer.attention.self
        attn.query_global = attn.query
        attn.key_global = attn.key
        attn.value_global = attn.value
    return model
def pretrain_and_evaluate(args, model, tokenizer, eval_only, model_path):
    """Evaluate (and optionally MLM-pretrain) ``model``, logging loss in
    bits-per-character.

    Args:
        args: TrainingArguments extended with ``val_datapath``/``train_datapath``.
        model: masked-LM model to evaluate or train.
        tokenizer: matching tokenizer; its model_max_length sets the block size.
        eval_only: when True, skip training and reuse the validation set as
            the (unused) train set.
        model_path: checkpoint directory passed to ``Trainer.train`` for
            resuming; ignored when ``eval_only`` is True.
    """
    # Some tokenizers report a huge sentinel model_max_length; fall back to
    # 512-token blocks in that case.
    if tokenizer.model_max_length > 1e8:
        val_dataset = TextDataset(tokenizer=tokenizer,
                                  file_path=args.val_datapath,
                                  block_size=512)
        logger.info(f'[WARNING] tokenizer.model_max_length > 10^8: {tokenizer.model_max_length} setting the value as 512 instead.')
    else:
        val_dataset = TextDataset(tokenizer=tokenizer,
                                  file_path=args.val_datapath,
                                  block_size=tokenizer.model_max_length) # The `max_len` attribute has been deprecated
    if eval_only:
        train_dataset = val_dataset
    else:
        logger.info(f'Loading and tokenizing training data is usually slow: {args.train_datapath}')
        # Same sentinel guard as above, for the training split.
        if tokenizer.model_max_length > 1e8:
            train_dataset = TextDataset(tokenizer=tokenizer,
                                        file_path=args.train_datapath,
                                        block_size=512)
            logger.info(f'[WARNING] tokenizer.model_max_length > 10^8: {tokenizer.model_max_length} setting the value as 512 instead.')
        else:
            train_dataset = TextDataset(tokenizer=tokenizer,
                                        file_path=args.train_datapath,
                                        block_size=tokenizer.model_max_length)
    # Standard 15% token-masking collator for MLM.
    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=True, mlm_probability=0.15)
    trainer = Trainer(model=model, args=args, data_collator=data_collator,
                      train_dataset=train_dataset, eval_dataset=val_dataset, prediction_loss_only=True,)
    eval_loss = trainer.evaluate()
    #pdb.set_trace()
    eval_loss = eval_loss['eval_loss']
    # log(2) converts nats to bits-per-character.
    logger.info(f'Initial eval bpc: {eval_loss/math.log(2)}')
    if not eval_only:
        trainer.train(model_path=model_path)
        trainer.save_model()
        eval_loss = trainer.evaluate()
        eval_loss = eval_loss['eval_loss']
        logger.info(f'Eval bpc after pretraining: {eval_loss/math.log(2)}')
@dataclass
class ModelArgs:
    """Conversion settings parsed alongside TrainingArguments."""

    # Width of the local (sliding-window) attention.
    attention_window: int = field(
        default=512, metadata={"help": "Size of attention window"}
    )
    # Target maximum sequence length after conversion.
    max_pos: int = field(default=4096, metadata={"help": "Maximum position"})
# Parse training hyper-parameters plus conversion settings from a hard-coded
# argument list (no real CLI arguments are consumed).
parser = HfArgumentParser((TrainingArguments, ModelArgs,))
training_args, model_args = parser.parse_args_into_dataclasses(look_for_args_file=False, args=[
    '--output_dir', 'tmp',
    '--warmup_steps', '500',
    '--learning_rate', '0.00003',
    '--weight_decay', '0.01',
    '--adam_epsilon', '1e-6',
    '--max_steps', '3000',
    '--logging_steps', '500',
    '--save_steps', '500',
    '--max_grad_norm', '5.0',
    '--per_gpu_eval_batch_size', '8',
    '--per_gpu_train_batch_size', '2',  # 32GB gpu with fp32
    '--gradient_accumulation_steps', '32',
    '--evaluate_during_training',
    '--do_train',
    '--do_eval',
])
# Hard-coded wikitext-103 paths used for MLM evaluation/pretraining.
# NOTE(review): machine-specific absolute paths — parameterize before reuse.
training_args.val_datapath = '/hdd2/wonjinlf/github/longformer/wikitext-103-raw/wiki.valid.raw'
training_args.train_datapath = '/hdd2/wonjinlf/github/longformer/wikitext-103-raw/wiki.train.raw'
#training_args.val_datapath = 'wikitext-103-raw/wiki.valid.raw'
#training_args.train_datapath = 'wikitext-103-raw/wiki.train.raw'
# Choose GPU
import os  # NOTE(review): duplicate of the top-of-file import — harmless.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
## Put it all together
# 1) Evaluating PubMedBERT on MLM to establish a baseline. Validation bpc = 2.536 which is higher than the bpc values in table 6 here because wikitext103 is harder than our pretraining corpus.
bert_base = BertForMaskedLM.from_pretrained("microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract")
# roberta_base_tokenizer = RobertaTokenizerFast.from_pretrained('PubMedBERT')
tokenizer = BertTokenizerFast.from_pretrained("microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract")
logger.info('Evaluating PubMedBERT (seqlen: 512) for refernece ...')
pretrain_and_evaluate(training_args, bert_base, tokenizer, eval_only=True, model_path=None)
# 2) As descriped in create_long_model, convert a PubMedBERT model into PubMedBERT-4096 which is an instance of RobertaLong, then save it to the disk.
model_path = f'{training_args.output_dir}/PubMedBERT-{model_args.max_pos}'
if not os.path.exists(model_path):
    os.makedirs(model_path)
logger.info(f'Converting PubMedBERT into PubMedBERT-{model_args.max_pos}')
model, tokenizer = create_long_model(
    save_model_to=model_path, attention_window=model_args.attention_window, max_pos=model_args.max_pos)
# 3) Load PubMedBERT-4096 from the disk. This model works for long sequences even without pretraining. If you don't want to pretrain, you can stop here and start finetuning your PubMedBERT\-4096 on downstream tasks
logger.info(f'Loading the model from {model_path}')
tokenizer = BertTokenizerFast.from_pretrained(model_path)
model = BertLongForMaskedLM.from_pretrained(model_path)
# 4) Pretrain PubMedBERT\-4096 for 3k steps, each steps has 2^18 tokens. Notes:
logger.info(f'Pretraining PubMedBERT-{model_args.max_pos} ... ')
# NOTE(review): the next two lines shrink the run to a smoke test (3 steps,
# batch size 1) and override the values parsed above — remove for a real run.
training_args.max_steps = 3   ## <<<<<<<<<<<<<<<<<<<<<<<< REMOVE THIS <<<<<<<<<<<<<<<<<<<<<<<<
training_args.per_gpu_train_batch_size = 1
pretrain_and_evaluate(training_args, model, tokenizer, eval_only=False, model_path=training_args.output_dir)
# 5) Copy global projection layers. MLM pretraining doesn't train global projections, so we need to call copy_proj_layers to copy the local projection layers to the global ones.
logger.info("5) Copy global projection layers. MLM pretraining doesn't train global projections, so we need to call copy_proj_layers to copy the local projection layers to the global ones.")
logger.info(f'Copying local projection layers into global projection layers ... ')
model = copy_proj_layers(model)
logger.info(f'Saving model to {model_path}')
model.save_pretrained(model_path)
logger.info(f'DONE!!!!')
| 45.788991
| 215
| 0.734322
|
acfdcaf8422d4a199344bb8b8261b06d8962826f
| 184
|
py
|
Python
|
adaptmesh/mark.py
|
arturs-berzins/adaptmesh
|
8ce257d85b5943d2bca578ca67490e6b85ea8bec
|
[
"MIT"
] | 11
|
2020-09-01T23:14:52.000Z
|
2022-03-01T00:35:14.000Z
|
adaptmesh/mark.py
|
arturs-berzins/adaptmesh
|
8ce257d85b5943d2bca578ca67490e6b85ea8bec
|
[
"MIT"
] | 6
|
2021-01-16T20:21:51.000Z
|
2022-02-04T14:29:20.000Z
|
adaptmesh/mark.py
|
arturs-berzins/adaptmesh
|
8ce257d85b5943d2bca578ca67490e6b85ea8bec
|
[
"MIT"
] | 2
|
2021-01-20T03:16:13.000Z
|
2022-02-04T09:30:01.000Z
|
"""Marking the elements to refine."""
from skfem import adaptive_theta as atheta
def adaptive_theta(m, estimators, theta=0.5, **params):
    """Select elements to refine via scikit-fem's adaptive theta rule.

    The mesh ``m`` and any extra keyword ``params`` are accepted for
    interface compatibility with other marking strategies but are not used
    by the underlying rule.
    """
    marked = atheta(estimators, theta=theta)
    return marked
| 20.444444
| 55
| 0.73913
|
acfdcb46126a7dc8447f4eda5c872c7dbfe59bf7
| 11,275
|
py
|
Python
|
bridges/bridges.py
|
acbart/bridges-python
|
5a18d2eb68df7cdff996120c0461b238ca599481
|
[
"MIT"
] | null | null | null |
bridges/bridges.py
|
acbart/bridges-python
|
5a18d2eb68df7cdff996120c0461b238ca599481
|
[
"MIT"
] | null | null | null |
bridges/bridges.py
|
acbart/bridges-python
|
5a18d2eb68df7cdff996120c0461b238ca599481
|
[
"MIT"
] | null | null | null |
from bridges.connector import *
from bridges import ColorGrid
import os
import json
##
# @brief The bridges class is the main class that provides interfaces to datasets,
# maintains user and assignment information, and connects to the bridges server.
#
# The bridges class is responsible for initializing the bridges system, specifying
# parameters (user id, assignment id, title, description, data structure
# type, etc) for the student assignment, generating the data structure representation
# and transmission to the bridges server. In addition, it provides interfaces to
# a number of real-world datasets, that makes it easy to access the data for use
# algorithms/data structure assignments. <br>
#
# <b>Datasets.</b> The datasets that are currently supported through the BRIDGES API
# include USGS Earthquake Data, IMDB Actor/Movie Data (2 versions), Gutenberg Book
# Collection Meta Data, a Video Game Dataset and Shakespeare Dataset. More information
# is found in the respective methods (below) and at <p>
# http://bridgesuncc.github.io/datasets.html <p>
#
# A typical bridges program includes creating the bridges object, followed by creation
# of the data structure by the user, assigning visual attributes to elements of the
# data structure, followed by specification of the data structure type and the
# call to visualize the data structure (bridges::setDataStructure() and visualize()
# methods).
#
# @author Sean Gallagher, Kalpathi Subramanaian, Mihai Mehedint, David Burlinson, Matthew Mcquaigue
#
#
class Bridges:
    # Hard limits imposed on title/description lengths before upload.
    _MaxTitleSize = 50
    _MaxDescSize = 250
    _projection_options = {"cartesian", "albersusa", "equirectangular", "window"}

    @property
    def window(self) -> [float]:
        """
        This property specifies the window that will be rendered by default in the view.
        This only works for graph data types, and the coordinate system needs to be set
        to "window" using set_coord_system_type(); setting this value sets "window" for you.

        :return: list of 4 floats [x1, x2, y1, y2]
        """
        return self._window

    @window.setter
    def window(self, value: [float]) -> None:
        """Set the default rendering window; coerces entries to float and
        switches the coordinate system to "window"."""
        try:
            new_window = [float(x) for x in value]
        except ValueError:
            raise ValueError("Value for window should be a list of 4 numbers")
        except TypeError:
            raise TypeError("Value for window should be a list of 4 numbers")
        self.set_coord_system_type("window")
        self._window = new_window

    def __init__(self, assignment, username, appl_id):
        """
        Bridges constructor

        Args:
            (int) assignment: the number your bridges assignment will have
            (str) username: your bridges username
            (str) appl_id: your appl authentication key from bridges acc
        Returns:
            None
        """
        self._assignment_part = 0
        self._assignment = 0
        self._username = str()
        self._key = str()
        self._title = str()
        self._description = str()
        self.set_assignment(assignment)
        self.set_username(username)
        self.set_key(appl_id)
        self.connector = Connector(self.get_key(), self.get_username(), self.get_assignment())
        self._coord_system_type = "cartesian"
        self._json_flag = False
        self._map_overlay = False
        self._window = [0.0, 0.0, 0.0, 0.0]
        self.ds_handle = None
        self.vis_type = ""

    def set_data_structure(self, ds):
        """
        This method sets the handle to the current data structure; this can
        be an array, the head of a linked list, root of a tree structure, a graph
        Arrays of upto 3 dimensions are suppported. It can be any of the data
        structures supported by BRIDGES. Polymorphism and type casting is used
        to determine the actual data structure and extract its representtion.

        Args:
            ds: the data structure to visualize
        Returns:
            None
        Raises:
            ValueError: if it is not a BRIDGES data structure
        """
        try:
            self.ds_handle = ds
            self.vis_type = ds.get_data_structure_type()
        except ValueError:
            print("Exception Thrown: Data structure passed to BRIDGES is null!\n")

    def set_visualize_JSON(self, flag):
        """When True, visualize() also prints the generated JSON to stdout."""
        self._json_flag = flag

    # Data structure types whose JSON representation is fetched from the handle.
    _supported_vis_types = {
        "Tree", "BinaryTree", "AVLTree", "SinglyLinkedList",
        "DoublyLinkedList", "MultiList", "CircularSinglyLinkedList",
        "CircularDoublyLinkedList", "Array", "GraphAdjacencyList",
        "ColorGrid", "GraphAdjacencyMatrix", "largegraph", "KdTree",
        "SymbolCollection", "GameGrid", "BinarySearchTree", "LineChart",
        "Audio",
    }

    def visualize(self) -> None:
        """
        Method for generating the representation of the data structure in the form of JSON
        and sends the information to the bridges server for generating the visualization

        Returns:
            None
        """
        nodes_links_str = ""
        # Set-membership test replaces the original long `or` chain; same types.
        if self.vis_type in self._supported_vis_types:
            nodes_links_str = self.ds_handle.get_data_structure_representation()
        ds = {
            "visual": self.vis_type,
            "title": self._title,
            "description": self._description,
            "coord_system_type": self._coord_system_type,
            "map_overlay": self._map_overlay,
        }
        if self.window is not None and len(self.window) == 4:
            ds['window'] = self.window
        ds.update(nodes_links_str)
        ds_json = json.dumps(ds)
        if self._json_flag:
            print(ds_json)
        response = self.connector.post("/assignments/" + self.get_assignment(), ds_json)
        if response == 200:
            print("\nCheck Your Visualization at the following link:\n\n" +
                  self.connector.get_server_url() + "/assignments/" + str(self._assignment) +
                  "/" + self._username + "\n\n")
            # Each successful upload becomes a new subassignment (N.00, N.01, ...).
            self._assignment_part = self._assignment_part + 1

    def set_assignment(self, assignment):
        """
        Setter for assignment id (must be non-negative)

        Args:
            assignment: assignment number to be set
        Returns:
            None
        Raises:
            ValueError: if the assignment number is negative
        """
        force = os.getenv("FORCE_BRIDGES_ASSIGNMENT", "")
        if (force != ""):
            assignment = int(force)
        if assignment < 0:
            # BUGFIX: the exception was previously constructed but never
            # raised, silently accepting negative assignment ids.
            raise ValueError("Assignment value must be >= 0")
        if self._assignment >= 0:
            self._assignment_part = 0
        self._assignment = assignment

    def get_assignment(self) -> str:
        """
        Getter for the assignment id

        Returns:
            str: representing the full assignment id including subassignment aspect
        """
        if self._assignment_part < 10:
            return str(self._assignment) + ".0" + str(self._assignment_part)
        else:
            return str(self._assignment) + "." + str(self._assignment_part)

    def set_title(self, title) -> None:
        """
        Setter for the title of the bridges visualization; truncated to
        _MaxTitleSize characters.

        Args:
            (str) title: representing the title
        Returns:
            None
        """
        if len(title) > self._MaxTitleSize:
            # BUGFIX: warning message previously lacked spaces around the limit.
            print("Visualization Title restricted to " + str(self._MaxTitleSize) +
                  " characters. Truncated title...")
            self._title = title[:self._MaxTitleSize]
        else:
            self._title = title

    def set_description(self, description) -> None:
        """
        Setter for the description of the bridges visualization; truncated to
        _MaxDescSize characters.

        Args:
            (str) description: representing the assignment description
        Returns:
            None
        """
        if len(description) > self._MaxDescSize:
            print("Visualization Description restricted to " + str(self._MaxDescSize) +
                  " Truncating description..")
            self._description = description[0:self._MaxDescSize]
        else:
            self._description = description

    def set_map_overlay(self, flag):
        """
        Setter for if the visualization will have a map overlay

        Args:
            (bool) flag: boolean for if map overlay
        Returns:
            None
        """
        self._map_overlay = flag

    def set_coord_system_type(self, coord):
        """
        Setter for the coordinate system type to use in the visualization

        Args:
            coord: coordinate system type (used in map overlays (can be
            "cartesian", "albersusa", "equirectangular")
        """
        if coord in self._projection_options:
            self._coord_system_type = coord
        else:
            # Unknown projections fall back to cartesian rather than failing.
            print("Unrecognized coordinate system \'" + coord + "\', defaulting to cartesian. Options:")
            self._coord_system_type = "cartesian"

    def get_color_grid_from_assignment(self, user: str, assignment: int, subassignment: int = 0) -> "ColorGrid":
        """
        Reconstruct a ColorGrid from an existing ColorGrid on the bridges server

        Args:
            user(str): the name of the user who uploaded the assignment
            assignment(int): the ID of the assignment to get
            subassignment(int): the ID of the subassignment to get (default 0)
        Returns:
            ColorGrid: the ColorGrid stored in the bridges server
        """
        from bridges.data_src_dependent.data_source import get_color_grid_from_assignment
        return get_color_grid_from_assignment(self.connector.server_url, user, assignment, subassignment)

    def set_username(self, username):
        """
        Setter for username (must be a string); spaces are URL-encoded as '+'.

        Args:
            username: username to be set
        Returns:
            None
        """
        force = os.getenv("FORCE_BRIDGES_USERNAME", "")
        if (force != ""):
            username = force
        self._username = username.replace(" ", "+")

    def get_username(self):
        """
        Getter for the assignment user name (BRIDGES credentials)

        Returns:
            str: user name
        """
        return self._username

    def get_assignment_id(self):
        """
        Getter for the assignment number

        Returns:
            int: assignment number
        """
        return self._assignment

    def set_key(self, apikey):
        """
        Setter for API Key (BRIDGES Credentials); spaces are URL-encoded as '+'.

        Args:
            apikey: api key to be set
        Returns:
            None
        """
        force = os.getenv("FORCE_BRIDGES_APIKEY", "")
        if (force != ""):
            apikey = force
        self._key = apikey.replace(" ", "+")

    def get_key(self):
        """
        Getter for the API key (BRIDGES credentials)

        Returns:
            str: user's API key
        """
        return self._key
| 37.583333
| 133
| 0.61286
|
acfdcd7f5f1246f449b868855b01070e71e231ac
| 2,854
|
py
|
Python
|
pinnwand/http.py
|
erlliam/pinnwand
|
a1d36f3a4aec85311d75e4648ba1dee23ce89f62
|
[
"MIT"
] | null | null | null |
pinnwand/http.py
|
erlliam/pinnwand
|
a1d36f3a4aec85311d75e4648ba1dee23ce89f62
|
[
"MIT"
] | null | null | null |
pinnwand/http.py
|
erlliam/pinnwand
|
a1d36f3a4aec85311d75e4648ba1dee23ce89f62
|
[
"MIT"
] | null | null | null |
import logging
import secrets
import zipfile
from typing import Any, List
import tornado.web
from pinnwand import path, configuration, handler
log = logging.getLogger(__name__)
def make_application() -> tornado.web.Application:
    """Assemble the Tornado application: website pages, configured ReST
    pages, JSON APIs, optional logo override, static assets and the
    catch-all paste view (which must be registered last)."""
    website = handler.website

    routes: List[Any] = [
        (r"/", website.Create),
        (r"/\+(.*)", website.Create),
        (r"/create", website.CreateAction),
        (r"/show/([A-Z2-7]+)(?:#.+)?", website.RedirectShow),
        (r"/repaste/([A-Z2-7]+)(?:#.+)?", website.Repaste),
        (r"/raw/([A-Z2-7]+)(?:#.+)?", website.FileRaw),
        (r"/([A-Z2-7]+)(?:#.+)?/raw", website.FileRaw),
        (r"/hex/([A-Z2-7]+)(?:#.+)?", website.FileHex),
        (r"/([A-Z2-7]+)(?:#.+)?/hex", website.FileHex),
        (r"/download/([A-Z2-7]+)(?:#.+)?", website.FileDownload),
        (r"/([A-Z2-7]+)(?:#.+)?/download", website.FileDownload),
        (r"/download-archive/([A-Z2-7]+)(?:#.+)?", website.PasteDownload),
        (r"/([A-Z2-7]+)(?:#.+)?/download-archive", website.PasteDownload),
        (r"/remove/([A-Z2-7]+)", website.Remove),
    ]

    # One ReST-rendered page per configured static page name.
    routes.extend(
        (f"/{page}", website.RestructuredTextPage, {"file": f"{page}.rst"})
        for page in configuration.page_list
    )

    # JSON APIs: current v1, deprecated endpoints, and the curl-friendly one.
    routes.extend([
        (r"/api/v1/paste", handler.api_v1.Paste),
        (r"/api/v1/lexer", handler.api_v1.Lexer),
        (r"/api/v1/expiry", handler.api_v1.Expiry),
        (r"/json/new", handler.api_deprecated.Create),
        (r"/json/remove", handler.api_deprecated.Remove),
        (r"/json/show/([A-Z2-7]+)(?:#.+)?", handler.api_deprecated.Show),
        (r"/json/lexers", handler.api_deprecated.Lexer),
        (r"/json/expiries", handler.api_deprecated.Expiry),
        (r"/curl", handler.api_curl.Create),
    ])

    # Optional custom logo/favicon served from a configured path.
    if configuration.logo_path:
        routes.extend([
            (r"/static/logo.png", website.Logo, {"path": configuration.logo_path}),
            (r"/static/favicon.png", website.Logo, {"path": configuration.logo_path}),
        ])

    # Static assets, then the catch-all paste-show route (must come last).
    routes.extend([
        (r"/static/(.*)", tornado.web.StaticFileHandler, {"path": path.static}),
        (r"/(.*)(?:#.+)?", website.Show),
    ])

    application = tornado.web.Application(
        routes,
        template_path=path.template,
        default_handler_class=website.Base,
        xsrf_cookies=True,
        cookie_secret=secrets.token_hex(),
        static_path=path.static,
    )
    application.configuration = configuration  # type: ignore
    return application
| 30.361702
| 73
| 0.519622
|
acfdcf48a194a30cf93b3b451a5d5258ed0f42e0
| 7,188
|
py
|
Python
|
software/mesa/src/mesa/drivers/dri/common/xmlpool/gen_xmlpool.py
|
dhanna11/OpenGPU
|
ab2f01253bba311e082dfae695b9e70138de75d4
|
[
"Apache-2.0"
] | 7
|
2019-09-04T03:44:26.000Z
|
2022-01-06T02:54:24.000Z
|
software/mesa/src/mesa/drivers/dri/common/xmlpool/gen_xmlpool.py
|
dhanna11/OpenGPU
|
ab2f01253bba311e082dfae695b9e70138de75d4
|
[
"Apache-2.0"
] | null | null | null |
software/mesa/src/mesa/drivers/dri/common/xmlpool/gen_xmlpool.py
|
dhanna11/OpenGPU
|
ab2f01253bba311e082dfae695b9e70138de75d4
|
[
"Apache-2.0"
] | 3
|
2021-06-11T23:53:38.000Z
|
2021-08-31T03:18:34.000Z
|
#!/usr/bin/python
#
# Usage:
# gen_xmlpool.py /path/to/t_option.h localedir lang lang lang ...
#
# For each given language, this script expects to find a .mo file at
# `{localedir}/{language}/LC_MESSAGES/options.mo`.
#
import sys
import gettext
import re
# Path to t_options.h
# Positional CLI arguments (see usage comment at top of file).
template_header_path = sys.argv[1]
# Directory expected to contain <lang>/LC_MESSAGES/options.mo files.
localedir = sys.argv[2]
# List of supported languages
languages = sys.argv[3:]
# Escape special characters in C strings
def escapeCString (s):
    """Escape special characters so `s` can be embedded in a C string literal
    inside an XML attribute.

    Control characters become C backslash escapes. Plain double quotes cannot
    appear in XML attribute values at all, so they are replaced by Unicode
    typographic quotes: a quote at the end of the string or followed by
    whitespace is treated as a closing quote (U+201D), anything else as an
    opening quote (U+201C).
    """
    escapeSeqs = {'\a' : '\\a', '\b' : '\\b', '\f' : '\\f', '\n' : '\\n',
                  '\r' : '\\r', '\t' : '\\t', '\v' : '\\v', '\\' : '\\\\'}
    r = ''
    for i in range(len(s)):
        c = s[i]
        if c == '"':
            # BUGFIX: the open/close code points were swapped relative to the
            # comments — U+201C is the *opening* mark, U+201D the *closing* one.
            if i == len(s) - 1 or s[i + 1].isspace():
                # close quote
                r = r + u'\u201d'
            else:
                # open quote
                r = r + u'\u201c'
        elif c in escapeSeqs:
            # dict.has_key() was removed in Python 3; `in` works in 2 and 3.
            r = r + escapeSeqs[c]
        else:
            r = r + c
    return r
# Expand escape sequences in C strings (needed for gettext lookup)
def expandCString (s):
    """Expand C escape sequences in `s` (needed so the expanded text can be
    looked up via gettext).

    Handles the simple backslash escapes, hex escapes (\\xNN, up to two hex
    digits) and octal escapes (\\NNN, up to three octal digits) via a small
    per-character state machine.
    """
    escapeSeqs = {'a' : '\a', 'b' : '\b', 'f' : '\f', 'n' : '\n',
                  'r' : '\r', 't' : '\t', 'v' : '\v',
                  '"' : '"', '\\' : '\\'}
    i = 0
    escape = False   # saw a backslash, deciding what kind of escape follows
    hexa = False     # inside a \xNN hex escape
    octa = False     # inside a \NNN octal escape
    num = 0          # accumulated character code
    digits = 0       # digits consumed so far in the numeric escape
    r = ''
    while i < len(s):
        if not escape:
            if s[i] == '\\':
                escape = True
            else:
                r = r + s[i]
        elif hexa:
            if (s[i] >= '0' and s[i] <= '9') or \
               (s[i] >= 'a' and s[i] <= 'f') or \
               (s[i] >= 'A' and s[i] <= 'F'):
                num = num * 16 + int(s[i],16)
                digits = digits + 1
            else:
                # Non-hex digit terminates the escape early.
                digits = 2
            if digits >= 2:
                hexa = False
                escape = False
                r = r + chr(num)
        elif octa:
            if s[i] >= '0' and s[i] <= '7':
                num = num * 8 + int(s[i],8)
                digits = digits + 1
            else:
                # Non-octal digit terminates the escape early.
                digits = 3
            if digits >= 3:
                octa = False
                escape = False
                r = r + chr(num)
        else:
            # dict.has_key() was removed in Python 3; `in` works in 2 and 3.
            if s[i] in escapeSeqs:
                r = r + escapeSeqs[s[i]]
                escape = False
            elif s[i] >= '0' and s[i] <= '7':
                octa = True
                num = int(s[i],8)
                # A leading digit <= 3 may be followed by two more octal
                # digits; otherwise only one more fits in a byte.
                if num <= 3:
                    digits = 1
                else:
                    digits = 2
            elif s[i] == 'x' or s[i] == 'X':
                hexa = True
                num = 0
                digits = 0
            else:
                r = r + s[i]
                escape = False
        i = i + 1
    return r
# Expand matches. The first match is always a DESC or DESC_BEGIN match.
# Subsequent matches are ENUM matches.
#
# DESC, DESC_BEGIN format: \1 \2=<lang> \3 \4=gettext(" \5=<text> \6=") \7
# ENUM format: \1 \2=gettext(" \3=<text> \4=") \5
def expandMatches (matches, translations, end=None):
    # Print matches[0] (a DESC or DESC_BEGIN regex match) once per
    # translation, substituting the translated gettext string, then print
    # each subsequent ENUM match the same way.  `end` (the DESC_END line),
    # if given, is printed once at the very end.
    # NOTE: Python 2 code (print statements, ugettext/unicode).
    assert len(matches) > 0
    nTranslations = len(translations)  # NOTE(review): unused — kept as-is
    i = 0
    # Expand the description+enums for all translations
    for lang,trans in translations:
        i = i + 1
        # Make sure that all but the last line of a simple description
        # are extended with a backslash.
        suffix = ''
        if len(matches) == 1 and i < len(translations) and \
           not matches[0].expand (r'\7').endswith('\\'):
            suffix = ' \\'
        # Expand the description line. Need to use ugettext in order to allow
        # non-ascii unicode chars in the original English descriptions.
        text = escapeCString (trans.ugettext (unicode (expandCString (
            matches[0].expand (r'\5')), "utf-8"))).encode("utf-8")
        print matches[0].expand (r'\1' + lang + r'\3"' + text + r'"\7') + suffix
        # Expand any subsequent enum lines
        for match in matches[1:]:
            text = escapeCString (trans.ugettext (unicode (expandCString (
                match.expand (r'\3')), "utf-8"))).encode("utf-8")
            print match.expand (r'\1"' + text + r'"\5')
    # Expand description end
    if end:
        print end,
# Compile a list of translation classes to all supported languages.
# The first translation is always a NullTranslations.
translations = [("en", gettext.NullTranslations())]
for lang in languages:
    try:
        trans = gettext.translation ("options", localedir, [lang])
    except IOError:
        # Missing .mo file: warn and keep going with the other languages.
        sys.stderr.write ("Warning: language '%s' not found.\n" % lang)
        continue
    translations.append ((lang, trans))
# Regular expressions matching the DRI_CONF_* macro lines in t_options.h:
reLibintl_h = re.compile (r'#\s*include\s*<libintl.h>')
reDESC = re.compile (r'(\s*DRI_CONF_DESC\s*\(\s*)([a-z]+)(\s*,\s*)(gettext\s*\(\s*")(.*)("\s*\))(\s*\)[ \t]*\\?)$')
reDESC_BEGIN = re.compile (r'(\s*DRI_CONF_DESC_BEGIN\s*\(\s*)([a-z]+)(\s*,\s*)(gettext\s*\(\s*")(.*)("\s*\))(\s*\)[ \t]*\\?)$')
reENUM = re.compile (r'(\s*DRI_CONF_ENUM\s*\([^,]+,\s*)(gettext\s*\(\s*")(.*)("\s*\))(\s*\)[ \t]*\\?)$')
reDESC_END = re.compile (r'\s*DRI_CONF_DESC_END')
# Print a header
print \
"/***********************************************************************\n" \
" *** THIS FILE IS GENERATED AUTOMATICALLY. DON'T EDIT!                ***\n" \
" ***********************************************************************/"
# Process the options template and generate options.h with all
# translations.  (Python 2: `file()` builtin and print statements.)
template = file (template_header_path, "r")
descMatches = []
for line in template:
    # While inside a DESC_BEGIN ... DESC_END section, collect ENUM lines and
    # emit everything once the END marker arrives.
    if len(descMatches) > 0:
        matchENUM = reENUM .match (line)
        matchDESC_END = reDESC_END.match (line)
        if matchENUM:
            descMatches.append (matchENUM)
        elif matchDESC_END:
            expandMatches (descMatches, translations, line)
            descMatches = []
        else:
            sys.stderr.write (
                "Warning: unexpected line inside description dropped:\n%s\n" \
                % line)
        continue
    if reLibintl_h.search (line):
        # Ignore (comment out) #include <libintl.h>
        print "/* %s * commented out by gen_xmlpool.py */" % line
        continue
    matchDESC = reDESC .match (line)
    matchDESC_BEGIN = reDESC_BEGIN.match (line)
    if matchDESC:
        assert len(descMatches) == 0
        expandMatches ([matchDESC], translations)
    elif matchDESC_BEGIN:
        assert len(descMatches) == 0
        descMatches = [matchDESC_BEGIN]
    else:
        # Pass all other template lines through unchanged.
        print line,
if len(descMatches) > 0:
    sys.stderr.write ("Warning: unterminated description at end of file.\n")
    expandMatches (descMatches, translations)
| 35.063415
| 127
| 0.497496
|
acfdd17d85ef6586eb09eca4e20919c90c662639
| 4,937
|
py
|
Python
|
tests/test_tool.py
|
petli/brioche
|
b7cbdfae400facb59188a4954c8c4b1b4d14def9
|
[
"MIT"
] | null | null | null |
tests/test_tool.py
|
petli/brioche
|
b7cbdfae400facb59188a4954c8c4b1b4d14def9
|
[
"MIT"
] | null | null | null |
tests/test_tool.py
|
petli/brioche
|
b7cbdfae400facb59188a4954c8c4b1b4d14def9
|
[
"MIT"
] | null | null | null |
# Copyright 2020 Peter Liljenberg <peter.liljenberg@gmail.com>
# Open source under the MIT license (see LICENSE)
# pylint: disable=missing-function-docstring missing-module-docstring import-error
import pytest
import pandas as pd
from brioche.tool import main
# Shared parametrization: run each test with both supported CSV separators.
SEPS = pytest.mark.parametrize('sep', [',', ';'])
@SEPS
def test_pollen_counts_for_multiple_sites(tmp_path, sep):
    """End-to-end run on raw pollen counts for two sites, checking the
    percentage, stabilized, biome-score and biome-assignment CSV outputs."""
    taxas = write_taxas(tmp_path, sep)
    biomes = write_biomes(tmp_path, sep)
    site1 = write_samples(tmp_path, 'site1', sep,
                          ('depth', 'taxa1', 'taxa2', 'taxa3'),
                          (10, 1, 4, 45))
    site2 = write_samples(tmp_path, 'site2', sep,
                          ('depth', 'taxa1', 'taxa0'),
                          (20, 1, 9))
    main(['--decimals=1',
          '--separator', sep,
          '--save-percentages',
          '--save-stabilized',
          '--taxas', taxas,
          '--biomes', biomes,
          site1, site2])
    assert read_csv(tmp_path / 'site1_percentages.csv') == expected_csv(sep,
        ('depth', 'taxa1', 'taxa2', 'taxa3'),
        (10, '2.0', '8.0', '90.0'))
    assert read_csv(tmp_path / 'site1_stabilized.csv') == expected_csv(sep,
        ('depth', 'taxa1', 'taxa2', 'taxa3'),
        (10, '1.2', '2.7', '9.5'))
    assert read_csv(tmp_path / 'site1_scores.csv') == expected_csv(sep,
        ('depth', 'biome1', 'biome2', 'biome3'),
        (10, '1.2', '2.7', '9.5'))
    assert read_csv(tmp_path / 'site1_biomes.csv') == expected_csv(sep,
        ('depth', 'Biome'),
        (10, 'biome3'))
    assert read_csv(tmp_path / 'site2_percentages.csv') == expected_csv(sep,
        ('depth', 'taxa1', 'taxa0'),
        (20, '10.0', '90.0'))
    assert read_csv(tmp_path / 'site2_stabilized.csv') == expected_csv(sep,
        ('depth', 'taxa1', 'taxa0'),
        (20, '3.1', '9.5'))
    assert read_csv(tmp_path / 'site2_scores.csv') == expected_csv(sep,
        ('depth', 'biome1', 'biome2', 'biome3'),
        (20, '12.6', '9.5', '9.5'))
    assert read_csv(tmp_path / 'site2_biomes.csv') == expected_csv(sep,
        ('depth', 'Biome'),
        (20, 'biome1'))
@SEPS
def test_pollen_percentages_with_high_default_threshold(tmp_path, sep):
    """Pre-computed percentages as input: values below the 10.5 default
    threshold are zeroed out in the stabilized output and scores."""
    taxas = write_taxas(tmp_path, sep)
    biomes = write_biomes(tmp_path, sep)
    site1 = write_samples(tmp_path, 'site1', sep,
                          ('depth', 'taxa1', 'taxa2', 'taxa3'),
                          (10, 1, 9, 90),
                          (20, 50, 40, 10))
    main(['--decimals=3',
          '--separator', sep,
          '--default-threshold=10.5',
          '--save-stabilized',
          '--type=percentages',
          '--taxas', taxas,
          '--biomes', biomes,
          site1])
    assert read_csv(tmp_path / 'site1_stabilized.csv') == expected_csv(sep,
        ('depth', 'taxa1', 'taxa2', 'taxa3'),
        (10, '0.000', '0.000', '8.916'),
        (20, '6.285', '5.431', '0.000'))
    assert read_csv(tmp_path / 'site1_scores.csv') == expected_csv(sep,
        ('depth', 'biome1', 'biome2', 'biome3'),
        (10, '0.000', '0.000', '8.916'),
        (20, '6.285', '5.431', '0.000'))
    assert read_csv(tmp_path / 'site1_biomes.csv') == expected_csv(sep,
        ('depth', 'Biome'),
        (10, 'biome3'),
        (20, 'biome1'))
@SEPS
def test_stabilzed_pollen_samples(tmp_path, sep):
taxas = write_taxas(tmp_path, sep)
biomes = write_biomes(tmp_path, sep)
site1 = write_samples(tmp_path, 'site1', sep,
('depth', 'taxa0', 'taxa2', 'taxa3'),
(10, '1.10', '3.33', '0.00'))
main(['--separator', sep,
'--type=stabilized',
'--taxas', taxas,
'--biomes', biomes,
site1])
assert read_csv(tmp_path / 'site1_scores.csv') == expected_csv(sep,
('depth', 'biome1', 'biome2', 'biome3'),
(10, '1.10', '4.43', '1.10'))
assert read_csv(tmp_path / 'site1_biomes.csv') == expected_csv(sep,
('depth', 'Biome'),
(10, 'biome2'))
# Simple taxa mapping: taxa1/2/3 maps to PFTs 1/2/3 respectively
# Also include taxa0 that maps to PFTs 1,2,3 to check that irregular CSV files work
def write_taxas(tmp_path, sep):
taxa_file = tmp_path / 'taxas.csv'
with open(taxa_file, 'wt') as f:
f.write(f'taxa0{sep}1{sep}2{sep}3\n')
for i in range(1,4):
f.write(f'taxa{i}{sep}{i}\n')
return str(taxa_file)
# Simple mapping: biome1/2/3 maps to PFTs 1/2/3 respectively
def write_biomes(tmp_path, sep):
biome_file = tmp_path / 'biomes.csv'
with open(biome_file, 'wt') as f:
for i in range(1,4):
f.write(f'biome{i}{sep}{i}\n')
return str(biome_file)
def write_samples(tmp_path, site, sep, *rows):
sample_file = tmp_path / f'{site}.csv'
with open(sample_file, 'wt') as f:
for row in rows:
f.write(sep.join(map(str, row)))
f.write('\n')
return str(sample_file)
def read_csv(path):
with open(path, 'rt') as f:
return f.read()
def expected_csv(sep, *rows):
return '\n'.join([sep.join(map(str, row)) for row in rows]) + '\n'
| 30.475309
| 83
| 0.572412
|
acfdd1fb3032f05418f05f6f4afc12d68e387730
| 460
|
py
|
Python
|
examples/basic.py
|
rtkefreure/redis-py-cluster
|
f0627c91ce23e8784dbc996078428c9bdbacb20b
|
[
"MIT"
] | 1,075
|
2015-01-01T17:46:25.000Z
|
2022-03-31T17:55:18.000Z
|
examples/basic.py
|
rtkefreure/redis-py-cluster
|
f0627c91ce23e8784dbc996078428c9bdbacb20b
|
[
"MIT"
] | 397
|
2015-01-04T08:39:03.000Z
|
2022-03-22T01:59:18.000Z
|
examples/basic.py
|
rtkefreure/redis-py-cluster
|
f0627c91ce23e8784dbc996078428c9bdbacb20b
|
[
"MIT"
] | 373
|
2015-01-13T08:44:40.000Z
|
2022-03-29T02:18:20.000Z
|
from rediscluster import RedisCluster
startup_nodes = [{"host": "127.0.0.1", "port": "7000"}]
# Note: decode_responses must be set to True when used with python3
rc = RedisCluster(startup_nodes=startup_nodes, decode_responses=True)
rc.set("foo", "bar")
print(rc.get("foo"))
# Alternate simple mode of pointing to one startup node
rc = RedisCluster(
host="127.0.0.1",
port=7000,
decode_responses=True,
)
rc.set("foo", "bar")
print(rc.get("foo"))
| 25.555556
| 69
| 0.704348
|
acfdd260571d554da07e8da3abf65c3ec4489991
| 585
|
py
|
Python
|
node/blockchain/tests/factories/block_message/genesis.py
|
thenewboston-developers/Node
|
e71a405f4867786a54dd17ddd97595dd3a630018
|
[
"MIT"
] | 18
|
2021-11-30T04:02:13.000Z
|
2022-03-24T12:33:57.000Z
|
node/blockchain/tests/factories/block_message/genesis.py
|
thenewboston-developers/Node
|
e71a405f4867786a54dd17ddd97595dd3a630018
|
[
"MIT"
] | 1
|
2022-02-04T17:07:38.000Z
|
2022-02-04T17:07:38.000Z
|
node/blockchain/tests/factories/block_message/genesis.py
|
thenewboston-developers/Node
|
e71a405f4867786a54dd17ddd97595dd3a630018
|
[
"MIT"
] | 5
|
2022-01-31T05:28:13.000Z
|
2022-03-08T17:25:31.000Z
|
from node.blockchain.inner_models import GenesisBlockMessage, GenesisSignedChangeRequest
def make_genesis_block_message(
genesis_signed_change_request_message, primary_validator_private_key, primary_validator_node
) -> GenesisBlockMessage:
request = GenesisSignedChangeRequest.create_from_signed_change_request_message(
message=genesis_signed_change_request_message,
signing_key=primary_validator_private_key,
)
return GenesisBlockMessage.create_from_signed_change_request(
request=request, primary_validator_node=primary_validator_node
)
| 41.785714
| 96
| 0.839316
|
acfdd284a16eaef29c2f82c3526985d6c182263d
| 15,838
|
py
|
Python
|
moldynplot/PDistFigureManager.py
|
KarlTDebiec/myplotspec_sim
|
f63ebf446ff6365857c544508931a21eb75e57e7
|
[
"BSD-3-Clause"
] | 8
|
2016-07-20T16:26:18.000Z
|
2020-05-22T21:58:27.000Z
|
moldynplot/PDistFigureManager.py
|
KarlTDebiec/myplotspec_sim
|
f63ebf446ff6365857c544508931a21eb75e57e7
|
[
"BSD-3-Clause"
] | 2
|
2016-07-23T17:17:16.000Z
|
2018-02-07T03:34:27.000Z
|
moldynplot/PDistFigureManager.py
|
KarlTDebiec/myplotspec_sim
|
f63ebf446ff6365857c544508931a21eb75e57e7
|
[
"BSD-3-Clause"
] | 4
|
2016-07-20T16:26:29.000Z
|
2022-03-27T18:28:50.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# moldynplot.PDistFigureManager.py
#
# Copyright (C) 2015-2017 Karl T Debiec
# All rights reserved.
#
# This software may be modified and distributed under the terms of the
# BSD license. See the LICENSE file for details.
"""
Generates probability distribution figures to specifications
"""
################################### MODULES ###################################
from __future__ import (absolute_import, division, print_function,
unicode_literals)
if __name__ == "__main__":
__package__ = str("moldynplot")
import moldynplot
from .myplotspec.FigureManager import FigureManager
from .myplotspec.manage_defaults_presets import manage_defaults_presets
from .myplotspec.manage_kwargs import manage_kwargs
################################### CLASSES ###################################
class PDistFigureManager(FigureManager):
"""
Manages the generation of probability distribution figures.
"""
defaults = """
draw_figure:
subplot_kw:
autoscale_on: False
multi_tick_params:
left: on
right: off
bottom: on
top: off
shared_legend: True
shared_legend_kw:
spines: False
handle_kw:
ls: none
marker: s
mec: black
legend_kw:
borderaxespad: 0
frameon: False
handletextpad: 0
loc: 9
numpoints: 1
draw_subplot:
title_kw:
verticalalignment: bottom
ylabel: "Probability Distribution"
yticklabels: []
tick_params:
direction: out
left: on
right: off
bottom: on
top: off
grid: True
grid_kw:
b: True
color: [0.7,0.7,0.7]
linestyle: '-'
linewidth: 0.5
label_kw:
zorder: 10
horizontalalignment: left
verticalalignment: top
draw_dataset:
plot_kw:
zorder: 10
fill_between_kw:
color: [0.7, 0.7, 0.7]
lw: 0
ylb: 0
yub: 1
zorder: 1
handle_kw:
ls: none
marker: s
mec: black
mean_kw:
ls: none
marker: o
mec: black
zorder: 11
"""
available_presets = """
pmf:
class: content
help: Plot potential of mean force (PMF)
draw_figure:
multi_xticklabels: [2,3,4,5,6,7,8]
multi_yticklabels: [-3.0,-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.5]
draw_subplot:
xlabel: Minimum N-O distance
xticks: [2,3,4,5,6,7,8]
ybound: [-3.2,0.8]
ylabel: "Potential of Mean Force\\n(kcal/mol)"
yticks: [-3.0,-2.5,-2.0,-1.5,-1.0,-0.5,0.0,0.5]
draw_dataset:
column: pmf
dataset_kw:
cls: moldynplot.dataset.H5Dataset
default_address: /kde/pmf
default_key: pmf
draw_zero_line: True
radgyr:
class: content
help: Radius of Gyration (Rg)
draw_figure:
multi_xticklabels: [0,5,10,15,20,25,30]
draw_subplot:
xlabel: $R_g$ (Å)
xticks: [0,5,10,15,20,25,30]
draw_dataset:
column: rg
dataset_kw:
cls: moldynplot.dataset.TimeSeriesDataset.TimeSeriesDataset
calc_pdist: True
pdist_kw:
bandwidth: 0.1
grid: !!python/object/apply:numpy.linspace [0,30,1000]
read_csv_kw:
delim_whitespace: True
header: 0
names: [frame, rg, rgmax]
rmsd:
class: content
help: Root Mean Standard Deviation (RMSD)
draw_figure:
multi_xticklabels: [0,1,2,3,4,5]
draw_subplot:
xlabel: RMSD (Å)
xticks: [0,1,2,3,4,5]
draw_dataset:
column: rmsd
dataset_kw:
cls: moldynplot.dataset.TimeSeriesDataset.TimeSeriesDataset
calc_pdist: True
pdist_kw:
bandwidth: 0.1
grid: !!python/object/apply:numpy.linspace [0,5,1000]
read_csv_kw:
delim_whitespace: True
header: 0
names: [frame, rmsd]
r1:
class: content
help: Format subplot for R1 relaxation
draw_subplot:
xlabel: "$R_1$"
xticks: [0.0,0.5,1.0,1.5,2.0,2.5,3.0]
draw_dataset:
dataset_kw:
pdist_kw:
bandwidth: 0.02
column: r1
r2:
class: content
help: Format subplot for R2 relaxation
draw_subplot:
xlabel: "$R_2$"
xticks: [0,2,4,6,8,10,12,14,16,18,20]
draw_dataset:
dataset_kw:
pdist_kw:
bandwidth: 0.3
column: r2
r2/r1:
class: content
help: Format subplot for R2/R1 relaxation
draw_subplot:
xlabel: "$R_2$/$R_1$"
xticks: [3,4,5,6,7,8,9,10,11]
draw_dataset:
dataset_kw:
pdist_kw:
bandwidth:
r2/r1: 0.1
column: r2/r1
hetnoe:
class: content
help: Format subplot for Heteronuclear NOE relaxation
draw_subplot:
xlabel: "Heteronuclear NOE"
xticks: [0.0,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0]
draw_dataset:
column: noe
dataset_kw:
pdist_kw:
bandwidth: 0.03
rotdif:
class: content
help: Format subplot for rotational diffusion
draw_subplot:
xlabel: "$τ_c$ (ns)"
xticks: [5,6,7,8,9,10,11,12,13,14]
draw_dataset:
column: rotdif
dataset_kw:
pdist_kw:
bandwidth: 0.2
relaxation_3:
class: content
help: Three stacked plots including R1, R2, and HetNOE
draw_figure:
nrows: 3
shared_ylabel: "Probability Distribution"
subplots:
0:
preset: r1
ylabel: null
1:
preset: r2
ylabel: null
2:
preset: hetnoe
ylabel: null
relaxation_4:
class: content
help: Four stacked plots including R1, R2, R2/R1, and HetNOE
draw_figure:
nrows: 4
shared_ylabel: "Probability Distribution"
subplots:
0:
preset: r1
ylabel: null
1:
preset: r2
ylabel: null
2:
preset: r2/r1
ylabel: null
3:
preset: hetnoe
ylabel: null
rotdif_2:
class: content
help: Two stacked plots including R2/R1 rotdif
draw_figure:
nrows: 2
shared_ylabel: "Probability Distribution"
subplots:
0:
preset: r2/r1
ylabel: null
1:
preset: rotdif
ylabel: null
rotdif_4:
class: content
help: Two stacked plots including R2/R1 rotdif
draw_figure:
nrows: 2
ncols: 2
shared_ylabel: "Probability Distribution"
subplots:
0:
preset: r2/r1
ylabel: null
1:
preset: r2/r1
ylabel: null
2:
preset: rotdif
ylabel: null
3:
preset: rotdif
ylabel: null
manuscript:
class: target
inherits: manuscript
draw_figure:
bottom: 0.55
hspace: 0.10
left: 0.30
right: 0.10
sub_height: 1.00
sub_width: 2.65
top: 0.10
wspace: 0.10
shared_legend_kw:
left: 0.30
sub_width: 2.65
bottom: 0.00
sub_height: 0.20
handle_kw:
mew: 0.5
ms: 5
legend_kw:
labelspacing: 0.5
ncol: 6
shared_xlabel_kw:
bottom: -0.24
title_kw:
top: -0.1
draw_subplot:
xlabel_kw:
labelpad: 3
ylabel_kw:
labelpad: 6
y2ticks: []
y2label_kw:
rotation: 270
verticalalignment: bottom
grid_kw:
linewidth: 0.5
draw_label: True
label_kw:
border_lw: 1
xabs: 0.020
yabs: -0.025
draw_dataset:
mean_kw:
mew: 0.5
ms: 2
handle_kw:
mew: 0.5
ms: 5
presentation_wide:
class: target
inherits: presentation_wide
draw_figure:
bottom: 1.80
hspace: 0.20
left: 0.80
right: 0.80
sub_height: 2.00
sub_width: 4.00
top: 0.60
wspace: 0.20
shared_legend_kw:
left: 0.80
sub_width: 16.60
bottom: 0.00
sub_height: 0.60
handle_kw:
mew: 2.0
ms: 20
legend_kw:
labelspacing: 0.5
ncol: 6
shared_ylabel_kw:
left: -0.5
shared_xlabel_kw:
bottom: -0.9
draw_subplot:
y2ticks: []
y2label_kw:
labelpad: 10
rotation: 270
verticalalignment: bottom
draw_dataset:
mean_kw:
mew: 2.0
ms: 8
handle_kw:
mew: 2.0
ms: 20
"""
@manage_defaults_presets()
@manage_kwargs()
def draw_dataset(self, subplot, column=None,
draw_pdist=True, draw_fill_between=False, draw_mean=False,
draw_plot=False, draw_zero_line=False, **kwargs):
"""
Loads a dataset and draws it on a subplot.
Loaded dataset should have attribute `pdist_df`.
Arguments:
subplot (Axes): :class:`Axes<matplotlib.axes.Axes>` on
which to draw
dataset_kw (dict): Keyword arguments passed to
:meth:`load_dataset
<myplotspec.FigureManager.FigureManager.load_dataset>`
plot_kw (dict): Keyword arguments passed to methods of
:class:`Axes<matplotlib.axes.Axes>`
column (str): Column within `pdist_df` to use
draw_fill_between (bool): Fill between specified region
fill_between_kw (dict): Keyword arguments used to configure
call to
:meth:`fill_between<matplotlib.axes.Axes.fill_between>`
fill_between_kw[x] (list, ndarray): x values passed to
:meth:`fill_between<matplotlib.axes.Axes.fill_between>`
fill_between_kw[ylb] (list, ndarray): y lower bound values
passed to
:meth:`fill_between<matplotlib.axes.Axes.fill_between>`
fill_between_kw[yub] (list, ndarray): y upper bound values
passed to
:meth:`fill_between<matplotlib.axes.Axes.fill_between>`
draw_pdist (bool): Draw probability distribution
pdist_kw (dict): Keyword arguments using to configure call to
:meth:`plot<matplotlib.axes.Axes.plot>`
draw_mean (bool): Draw point at mean value
mean_kw (dict): Keyword arguments used to configure call to
:meth:`plot<matplotlib.axes.Axes.plot>`
verbose (int): Level of verbose output
kwargs (dict): Additional keyword arguments
"""
from warnings import warn
import pandas as pd
import numpy as np
from .myplotspec import get_colors, multi_get_copy
# Process arguments
verbose = kwargs.get("verbose", 1)
dataset_kw = multi_get_copy("dataset_kw", kwargs, {})
if "infile" in kwargs:
dataset_kw["infile"] = kwargs["infile"]
dataset = self.load_dataset(verbose=verbose, **dataset_kw)
if dataset is not None and hasattr(dataset, "pdist_df"):
pdist_df = dataset.pdist_df
elif dataset is not None and hasattr(dataset, "datasets"):
try:
pdist_df = dataset.pdist_df = pd.DataFrame(
dataset.datasets["pmf"]["pmf"],
index=dataset.datasets["pmf"]["x"],
columns = ["pmf"])
except:
pdist_df = dataset.pdist_df = pd.DataFrame(
dataset.datasets["pmf"]["pmf"],
index=dataset.datasets["pmf"]["center"],
columns = ["pmf"])
dataset.pdist_df.index.name = "x"
else:
pdist_df = None
# Configure plot settings
plot_kw = multi_get_copy("plot_kw", kwargs, {})
get_colors(plot_kw, kwargs)
# Draw fill_between
if draw_fill_between:
fill_between_kw = multi_get_copy("fill_between_kw", kwargs, {})
get_colors(fill_between_kw, plot_kw)
if "x" in fill_between_kw:
fb_x = fill_between_kw.pop("x")
if "ylb" in fill_between_kw:
fb_ylb = fill_between_kw.pop("ylb")
if "yub" in fill_between_kw:
fb_yub = fill_between_kw.pop("yub")
subplot.fill_between(fb_x, fb_ylb, fb_yub, **fill_between_kw)
# Draw pdist
if draw_pdist:
if not hasattr(dataset, "pdist_df"):
warn("'draw_pdist' is enabled but dataset does not have the "
"necessary attribute 'pdist_df', skipping.")
else:
pdist = pdist_df[column]
pdist_kw = plot_kw.copy()
pdist_kw.update(kwargs.get("pdist_kw", {}))
pd_x = pdist.index.values
pd_y = np.squeeze(pdist.values)
subplot.plot(pd_x, pd_y, **pdist_kw)
pdist_rescale = True
if pdist_rescale:
pdist_max = pd_y.max()
y_max = subplot.get_ybound()[1]
if (pdist_max > y_max / 1.25
or not hasattr(subplot, "_mps_rescaled")):
# print("\nPIDST MAX: {0}\n".format(pdist_max))
subplot.set_ybound(0, pdist_max*1.25)
yticks = [0, pdist_max*0.25, pdist_max*0.50,
pdist_max*0.75, pdist_max, pdist_max*1.25]
subplot.set_yticks(yticks)
subplot._mps_rescaled = True
if draw_mean:
mean_kw = plot_kw.copy()
mean_kw.update(kwargs.get("mean_kw", {}))
mean = np.sum(np.array(pd_x, np.float64)
*np.array(pd_y, np.float64))
if verbose >= 1:
print("mean: {0:6.3f}".format(mean))
subplot.plot(mean, pd_y[np.abs(pd_x - mean).argmin()],
**mean_kw)
if draw_plot:
if "x" in kwargs:
x = kwargs.get("x")
subplot.plot([x, x], [0,1], **plot_kw)
if draw_zero_line:
subplot.plot([0, 10], [0,0], linewidth=0.5, color="black")
#################################### MAIN #####################################
if __name__ == "__main__":
PDistFigureManager().main()
| 31.931452
| 79
| 0.493307
|
acfdd327c3b518cc786f6d5a07faca68ed892a25
| 4,959
|
py
|
Python
|
asv_bench/benchmarks/io/json.py
|
henriqueribeiro/pandas
|
996f361f8e6986ea1c65ccb164a4c585e1f4a027
|
[
"BSD-3-Clause"
] | 2
|
2019-01-09T07:43:12.000Z
|
2020-05-30T05:49:11.000Z
|
asv_bench/benchmarks/io/json.py
|
henriqueribeiro/pandas
|
996f361f8e6986ea1c65ccb164a4c585e1f4a027
|
[
"BSD-3-Clause"
] | 3
|
2018-09-24T22:09:28.000Z
|
2018-10-01T21:10:00.000Z
|
asv_bench/benchmarks/io/json.py
|
henriqueribeiro/pandas
|
996f361f8e6986ea1c65ccb164a4c585e1f4a027
|
[
"BSD-3-Clause"
] | 2
|
2019-03-08T19:59:05.000Z
|
2020-09-27T03:18:37.000Z
|
import numpy as np
import pandas.util.testing as tm
from pandas import DataFrame, date_range, timedelta_range, concat, read_json
from ..pandas_vb_common import setup, BaseIO # noqa
class ReadJSON(BaseIO):
goal_time = 0.2
fname = "__test__.json"
params = (['split', 'index', 'records'], ['int', 'datetime'])
param_names = ['orient', 'index']
def setup(self, orient, index):
N = 100000
indexes = {'int': np.arange(N),
'datetime': date_range('20000101', periods=N, freq='H')}
df = DataFrame(np.random.randn(N, 5),
columns=['float_{}'.format(i) for i in range(5)],
index=indexes[index])
df.to_json(self.fname, orient=orient)
def time_read_json(self, orient, index):
read_json(self.fname, orient=orient)
class ReadJSONLines(BaseIO):
goal_time = 0.2
fname = "__test_lines__.json"
params = ['int', 'datetime']
param_names = ['index']
def setup(self, index):
N = 100000
indexes = {'int': np.arange(N),
'datetime': date_range('20000101', periods=N, freq='H')}
df = DataFrame(np.random.randn(N, 5),
columns=['float_{}'.format(i) for i in range(5)],
index=indexes[index])
df.to_json(self.fname, orient='records', lines=True)
def time_read_json_lines(self, index):
read_json(self.fname, orient='records', lines=True)
def time_read_json_lines_concat(self, index):
concat(read_json(self.fname, orient='records', lines=True,
chunksize=25000))
def peakmem_read_json_lines(self, index):
read_json(self.fname, orient='records', lines=True)
def peakmem_read_json_lines_concat(self, index):
concat(read_json(self.fname, orient='records', lines=True,
chunksize=25000))
class ToJSON(BaseIO):
goal_time = 0.2
fname = "__test__.json"
params = ['split', 'columns', 'index']
param_names = ['orient']
def setup(self, lines_orient):
N = 10**5
ncols = 5
index = date_range('20000101', periods=N, freq='H')
timedeltas = timedelta_range(start=1, periods=N, freq='s')
datetimes = date_range(start=1, periods=N, freq='s')
ints = np.random.randint(100000000, size=N)
floats = np.random.randn(N)
strings = tm.makeStringIndex(N)
self.df = DataFrame(np.random.randn(N, ncols), index=np.arange(N))
self.df_date_idx = DataFrame(np.random.randn(N, ncols), index=index)
self.df_td_int_ts = DataFrame({'td_1': timedeltas,
'td_2': timedeltas,
'int_1': ints,
'int_2': ints,
'ts_1': datetimes,
'ts_2': datetimes},
index=index)
self.df_int_floats = DataFrame({'int_1': ints,
'int_2': ints,
'int_3': ints,
'float_1': floats,
'float_2': floats,
'float_3': floats},
index=index)
self.df_int_float_str = DataFrame({'int_1': ints,
'int_2': ints,
'float_1': floats,
'float_2': floats,
'str_1': strings,
'str_2': strings},
index=index)
def time_floats_with_int_index(self, orient):
self.df.to_json(self.fname, orient=orient)
def time_floats_with_dt_index(self, orient):
self.df_date_idx.to_json(self.fname, orient=orient)
def time_delta_int_tstamp(self, orient):
self.df_td_int_ts.to_json(self.fname, orient=orient)
def time_float_int(self, orient):
self.df_int_floats.to_json(self.fname, orient=orient)
def time_float_int_str(self, orient):
self.df_int_float_str.to_json(self.fname, orient=orient)
def time_floats_with_int_idex_lines(self, orient):
self.df.to_json(self.fname, orient='records', lines=True)
def time_floats_with_dt_index_lines(self, orient):
self.df_date_idx.to_json(self.fname, orient='records', lines=True)
def time_delta_int_tstamp_lines(self, orient):
self.df_td_int_ts.to_json(self.fname, orient='records', lines=True)
def time_float_int_lines(self, orient):
self.df_int_floats.to_json(self.fname, orient='records', lines=True)
def time_float_int_str_lines(self, orient):
self.df_int_float_str.to_json(self.fname, orient='records', lines=True)
| 38.742188
| 79
| 0.548296
|
acfdd442e787313c226d40b16542b4d15fa17fc8
| 2,107
|
py
|
Python
|
zhusuan/distributions/possion.py
|
thuwzy/ZhuSuan-PyTorch
|
471e4d401a6edce07312b01b2b76fa2c56b15c0f
|
[
"MIT"
] | 12
|
2021-08-11T10:28:21.000Z
|
2022-03-12T14:20:02.000Z
|
zhusuan/distributions/possion.py
|
thuwzy/ZhuSuan-PyTorch
|
471e4d401a6edce07312b01b2b76fa2c56b15c0f
|
[
"MIT"
] | null | null | null |
zhusuan/distributions/possion.py
|
thuwzy/ZhuSuan-PyTorch
|
471e4d401a6edce07312b01b2b76fa2c56b15c0f
|
[
"MIT"
] | 2
|
2021-08-17T12:05:15.000Z
|
2022-01-12T09:47:49.000Z
|
import torch
from zhusuan.distributions import Distribution
class Possion(Distribution):
"""
The class of univariate Possion distribution
See :class:`~zhusuan.distributions.base.Distribution` for details.
:param rate: A 'float' Var. Rate parameter of the Possion distribution.
"""
def __init__(self,
dtype=torch.float32,
is_continues=True,
group_ndims=0,
device=torch.device('cpu'),
**kwargs):
super(Possion, self).__init__(dtype,
is_continues,
is_reparameterized=False, # reparameterization trick is not applied for Possion distribution
group_ndims=group_ndims,
device=device,
**kwargs)
self._rate = torch.as_tensor(kwargs['rate'], dtype = self._dtype).to(device) if type(kwargs['rate']) in [int, float] else kwargs['rate'].to(device)
@property
def rate(self):
"""Shape parameter of the Possion distribution."""
return self._rate
def _sample(self, n_samples=1):
if n_samples > 1:
_shape = self._rate.shape
_shape = torch.Size([n_samples]) + _shape
_len = len(self._rate.shape)
_rate = self._rate.repeat([n_samples, *_len * [1]])
else:
_shape = self._rate.shape
_rate = torch.as_tensor(self._rate, dtype=self._dtype)
_sample = torch.distributions.poisson.Poisson(_rate).sample()
self.sample_cache = _sample
return _sample
def _log_prob(self, sample=None):
if sample is None:
sample = self.sample_cache
if len(sample.shape) > len(self._rate.shape):
n_samples = sample.shape[0]
_len = len(self._rate.shape)
_rate = self._rate.repeat([n_samples, *_len * [1]])
else:
_rate = self._rate
return torch.distributions.poisson.Poisson(_rate).log_prob(sample)
| 38.309091
| 155
| 0.563835
|
acfdd577d2b484e79f8a017fff259f7160ae51d0
| 8,506
|
py
|
Python
|
tests/test_cache_control.py
|
obendidi/httpx-cache
|
897dd8da5bb377ed7f61b367716976bdc0d581b1
|
[
"BSD-3-Clause"
] | 16
|
2021-12-13T01:27:44.000Z
|
2022-02-28T02:58:46.000Z
|
tests/test_cache_control.py
|
obendidi/httpx-cache
|
897dd8da5bb377ed7f61b367716976bdc0d581b1
|
[
"BSD-3-Clause"
] | 23
|
2022-01-03T15:57:39.000Z
|
2022-03-28T22:25:08.000Z
|
tests/test_cache_control.py
|
obendidi/httpx-cache
|
897dd8da5bb377ed7f61b367716976bdc0d581b1
|
[
"BSD-3-Clause"
] | 2
|
2022-01-21T17:57:19.000Z
|
2022-01-21T18:18:47.000Z
|
from datetime import datetime, timedelta, timezone
from email.utils import format_datetime
import httpx
import pytest
import httpx_cache
from httpx_cache.cache_control import _PERMANENT_REDIRECT_STATUSES, CacheControl
def test_is_request_cacheable(httpx_request):
controller = httpx_cache.CacheControl()
assert controller.is_request_cacheable(httpx_request) is True
def test_is_request_cacheable_with_relative_url():
request = httpx.Request("GET", "/path")
controller = httpx_cache.CacheControl()
assert controller.is_request_cacheable(request) is False
@pytest.mark.parametrize(
"cacheable_methods,method,expected",
[
(("GET",), "POST", False),
(("GET", "POST"), "POST", True),
],
)
def test_is_request_cacheable_with_method(cacheable_methods, method, expected):
request = httpx.Request(method, "http://testurl/path")
controller = httpx_cache.CacheControl(cacheable_methods=cacheable_methods)
assert controller.is_request_cacheable(request) is expected
def test_is_request_cacheable_with_no_cache_headers():
request = httpx.Request(
"GET", "http://testurl/path", headers={"cache-control": "no-cache"}
)
controller = httpx_cache.CacheControl()
assert controller.is_request_cacheable(request) is False
def test_is_request_cacheable_with_max_age_0_headers():
request = httpx.Request(
"GET", "http://testurl/path", headers={"cache-control": "max-age=0"}
)
controller = httpx_cache.CacheControl()
assert controller.is_request_cacheable(request) is False
def test_is_response_cacheable(httpx_request, httpx_response):
controller = httpx_cache.CacheControl()
assert (
controller.is_response_cacheable(request=httpx_request, response=httpx_response)
is True
)
@pytest.mark.parametrize(
"cacheable_status_codes,code,expected",
[
((200, 203, 300, 301, 308), 200, True),
((500, 404), 400, False),
],
)
def test_is_response_cacheable_with_status_code(
cacheable_status_codes, code, expected, httpx_request
):
response = httpx.Response(code)
controller = httpx_cache.CacheControl(cacheable_status_codes=cacheable_status_codes)
assert (
controller.is_response_cacheable(request=httpx_request, response=response)
is expected
)
def test_is_response_cacheable_with_response_no_store_header(
httpx_request,
):
response = httpx.Response(200, headers={"cache-control": "no-store"})
controller = httpx_cache.CacheControl()
assert (
controller.is_response_cacheable(request=httpx_request, response=response)
is False
)
def test_is_response_cacheable_with_request_no_store_header():
request = httpx.Request(
"GET", "http://testurl", headers={"cache-control": "no-store"}
)
response = httpx.Response(200)
controller = httpx_cache.CacheControl()
assert controller.is_response_cacheable(request=request, response=response) is False
def test_is_response_fresh(httpx_request, httpx_response):
controller = httpx_cache.CacheControl()
assert (
controller.is_response_fresh(request=httpx_request, response=httpx_response)
is True
)
@pytest.mark.parametrize("code", _PERMANENT_REDIRECT_STATUSES)
def test_is_response_fresh_with_permanent_redirect(httpx_request, code):
controller = httpx_cache.CacheControl()
response = httpx.Response(code)
assert (
controller.is_response_fresh(request=httpx_request, response=response) is True
)
def test_is_response_fresh_with_expires_header_no_date():
request = httpx.Request("GET", "http://testurl")
response = httpx.Response(200, headers={"expires": "Tue, 15 Nov 1994 12:45:26 GMT"})
controller = CacheControl()
assert controller.is_response_fresh(request=request, response=response) is False
def test_is_response_fresh_with_invalid_expires_header():
date = datetime.now(tz=timezone.utc)
request = httpx.Request("GET", "http://testurl")
response = httpx.Response(
200,
headers={
"date": format_datetime(date, usegmt=True),
"expires": "lala",
},
)
controller = CacheControl()
assert controller.is_response_fresh(request=request, response=response) is False
def test_is_response_fresh_with_expires_header_fresh():
date = datetime.now(tz=timezone.utc)
expires = datetime.now(tz=timezone.utc) + timedelta(hours=1)
request = httpx.Request("GET", "http://testurl")
response = httpx.Response(
200,
headers={
"date": format_datetime(date, usegmt=True),
"expires": format_datetime(expires, usegmt=True),
},
)
controller = CacheControl()
assert controller.is_response_fresh(request=request, response=response) is True
def test_is_response_fresh_with_expires_header_not_fresh():
expires = datetime.now(tz=timezone.utc) - timedelta(minutes=5)
date = expires - timedelta(minutes=5)
request = httpx.Request("GET", "http://testurl")
response = httpx.Response(
200,
headers={
"date": format_datetime(date, usegmt=True),
"expires": format_datetime(expires, usegmt=True),
},
)
controller = CacheControl()
assert controller.is_response_fresh(request=request, response=response) is False
def test_is_response_fresh_with_max_age_response_header_fresh():
expires = datetime.now(tz=timezone.utc) - timedelta(minutes=5)
date = expires - timedelta(minutes=5)
request = httpx.Request("GET", "http://testurl")
response = httpx.Response(
200,
headers={
"date": format_datetime(date, usegmt=True),
"expires": format_datetime(expires, usegmt=True),
"cache-control": "max-age=900",
},
)
controller = CacheControl()
assert controller.is_response_fresh(request=request, response=response) is True
def test_is_response_fresh_with_max_age_response_header_not_fresh():
date = datetime.now(tz=timezone.utc) - timedelta(days=1)
expires = datetime.now(tz=timezone.utc) + timedelta(hours=1)
request = httpx.Request("GET", "http://testurl")
response = httpx.Response(
200,
headers={
"date": format_datetime(date, usegmt=True),
"expires": format_datetime(expires, usegmt=True),
"cache-control": "max-age=900",
},
)
controller = CacheControl()
assert controller.is_response_fresh(request=request, response=response) is False
def test_is_response_fresh_with_max_age_response_header_no_date():
request = httpx.Request("GET", "http://testurl")
response = httpx.Response(
200,
headers={"cache-control": "max-age=900"},
)
controller = CacheControl()
assert controller.is_response_fresh(request=request, response=response) is False
def test_is_response_fresh_with_max_age_request_header_fresh():
date = datetime.now(tz=timezone.utc) - timedelta(days=1)
request = httpx.Request(
"GET", "http://testurl", headers={"cache-control": "max-age=100000"}
)
response = httpx.Response(
200,
headers={
"date": format_datetime(date, usegmt=True),
"cache-control": "max-age=900",
},
)
controller = CacheControl()
assert controller.is_response_fresh(request=request, response=response) is True
def test_is_response_fresh_with_max_age_request_header_not_fresh():
date = datetime.now(tz=timezone.utc) - timedelta(days=1)
request = httpx.Request(
"GET", "http://testurl", headers={"cache-control": "max-age=900"}
)
response = httpx.Response(
200,
headers={
"date": format_datetime(date, usegmt=True),
"cache-control": "max-age=100000",
},
)
controller = CacheControl()
assert controller.is_response_fresh(request=request, response=response) is False
def test_is_response_fresh_with_max_age_request_header_fresh_with_min_fresh_header():
date = datetime.now(tz=timezone.utc) - timedelta(minutes=51)
request = httpx.Request(
"GET", "http://testurl", headers={"cache-control": "max-age=3600,min-fresh=600"}
)
response = httpx.Response(
200,
headers={
"date": format_datetime(date, usegmt=True),
},
)
controller = CacheControl()
assert controller.is_response_fresh(request=request, response=response) is False
| 32.841699
| 88
| 0.695862
|
acfdd57f45b01b6333a56207a0eee4bee2921546
| 2,537
|
py
|
Python
|
simplejson/tests/test_dump.py
|
koodaa-team/simplejson
|
e6133d7e333d3ab24d32903b1ae38bd3b6875d55
|
[
"MIT"
] | null | null | null |
simplejson/tests/test_dump.py
|
koodaa-team/simplejson
|
e6133d7e333d3ab24d32903b1ae38bd3b6875d55
|
[
"MIT"
] | null | null | null |
simplejson/tests/test_dump.py
|
koodaa-team/simplejson
|
e6133d7e333d3ab24d32903b1ae38bd3b6875d55
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from io import StringIO
import simplejson as json
class TestDump(TestCase):
    """Checks for simplejson's dump/dumps serialization behavior."""

    def test_dump(self):
        # dump() writes the encoded document to a file-like object.
        buf = StringIO()
        json.dump({}, buf)
        self.assertEqual(buf.getvalue(), '{}')

    def test_dumps(self):
        # dumps() returns the encoded document as a string.
        self.assertEqual(json.dumps({}), '{}')

    def test_encode_truefalse(self):
        # Boolean keys are coerced to the JSON strings "true"/"false",
        # and sort_keys orders the coerced string keys.
        self.assertEqual(
            json.dumps({True: False, False: True}, sort_keys=True),
            '{"false": true, "true": false}')
        # Numeric keys are stringified as well before sorting.
        self.assertEqual(
            json.dumps({2: 3.0, 4.0: 5, False: 1, 6: True, "7": 0}, sort_keys=True),
            '{"false": 1, "2": 3.0, "4.0": 5, "6": true, "7": 0}')

    def test_ordered_dict(self):
        # Regression for http://bugs.python.org/issue6105: insertion order
        # must survive encoding of an OrderedDict.
        pairs = [('one', 1), ('two', 2), ('three', 3), ('four', 4), ('five', 5)]
        encoded = json.dumps(json.OrderedDict(pairs))
        self.assertEqual(
            encoded,
            '{"one": 1, "two": 2, "three": 3, "four": 4, "five": 5}')

    def test_indent_unknown_type_acceptance(self):
        """
        Regression test for `github issue 29`_.

        For backwards compatibility the ``indent`` parameter must accept
        any object that acts like an int (or long) when multiplied by a
        string, even if it is not actually an int or long.

        .. _github issue 29:
            http://github.com/simplejson/simplejson/issue/29
        """
        class FakeIndent(object):
            """Duck-types an int, but only for string multiplication."""
            def __init__(self, *args, **kwargs):
                if len(args) > 0:
                    # Only store the value when constructed from an integer;
                    # otherwise __mul__ refuses to cooperate below.
                    if isinstance(args[0], int):
                        self._int = args[0]

            def __mul__(self, other):
                # Fall back to plain integer multiplication when possible.
                if hasattr(self, '_int'):
                    return self._int * other
                else:
                    raise NotImplementedError("To do non-awesome things with"
                        " this object, please construct it from an integer!")

        encoded = json.dumps(list(range(3)), indent=FakeIndent(3))
        self.assertEqual(encoded, '[\n 0,\n 1,\n 2\n]')
| 37.308824
| 85
| 0.53646
|
acfdd59a973118e4ad652ae789cd28091a097417
| 3,946
|
py
|
Python
|
Code/MACDonGDXJ.py
|
BambooFlower/MACD-Strategy
|
896226f46dc42bcb7153e34518f8e01164ec644e
|
[
"MIT"
] | 2
|
2020-01-31T09:52:34.000Z
|
2020-12-06T12:24:07.000Z
|
Code/MACDonGDXJ.py
|
BambooFlower/MACD-Strategy
|
896226f46dc42bcb7153e34518f8e01164ec644e
|
[
"MIT"
] | null | null | null |
Code/MACDonGDXJ.py
|
BambooFlower/MACD-Strategy
|
896226f46dc42bcb7153e34518f8e01164ec644e
|
[
"MIT"
] | null | null | null |
from quantopian.pipeline.data.builtin import USEquityPricing
import statsmodels.api as sm
import quantopian.pipeline.data
import numpy as np
import pandas as pd
import talib
import scipy
def initialize(context):
    """Quantopian entry point: set up the GDXJ MACD strategy state.

    Stores the traded asset, position-sizing, and stop/take-profit
    percentages on ``context`` and schedules ``my_rebalance`` on a
    30-minute grid during the trading day.
    """
    set_benchmark(symbol('GDXJ'))
    context.GDXJ = symbol('GDXJ')
    context.allocation = 1          # fraction of portfolio to allocate per signal
    context.TakeProfitPct = 0.25    # take profit 25% away from entry
    context.StopLossPct = 0.05      # stop loss 5% away from entry
    context.BuyPrice = 0
    context.bought = False          # True while a long position is open
    context.sold = False            # True while a short position is open
    # 30 min scheduler: rebalance at :29 and :59 past each of the first
    # six market hours, plus once at the close.
    for x in [0,1,2,3,4,5]:
        schedule_function(my_rebalance, date_rules.every_day(), time_rules.market_open(hours=x, minutes=29))
        schedule_function(my_rebalance, date_rules.every_day(), time_rules.market_open(hours=x, minutes=59))
    schedule_function(my_rebalance, date_rules.every_day(), time_rules.market_close())
    schedule_function(my_record_vars, date_rules.every_day(), time_rules.market_close())
    # Set commission and slippage (left at platform defaults)
    #set_commission(commission.PerShare(cost=0.005, min_trade_cost=1.0))
    #set_slippage(slippage.FixedSlippage(spread=0.01))
def my_rebalance(context,data):
    """Compute a MACD(12,26,9) on 30-minute GDXJ bars and trade crossovers.

    Long entry requires an upward MACD/signal crossover on the last two
    bars; the short entry only checks that MACD is currently below the
    signal line.
    """
    # Resample 10000 one-minute bars to 30-minute closes.
    GDXJ_prices = data.history(context.GDXJ, "price", 10000, "1m").resample('30T', closed='right', label='right') .last().dropna()
    #GDXJ_prices = data.history(context.GDXJ, "price", 100, "1d")
    ema12 = talib.EMA(GDXJ_prices,12)
    ema26 = talib.EMA(GDXJ_prices,26)
    macd = ema12 - ema26
    signal = talib.EMA(macd,9)
    record(SIG=macd[-1] - signal[-1])
    record(MACD=macd[-1])
    # Upward crossover: MACD was below the signal and is now at/above it.
    if macd[-2] < signal[-2] and macd[-1] >= signal[-1] and not context.bought:
        set_fixed_stop_long(context, data)
        order_target_percent(context.GDXJ, context.allocation)
        context.bought = True
        context.sold = False
    # NOTE(review): unlike the long entry, this does not require a fresh
    # downward crossover (no macd[-2] >= signal[-2] check) — confirm this
    # asymmetry is intentional.
    if macd[-1] < signal[-1] and not context.sold:
        set_fixed_stop_short(context, data)
        order_target_percent(context.GDXJ, -context.allocation)
        context.bought = False
        context.sold = True
def my_record_vars(context, data):
    """End-of-day hook; currently only samples account leverage.

    The plotting call is left disabled, so this is effectively a no-op.
    """
    current_leverage = context.account.leverage
    #record(leverage=current_leverage)
def set_fixed_stop_long(context, data):
    """Record entry price and exit levels for a new LONG position.

    Intended to be called exactly once, when the long is opened: the
    stop-loss sits below the entry price and the take-profit above it.
    """
    if not data.can_trade(context.GDXJ):
        return
    entry = data.current(context.GDXJ, 'price')
    context.BuyPrice = entry
    context.SellLossPrice = entry - (context.StopLossPct * entry)
    context.SellProfitPrice = (entry * context.TakeProfitPct) + entry
def set_fixed_stop_short(context, data):
    """Record entry price and exit levels for a new SHORT position.

    Intended to be called exactly once, when the short is opened: the
    stop-loss sits above the entry price and the take-profit below it.
    """
    if not data.can_trade(context.GDXJ):
        return
    entry = data.current(context.GDXJ, 'price')
    context.BuyPrice = entry
    context.SellLossPrice = entry + (context.StopLossPct * entry)
    context.SellProfitPrice = entry - (entry * context.TakeProfitPct)
def handle_data(context, data):
    """Per-minute hook: enforce the stop-loss / take-profit exits.

    Exits only fire when there are no open orders, to avoid stacking
    duplicate closing orders.
    """
    #If we have a position check sell conditions
    if context.portfolio.positions[context.GDXJ].amount != 0 and context.bought:
        price = data.current(context.GDXJ, 'price')
        # Long take-profit: price rose above the target.
        if price > context.SellProfitPrice and len(get_open_orders()) == 0:
            order_target_percent(context.GDXJ, 0)
            context.bought = False
        # Long stop-loss: price fell below the stop.
        if price < context.SellLossPrice and len(get_open_orders()) == 0:
            order_target_percent(context.GDXJ, 0)
            context.bought = False
    if context.portfolio.positions[context.GDXJ].amount != 0 and context.sold:
        price = data.current(context.GDXJ, 'price')
        # Short take-profit: price dropped below the target.
        if price < context.SellProfitPrice and len(get_open_orders()) == 0:
            order_target_percent(context.GDXJ, 0)
            context.sold = False
        # Short stop-loss: price rose above the stop.
        if price > context.SellLossPrice and len(get_open_orders()) == 0:
            order_target_percent(context.GDXJ, 0)
            context.sold = False
| 39.46
| 132
| 0.655854
|
acfdd5c1fc31a8ed7d7a6b46076700e07e089487
| 2,558
|
py
|
Python
|
examples/ttgo_scroll.py
|
manahter/st7789_mpy
|
6a27c17e574fe34c450672f3181ac001f2b40ea1
|
[
"MIT"
] | 1
|
2020-12-19T12:30:11.000Z
|
2020-12-19T12:30:11.000Z
|
examples/ttgo_scroll.py
|
manahter/st7789_mpy
|
6a27c17e574fe34c450672f3181ac001f2b40ea1
|
[
"MIT"
] | null | null | null |
examples/ttgo_scroll.py
|
manahter/st7789_mpy
|
6a27c17e574fe34c450672f3181ac001f2b40ea1
|
[
"MIT"
] | 1
|
2020-12-19T12:28:30.000Z
|
2020-12-19T12:28:30.000Z
|
"""
ttgo_scroll.py
Smoothly scroll all characters of a font up the LILYGO® TTGO T-Display
screen. Fonts heights must be even multiples of the screen height
(i.e. 8 or 16 pixels high).
https://youtu.be/GQa-RzHLBak
"""
import utime
import random
from machine import Pin, SPI
import st7789
# choose a font
# import vga1_8x8 as font
# import vga2_8x8 as font
# import vga1_8x16 as font
# import vga2_8x16 as font
# import vga1_16x16 as font
# import vga1_bold_16x16 as font
# import vga2_16x16 as font
import vga2_bold_16x16 as font
def cycle(p):
    """Yield items from *p* forever, like ``itertools.cycle``.

    One-shot iterators (no ``len``) are drained into a cache on the first
    pass while being yielded; afterwards the cache (or the original sized
    container) is replayed indefinitely. An empty input yields nothing.
    """
    try:
        len(p)
    except TypeError:
        # One-shot iterator: yield while memoizing the items.
        saved = []
        for item in p:
            yield item
            saved.append(item)
        p = saved
    while p:
        yield from p
def main():
    """Scroll every character of the font up a TTGO T-Display (ST7789).

    Uses the panel's hardware vertical-scroll registers (vscrdef/vscsad)
    so only one new text line needs to be drawn per character height.
    Runs forever.
    """
    tft = st7789.ST7789(
        SPI(2, baudrate=30000000, polarity=1, phase=1, sck=Pin(18), mosi=Pin(19)),
        135,
        240,
        reset=Pin(23, Pin.OUT),
        cs=Pin(5, Pin.OUT),
        dc=Pin(16, Pin.OUT),
        backlight=Pin(4, Pin.OUT),
        rotation=0)
    # Endless palette of RGB565 foreground colors, one per text line.
    colors = cycle([0xe000, 0xece0, 0xe7e0, 0x5e0, 0x00d3, 0x7030])
    foreground = next(colors)
    background = st7789.BLACK
    tft.init()
    tft.fill(background)
    utime.sleep(1)
    height = tft.height()
    width = tft.width()
    last_line = height - font.HEIGHT
    tfa = 40    # top free area
    tfb = 40    # bottom free area
    tft.vscrdef(tfa, height, tfb)
    scroll = 0
    character = 0
    while True:
        # clear top line before scrolling off display
        tft.fill_rect(0, scroll, width, 1, background)
        # Write new line when we have scrolled the height of a character
        if scroll % font.HEIGHT == 0:
            line = (scroll + last_line) % height
            # write character hex value as a string
            tft.text(
                font,
                'x{:02x}'.format(character),
                16,
                line,
                foreground,
                background)
            # write character using a integer (could be > 0x7f)
            tft.text(
                font,
                character,
                90,
                line,
                foreground,
                background)
            # change color for next line
            foreground = next(colors)
            # next character with rollover at 256
            character += 1
            character %= 256
        # scroll the screen up 1 row
        tft.vscsad(scroll+tfa)
        scroll += 1
        scroll %= height
        utime.sleep(0.01)
# MicroPython entry point: runs immediately when the script is loaded.
main()
| 23.46789
| 82
| 0.550821
|
acfdd7e383cce189a692b4ee30fdc243aef55f50
| 1,039
|
py
|
Python
|
neural_networks/not_perceptron.py
|
parphane/udacity-self_driving_cars
|
069762a5320a109ebe4f7c23997631a2998a0076
|
[
"MIT"
] | null | null | null |
neural_networks/not_perceptron.py
|
parphane/udacity-self_driving_cars
|
069762a5320a109ebe4f7c23997631a2998a0076
|
[
"MIT"
] | null | null | null |
neural_networks/not_perceptron.py
|
parphane/udacity-self_driving_cars
|
069762a5320a109ebe4f7c23997631a2998a0076
|
[
"MIT"
] | null | null | null |
"""Udacity exercise: implement a NOT perceptron on the second input."""
import pandas as pd

# TODO: Set weight1, weight2, and bias
weight1 = 0
weight2 = -1
bias = 0

# DON'T CHANGE ANYTHING BELOW
# Inputs and outputs
test_inputs = [(0, 0), (0, 1), (1, 0), (1, 1)]
correct_outputs = [True, False, True, False]
outputs = []

# Generate and check output: one table row per test input.
for (in1, in2), expected in zip(test_inputs, correct_outputs):
    activation_input = weight1 * in1 + weight2 * in2 + bias
    prediction = int(activation_input >= 0)
    verdict = 'Yes' if prediction == expected else 'No'
    outputs.append([in1, in2, activation_input, prediction, verdict])

# Print output
num_wrong = sum(1 for row in outputs if row[4] == 'No')
output_frame = pd.DataFrame(outputs, columns=['Input 1', ' Input 2', ' Linear Combination', ' Activation Output', ' Is Correct'])
if not num_wrong:
    print('Nice! You got it all correct.\n')
else:
    print('You got {} wrong. Keep trying!\n'.format(num_wrong))
print(output_frame.to_string(index=False))
| 35.827586
| 133
| 0.699711
|
acfdd81f33edbb425fa645fbce05b51283b0c66d
| 800
|
py
|
Python
|
day02/main.py
|
kentquirk/aoc2020
|
9223bd7ac9d1b1c26a65b809206105177e2bda22
|
[
"Unlicense"
] | null | null | null |
day02/main.py
|
kentquirk/aoc2020
|
9223bd7ac9d1b1c26a65b809206105177e2bda22
|
[
"Unlicense"
] | null | null | null |
day02/main.py
|
kentquirk/aoc2020
|
9223bd7ac9d1b1c26a65b809206105177e2bda22
|
[
"Unlicense"
] | null | null | null |
#! /usr/bin/env python3
import re
import itertools
def validateA(d):
    """Policy A: the character's count must lie within [ix1, ix2] inclusive."""
    occurrences = d["password"].count(d["ch"])
    return int(d["ix1"]) <= occurrences <= int(d["ix2"])
def validateB(d):
    """Policy B: exactly one of the two 1-based positions holds the character.

    Slicing (instead of indexing) keeps out-of-range positions from raising.
    """
    pw = d["password"]
    matches = 0
    for key in ("ix1", "ix2"):
        pos = int(d[key]) - 1
        if pos < len(pw) and pw[pos:pos + 1] == d["ch"]:
            matches += 1
    # XOR of the two position checks == "exactly one matched".
    return matches == 1
if __name__ == "__main__":
    # Advent of Code 2020 day 2: each line looks like "1-3 a: abcde".
    f = open("./input.txt")
    lines = f.readlines()
    pat = re.compile(
        "(?P<ix1>[0-9]+)-(?P<ix2>[0-9]+) (?P<ch>[a-z]): (?P<password>[a-z]+)"
    )
    data = [pat.match(l).groupdict() for l in lines]
    # Part 1: count-based policy.
    valids = [x for x in data if validateA(x)]
    print(len(valids))
    # Part 2: position-based policy.
    valids2 = [x for x in data if validateB(x)]
    print(len(valids2))
| 27.586207
| 78
| 0.53625
|
acfdd83330f479f6ac5fdde4386e74c7b3116e12
| 3,444
|
py
|
Python
|
qualifier/qualifier.py
|
Tobi-De/cj8-qualifier
|
7454ff5e8f1d1365cd232219677299935653c8c2
|
[
"MIT"
] | null | null | null |
qualifier/qualifier.py
|
Tobi-De/cj8-qualifier
|
7454ff5e8f1d1365cd232219677299935653c8c2
|
[
"MIT"
] | null | null | null |
qualifier/qualifier.py
|
Tobi-De/cj8-qualifier
|
7454ff5e8f1d1365cd232219677299935653c8c2
|
[
"MIT"
] | null | null | null |
from typing import Any, List, Optional
# Padding characters added around each cell's content (one space per side).
SPACE_AROUND = 2
def space_content(value: Any, max_space_to_fill: int, centered: bool):
    """Pad *value* to the full cell width (content width + SPACE_AROUND)."""
    text = str(value)
    if centered:
        return text.center(max_space_to_fill + SPACE_AROUND)
    # Left-aligned: one space either side of the left-justified text.
    return " " + text.ljust(max_space_to_fill) + " "
def build_content(row: List[Any], max_sizes: List[int], centered: bool) -> str:
    """Render one table row, cells separated and flanked by box-drawing bars."""
    pieces = ["│"]
    for column, value in enumerate(row):
        pieces.append(space_content(value, max_sizes[column], centered))
        pieces.append("│")
    return "".join(pieces)
def build_horizontal_border(
    max_sizes: List[int], join_char: str, start_char: str, end_char: str
) -> str:
    """Build one horizontal border line for the table.

    The line is first drawn solid (start/─…─/end), then ``join_char`` is
    spliced in at each column boundary. ``max_sizes`` holds the content
    width of each column.
    """
    nbr_columns = len(max_sizes)
    nbr_separators = nbr_columns + 1
    table_length = sum(max_sizes) + (SPACE_AROUND * nbr_columns) + nbr_separators
    # 2 is for the left and right edges of the table
    space_to_fill = table_length - 2
    border = start_char + "─" * space_to_fill + end_char
    # add the middle join if nbr_columns > 1
    if nbr_columns > 1:
        # Offset accounts for the left edge plus the padding of each cell.
        extra_chars = SPACE_AROUND + 1
        for index, ln in enumerate(max_sizes):
            # to skip the last border
            if index < (nbr_columns - 1):
                join_pos = sum(max_sizes[: index + 1]) + extra_chars
                # Replace the dash at the boundary with the join character.
                border = border[:join_pos] + join_char + border[join_pos + 1 :]
                extra_chars += SPACE_AROUND + 1
    return border
def build_header(labels: List[Any], max_sizes: List[int], centered: bool) -> str:
    """Render the table's top border followed by the label row."""
    top = build_horizontal_border(
        max_sizes, join_char="┬", start_char="┌", end_char="┐"
    )
    label_row = build_content(row=labels, max_sizes=max_sizes, centered=centered)
    return top + "\n" + label_row
def make_table(
    rows: List[List[Any]], labels: Optional[List[Any]] = None, centered: bool = False
) -> str:
    """
    :param rows: 2D list containing objects that have a single-line representation (via `str`).
    All rows must be of the same length.
    :param labels: List containing the column labels. If present, the length must equal to that of each row.
    :param centered: If the items should be aligned to the center, else they are left aligned.
    :return: A table representing the rows passed in.
    """
    has_header = bool(labels)
    # initialize max_sizes with the length of all elements
    # of the first row if the labels are present, use them instead
    if has_header:
        max_sizes = [len(str(el)) for el in labels]
    else:
        max_sizes = [len(str(el)) for el in rows[0]]
    # for each item in a row, replace at its index its length value if
    # it is greater than the current value in max_sizes
    for row in rows:
        for index, el in enumerate(row):
            if max_sizes[index] < len(str(el)):
                max_sizes[index] = len(str(el))
    content = "\n".join([build_content(row, max_sizes, centered) for row in rows])
    bottom_border = "\n" + build_horizontal_border(
        max_sizes, join_char="┴", start_char="└", end_char="┘"
    )
    # With a header, the border directly above the body is the separator
    # under the label row (├┼┤); otherwise it is the table's top (┌┬┐).
    kwargs = (
        {"join_char": "┼", "start_char": "├", "end_char": "┤"}
        if has_header
        else {"join_char": "┬", "start_char": "┌", "end_char": "┐"}
    )
    top_border = build_horizontal_border(max_sizes, **kwargs) + "\n"
    content = top_border + content + bottom_border
    # The header block (┌┬┐ border + labels) is prepended last.
    if has_header:
        content = build_header(labels, max_sizes, centered) + "\n" + content
    return content
| 35.505155
| 108
| 0.639954
|
acfdd86070c4a71e8f5b20b172587c71f8e5ce37
| 904
|
py
|
Python
|
digsby/src/tests/testgui/uberdemos/UberProgressBarDemo.py
|
ifwe/digsby
|
f5fe00244744aa131e07f09348d10563f3d8fa99
|
[
"Python-2.0"
] | 35
|
2015-08-15T14:32:38.000Z
|
2021-12-09T16:21:26.000Z
|
digsby/src/tests/testgui/uberdemos/UberProgressBarDemo.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 4
|
2015-09-12T10:42:57.000Z
|
2017-02-27T04:05:51.000Z
|
digsby/src/tests/testgui/uberdemos/UberProgressBarDemo.py
|
niterain/digsby
|
16a62c7df1018a49eaa8151c0f8b881c7e252949
|
[
"Python-2.0"
] | 15
|
2015-07-10T23:58:07.000Z
|
2022-01-23T22:16:33.000Z
|
import wx
from gui.uberwidgets.UberProgressBar import UberProgressBar
from gui import skin as skincore
class F(wx.Frame):
    """Demo frame (Python 2 / wxPython): a slider drives an UberProgressBar."""
    def __init__(self):
        wx.Frame.__init__(self, None, wx.NewId(), "Progress Bar sampler",(0,0),(600,250))
        # Any slider event raised within the frame updates the bar.
        self.Bind(wx.EVT_SLIDER, self.on_slide)
        self.content = wx.BoxSizer(wx.VERTICAL)
        # 0..100 progress bar using the 'progressbar' skin element.
        self.g = UberProgressBar(self,wx.NewId(),100,'progressbar',showlabel=True,size=(300,20))
        self.s = wx.Slider(self, -1, 0, 0, 100, (0,0), (300, 50))
        self.content.Add(self.g,0,wx.ALIGN_CENTER_HORIZONTAL)
        self.content.Add(self.s,0,wx.ALIGN_CENTER_HORIZONTAL)
        self.SetSizer(self.content)
    def on_slide(self,e):
        # Mirror the slider position into the progress bar and log it.
        self.g.SetValue(self.s.GetValue())
        print self.s.GetValue()
if __name__=='__main__':
    # Demo entry point: initialize the skin system from the repo's res
    # directory, then show the frame and run the wx event loop.
    a = wx.PySimpleApp( 0 )
    skincore.skininit('../../../../res')
    f=F()
    f.Show(True)
    a.MainLoop()
| 27.393939
| 96
| 0.639381
|
acfdd906a42cf7a8e42d1f120c339a30de881969
| 21,896
|
py
|
Python
|
pyNastran/bdf/cards/base_card.py
|
Gypaets/pyNastran
|
33372e4b4b2a2b9cd93824235eaf884772e67269
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/bdf/cards/base_card.py
|
Gypaets/pyNastran
|
33372e4b4b2a2b9cd93824235eaf884772e67269
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/bdf/cards/base_card.py
|
Gypaets/pyNastran
|
33372e4b4b2a2b9cd93824235eaf884772e67269
|
[
"BSD-3-Clause"
] | null | null | null |
"""
defines:
- BaseCard()
- Element()
- Property()
- Material()
- word, num = break_word_by_trailing_integer(pname_fid)
- word, num = break_word_by_trailing_parentheses_integer_ab(pname_fid)
"""
from __future__ import annotations
from abc import abstractmethod, abstractproperty, abstractclassmethod
from typing import List, Tuple, Union, Optional, Any, TYPE_CHECKING
import numpy as np
#from numpy import nan, empty, unique
from pyNastran.bdf.bdf_interface.bdf_card import BDFCard
from pyNastran.utils import object_attributes, object_methods
from pyNastran.utils.numpy_utils import integer_types
from pyNastran.bdf.field_writer import print_card, print_card_8, print_card_16, print_card_double
from pyNastran.bdf.field_writer_8 import is_same
from pyNastran.utils import deprecated
from pyNastran.bdf.cards.expand_card import expand_thru, expand_thru_by
if TYPE_CHECKING: # pragma: no cover
from pyNastran.bdf.bdf import BDF
#from abc import ABC, abstractmethod
def write_card(comment: str, card: List[Union[int, float, str, None]],
               size: int, is_double: bool) -> str:
    """Render *card* fields as a bulk-data string, prefixed by *comment*.

    Falls back from 8- to 16-character fields when a value cannot be
    represented in small-field format; ``is_double`` selects the
    16-character double-precision writer.
    """
    if size == 8:
        try:
            return comment + print_card_8(card)
        except RuntimeError:
            # A field was too wide/precise for small-field format.
            return comment + print_card_16(card)
    elif is_double:
        return comment + print_card_double(card)
    return comment + print_card_16(card)
class BaseCard:
    """
    Defines a series of base methods for every card class
    (e.g., GRID, CTRIA3) including:
    - deepcopy()
    - get_stats()
    - validate()
    - object_attributes(mode='public', keys_to_skip=None)
    - object_methods(self, mode='public', keys_to_skip=None)
    - comment
    - update_field(self, n, value)
    """
    def __init__(self) -> None:
        pass
        #ABC.__init__(self)
    #@abstractproperty
    #def _field_map(self) -> str:
        #return ''
    @abstractproperty
    def type(self) -> str:
        """card name (e.g. 'GRID'); overridden by every subclass"""
        return ''
    @abstractmethod
    def raw_fields(self): # pragma: no cover
        """full field list including defaults; overridden by subclasses"""
        return []
    @abstractclassmethod
    def add_card(self, card, comment=''): # pragma: no cover
        """builds a card object from a BDFCard; overridden by subclasses"""
        return BaseCard()
    def __deepcopy__(self, memo_dict):
        # Rebuild the card from its raw fields so the copy carries no
        # cross-references to the original model.
        #raw_fields = self.repr_fields()
        raw_fields = self.raw_fields()
        card = BDFCard(raw_fields)
        return self.add_card(card, comment=self.comment)
    def get_stats(self) -> str:
        """Prints out an easy to read summary of the card"""
        msg = '---%s---\n' % self.type
        for name in sorted(self.object_attributes()):
            #if short and '_ref' in name:
                #continue
            value = getattr(self, name)
            msg += ' %-6s : %r\n' % (name, value)
        return msg
    def deprecated(self, old_name: str, new_name: str, deprecated_version: str) -> None:
        """deprecates methods"""
        deprecated(old_name, new_name, deprecated_version, levels=[0, 1, 2])
    def validate(self) -> None:
        """card checking method that should be overwritten"""
        pass
    def object_attributes(self, mode: str='public',
                          keys_to_skip: Optional[List[str]]=None,
                          filter_properties: bool=False) -> List[str]:
        """.. seealso:: `pyNastran.utils.object_attributes(...)`"""
        if keys_to_skip is None:
            keys_to_skip = []
        my_keys_to_skip = [] # type: List[str]
        return object_attributes(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip,
                                 filter_properties=filter_properties)
    def object_methods(self, mode: str='public',
                       keys_to_skip: Optional[List[str]]=None) -> List[str]:
        """.. seealso:: `pyNastran.utils.object_methods(...)`"""
        if keys_to_skip is None:
            keys_to_skip = []
        my_keys_to_skip = [] # type: List[str]
        return object_methods(self, mode=mode, keys_to_skip=keys_to_skip+my_keys_to_skip)
    @property
    def comment(self) -> str:
        """accesses the comment ('' when none has been set)"""
        # just for testing
        #self.deprecated('comment()', 'comment2()', '0.7')
        if hasattr(self, '_comment'):
            return '%s' % self._comment
        return ''
    @comment.setter
    def comment(self, new_comment: str) -> None:
        """sets a comment (normalized to $-prefixed lines)"""
        #comment = new_comment.rstrip()
        #self._comment = comment + '\n' if comment else ''
        self._comment = _format_comment(new_comment)
    def _test_update_fields(self) -> None:
        """testing helper: pokes update_field until it runs off the card"""
        n = 1
        while 1:
            try:
                self.update_field(n, 1.0) # dummy updating the field
            except IndexError:
                return
            except KeyError:
                return
            # NOTE(review): n is never incremented in this loop, so field 1
            # is updated repeatedly until an exception ends it — this looks
            # like a missing `n += 1`; confirm against upstream.
    def update_field(self, n: int, value: Optional[Union[int, float, str]]) -> None:
        """
        Updates a field based on it's field number.

        Parameters
        ----------
        n : int
            the field number
        value : int/float/str/None
            the value to update the field to

        .. note::
            This is dynamic if the card length changes.

        update_field can be used as follows to change the z coordinate
        of a node::

        >>> nid = 1
        >>> node = model.nodes[nid]
        >>> node.update_field(3, 0.1)
        """
        try:
            key_name = self._field_map[n]
            setattr(self, key_name, value)
        except KeyError:
            # not in the static map; let the subclass handle dynamic fields
            self._update_field_helper(n, value)
    def _update_field_helper(self, n: int, value: Optional[Union[int, float, str]]):
        """
        dynamic method for non-standard attributes
        (e.g., node.update_field(3, 0.1) to update z)
        """
        msg = '%s has not overwritten _update_field_helper; out of range' % self.__class__.__name__
        raise IndexError(msg)
    def _get_field_helper(self, n: int):
        """dynamic method for non-standard attributes (e.g., node.get_field(3, 0.1) to get z)"""
        msg = '%s has not overwritten _get_field_helper; out of range' % self.__class__.__name__
        raise IndexError(msg)
    def get_field(self, n: int) -> Optional[Union[int, float, str]]:
        """
        Gets a field based on it's field number

        Parameters
        ----------
        n : int
            the field number

        Returns
        -------
        value : int/float/str/None
            the value of the field

        .. code-block:: python

           nid = 1
           node = model.nodes[nid]
           # ['GRID', nid, cp, x, y, z]
           z = node.get_field(5)

        """
        try:
            key_name = self._field_map[n]
            value = getattr(self, key_name)
        except KeyError:
            # not in the static map; let the subclass handle dynamic fields
            value = self._get_field_helper(n)
        return value
    def _verify(self, xref: bool) -> None:
        """
        Verifies all methods for this object work

        Parameters
        ----------
        xref : bool
            has this model been cross referenced
        """
        print('# skipping _verify (type=%s) because _verify is '
              'not implemented' % self.type)
    def __eq__(self, card: BDFCard) -> bool:
        """
        Enables functions like:

        .. code-block:: python

           >>> GRID(nid=1, ...) === GRID(nid=1, ...)
           True
           >>> GRID(nid=1, ...) === GRID(nid=2, ...)
           False
           >>> GRID(nid=1, ...) === CQUAD4(eid=1, ...)
           False

        """
        if not isinstance(card, self.__class__):
            return False
        if self.type != card.type:
            return False
        fields1 = self.raw_fields()
        fields2 = card.raw_fields()
        return self._is_same_fields(fields1, fields2)
    def _is_same_fields(self,
                        fields1: List[Union[int, float, str, None]],
                        fields2: List[Union[int, float, str, None]]) -> bool:
        """field-wise comparison helper for __eq__"""
        for (field1, field2) in zip(fields1, fields2):
            if not is_same(field1, field2):
                return False
        return True
    def _is_same_fields_long(self, fields1, fields2): # pragma: no cover
        """helper for __eq__; returns the per-field comparison results"""
        out = []
        for (field1, field2) in zip(fields1, fields2):
            is_samei = is_same(field1, field2)
            out.append(is_samei)
        return out
    def print_raw_card(self, size: int=8, is_double: bool=False) -> str:
        """A card's raw fields include all defaults for all fields"""
        list_fields = self.raw_fields()
        return self.comment + print_card(list_fields, size=size, is_double=is_double)
    def repr_fields(self) -> List[Union[int, float, str, None]]:
        """
        Gets the fields in their simplified form

        Returns
        -------
        fields : List[varies]
            the fields that define the card
        """
        return self.raw_fields()
    def print_card(self, size: int=8, is_double: bool=False) -> str:
        """prints the card in 8/16/16-double format"""
        list_fields = self.repr_fields()
        return self.comment + print_card(list_fields, size=size, is_double=is_double)
    def print_repr_card(self, size: int=8, is_double: bool=False) -> str:
        """prints the card in 8/16/16-double format"""
        list_fields = self.repr_fields()
        return self.comment + print_card(list_fields, size=size, is_double=is_double)
    def __repr__(self) -> str:
        """
        Prints a card in the simplest way possible
        (default values are left blank).
        """
        comment = self.comment
        list_fields = self.repr_fields()
        try:
            return comment + print_card(list_fields, size=8)
        except Exception:
            # small-field failed; retry with 16-char fields before giving up
            try:
                return comment + print_card(list_fields, size=16)
            except Exception:
                print('problem printing %s card' % self.type)
                print("list_fields = ", list_fields)
                raise
    def rstrip(self) -> str:
        """str(card) without trailing whitespace; drops the comment if it
        cannot be encoded"""
        try:
            msg = '%s' % str(self)
        except UnicodeEncodeError:
            # retry without the (non-encodable) comment attached
            comment = self.comment
            self.comment = ''
            msg = '$ dropped comment due to unicode error\n%s' % str(self)
            self.comment = comment
        return msg.rstrip()
    def write_card(self, size: int=8, is_double: bool=False) -> str:
        """
        Writes the card with the specified width and precision

        Parameters
        ----------
        size : int (default=8)
            size of the field; {8, 16}
        is_double : bool (default=False)
            is this card double precision

        Returns
        -------
        msg : str
            the string representation of the card
        """
        raise NotImplementedError('%s has not overwritten write_card' % self.__class__.__name__)
    def write_card_16(self, is_double: bool=False) -> str:
        """writes the card in 16-character field format

        NOTE: ``is_double`` is accepted but ignored; single precision
        is always used here.
        """
        fields = self.repr_fields()
        return print_card(fields, size=16, is_double=False)
class Property(BaseCard):
    """Base Property Class"""
    def __init__(self) -> None:
        """dummy init"""
        pass
    def Pid(self) -> int:
        """
        returns the property ID of an property

        Returns
        -------
        pid : int
            the Property ID
        """
        return self.pid
    def Mid(self) -> int:
        """
        returns the material ID of an element

        Returns
        -------
        mid : int
            the Material ID
        """
        # prefer the cross-referenced material when one has been linked
        if self.mid_ref is None:
            return self.mid
        return self.mid_ref.mid
    #@abstractmethod
    #def cross_reference(self, model: BDF) -> None:
        #pass
    #@abstractmethod
    #def uncross_reference(self) -> None:
        #pass
    def write_card_8(self) -> str:
        """writes the card in 8-character field format"""
        return self.write_card()
    def write_card_16(self, is_double: bool=False) -> str:
        """writes the card; field size is delegated to write_card()"""
        return self.write_card()
class Material(BaseCard):
    """Base Material Class"""
    def __init__(self) -> None:
        """dummy init"""
        BaseCard.__init__(self)
    @property
    def TRef(self) -> float: # pragma: no cover
        """reference temperature; raises AttributeError when this material
        type has no ``tref`` attribute"""
        if not hasattr(self, 'tref'):
            raise AttributeError('%r object has no attribute tref' % self.type)
        return self.tref
    @TRef.setter
    def TRef(self, tref: float) -> None: # pragma: no cover
        """sets the self.tref attribute"""
        if not hasattr(self, 'tref'):
            raise AttributeError('%r object has no attribute tref' % self.type)
        self.tref = tref
    def cross_reference(self, model: BDF) -> None:
        """dummy cross reference method for a Material"""
        pass
    def Mid(self) -> Any:
        """
        returns the material ID of an element

        Returns
        -------
        mid : int
            the Material ID
        """
        return self.mid
class Element(BaseCard):
    """defines the Element class"""
    pid = 0 # CONM2, rigid
    def __init__(self) -> None:
        """dummy init"""
        BaseCard.__init__(self)
        #: the list of node IDs for an element (default=None)
        #self.nodes = None
    def verify_unique_node_ids(self) -> None:
        """asserts that no node ID appears twice on this element"""
        node_ids = self.node_ids
        self._verify_unique_node_ids(node_ids)
    def _verify_unique_node_ids(self, required_node_ids, non_required_node_ids=None) -> None:
        # type: (Any, Any) -> None
        if required_node_ids:
            if non_required_node_ids:
                raise NotImplementedError('only required nodes implemented')
            else:
                # duplicate ids shrink the unique set; that's an error
                urnids = np.unique(required_node_ids)
                n_unique_node_ids = len(urnids)
                n_node_ids = len(required_node_ids)
                if n_unique_node_ids != n_node_ids:
                    msg = 'nunique_node_ids=%s nnode_ids=%s' % (n_unique_node_ids, n_node_ids)
                    raise RuntimeError(msg)
        else:
            raise NotImplementedError('only required nodes implemented')
    def Pid(self) -> int:
        """
        Gets the Property ID of an element

        Returns
        -------
        pid : int
            the Property ID
        """
        # prefer the cross-referenced property when one has been linked
        if self.pid_ref is None:
            return self.pid
        return self.pid_ref.pid
    def get_node_positions(self, nodes: Any=None) -> np.ndarray:
        """returns the positions of multiple node objects as an (nnodes, 3)
        float64 array; rows for None nodes stay NaN"""
        if nodes is None:
            nodes = self.nodes_ref
        nnodes = len(nodes)
        positions = np.empty((nnodes, 3), dtype='float64')
        positions.fill(np.nan)
        for i, node in enumerate(nodes):
            if isinstance(node, int):
                # an un-cross-referenced id slipped in; fail loudly
                raise TypeError("node=%s; type=%s must be a Node\n%s" % (
                    str(node), type(node), self.get_stats()))
            if node is not None:
                positions[i, :] = node.get_position()
        return positions
    def get_node_positions_no_xref(self, model: BDF, nodes: List[Any]=None) -> np.ndarray:
        """returns the positions of multiple node objects, looking each id
        up through *model* instead of stored cross-references"""
        if not nodes:
            nodes = self.nodes
        nnodes = len(nodes)
        positions = np.empty((nnodes, 3), dtype='float64')
        positions.fill(np.nan)
        for i, nid in enumerate(nodes):
            if nid is not None:
                node = model.Node(nid)
                positions[i, :] = node.get_position_no_xref(model)
        return positions
    def _node_ids(self, nodes: Optional[List[Any]]=None,
                  allow_empty_nodes: bool=False, msg: str='') -> List[int]:
        """returns nodeIDs for repr functions"""
        return _node_ids(self, nodes=nodes, allow_empty_nodes=allow_empty_nodes, msg=msg)
    def prepare_node_ids(self, nids: List[int], allow_empty_nodes: bool=False) -> List[int]:
        """Verifies all node IDs exist and that they're integers"""
        #self.nodes = nids
        nids = self.validate_node_ids(nids, allow_empty_nodes)
        return nids
    def validate_node_ids(self, nodes: List[int], allow_empty_nodes: bool=False) -> List[Optional[int]]:
        """returns the node list; with allow_empty_nodes, 0-ids become None"""
        if allow_empty_nodes:
            # verify we have nodes
            if len(nodes) == 0:
                msg = '%s requires at least one node id be specified; node_ids=%s' % (
                    self.type, nodes)
                raise ValueError(msg)
            #unique_nodes = unique(nodes)
            #if len(nodes) != len(unique_nodes):
                #msg = '%s requires that all node ids be unique; node_ids=%s' % (self.type, nodes)
                #raise IndexError(msg)
            # remove 0 nodes
            nodes2 = [nid if nid != 0 else None
                      for nid in nodes]
        else:
            nodes2 = nodes
            #unique_nodes = unique(self.nodes)
            #if len(self.nodes) != len(unique_nodes):
                #msg = '%s requires that all node ids be unique; node_ids=%s' % (
                    #self.type, self.nodes)
                #raise IndexError(msg)
        #nodes3 = []
        #for nid in nodes:
            #if isinstance(nid, integer_types):
                #nodes3.append(nid)
            #elif nid is None and allow_empty_nodes or np.isnan(nid):
                #nodes3.append(None)
            #else: # string???
                #msg = 'this element may have missing nodes...\n'
                #msg += 'nids=%s allow_empty_nodes=False;\ntype(nid)=%s' % (nodes, type(nid))
                #raise RuntimeError(msg)
        #print('nodes', nodes)
        #print('nodes2', nodes2)
        #print('nodes3 =', nodes3)
        #self.nodes = nodes2
        return nodes2
def _format_comment(comment: str) -> str:
r"""Format a card comment to precede the card using
nastran-compatible comment character $. The comment
string can have multiple lines specified as linebreaks.
Empty comments or just spaces are returned as an empty string.
Examples
--------
>>> _format_comment('a comment\ntaking two lines')
$a comment
$taking two lines
>>> _format_comment('')
<empty string>
>>> _format_comment(' ')
<empty string>
>>> _format_comment('$ a comment within a comment looks weird')
'$$ a comment within a comment looks weird'
>>> _format_comment('no trailing whitespace ')
$no trailing extra whitespace
"""
if comment.strip() == '': # deals with a bunch of spaces
return ''
return ''.join(['${}\n'.format(comment_line)
for comment_line in comment.rstrip().split('\n')])
def _node_ids(card, nodes=None, allow_empty_nodes: bool=False, msg: str='') -> Any:
    """Extracts integer node ids from a mix of ints and Node objects.

    With ``allow_empty_nodes``, 0/None entries come back as None;
    otherwise 0 ids are rejected. On failure, prints diagnostic context
    (including *msg*) and re-raises.
    """
    try:
        if not nodes:
            nodes = card.nodes
            assert nodes is not None, card.__dict__
        if allow_empty_nodes:
            nodes2 = []
            for node in nodes:
                if node == 0 or node is None:
                    nodes2.append(None)
                elif isinstance(node, integer_types):
                    nodes2.append(node)
                else:
                    # cross-referenced Node object
                    nodes2.append(node.nid)
            assert nodes2 is not None, str(card)
            return nodes2
        try:
            node_ids = []
            for node in nodes:
                if isinstance(node, integer_types):
                    node_ids.append(node)
                else:
                    node_ids.append(node.nid)
            #if isinstance(nodes[0], integer_types):
                #node_ids = [node for node in nodes]
            #else:
                #node_ids = [node.nid for node in nodes]
        except Exception:
            print('type=%s nodes=%s allow_empty_nodes=%s\nmsg=%s' % (
                card.type, nodes, allow_empty_nodes, msg))
            raise
        assert 0 not in node_ids, 'node_ids = %s' % node_ids
        assert node_ids is not None, str(card)
        return node_ids
    except Exception:
        print('type=%s nodes=%s allow_empty_nodes=%s\nmsg=%s' % (
            card.type, nodes, allow_empty_nodes, msg))
        raise
    # NOTE(review): unreachable — every path above returns or raises.
    raise RuntimeError('huh...')
def break_word_by_trailing_integer(pname_fid: str) -> Tuple[str, str]:
    """Split a name with a trailing integer into its two parts.

    Parameters
    ----------
    pname_fid : str
        the DVPRELx term (e.g., T11, THETA42)

    Returns
    -------
    word : str
        the leading non-numeric part
    num : str
        the trailing digits (kept as a string)

    Raises
    ------
    SyntaxError
        if *pname_fid* does not end in at least one digit

    Examples
    --------
    >>> break_word_by_trailing_integer('T11')
    ('T', '11')
    >>> break_word_by_trailing_integer('THETA11')
    ('THETA', '11')
    """
    trailing_digits = []
    count = 0
    # walk backwards collecting digits until a non-digit is hit
    for count, character in enumerate(reversed(pname_fid)):
        if not character.isdigit():
            break
        trailing_digits.append(character)
    num = ''.join(reversed(trailing_digits))
    if not num:
        msg = ("pname_fid=%r does not follow the form 'T1', 'T11', 'THETA42' "
               "(letters and a number)" % pname_fid)
        raise SyntaxError(msg)
    word = pname_fid[:-count]
    assert len(word)+len(num) == len(pname_fid), 'word=%r num=%r pname_fid=%r' % (word, num, pname_fid)
    return word, num
def break_word_by_trailing_parentheses_integer_ab(pname_fid: str) -> Tuple[str, str]:
    """
    Splits a word whose parenthesized suffix is an integer or the letter A/B

    Parameters
    ----------
    pname_fid : str
        the DVPRELx term; A(11), NSM(5), NSM(B)

    Returns
    -------
    word : str
        the value not in parentheses
    value : int / str
        the value inside the parentheses; converted to int unless it is
        'A' or 'B'

    Examples
    --------
    >>> break_word_by_trailing_parentheses_integer_ab('A(11)')
    ('A', 11)
    >>> break_word_by_trailing_parentheses_integer_ab('NSM(B)')
    ('NSM', 'B')
    """
    assert pname_fid.endswith(')'), pname_fid
    word, num = pname_fid[:-1].split('(')
    # 'A'/'B' are symbolic end references; everything else must be an integer
    return (word, num) if num in ('A', 'B') else (word, int(num))
| 31.505036
| 103
| 0.567684
|
acfdd9164174236132befbbd93e7ad5207f2e3e8
| 568
|
py
|
Python
|
src/entities/__init__.py
|
michaeltcoelho/python-ebi
|
45f83328faad4345e937b8518ee1dd771fdde1a8
|
[
"MIT"
] | 3
|
2018-04-03T17:07:18.000Z
|
2022-02-13T06:28:53.000Z
|
src/entities/__init__.py
|
michaeltcoelho/python-ebi
|
45f83328faad4345e937b8518ee1dd771fdde1a8
|
[
"MIT"
] | null | null | null |
src/entities/__init__.py
|
michaeltcoelho/python-ebi
|
45f83328faad4345e937b8518ee1dd771fdde1a8
|
[
"MIT"
] | null | null | null |
import abc
class UnitOfWork(abc.ABC):
    """Abstract transaction boundary.

    Implementations commit or roll back a unit of work and expose the
    repositories bound to it.
    """

    @abc.abstractmethod
    def commit(self):
        """Persist all changes made within this unit of work."""

    @abc.abstractmethod
    def rollback(self):
        """Discard all changes made within this unit of work."""

    @property
    @abc.abstractmethod
    def repositories(self):
        """Return the repository container bound to this unit of work."""
class Repository(abc.ABC):
    """Base class for repositories; holds the session used for data access."""

    def __init__(self, session):
        # Session shared with the owning unit of work.
        self.session = session
class RepositoryContainer:
    """Builds repositories on demand, all bound to a single session."""

    def __init__(self, session):
        self.session = session

    @property
    def boards(self):
        # Deferred import (presumably to avoid a circular import at module
        # load time — confirm); a fresh repository is built on every access.
        from src.entities.boards import BoardRepository
        return BoardRepository(self.session)
| 16.228571
| 55
| 0.647887
|
acfdd9952397a98feebf674e7962439fbf029b34
| 342
|
py
|
Python
|
ote/ote/modules/trainers/__init__.py
|
dqawami/openvino_training_extensions
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
[
"Apache-2.0"
] | 256
|
2020-09-09T03:27:57.000Z
|
2022-03-30T10:06:06.000Z
|
ote/ote/modules/trainers/__init__.py
|
dqawami/openvino_training_extensions
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
[
"Apache-2.0"
] | 604
|
2020-09-08T12:29:49.000Z
|
2022-03-31T21:51:08.000Z
|
ote/ote/modules/trainers/__init__.py
|
dqawami/openvino_training_extensions
|
dddda1dfd651eaae2d59cecda84275b1b03bd0ad
|
[
"Apache-2.0"
] | 160
|
2020-09-09T14:06:07.000Z
|
2022-03-30T14:50:48.000Z
|
from .base import BaseTrainer
from .instance_segmentation import InstanceSegmentationTrainer
from .mmaction import MMActionTrainer
from .mmdetection import MMDetectionTrainer
from .reid import ReidTrainer
# Public API of the trainers package (the names re-exported by
# `from ote.modules.trainers import *`).
__all__ = [
    'BaseTrainer',
    'MMActionTrainer',
    'MMDetectionTrainer',
    'InstanceSegmentationTrainer',
    'ReidTrainer',
]
| 24.428571
| 62
| 0.78655
|
acfdd9a4b97f88264c249c8239f3f61ca180b507
| 274
|
py
|
Python
|
texaslan/users/apps.py
|
hsmeans/texaslan.org
|
a981e7835381e77320e39536a619981ba9d03451
|
[
"MIT"
] | 2
|
2018-02-06T06:24:03.000Z
|
2018-03-20T03:32:13.000Z
|
texaslan/users/apps.py
|
hsmeans/texaslan.org
|
a981e7835381e77320e39536a619981ba9d03451
|
[
"MIT"
] | 32
|
2017-02-21T20:01:43.000Z
|
2020-02-08T21:52:16.000Z
|
texaslan/users/apps.py
|
hsmeans/texaslan.org
|
a981e7835381e77320e39536a619981ba9d03451
|
[
"MIT"
] | 6
|
2017-03-21T21:16:40.000Z
|
2020-02-08T20:46:20.000Z
|
from django.apps import AppConfig
class UsersConfig(AppConfig):
    """Django app configuration for the ``texaslan.users`` application."""

    name = 'texaslan.users'
    verbose_name = "Users"

    def ready(self):
        """Startup hook.

        Override this to put in:
            Users system checks
            Users signal registration
        """
        pass
| 19.571429
| 37
| 0.594891
|
acfddbccfd1c02ef6e7ba605df7d6d49b28ae8ea
| 832
|
py
|
Python
|
ScikitLearn/NN.py
|
AutuanLiu/Machine-Learning-on-docker
|
00eb7211a3a40a9da02114923647dfd6ac24f138
|
[
"Apache-2.0"
] | 11
|
2018-03-18T11:06:59.000Z
|
2020-02-23T03:24:43.000Z
|
ScikitLearn/NN.py
|
AutuanLiu/Machine-Learning-on-docker
|
00eb7211a3a40a9da02114923647dfd6ac24f138
|
[
"Apache-2.0"
] | null | null | null |
ScikitLearn/NN.py
|
AutuanLiu/Machine-Learning-on-docker
|
00eb7211a3a40a9da02114923647dfd6ac24f138
|
[
"Apache-2.0"
] | 4
|
2018-03-28T13:04:26.000Z
|
2019-05-29T05:49:52.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name:NN
Description : nearest-neighbor demos (scikit-learn)
Email : autuanliu@163.com
Date:2017/12/22
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.neighbors import NearestNeighbors, KDTree

# Unsupervised: the simple task of finding each sample's nearest neighbors
# within a data set.
samples = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
model = NearestNeighbors(n_neighbors=2, algorithm='auto').fit(samples)
distances, indices = model.kneighbors(samples)
print(indices, distances)
# Sparse graph marking the connections between neighboring points.
print(model.kneighbors_graph(samples).toarray())
plt.plot(samples, 'o')
plt.show()
# KD tree
tree = KDTree(samples, leaf_size=30, metric='euclidean')
neighbor_idx = tree.query(samples, k=2, return_distance=False)
print(neighbor_idx)
# KNN classification (sketch only):
# clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights)
# clf.fit(X, y)
| 23.771429
| 68
| 0.644231
|
acfddc5adf1f7fc37a1299e0a17435846f69ce37
| 5,071
|
py
|
Python
|
django_backend/wallet/views.py
|
emilioivan12/Bank-Online-Django-React
|
f25cabbebd47baa55cc1ebb135c49b766aa3303a
|
[
"MIT"
] | 1
|
2021-04-20T04:21:10.000Z
|
2021-04-20T04:21:10.000Z
|
django_backend/wallet/views.py
|
emilioivan12/Bank-Online-Django-React
|
f25cabbebd47baa55cc1ebb135c49b766aa3303a
|
[
"MIT"
] | null | null | null |
django_backend/wallet/views.py
|
emilioivan12/Bank-Online-Django-React
|
f25cabbebd47baa55cc1ebb135c49b766aa3303a
|
[
"MIT"
] | null | null | null |
from rest_framework import generics
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .models import Wallet, Currency, Transaction
from .serializers import WalletSerializer, CurrencySerializer, TransactionSerializer
from django.http import JsonResponse
from django.db.models import Q
from rest_framework.permissions import SAFE_METHODS, IsAuthenticated, IsAuthenticatedOrReadOnly, BasePermission, IsAdminUser, DjangoModelPermissions
class WalletCreateDetail(generics.ListCreateAPIView):
    """List the authenticated user's wallets, or create one owned by them."""
    permission_classes = [IsAuthenticated]
    serializer_class=WalletSerializer
    queryset=Wallet.objects.all()
    def get(self, request, format=None):
        # Removed an unused `content` dict that was built and never referenced.
        # Only return wallets belonging to the requesting user.
        queryset = Wallet.objects.filter(owner=request.user.id)
        serializer = WalletSerializer(queryset, many=True)
        return Response(serializer.data)
    def perform_create(self, serializer):
        # Renamed the parameter: it previously shadowed the imported
        # WalletSerializer class.  Force ownership to the authenticated user
        # rather than trusting the request payload.
        serializer.save(owner=self.request.user)
class CurrencyCreateDetail(generics.ListCreateAPIView):
    """List all currencies or create a new one (no authentication required)."""
    serializer_class = CurrencySerializer
    queryset = Currency.objects.all()
class WalletDetail(generics.ListAPIView):
    """List only the wallets owned by the authenticated user."""
    permission_classes = [IsAuthenticated]
    serializer_class=WalletSerializer
    queryset=Wallet.objects.all()
    def get(self, request, format=None):
        # Removed an unused `content` dict that was built and never referenced.
        queryset = Wallet.objects.filter(owner=request.user.id)
        serializer = WalletSerializer(queryset, many=True)
        return Response(serializer.data)
class WalletCreate(generics.CreateAPIView):
    """Create a wallet owned by the authenticated user."""
    permission_classes = [IsAuthenticated]
    serializer_class=WalletSerializer
    queryset=Wallet.objects.all()
    def perform_create(self, serializer):
        # Renamed the parameter: it previously shadowed the imported
        # WalletSerializer class.
        serializer.save(owner=self.request.user)
class WalletUpdate(generics.UpdateAPIView):
    """Update a wallet; only its owner may modify it."""
    permission_classes = [IsAuthenticated]
    serializer_class=WalletSerializer
    queryset=Wallet.objects.all()
    def update(self, request, *args, **kwargs):
        wallet = Wallet.objects.get(pk=kwargs['pk'])
        # BUG FIX: the original used `is not`, which compares object identity.
        # For integer ids outside CPython's small-int cache, `a is not b` can
        # be True even when the ids are equal, wrongly rejecting the owner.
        # Use value comparison instead.
        # NOTE(review): maybe we should also allow admin users?
        if wallet.owner.id != request.user.id:
            return Response({}, status=status.HTTP_403_FORBIDDEN)
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        serializer = self.get_serializer(instance, data=request.data, partial=partial)
        serializer.is_valid(raise_exception=True)
        self.perform_update(serializer)
        return Response(serializer.data)
class WalletDelete(generics.DestroyAPIView):
    """Delete a wallet; only its owner may delete it."""
    permission_classes = [IsAuthenticated]
    serializer_class=WalletSerializer
    queryset=Wallet.objects.all()
    def delete(self, request, *args, **kwargs):
        wallet = Wallet.objects.get(pk=kwargs['pk'])
        # BUG FIX: the original used `is not` (identity) on integer ids, which
        # is unreliable outside CPython's small-int cache; compare by value.
        if wallet.owner.id != request.user.id:
            return Response({}, status=status.HTTP_403_FORBIDDEN)
        return self.destroy(request, *args, **kwargs)
class TransactionCreate(generics.CreateAPIView):
    # Creates a money transfer between two wallets of the same currency;
    # the transaction record is saved with successful=True/False accordingly.
    permission_classes = [IsAuthenticated]
    serializer_class=TransactionSerializer
    queryset=Transaction.objects.all()
    def perform_create(self, TransactionSerializer):
        # NOTE(review): the parameter name shadows the imported
        # TransactionSerializer class; consider renaming it `serializer`.
        queryset_origin=Wallet.objects.get(pk=self.request.data['origin'])
        # Transfer only if the origin wallet exists, has sufficient funds,
        # and belongs to the requesting user.
        if (queryset_origin and (queryset_origin.value >= int(self.request.data['value'])) and (queryset_origin.owner == self.request.user)):
            queryset_destination=Wallet.objects.get(pk=self.request.data['destination'])
            # Destination must exist and use the same currency as the origin.
            if (queryset_destination and queryset_destination.currency==queryset_origin.currency):
                queryset_destination.value=queryset_destination.value+int(self.request.data['value'])
                queryset_origin.value=queryset_origin.value-int(self.request.data['value'])
                # NOTE(review): the two balance updates are not wrapped in a
                # database transaction; a failure between the saves can leave
                # the wallets inconsistent — consider transaction.atomic.
                queryset_destination.save()
                queryset_origin.save()
                TransactionSerializer.save(successful=True)
            else:
                TransactionSerializer.save(successful=False)
        else:
            TransactionSerializer.save(successful=False)
        # NOTE(review): this return runs on every path, success included, but
        # DRF ignores perform_create's return value, so it has no effect —
        # failed transfers still get a 201 response; verify intended behavior.
        return Response({}, status=status.HTTP_403_FORBIDDEN)
class TransactionDetail(generics.ListAPIView):
    """List transactions where the user is either the sender or the receiver."""
    permission_classes = [IsAuthenticated]
    serializer_class=TransactionSerializer
    queryset=Transaction.objects.all()
    def get(self, request, format=None):
        triggered_by_user = Q(origin__owner=request.user.id)
        received_by_user = Q(destination__owner=request.user.id)
        # BUG FIX: the original used `triggeredUser or receivedUser`, which is
        # Python boolean short-circuit and always evaluates to the first Q
        # object (Q objects are truthy), silently dropping received
        # transactions.  Q objects must be combined with the `|` operator.
        queryset = Transaction.objects.filter(triggered_by_user | received_by_user)
        serializer = TransactionSerializer(queryset, many=True)
        return Response(serializer.data)
| 44.095652
| 148
| 0.714849
|
acfddc60a2bb47c9ebd1baaa95846782b2431bf4
| 75,949
|
py
|
Python
|
great_expectations/dataset/sqlalchemy_dataset.py
|
jstammers/great_expectations
|
e4270cfd38c101e7b811e1ea60aa73f8e934fd48
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/dataset/sqlalchemy_dataset.py
|
jstammers/great_expectations
|
e4270cfd38c101e7b811e1ea60aa73f8e934fd48
|
[
"Apache-2.0"
] | null | null | null |
great_expectations/dataset/sqlalchemy_dataset.py
|
jstammers/great_expectations
|
e4270cfd38c101e7b811e1ea60aa73f8e934fd48
|
[
"Apache-2.0"
] | null | null | null |
import inspect
import logging
import traceback
import uuid
import warnings
from datetime import datetime
from functools import wraps
from typing import Dict, Iterable, List
import numpy as np
import pandas as pd
from dateutil.parser import parse
from great_expectations.data_asset import DataAsset
from great_expectations.data_asset.util import DocInherit, parse_result_format
from great_expectations.dataset.util import (
check_sql_engine_dialect,
get_approximate_percentile_disc_sql,
get_sql_dialect_floating_point_infinity_value,
)
from great_expectations.util import import_library_module
from ..core import convert_to_json_serializable
from .dataset import Dataset
from .pandas_dataset import PandasDataset
logger = logging.getLogger(__name__)
try:
import sqlalchemy as sa
from sqlalchemy.dialects import registry
from sqlalchemy.engine import reflection
from sqlalchemy.sql.expression import BinaryExpression, literal
from sqlalchemy.sql.selectable import Select, CTE
from sqlalchemy.sql.operators import custom_op
from sqlalchemy.sql.elements import Label, WithinGroup, TextClause
from sqlalchemy.engine.result import RowProxy
from sqlalchemy.engine.default import DefaultDialect
from sqlalchemy.exc import ProgrammingError
except ImportError:
logger.debug(
"Unable to load SqlAlchemy context; install optional sqlalchemy dependency for support"
)
sa = None
registry = None
reflection = None
BinaryExpression = None
literal = None
Select = None
CTE = None
custom_op = None
Label = None
WithinGroup = None
TextClause = None
RowProxy = None
DefaultDialect = None
ProgrammingError = None
try:
import psycopg2
import sqlalchemy.dialects.postgresql.psycopg2 as sqlalchemy_psycopg2
except (ImportError, KeyError):
sqlalchemy_psycopg2 = None
try:
import sqlalchemy_redshift.dialect
except ImportError:
sqlalchemy_redshift = None
try:
import snowflake.sqlalchemy.snowdialect
# Sometimes "snowflake-sqlalchemy" fails to self-register in certain environments, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
registry.register("snowflake", "snowflake.sqlalchemy", "dialect")
except (ImportError, KeyError):
snowflake = None
try:
import pybigquery.sqlalchemy_bigquery
# Sometimes "pybigquery.sqlalchemy_bigquery" fails to self-register in certain environments, so we do it explicitly.
# (see https://stackoverflow.com/questions/53284762/nosuchmoduleerror-cant-load-plugin-sqlalchemy-dialectssnowflake)
registry.register("bigquery", "pybigquery.sqlalchemy_bigquery", "BigQueryDialect")
try:
getattr(pybigquery.sqlalchemy_bigquery, "INTEGER")
bigquery_types_tuple = None
except AttributeError:
# In older versions of the pybigquery driver, types were not exported, so we use a hack
logger.warning(
"Old pybigquery driver version detected. Consider upgrading to 0.4.14 or later."
)
from collections import namedtuple
BigQueryTypes = namedtuple(
"BigQueryTypes", sorted(pybigquery.sqlalchemy_bigquery._type_map)
)
bigquery_types_tuple = BigQueryTypes(**pybigquery.sqlalchemy_bigquery._type_map)
except ImportError:
bigquery_types_tuple = None
pybigquery = None
try:
# SQLAlchemy does not export the "INT" type for the MS SQL Server dialect; however "INT" is supported by the engine.
# Since SQLAlchemy exports the "INTEGER" type for the MS SQL Server dialect, alias "INT" to the "INTEGER" type.
import sqlalchemy.dialects.mssql as mssqltypes
try:
getattr(mssqltypes, "INT")
except AttributeError:
mssqltypes.INT = mssqltypes.INTEGER
except ImportError:
pass
class SqlAlchemyBatchReference(object):
    """Lightweight reference to a SQL batch: an engine plus a table name,
    a custom query, or (for BigQuery) both together."""

    def __init__(self, engine, table_name=None, schema=None, query=None):
        if table_name is None and query is None:
            raise ValueError("Table_name or query must be specified")
        self._engine = engine
        self._table_name = table_name
        self._schema = schema
        self._query = query

    def get_init_kwargs(self):
        """Build the constructor kwargs for the referenced dataset."""
        kwargs = {"engine": self._engine}
        if self._table_name and self._query:
            # BigQuery case: a temporary table name must be provided *with*
            # the custom sql to execute.
            kwargs["table_name"] = self._table_name
            kwargs["custom_sql"] = self._query
        elif self._table_name:
            kwargs["table_name"] = self._table_name
        else:
            kwargs["custom_sql"] = self._query
        if self._schema:
            kwargs["schema"] = self._schema
        return kwargs
class MetaSqlAlchemyDataset(Dataset):
    """Dataset mixin implementing the SQLAlchemy version of
    ``column_map_expectation``: decorated expectations return a SQLAlchemy
    filter condition, and this class builds the count/unexpected-value
    queries and formats the expectation result."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
    @classmethod
    def column_map_expectation(cls, func):
        """For SqlAlchemy, this decorator allows individual column_map_expectations to simply return the filter
        that describes the expected condition on their data.
        The decorator will then use that filter to obtain unexpected elements, relevant counts, and return the formatted
        object.
        """
        # parameter names of the wrapped expectation, minus `self`
        argspec = inspect.getfullargspec(func)[0][1:]
        @cls.expectation(argspec)
        @wraps(func)
        def inner_wrapper(
            self, column, mostly=None, result_format=None, *args, **kwargs
        ):
            if result_format is None:
                result_format = self.default_expectation_args["result_format"]
            result_format = parse_result_format(result_format)
            if result_format["result_format"] == "COMPLETE":
                warnings.warn(
                    "Setting result format to COMPLETE for a SqlAlchemyDataset can be dangerous because it will not limit the number of returned results."
                )
                unexpected_count_limit = None
            else:
                unexpected_count_limit = result_format["partial_unexpected_count"]
            expected_condition: BinaryExpression = func(self, column, *args, **kwargs)
            # Added to prepare for when an ignore_values argument is added to the expectation
            ignore_values: list = [None]
            if func.__name__ in [
                "expect_column_values_to_not_be_null",
                "expect_column_values_to_be_null",
            ]:
                ignore_values = []
                # Counting the number of unexpected values can be expensive when there is a large
                # number of np.nan values.
                # This only happens on expect_column_values_to_not_be_null expectations.
                # Since there is no reason to look for most common unexpected values in this case,
                # we will instruct the result formatting method to skip this step.
                result_format["partial_unexpected_count"] = 0
            # Build a SQL condition matching rows whose value should be ignored.
            ignore_values_conditions: List[BinaryExpression] = []
            if (
                len(ignore_values) > 0
                and None not in ignore_values
                or len(ignore_values) > 1
                and None in ignore_values
            ):
                ignore_values_conditions += [
                    sa.column(column).in_(
                        [val for val in ignore_values if val is not None]
                    )
                ]
            if None in ignore_values:
                # NULL requires IS NULL; the IN clause above uses `==` and
                # would not match it.
                ignore_values_conditions += [sa.column(column).is_(None)]
            ignore_values_condition: BinaryExpression
            if len(ignore_values_conditions) > 1:
                ignore_values_condition = sa.or_(*ignore_values_conditions)
            elif len(ignore_values_conditions) == 1:
                ignore_values_condition = ignore_values_conditions[0]
            else:
                # nothing to ignore: use a constant-false condition
                ignore_values_condition = BinaryExpression(
                    sa.literal(False), sa.literal(True), custom_op("=")
                )
            # mssql uses a temp-table strategy; every other dialect gets a
            # single aggregate query (see the two helpers below).
            count_query: Select
            if self.sql_engine_dialect.name.lower() == "mssql":
                count_query = self._get_count_query_mssql(
                    expected_condition=expected_condition,
                    ignore_values_condition=ignore_values_condition,
                )
            else:
                count_query = self._get_count_query_generic_sqlalchemy(
                    expected_condition=expected_condition,
                    ignore_values_condition=ignore_values_condition,
                )
            count_results: dict = dict(self.engine.execute(count_query).fetchone())
            # Handle case of empty table gracefully:
            if (
                "element_count" not in count_results
                or count_results["element_count"] is None
            ):
                count_results["element_count"] = 0
            if "null_count" not in count_results or count_results["null_count"] is None:
                count_results["null_count"] = 0
            if (
                "unexpected_count" not in count_results
                or count_results["unexpected_count"] is None
            ):
                count_results["unexpected_count"] = 0
            # Some engines may return Decimal from count queries (lookin' at you MSSQL)
            # Convert to integers
            count_results["element_count"] = int(count_results["element_count"])
            count_results["null_count"] = int(count_results["null_count"])
            count_results["unexpected_count"] = int(count_results["unexpected_count"])
            # Retrieve unexpected values
            unexpected_query_results = self.engine.execute(
                sa.select([sa.column(column)])
                .select_from(self._table)
                .where(
                    sa.and_(
                        sa.not_(expected_condition), sa.not_(ignore_values_condition)
                    )
                )
                .limit(unexpected_count_limit)
            )
            nonnull_count: int = count_results["element_count"] - count_results[
                "null_count"
            ]
            if "output_strftime_format" in kwargs:
                # Render unexpected values as formatted date strings; string
                # values are parsed to datetimes first.
                output_strftime_format = kwargs["output_strftime_format"]
                maybe_limited_unexpected_list = []
                for x in unexpected_query_results.fetchall():
                    if isinstance(x[column], str):
                        col = parse(x[column])
                    else:
                        col = x[column]
                    maybe_limited_unexpected_list.append(
                        datetime.strftime(col, output_strftime_format)
                    )
            else:
                maybe_limited_unexpected_list = [
                    x[column] for x in unexpected_query_results.fetchall()
                ]
            success_count = nonnull_count - count_results["unexpected_count"]
            success, percent_success = self._calc_map_expectation_success(
                success_count, nonnull_count, mostly
            )
            return_obj = self._format_map_output(
                result_format,
                success,
                count_results["element_count"],
                nonnull_count,
                count_results["unexpected_count"],
                maybe_limited_unexpected_list,
                None,
            )
            if func.__name__ in [
                "expect_column_values_to_not_be_null",
                "expect_column_values_to_be_null",
            ]:
                # These results are unnecessary for the above expectations
                del return_obj["result"]["unexpected_percent_nonmissing"]
                del return_obj["result"]["missing_count"]
                del return_obj["result"]["missing_percent"]
                try:
                    del return_obj["result"]["partial_unexpected_counts"]
                    del return_obj["result"]["partial_unexpected_list"]
                except KeyError:
                    pass
            return return_obj
        inner_wrapper.__name__ = func.__name__
        inner_wrapper.__doc__ = func.__doc__
        return inner_wrapper
    def _get_count_query_mssql(
        self,
        expected_condition: BinaryExpression,
        ignore_values_condition: BinaryExpression,
    ) -> Select:
        """Build the element/null/unexpected count query for MSSQL by
        materializing the per-row unexpected flag into a temporary table."""
        # mssql expects all temporary table names to have a prefix '#'
        temp_table_name: str = f"#ge_tmp_{str(uuid.uuid4())[:8]}"
        with self.engine.begin():
            metadata: sa.MetaData = sa.MetaData(self.engine)
            temp_table_obj: sa.Table = sa.Table(
                temp_table_name,
                metadata,
                sa.Column("condition", sa.Integer, primary_key=False, nullable=False),
            )
            temp_table_obj.create(self.engine, checkfirst=True)
            # 1 when the row is unexpected (fails the condition and is not
            # ignored), else 0
            count_case_statement: List[sa.sql.elements.Label] = [
                sa.case(
                    [
                        (
                            sa.and_(
                                sa.not_(expected_condition),
                                sa.not_(ignore_values_condition),
                            ),
                            1,
                        )
                    ],
                    else_=0,
                ).label("condition")
            ]
            inner_case_query: sa.sql.dml.Insert = temp_table_obj.insert().from_select(
                count_case_statement,
                sa.select(count_case_statement).select_from(self._table),
            )
            self.engine.execute(inner_case_query)
            element_count_query: Select = sa.select(
                [
                    sa.func.count().label("element_count"),
                    sa.func.sum(sa.case([(ignore_values_condition, 1)], else_=0)).label(
                        "null_count"
                    ),
                ]
            ).select_from(self._table).alias("ElementAndNullCountsSubquery")
            unexpected_count_query: Select = sa.select(
                [sa.func.sum(sa.column("condition")).label("unexpected_count"),]
            ).select_from(temp_table_obj).alias("UnexpectedCountSubquery")
            count_query: Select = sa.select(
                [
                    element_count_query.c.element_count,
                    element_count_query.c.null_count,
                    unexpected_count_query.c.unexpected_count,
                ]
            )
            return count_query
    def _get_count_query_generic_sqlalchemy(
        self,
        expected_condition: BinaryExpression,
        ignore_values_condition: BinaryExpression,
    ) -> Select:
        """Build the element/null/unexpected count query as a single
        aggregate SELECT (all dialects except MSSQL)."""
        return sa.select(
            [
                sa.func.count().label("element_count"),
                sa.func.sum(sa.case([(ignore_values_condition, 1)], else_=0)).label(
                    "null_count"
                ),
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.and_(
                                    sa.not_(expected_condition),
                                    sa.not_(ignore_values_condition),
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("unexpected_count"),
            ]
        ).select_from(self._table)
class SqlAlchemyDataset(MetaSqlAlchemyDataset):
"""
--ge-feature-maturity-info--
id: validation_engine_sqlalchemy
title: Validation Engine - SQLAlchemy
icon:
short_description: Use SQLAlchemy to validate data in a database
description: Use SQLAlchemy to validate data in a database
how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/creating_batches/how_to_load_a_database_table_or_a_query_result_as_a_batch.html
maturity: Production
maturity_details:
api_stability: High
implementation_completeness: Moderate (temp table handling/permissions not universal)
unit_test_coverage: High
integration_infrastructure_test_coverage: N/A
documentation_completeness: Minimal (none)
bug_risk: Low
--ge-feature-maturity-info--
"""
@classmethod
def from_dataset(cls, dataset=None):
if isinstance(dataset, SqlAlchemyDataset):
return cls(table_name=str(dataset._table.name), engine=dataset.engine)
else:
raise ValueError("from_dataset requires a SqlAlchemy dataset")
def __init__(
self,
table_name=None,
engine=None,
connection_string=None,
custom_sql=None,
schema=None,
*args,
**kwargs,
):
if custom_sql and not table_name:
# NOTE: Eugene 2020-01-31: @James, this is a not a proper fix, but without it the "public" schema
# was used for a temp table and raising an error
schema = None
table_name = f"ge_tmp_{str(uuid.uuid4())[:8]}"
# mssql expects all temporary table names to have a prefix '#'
if engine.dialect.name.lower() == "mssql":
table_name = f"#{table_name}"
generated_table_name = table_name
else:
generated_table_name = None
if table_name is None:
raise ValueError("No table_name provided.")
if engine is None and connection_string is None:
raise ValueError("Engine or connection_string must be provided.")
if engine is not None:
self.engine = engine
else:
try:
self.engine = sa.create_engine(connection_string)
except Exception as err:
# Currently we do no error handling if the engine doesn't work out of the box.
raise err
if self.engine.dialect.name.lower() == "bigquery":
# In BigQuery the table name is already qualified with its schema name
self._table = sa.Table(table_name, sa.MetaData(), schema=None)
else:
self._table = sa.Table(table_name, sa.MetaData(), schema=schema)
# Get the dialect **for purposes of identifying types**
if self.engine.dialect.name.lower() in [
"postgresql",
"mysql",
"sqlite",
"oracle",
"mssql",
"oracle",
]:
# These are the officially included and supported dialects by sqlalchemy
self.dialect = import_library_module(
module_name="sqlalchemy.dialects." + self.engine.dialect.name
)
if engine and engine.dialect.name.lower() in ["sqlite", "mssql"]:
# sqlite/mssql temp tables only persist within a connection so override the engine
self.engine = engine.connect()
elif self.engine.dialect.name.lower() == "snowflake":
self.dialect = import_library_module(
module_name="snowflake.sqlalchemy.snowdialect"
)
elif self.engine.dialect.name.lower() == "redshift":
self.dialect = import_library_module(
module_name="sqlalchemy_redshift.dialect"
)
elif self.engine.dialect.name.lower() == "bigquery":
self.dialect = import_library_module(
module_name="pybigquery.sqlalchemy_bigquery"
)
else:
self.dialect = None
if schema is not None and custom_sql is not None:
# temporary table will be written to temp schema, so don't allow
# a user-defined schema
# NOTE: 20200306 - JPC - Previously, this would disallow both custom_sql (a query) and a schema, but
# that is overly restrictive -- snowflake could have had a schema specified, for example, in which to create
# a temporary table.
# raise ValueError("Cannot specify both schema and custom_sql.")
pass
if custom_sql is not None and self.engine.dialect.name.lower() == "bigquery":
if (
generated_table_name is not None
and self.engine.dialect.dataset_id is None
):
raise ValueError(
"No BigQuery dataset specified. Use bigquery_temp_table batch_kwarg or a specify a "
"default dataset in engine url"
)
if (
custom_sql is not None
and self.engine.dialect.name.lower() == "snowflake"
and generated_table_name is not None
):
raise ValueError(
"No snowflake_transient_table specified. Snowflake with a query batch_kwarg will create "
"a transient table, so you must provide a user-selected name."
)
if custom_sql:
self.create_temporary_table(table_name, custom_sql, schema_name=schema)
if (
generated_table_name is not None
and self.engine.dialect.name.lower() == "bigquery"
):
logger.warning(
"Created permanent table {table_name}".format(table_name=table_name)
)
try:
insp = reflection.Inspector.from_engine(self.engine)
self.columns = insp.get_columns(table_name, schema=schema)
except KeyError:
# we will get a KeyError for temporary tables, since
# reflection will not find the temporary schema
self.columns = self.column_reflection_fallback()
# Use fallback because for mssql reflection doesn't throw an error but returns an empty list
if len(self.columns) == 0:
self.columns = self.column_reflection_fallback()
# Only call super once connection is established and table_name and columns known to allow autoinspection
super().__init__(*args, **kwargs)
@property
def sql_engine_dialect(self) -> DefaultDialect:
return self.engine.dialect
def attempt_allowing_relative_error(self):
detected_redshift: bool = (
sqlalchemy_redshift is not None
and check_sql_engine_dialect(
actual_sql_engine_dialect=self.sql_engine_dialect,
candidate_sql_engine_dialect=sqlalchemy_redshift.dialect.RedshiftDialect,
)
)
# noinspection PyTypeChecker
detected_psycopg2: bool = (
sqlalchemy_psycopg2 is not None
and check_sql_engine_dialect(
actual_sql_engine_dialect=self.sql_engine_dialect,
candidate_sql_engine_dialect=sqlalchemy_psycopg2.PGDialect_psycopg2,
)
)
return detected_redshift or detected_psycopg2
def head(self, n=5):
"""Returns a *PandasDataset* with the first *n* rows of the given Dataset"""
try:
df = next(
pd.read_sql_table(
table_name=self._table.name,
schema=self._table.schema,
con=self.engine,
chunksize=n,
)
)
except (ValueError, NotImplementedError):
# it looks like MetaData that is used by pd.read_sql_table
# cannot work on a temp table.
# If it fails, we are trying to get the data using read_sql
head_sql_str = "select * from "
if self._table.schema and self.engine.dialect.name.lower() != "bigquery":
head_sql_str += self._table.schema + "." + self._table.name
elif self.engine.dialect.name.lower() == "bigquery":
head_sql_str += "`" + self._table.name + "`"
else:
head_sql_str += self._table.name
head_sql_str += " limit {0:d}".format(n)
# Limit is unknown in mssql! Use top instead!
if self.engine.dialect.name.lower() == "mssql":
head_sql_str = "select top({n}) * from {table}".format(
n=n, table=self._table.name
)
df = pd.read_sql(head_sql_str, con=self.engine)
except StopIteration:
df = pd.DataFrame(columns=self.get_table_columns())
return PandasDataset(
df,
expectation_suite=self.get_expectation_suite(
discard_failed_expectations=False,
discard_result_format_kwargs=False,
discard_catch_exceptions_kwargs=False,
discard_include_config_kwargs=False,
),
)
def get_row_count(self, table_name=None):
if table_name is None:
table_name = self._table
else:
table_name = sa.table(table_name)
count_query = sa.select([sa.func.count()]).select_from(table_name)
return int(self.engine.execute(count_query).scalar())
def get_column_count(self):
return len(self.columns)
def get_table_columns(self) -> List[str]:
return [col["name"] for col in self.columns]
def get_column_nonnull_count(self, column):
ignore_values = [None]
count_query = sa.select(
[
sa.func.count().label("element_count"),
sa.func.sum(
sa.case(
[
(
sa.or_(
sa.column(column).in_(ignore_values),
# Below is necessary b/c sa.in_() uses `==` but None != None
# But we only consider this if None is actually in the list of ignore values
sa.column(column).is_(None)
if None in ignore_values
else False,
),
1,
)
],
else_=0,
)
).label("null_count"),
]
).select_from(self._table)
count_results = dict(self.engine.execute(count_query).fetchone())
element_count = int(count_results.get("element_count") or 0)
null_count = int(count_results.get("null_count") or 0)
return element_count - null_count
def get_column_sum(self, column):
return self.engine.execute(
sa.select([sa.func.sum(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_max(self, column, parse_strings_as_datetimes=False):
if parse_strings_as_datetimes:
raise NotImplementedError
return self.engine.execute(
sa.select([sa.func.max(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_min(self, column, parse_strings_as_datetimes=False):
if parse_strings_as_datetimes:
raise NotImplementedError
return self.engine.execute(
sa.select([sa.func.min(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_value_counts(self, column, sort="value", collate=None):
if sort not in ["value", "count", "none"]:
raise ValueError("sort must be either 'value', 'count', or 'none'")
query = (
sa.select(
[
sa.column(column).label("value"),
sa.func.count(sa.column(column)).label("count"),
]
)
.where(sa.column(column) != None)
.group_by(sa.column(column))
)
if sort == "value":
# NOTE: depending on the way the underlying database collates columns,
# ordering can vary. postgresql collate "C" matches default sort
# for python and most other systems, but is not universally supported,
# so we use the default sort for the system, unless specifically overridden
if collate is not None:
query = query.order_by(sa.column(column).collate(collate))
else:
query = query.order_by(sa.column(column))
elif sort == "count":
query = query.order_by(sa.column("count").desc())
results = self.engine.execute(query.select_from(self._table)).fetchall()
series = pd.Series(
[row[1] for row in results],
index=pd.Index(data=[row[0] for row in results], name="value"),
name="count",
)
return series
def get_column_mean(self, column):
return self.engine.execute(
sa.select([sa.func.avg(sa.column(column))]).select_from(self._table)
).scalar()
def get_column_unique_count(self, column):
return self.engine.execute(
sa.select([sa.func.count(sa.func.distinct(sa.column(column)))]).select_from(
self._table
)
).scalar()
def get_column_median(self, column):
nonnull_count = self.get_column_nonnull_count(column)
element_values = self.engine.execute(
sa.select([sa.column(column)])
.order_by(sa.column(column))
.where(sa.column(column) != None)
.offset(max(nonnull_count // 2 - 1, 0))
.limit(2)
.select_from(self._table)
)
column_values = list(element_values.fetchall())
if len(column_values) == 0:
column_median = None
elif nonnull_count % 2 == 0:
# An even number of column values: take the average of the two center values
column_median = (
float(
column_values[0][0]
+ column_values[1][0] # left center value # right center value
)
/ 2.0
) # Average center values
else:
# An odd number of column values, we can just take the center value
column_median = column_values[1][0] # True center value
return column_median
def get_column_quantiles(
self, column: str, quantiles: Iterable, allow_relative_error: bool = False
) -> list:
if self.sql_engine_dialect.name.lower() == "mssql":
return self._get_column_quantiles_mssql(column=column, quantiles=quantiles)
elif self.sql_engine_dialect.name.lower() == "bigquery":
return self._get_column_quantiles_bigquery(
column=column, quantiles=quantiles
)
elif self.sql_engine_dialect.name.lower() == "mysql":
return self._get_column_quantiles_mysql(column=column, quantiles=quantiles)
else:
return self._get_column_quantiles_generic_sqlalchemy(
column=column,
quantiles=quantiles,
allow_relative_error=allow_relative_error,
)
def _get_column_quantiles_mssql(self, column: str, quantiles: Iterable) -> list:
# mssql requires over(), so we add an empty over() clause
selects: List[WithinGroup] = [
sa.func.percentile_disc(quantile)
.within_group(sa.column(column).asc())
.over()
for quantile in quantiles
]
quantiles_query: Select = sa.select(selects).select_from(self._table)
try:
quantiles_results: RowProxy = self.engine.execute(
quantiles_query
).fetchone()
return list(quantiles_results)
except ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{str(pe)}". Traceback: "{exception_traceback}".'
logger.error(exception_message)
raise pe
def _get_column_quantiles_bigquery(self, column: str, quantiles: Iterable) -> list:
# BigQuery does not support "WITHIN", so we need a special case for it
selects: List[WithinGroup] = [
sa.func.percentile_disc(sa.column(column), quantile).over()
for quantile in quantiles
]
quantiles_query: Select = sa.select(selects).select_from(self._table)
try:
quantiles_results: RowProxy = self.engine.execute(
quantiles_query
).fetchone()
return list(quantiles_results)
except ProgrammingError as pe:
exception_message: str = "An SQL syntax Exception occurred."
exception_traceback: str = traceback.format_exc()
exception_message += f'{type(pe).__name__}: "{str(pe)}". Traceback: "{exception_traceback}".'
logger.error(exception_message)
raise pe
    def _get_column_quantiles_mysql(self, column: str, quantiles: Iterable) -> list:
        """Compute discrete quantiles on MySQL via a percent_rank window CTE."""
        # MySQL does not support "percentile_disc", so we implement it as a compound query.
        # Please see https://stackoverflow.com/questions/19770026/calculate-percentile-value-using-mysql for reference.
        # The CTE "t" annotates every row with its percent_rank over the ordered
        # column, cast to a fixed-precision decimal so comparisons against the
        # requested quantiles below are stable.
        percent_rank_query: CTE = sa.select(
            [
                sa.column(column),
                sa.cast(
                    sa.func.percent_rank().over(order_by=sa.column(column).asc()),
                    sa.dialects.mysql.DECIMAL(18, 15),
                ).label("p"),
            ]
        ).order_by(sa.column("p").asc()).select_from(self._table).cte("t")
        selects: List[WithinGroup] = []
        for idx, quantile in enumerate(quantiles):
            # pymysql cannot handle conversion of numpy float64 to float; convert just in case
            if np.issubdtype(type(quantile), np.float_):
                quantile = float(quantile)
            # first_value over rows ordered so that rows whose percent_rank is at
            # or below the requested quantile sort first (descending) selects the
            # discrete quantile value for this column.
            quantile_column: Label = sa.func.first_value(sa.column(column)).over(
                order_by=sa.case(
                    [
                        (
                            percent_rank_query.c.p
                            <= sa.cast(quantile, sa.dialects.mysql.DECIMAL(18, 15)),
                            percent_rank_query.c.p,
                        )
                    ],
                    else_=None,
                ).desc()
            ).label(f"q_{idx}")
            selects.append(quantile_column)
        quantiles_query: Select = sa.select(selects).distinct().order_by(
            percent_rank_query.c.p.desc()
        )
        try:
            quantiles_results: RowProxy = self.engine.execute(
                quantiles_query
            ).fetchone()
            return list(quantiles_results)
        except ProgrammingError as pe:
            exception_message: str = "An SQL syntax Exception occurred."
            exception_traceback: str = traceback.format_exc()
            exception_message += f'{type(pe).__name__}: "{str(pe)}". Traceback: "{exception_traceback}".'
            logger.error(exception_message)
            raise pe
    # Support for computing the quantiles column for PostGreSQL and Redshift is included in the same method as that for
    # the generic sqlalchemy compatible DBMS engine, because users often use the postgresql driver to connect to Redshift
    # The key functional difference is that Redshift does not support the aggregate function
    # "percentile_disc", but does support the approximate percentile_disc or percentile_cont function version instead.
    def _get_column_quantiles_generic_sqlalchemy(
        self, column: str, quantiles: Iterable, allow_relative_error: bool
    ) -> list:
        """Compute discrete quantiles via percentile_disc, with an approximate
        fallback for dialects (e.g. Redshift) that only support the
        approximate variant, gated by *allow_relative_error*."""
        selects: List[WithinGroup] = [
            sa.func.percentile_disc(quantile).within_group(sa.column(column).asc())
            for quantile in quantiles
        ]
        quantiles_query: Select = sa.select(selects).select_from(self._table)
        try:
            quantiles_results: RowProxy = self.engine.execute(
                quantiles_query
            ).fetchone()
            return list(quantiles_results)
        except ProgrammingError:
            # ProgrammingError: (psycopg2.errors.SyntaxError) Aggregate function "percentile_disc" is not supported;
            # use approximate percentile_disc or percentile_cont instead.
            if self.attempt_allowing_relative_error():
                # Redshift does not have a percentile_disc method, but does support an approximate version.
                sql_approx: str = get_approximate_percentile_disc_sql(
                    selects=selects, sql_engine_dialect=self.sql_engine_dialect
                )
                selects_approx: List[TextClause] = [sa.text(sql_approx)]
                quantiles_query_approx: Select = sa.select(selects_approx).select_from(
                    self._table
                )
                if allow_relative_error:
                    try:
                        quantiles_results: RowProxy = self.engine.execute(
                            quantiles_query_approx
                        ).fetchone()
                        return list(quantiles_results)
                    except ProgrammingError as pe:
                        exception_message: str = "An SQL syntax Exception occurred."
                        exception_traceback: str = traceback.format_exc()
                        exception_message += f'{type(pe).__name__}: "{str(pe)}". Traceback: "{exception_traceback}".'
                        logger.error(exception_message)
                        raise pe
                else:
                    raise ValueError(
                        f'The SQL engine dialect "{str(self.sql_engine_dialect)}" does not support computing quantiles '
                        "without approximation error; set allow_relative_error to True to allow approximate quantiles."
                    )
            else:
                raise ValueError(
                    f'The SQL engine dialect "{str(self.sql_engine_dialect)}" does not support computing quantiles with '
                    "approximation error; set allow_relative_error to False to disable approximate quantiles."
                )
def get_column_stdev(self, column):
if self.sql_engine_dialect.name.lower() == "mssql":
# Note: "stdev_samp" is not a recognized built-in function name (but "stdev" does exist for "mssql").
# This function is used to compute statistical standard deviation from sample data (per the reference in
# https://sqlserverrider.wordpress.com/2013/03/06/standard-deviation-functions-stdev-and-stdevp-sql-server).
res = self.engine.execute(
sa.select([sa.func.stdev(sa.column(column))])
.select_from(self._table)
.where(sa.column(column) is not None)
).fetchone()
else:
res = self.engine.execute(
sa.select([sa.func.stddev_samp(sa.column(column))])
.select_from(self._table)
.where(sa.column(column) is not None)
).fetchone()
return float(res[0])
    def get_column_hist(self, column, bins):
        """return a list of counts corresponding to bins
        Args:
            column: the name of the column for which to get the histogram
            bins: tuple of bin edges for which to get histogram values; *must* be tuple to support caching
        """
        case_conditions = []
        idx = 0
        bins = list(bins)
        # If we have an infinite lower bound, don't express that in sql
        if (
            bins[0]
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=True
            )
        ) or (
            bins[0]
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=True
            )
        ):
            # First bin is unbounded below: count everything under the next edge.
            case_conditions.append(
                sa.func.sum(
                    sa.case([(sa.column(column) < bins[idx + 1], 1)], else_=0)
                ).label("bin_" + str(idx))
            )
            idx += 1
        # Interior bins are half-open intervals [edge, next_edge).
        for idx in range(idx, len(bins) - 2):
            case_conditions.append(
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.and_(
                                    bins[idx] <= sa.column(column),
                                    sa.column(column) < bins[idx + 1],
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("bin_" + str(idx))
            )
        # Last bin: unbounded above when the final edge is +infinity, otherwise
        # a closed interval [bins[-2], bins[-1]].
        if (
            bins[-1]
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=False
            )
        ) or (
            bins[-1]
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=False
            )
        ):
            case_conditions.append(
                sa.func.sum(
                    sa.case([(bins[-2] <= sa.column(column), 1)], else_=0)
                ).label("bin_" + str(len(bins) - 1))
            )
        else:
            case_conditions.append(
                sa.func.sum(
                    sa.case(
                        [
                            (
                                sa.and_(
                                    bins[-2] <= sa.column(column),
                                    sa.column(column) <= bins[-1],
                                ),
                                1,
                            )
                        ],
                        else_=0,
                    )
                ).label("bin_" + str(len(bins) - 1))
            )
        query = (
            sa.select(case_conditions)
            .where(sa.column(column) != None,)
            .select_from(self._table)
        )
        # Run the data through convert_to_json_serializable to ensure we do not have Decimal types
        hist = convert_to_json_serializable(list(self.engine.execute(query).fetchone()))
        return hist
    def get_column_count_in_range(
        self, column, min_val=None, max_val=None, strict_min=False, strict_max=True
    ):
        """Return the number of non-null values in *column* within a range.

        At least one of min_val/max_val must be supplied; strict_min/strict_max
        make the corresponding bound exclusive. API-level floating-point
        infinity sentinels are first translated into the current dialect's
        representation so comparisons behave consistently across backends.

        Raises:
            ValueError: if both bounds are None, or min_val > max_val.
        """
        if min_val is None and max_val is None:
            raise ValueError("Must specify either min or max value")
        if min_val is not None and max_val is not None and min_val > max_val:
            raise ValueError("Min value must be <= to max value")
        # Translate a -infinity sentinel for the lower bound.
        if (
            min_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=True
            )
        ) or (
            min_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=True
            )
        ):
            min_val = get_sql_dialect_floating_point_infinity_value(
                schema=self.sql_engine_dialect.name.lower(), negative=True
            )
        # Translate a +infinity sentinel for the lower bound.
        if (
            min_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=False
            )
        ) or (
            min_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=False
            )
        ):
            min_val = get_sql_dialect_floating_point_infinity_value(
                schema=self.sql_engine_dialect.name.lower(), negative=False
            )
        # Translate a -infinity sentinel for the upper bound.
        if (
            max_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=True
            )
        ) or (
            max_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=True
            )
        ):
            max_val = get_sql_dialect_floating_point_infinity_value(
                schema=self.sql_engine_dialect.name.lower(), negative=True
            )
        # Translate a +infinity sentinel for the upper bound.
        if (
            max_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_np", negative=False
            )
        ) or (
            max_val
            == get_sql_dialect_floating_point_infinity_value(
                schema="api_cast", negative=False
            )
        ):
            max_val = get_sql_dialect_floating_point_infinity_value(
                schema=self.sql_engine_dialect.name.lower(), negative=False
            )
        min_condition = None
        max_condition = None
        if min_val is not None:
            if strict_min:
                min_condition = sa.column(column) > min_val
            else:
                min_condition = sa.column(column) >= min_val
        if max_val is not None:
            if strict_max:
                max_condition = sa.column(column) < max_val
            else:
                max_condition = sa.column(column) <= max_val
        if min_condition is not None and max_condition is not None:
            condition = sa.and_(min_condition, max_condition)
        elif min_condition is not None:
            condition = min_condition
        else:
            condition = max_condition
        # Nulls are excluded in addition to the range condition.
        query = (
            sa.select([sa.func.count((sa.column(column)))])
            .where(sa.and_(sa.column(column) != None, condition))
            .select_from(self._table)
        )
        return self.engine.execute(query).scalar()
    def create_temporary_table(self, table_name, custom_sql, schema_name=None):
        """
        Create Temporary table based on sql query. This will be used as a basis for executing expectations.
        WARNING: this feature is new in v0.4.
        It hasn't been tested in all SQL dialects, and may change based on community feedback.
        :param custom_sql: the SELECT statement whose result populates the temporary table
        """
        ###
        # NOTE: 20200310 - The update to support snowflake transient table creation revealed several
        # important cases that are not fully handled.
        # The snowflake-related change updated behavior to allow both custom_sql and schema to be specified. But
        # the underlying incomplete handling of schema remains.
        #
        # Several cases we need to consider:
        #
        # 1. Distributed backends (e.g. Snowflake and BigQuery) often use a `<database>.<schema>.<table>`
        # syntax, but currently we are biased towards only allowing schema.table
        #
        # 2. In the wild, we see people using several ways to declare the schema they want to use:
        # a. In the connection string, the original RFC only specifies database, but schema is supported by some
        # backends (Snowflake) as a query parameter.
        # b. As a default for a user (the equivalent of USE SCHEMA being provided at the beginning of a session)
        # c. As part of individual queries.
        #
        # 3. We currently don't make it possible to select from a table in one query, but create a temporary table in
        # another schema, except for with BigQuery and (now) snowflake, where you can specify the table name (and
        # potentially triple of database, schema, table) in the batch_kwargs.
        #
        # The SqlAlchemyDataset interface essentially predates the batch_kwargs concept and so part of what's going
        # on, I think, is a mismatch between those. I think we should rename custom_sql -> "temp_table_query" or
        # similar, for example.
        ###
        if self.sql_engine_dialect.name.lower() == "bigquery":
            stmt = "CREATE OR REPLACE TABLE `{table_name}` AS {custom_sql}".format(
                table_name=table_name, custom_sql=custom_sql
            )
        elif self.sql_engine_dialect.name.lower() == "snowflake":
            logger.info("Creating transient table %s" % table_name)
            if schema_name is not None:
                table_name = schema_name + "." + table_name
            stmt = "CREATE OR REPLACE TRANSIENT TABLE {table_name} AS {custom_sql}".format(
                table_name=table_name, custom_sql=custom_sql
            )
        elif self.sql_engine_dialect.name == "mysql":
            # Note: We can keep the "MySQL" clause separate for clarity, even though it is the same as the generic case.
            stmt = "CREATE TEMPORARY TABLE {table_name} AS {custom_sql}".format(
                table_name=table_name, custom_sql=custom_sql
            )
        elif self.sql_engine_dialect.name == "mssql":
            # Insert "into #{table_name}" in the custom sql query right before the "from" clause
            # Split is case sensitive so detect case.
            # Note: transforming custom_sql to uppercase/lowercase has unintended consequences (i.e., changing column names), so this is not an option!
            if "from" in custom_sql:
                strsep = "from"
            else:
                strsep = "FROM"
            custom_sqlmod = custom_sql.split(strsep, maxsplit=1)
            stmt = (
                custom_sqlmod[0] + "into {table_name} from" + custom_sqlmod[1]
            ).format(table_name=table_name)
        else:
            stmt = 'CREATE TEMPORARY TABLE "{table_name}" AS {custom_sql}'.format(
                table_name=table_name, custom_sql=custom_sql
            )
        self.engine.execute(stmt)
    def column_reflection_fallback(self):
        """If we can't reflect the table, use a query to at least get column names."""
        col_info_dict_list: List[Dict]
        if self.sql_engine_dialect.name.lower() == "mssql":
            type_module = self._get_dialect_type_module()
            # Get column names and types from the database
            # StackOverflow to the rescue: https://stackoverflow.com/a/38634368
            col_info_query: TextClause = sa.text(
                f"""
SELECT
    cols.NAME, ty.NAME
FROM
    tempdb.sys.columns AS cols
JOIN
    sys.types AS ty
ON
    cols.user_type_id = ty.user_type_id
WHERE
    object_id = OBJECT_ID('tempdb..{self._table}')
                """
            )
            col_info_tuples_list = self.engine.execute(col_info_query).fetchall()
            # Map each SQL type name to the dialect's type class (instantiated).
            col_info_dict_list = [
                {"name": col_name, "type": getattr(type_module, col_type.upper())()}
                for col_name, col_type in col_info_tuples_list
            ]
        else:
            # Generic fallback: select a single row and read the result's keys.
            query: Select = sa.select([sa.text("*")]).select_from(self._table).limit(1)
            col_names: list = self.engine.execute(query).keys()
            col_info_dict_list = [{"name": col_name} for col_name in col_names]
        return col_info_dict_list
###
###
###
#
# Table Expectation Implementations
#
###
###
###
# noinspection PyUnusedLocal
@DocInherit
@MetaSqlAlchemyDataset.expectation(["other_table_name"])
def expect_table_row_count_to_equal_other_table(
self,
other_table_name,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
"""Expect the number of rows in this table to equal the number of rows in a different table.
expect_table_row_count_to_equal is a :func:`expectation \
<great_expectations.data_asset.data_asset.DataAsset.expectation>`, not a
``column_map_expectation`` or ``column_aggregate_expectation``.
Args:
other_table_name (str): \
The name of the other table to which to compare.
Other Parameters:
result_format (string or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
expect_table_row_count_to_be_between
"""
row_count = self.get_row_count()
other_table_row_count = self.get_row_count(table_name=other_table_name)
return {
"success": row_count == other_table_row_count,
"result": {
"observed_value": {"self": row_count, "other": other_table_row_count,}
},
}
###
###
###
#
# Column Map Expectation Implementations
#
###
###
###
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_be_null(
self,
column,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return sa.column(column) == None
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_be_null(
self,
column,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return sa.column(column) != None
    def _get_dialect_type_module(self):
        """Return the module that holds column type classes for the active dialect.

        Falls back to top-level sqlalchemy types when no dialect module is
        available; special-cases Redshift and (patched) BigQuery.
        """
        if self.dialect is None:
            logger.warning(
                "No sqlalchemy dialect found; relying in top-level sqlalchemy types."
            )
            return sa
        try:
            # Redshift does not (yet) export types to top level; only recognize base SA types
            if isinstance(
                self.sql_engine_dialect, sqlalchemy_redshift.dialect.RedshiftDialect
            ):
                return self.dialect.sa
        except (TypeError, AttributeError):
            # TypeError/AttributeError occur when the optional driver is not installed.
            pass
        # Bigquery works with newer versions, but use a patch if we had to define bigquery_types_tuple
        try:
            if (
                isinstance(
                    self.sql_engine_dialect,
                    pybigquery.sqlalchemy_bigquery.BigQueryDialect,
                )
                and bigquery_types_tuple is not None
            ):
                return bigquery_types_tuple
        except (TypeError, AttributeError):
            pass
        return self.dialect
    @DocInherit
    @DataAsset.expectation(["column", "type_", "mostly"])
    def expect_column_values_to_be_of_type(
        self,
        column,
        type_,
        mostly=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Expect the column's reflected database type to be (a subclass of) *type_*.

        *type_* is resolved by name against the dialect's type module; a None
        type_ is vacuously true. *mostly* is unsupported for SQL type checks.
        """
        if mostly is not None:
            raise ValueError(
                "SqlAlchemyDataset does not support column map semantics for column types"
            )
        try:
            col_data = [col for col in self.columns if col["name"] == column][0]
            col_type = type(col_data["type"])
        except IndexError:
            raise ValueError("Unrecognized column: %s" % column)
        except KeyError:
            raise ValueError("No database type data available for column: %s" % column)
        try:
            # Our goal is to be as explicit as possible. We will match the dialect
            # if that is possible. If there is no dialect available, we *will*
            # match against a top-level SqlAlchemy type if that's possible.
            #
            # This is intended to be a conservative approach.
            #
            # In particular, we *exclude* types that would be valid under an ORM
            # such as "float" for postgresql with this approach
            if type_ is None:
                # vacuously true
                success = True
            else:
                type_module = self._get_dialect_type_module()
                success = issubclass(col_type, getattr(type_module, type_))
            return {"success": success, "result": {"observed_value": col_type.__name__}}
        except AttributeError:
            # getattr failed: the dialect module has no type with this name.
            raise ValueError("Type not recognized by current driver: %s" % type_)
    @DocInherit
    @DataAsset.expectation(["column", "type_", "mostly"])
    def expect_column_values_to_be_in_type_list(
        self,
        column,
        type_list,
        mostly=None,
        result_format=None,
        include_config=True,
        catch_exceptions=None,
        meta=None,
    ):
        """Expect the column's reflected database type to match one of *type_list*.

        Type names that the current dialect does not recognize are skipped with
        a debug log; a None type_list is vacuously true. *mostly* is
        unsupported for SQL type checks.
        """
        if mostly is not None:
            raise ValueError(
                "SqlAlchemyDataset does not support column map semantics for column types"
            )
        try:
            col_data = [col for col in self.columns if col["name"] == column][0]
            col_type = type(col_data["type"])
        except IndexError:
            raise ValueError("Unrecognized column: %s" % column)
        except KeyError:
            raise ValueError("No database type data available for column: %s" % column)
        # Our goal is to be as explicit as possible. We will match the dialect
        # if that is possible. If there is no dialect available, we *will*
        # match against a top-level SqlAlchemy type.
        #
        # This is intended to be a conservative approach.
        #
        # In particular, we *exclude* types that would be valid under an ORM
        # such as "float" for postgresql with this approach
        if type_list is None:
            success = True
        else:
            types = []
            type_module = self._get_dialect_type_module()
            for type_ in type_list:
                try:
                    type_class = getattr(type_module, type_)
                    types.append(type_class)
                except AttributeError:
                    logger.debug("Unrecognized type: %s" % type_)
            if len(types) == 0:
                logger.warning(
                    "No recognized sqlalchemy types in type_list for current dialect."
                )
            # issubclass against an empty tuple is False, so an all-unrecognized
            # type_list yields failure rather than an exception.
            types = tuple(types)
            success = issubclass(col_type, types)
        return {"success": success, "result": {"observed_value": col_type.__name__}}
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_be_in_set(
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if value_set is None:
# vacuously true
return True
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
return sa.column(column).in_(tuple(parsed_value_set))
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_be_in_set(
self,
column,
value_set,
mostly=None,
parse_strings_as_datetimes=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if parse_strings_as_datetimes:
parsed_value_set = self._parse_value_set(value_set)
else:
parsed_value_set = value_set
return sa.column(column).notin_(tuple(parsed_value_set))
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_be_between(
self,
column,
min_value=None,
max_value=None,
strict_min=False,
strict_max=False,
allow_cross_type_comparisons=None,
parse_strings_as_datetimes=None,
output_strftime_format=None,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if parse_strings_as_datetimes:
if min_value:
min_value = parse(min_value)
if max_value:
max_value = parse(max_value)
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
if min_value is None:
if strict_max:
return sa.column(column) < max_value
else:
return sa.column(column) <= max_value
elif max_value is None:
if strict_min:
return min_value < sa.column(column)
else:
return min_value <= sa.column(column)
else:
if strict_min and strict_max:
return sa.and_(
min_value < sa.column(column), sa.column(column) < max_value
)
elif strict_min:
return sa.and_(
min_value < sa.column(column), sa.column(column) <= max_value
)
elif strict_max:
return sa.and_(
min_value <= sa.column(column), sa.column(column) < max_value
)
else:
return sa.and_(
min_value <= sa.column(column), sa.column(column) <= max_value
)
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_value_lengths_to_equal(
self,
column,
value,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
return sa.func.length(sa.column(column)) == value
@DocInherit
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_value_lengths_to_be_between(
self,
column,
min_value=None,
max_value=None,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
# Assert that min_value and max_value are integers
try:
if min_value is not None and not float(min_value).is_integer():
raise ValueError("min_value and max_value must be integers")
if max_value is not None and not float(max_value).is_integer():
raise ValueError("min_value and max_value must be integers")
except ValueError:
raise ValueError("min_value and max_value must be integers")
if min_value is not None and max_value is not None:
return sa.and_(
sa.func.length(sa.column(column)) >= min_value,
sa.func.length(sa.column(column)) <= max_value,
)
elif min_value is None and max_value is not None:
return sa.func.length(sa.column(column)) <= max_value
elif min_value is not None and max_value is None:
return sa.func.length(sa.column(column)) >= min_value
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_be_unique(
self,
column,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
# Duplicates are found by filtering a group by query
dup_query = (
sa.select([sa.column(column)])
.select_from(self._table)
.group_by(sa.column(column))
.having(sa.func.count(sa.column(column)) > 1)
)
return sa.column(column).notin_(dup_query)
    def _get_dialect_regex_expression(self, column, regex, positive=True):
        """Return a dialect-specific SQL expression matching *column* against *regex*.

        When *positive* is False the expression is negated. Returns None when
        the active dialect has no known regex operator. Each dialect probe is
        wrapped in try/except because the optional driver module may be absent
        (AttributeError) or set to None (TypeError).
        """
        try:
            # postgres
            if isinstance(self.sql_engine_dialect, sa.dialects.postgresql.dialect):
                if positive:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("~")
                    )
                else:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("!~")
                    )
        except AttributeError:
            pass
        try:
            # redshift
            if isinstance(
                self.sql_engine_dialect, sqlalchemy_redshift.dialect.RedshiftDialect
            ):
                if positive:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("~")
                    )
                else:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("!~")
                    )
        except (
            AttributeError,
            TypeError,
        ):  # TypeError can occur if the driver was not installed and so is None
            pass
        try:
            # MySQL
            if isinstance(self.sql_engine_dialect, sa.dialects.mysql.dialect):
                if positive:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("REGEXP")
                    )
                else:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("NOT REGEXP")
                    )
        except AttributeError:
            pass
        try:
            # Snowflake
            if isinstance(
                self.sql_engine_dialect,
                snowflake.sqlalchemy.snowdialect.SnowflakeDialect,
            ):
                if positive:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("RLIKE")
                    )
                else:
                    return BinaryExpression(
                        sa.column(column), literal(regex), custom_op("NOT RLIKE")
                    )
        except (
            AttributeError,
            TypeError,
        ):  # TypeError can occur if the driver was not installed and so is None
            pass
        try:
            # Bigquery
            if isinstance(
                self.sql_engine_dialect, pybigquery.sqlalchemy_bigquery.BigQueryDialect
            ):
                if positive:
                    return sa.func.REGEXP_CONTAINS(sa.column(column), literal(regex))
                else:
                    return sa.not_(
                        sa.func.REGEXP_CONTAINS(sa.column(column), literal(regex))
                    )
        except (
            AttributeError,
            TypeError,
        ):  # TypeError can occur if the driver was not installed and so is None
            pass
        # No supported dialect matched: caller must handle None.
        return None
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_regex(
self,
column,
regex,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
regex_expression = self._get_dialect_regex_expression(column, regex)
if regex_expression is None:
logger.warning(
"Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
)
raise NotImplementedError
return regex_expression
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_regex(
self,
column,
regex,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
regex_expression = self._get_dialect_regex_expression(
column, regex, positive=False
)
if regex_expression is None:
logger.warning(
"Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
)
raise NotImplementedError
return regex_expression
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_regex_list(
self,
column,
regex_list,
match_on="any",
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if match_on not in ["any", "all"]:
raise ValueError("match_on must be any or all")
if len(regex_list) == 0:
raise ValueError("At least one regex must be supplied in the regex_list.")
regex_expression = self._get_dialect_regex_expression(column, regex_list[0])
if regex_expression is None:
logger.warning(
"Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
)
raise NotImplementedError
if match_on == "any":
condition = sa.or_(
*[
self._get_dialect_regex_expression(column, regex)
for regex in regex_list
]
)
else:
condition = sa.and_(
*[
self._get_dialect_regex_expression(column, regex)
for regex in regex_list
]
)
return condition
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_regex_list(
self,
column,
regex_list,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if len(regex_list) == 0:
raise ValueError("At least one regex must be supplied in the regex_list.")
regex_expression = self._get_dialect_regex_expression(
column, regex_list[0], positive=False
)
if regex_expression is None:
logger.warning(
"Regex is not supported for dialect %s" % str(self.sql_engine_dialect)
)
raise NotImplementedError
return sa.and_(
*[
self._get_dialect_regex_expression(column, regex, positive=False)
for regex in regex_list
]
)
    def _get_dialect_like_pattern_expression(self, column, like_pattern, positive=True):
        """Return a SQL LIKE (or NOT LIKE) expression for *column*, or None.

        Returns None when the active dialect is not one of the known
        LIKE-supporting dialects. When *positive* is False the expression is
        negated.
        """
        dialect_supported: bool = False
        try:
            # Bigquery
            if isinstance(
                self.sql_engine_dialect, pybigquery.sqlalchemy_bigquery.BigQueryDialect
            ):
                dialect_supported = True
        except (
            AttributeError,
            TypeError,
        ):  # TypeError can occur if the driver was not installed and so is None
            pass
        if isinstance(
            self.sql_engine_dialect,
            (
                sa.dialects.sqlite.dialect,
                sa.dialects.postgresql.dialect,
                sqlalchemy_redshift.dialect.RedshiftDialect,
                sa.dialects.mysql.dialect,
                sa.dialects.mssql.dialect,
            ),
        ):
            dialect_supported = True
        if dialect_supported:
            try:
                if positive:
                    return sa.column(column).like(literal(like_pattern))
                else:
                    return sa.not_(sa.column(column).like(literal(like_pattern)))
            except AttributeError:
                pass
        return None
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_like_pattern(
self,
column,
like_pattern,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
like_pattern_expression = self._get_dialect_like_pattern_expression(
column, like_pattern
)
if like_pattern_expression is None:
logger.warning(
"Like patterns are not supported for dialect %s"
% str(self.sql_engine_dialect)
)
raise NotImplementedError
return like_pattern_expression
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_like_pattern(
self,
column,
like_pattern,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
like_pattern_expression = self._get_dialect_like_pattern_expression(
column, like_pattern, positive=False
)
if like_pattern_expression is None:
logger.warning(
"Like patterns are not supported for dialect %s"
% str(self.sql_engine_dialect)
)
raise NotImplementedError
return like_pattern_expression
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_match_like_pattern_list(
self,
column,
like_pattern_list,
match_on="any",
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if match_on not in ["any", "all"]:
raise ValueError("match_on must be any or all")
if len(like_pattern_list) == 0:
raise ValueError(
"At least one like_pattern must be supplied in the like_pattern_list."
)
like_pattern_expression = self._get_dialect_like_pattern_expression(
column, like_pattern_list[0]
)
if like_pattern_expression is None:
logger.warning(
"Like patterns are not supported for dialect %s"
% str(self.sql_engine_dialect)
)
raise NotImplementedError
if match_on == "any":
condition = sa.or_(
*[
self._get_dialect_like_pattern_expression(column, like_pattern)
for like_pattern in like_pattern_list
]
)
else:
condition = sa.and_(
*[
self._get_dialect_like_pattern_expression(column, like_pattern)
for like_pattern in like_pattern_list
]
)
return condition
@MetaSqlAlchemyDataset.column_map_expectation
def expect_column_values_to_not_match_like_pattern_list(
self,
column,
like_pattern_list,
mostly=None,
result_format=None,
include_config=True,
catch_exceptions=None,
meta=None,
):
if len(like_pattern_list) == 0:
raise ValueError(
"At least one like_pattern must be supplied in the like_pattern_list."
)
like_pattern_expression = self._get_dialect_like_pattern_expression(
column, like_pattern_list[0], positive=False
)
if like_pattern_expression is None:
logger.warning(
"Like patterns are not supported for dialect %s"
% str(self.sql_engine_dialect)
)
raise NotImplementedError
return sa.and_(
*[
self._get_dialect_like_pattern_expression(
column, like_pattern, positive=False
)
for like_pattern in like_pattern_list
]
)
| 37.654437
| 159
| 0.574767
|
acfddc67b8ea5550cfcef6875682da2f0ef63d49
| 960
|
py
|
Python
|
models/braintumor/brainapp.py
|
kushal-h/Medical-AI
|
de100f6ce3b783086d57dfb0ceab3fa28544df53
|
[
"MIT"
] | 11
|
2020-11-08T11:06:16.000Z
|
2022-03-14T18:09:55.000Z
|
models/braintumor/brainapp.py
|
kushal-h/Medical-AI
|
de100f6ce3b783086d57dfb0ceab3fa28544df53
|
[
"MIT"
] | null | null | null |
models/braintumor/brainapp.py
|
kushal-h/Medical-AI
|
de100f6ce3b783086d57dfb0ceab3fa28544df53
|
[
"MIT"
] | 14
|
2020-10-26T18:10:16.000Z
|
2021-08-05T17:06:22.000Z
|
import os
from flask import Flask, request, render_template,url_for,Blueprint
from flask_cors import CORS, cross_origin
import shutil
import models.braintumor.src.predict as predict
import base64
import numpy as np
from io import BytesIO
# Brain-tumor routes are exposed as a Blueprint so a parent app can mount
# them; the commented lines show the earlier standalone-Flask/CORS setup.
#brainapp = Flask(__name__)
brainapp=Blueprint("brainapp",__name__,template_folder="templates",static_folder="static")
#CORS(brainapp)
# Static upload location (not referenced by the visible routes -- TODO confirm).
upload_folder="./models/braintumor/static"
@brainapp.route("/", methods=["GET","POST"])
def index():
    """Serve the brain-tumor page; on POST, classify an uploaded image.

    Renders btindex.html with ``image_loc`` set to the data URI produced by
    the classifier, or ``None`` when no image has been submitted.
    """
    if request.method == "POST":
        image_file = request.files["file"]
        if image_file:
            # np.frombuffer replaces the deprecated np.fromstring for
            # decoding the raw upload bytes into a uint8 array.
            npimg = np.frombuffer(image_file.read(), np.uint8)
            classifier = predict.predict_img(npimg)
            uri = classifier.predict_image()
            return render_template('/btindex.html',image_loc=uri)
    return render_template('/btindex.html',image_loc=None)
# if __name__ == '__main__':
# brainapp.run(debug=True,port=8000)
| 29.090909
| 90
| 0.696875
|
acfddc98c31b35fbb20c80c7222db175b8611747
| 628
|
py
|
Python
|
advancedbot/components/__init__.py
|
sdallaboratory/advanced-telegram-bot
|
7bf107b448cdd0e5d7f1cf85726b06c677ed922d
|
[
"MIT"
] | 3
|
2020-08-28T12:35:55.000Z
|
2020-10-29T12:26:49.000Z
|
advancedbot/components/__init__.py
|
sdallaboratory/advanced-telegram-bot
|
7bf107b448cdd0e5d7f1cf85726b06c677ed922d
|
[
"MIT"
] | null | null | null |
advancedbot/components/__init__.py
|
sdallaboratory/advanced-telegram-bot
|
7bf107b448cdd0e5d7f1cf85726b06c677ed922d
|
[
"MIT"
] | 2
|
2021-11-13T15:03:35.000Z
|
2022-01-10T13:54:53.000Z
|
from .role_managing.roleauth import RoleAuth
from .state_managing.statemanager import StateManager
from .user_meta.usermetastorage import UserMetaStorage
from .locales.localemanager import LocaleManager
from .logs.botlogger import BotLogger
from .storage_managing.storage import Storage
from .storage_managing.localjsonstorage import LocalJSONStorage
from .storage_managing.mongodbstorage import MongoDBStorage
from .routing.router import Router
from .routing.routes import *
from .models import User, DocumentLink
from .messaging.messagesender import MessageSender
from .exceptions.telegramboterror import TelegramBotError
| 34.888889
| 63
| 0.866242
|
acfddcd92b769330ea84762b5768210f93ee19d3
| 13,378
|
py
|
Python
|
docs/conf.py
|
kmatt/toyplot
|
d6784ab176c93aebf9b12831ced8f435bdcfeab1
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
kmatt/toyplot
|
d6784ab176c93aebf9b12831ced8f435bdcfeab1
|
[
"BSD-3-Clause"
] | null | null | null |
docs/conf.py
|
kmatt/toyplot
|
d6784ab176c93aebf9b12831ced8f435bdcfeab1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
# -*- coding: utf-8 -*-
#
# toyplot documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 18 18:22:53 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Provide stubs for external dependencies, so we can generate our reference
# documentation without having to install them.
class module_proxy(object):
    """Stand-in for an unavailable external module.

    Attribute access never fails: ``__file__``/``__path__`` resolve to
    "/dev/null", Capitalized names produce throw-away classes (so they can
    serve as base classes), and anything else yields a fresh proxy.
    Instances are callable and accept arbitrary arguments.
    """

    __all__ = []

    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor arguments.
        pass

    def __call__(self, *args, **kwargs):
        # Calling a proxy just produces another proxy.
        return module_proxy()

    @classmethod
    def __getattr__(cls, name):
        if name in ("__file__", "__path__"):
            return "/dev/null"
        # Capitalized attributes are assumed to be classes: fabricate an
        # empty type with the requested name.
        if name[0] == name[0].upper():
            stub = type(name, (), {})
            stub.__module__ = __name__
            return stub
        return module_proxy()
# Register a proxy for each numeric dependency so the docs build can import
# the package without numpy actually being installed.
for module_name in [
    "numpy",
    "numpy.linalg",
    "numpy.ma",
    "numpy.testing",
]:
    sys.modules[module_name] = module_proxy()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath(".."))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
"sphinxcontrib.napoleon",
]
napoleon_use_param = False
# Complain about all cross reference targets that can't be found.
nitpicky = True
nitpick_ignore = [
("py:class", "QApplication"),
]
intersphinx_mapping = {
"arrow": ("http://arrow.readthedocs.io/en/latest", "arrow.inv"),
"numpy": ("http://docs.scipy.org/doc/numpy-1.13.0", "numpy.inv"),
"pandas": ("http://pandas-docs.github.io/pandas-docs-travis", "pandas.inv"),
"python": ("http://docs.python.org/3.6", "python.inv"),
"PIL": ("http://pillow.readthedocs.io/en/3.2.x", "pillow.inv"),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Toyplot'
copyright = u"""2014, Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
rights in this software"""
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import toyplot
version = toyplot.__version__
# The full version, including alpha/beta/rc tags.
release = toyplot.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.io, this line of code grabbed
# from docs.readthedocs.io
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
else:
html_context = {
'css_files': [
'https://media.readthedocs.io/css/sphinx_rtd_theme.css',
'https://media.readthedocs.io/css/readthedocs-doc-embed.css',
'_static/toyplot.css',
],
}
# otherwise, readthedocs.io uses their theme by default, so no need to
# specify it
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['css']
html_style = "toyplot.css"
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'toyplotdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'toyplot.tex', u'Toyplot Documentation',
u'Sandia National Laboratories', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "../artwork/toyplot.png"
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'toyplot', u'Toyplot Documentation',
[u'Sandia National Laboratories'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index',
'toyplot',
u'Toyplot Documentation',
u'Sandia National Laboratories',
'toyplot',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'toyplot'
epub_author = u'Sandia National Laboratories'
epub_publisher = u'Sandia National Laboratories'
epub_copyright = u'Copyright 2014 Sandia Corporation. Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain rights in this software.'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'toyplot'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# set up the types of member to check that are documented
def warn_undocumented_members(app, what, name, obj, options, lines):
    """Flag autodoc members that have no docstring.

    Prints a console warning and appends a visible ``.. Warning::``
    directive to the rendered docstring so undocumented objects stand out
    in the built documentation.
    """
    # The original guard `what not in []` was vacuously true, so the only
    # effective condition is an empty docstring body.
    if len(lines) == 0:
        print("WARNING: %s is undocumented: %s" % (what, name))
        lines.append(".. Warning:: %s '%s' undocumented" % (what, name))
def setup(app):
    """Sphinx extension hook: install the undocumented-member checker."""
    # Stray trailing semicolon removed; behavior unchanged.
    app.connect('autodoc-process-docstring', warn_undocumented_members)
| 31.551887
| 186
| 0.70459
|
acfddd02a20f5a928aa653dc83a6751041dbf037
| 25,375
|
py
|
Python
|
intersight/model/workflow_error_response_handler.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 5
|
2021-12-16T15:13:32.000Z
|
2022-03-29T16:09:54.000Z
|
intersight/model/workflow_error_response_handler.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 4
|
2022-01-25T19:05:51.000Z
|
2022-03-29T20:18:37.000Z
|
intersight/model/workflow_error_response_handler.py
|
CiscoDevNet/intersight-python
|
04b721f37c3044646a91c185c7259edfb991557a
|
[
"Apache-2.0"
] | 2
|
2020-07-07T15:01:08.000Z
|
2022-01-31T04:27:35.000Z
|
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
    """Import model classes on first use and publish them in ``globals()``.

    Deferred imports avoid circular-import problems between the generated
    model modules.
    """
    from intersight.model.content_complex_type import ContentComplexType
    from intersight.model.content_parameter import ContentParameter
    from intersight.model.display_names import DisplayNames
    from intersight.model.mo_base_mo import MoBaseMo
    from intersight.model.mo_base_mo_relationship import MoBaseMoRelationship
    from intersight.model.mo_tag import MoTag
    from intersight.model.mo_version_context import MoVersionContext
    from intersight.model.workflow_catalog_relationship import WorkflowCatalogRelationship
    from intersight.model.workflow_error_response_handler_all_of import WorkflowErrorResponseHandlerAllOf

    # Publish every imported class at module scope in one shot.
    globals().update(
        ContentComplexType=ContentComplexType,
        ContentParameter=ContentParameter,
        DisplayNames=DisplayNames,
        MoBaseMo=MoBaseMo,
        MoBaseMoRelationship=MoBaseMoRelationship,
        MoTag=MoTag,
        MoVersionContext=MoVersionContext,
        WorkflowCatalogRelationship=WorkflowCatalogRelationship,
        WorkflowErrorResponseHandlerAllOf=WorkflowErrorResponseHandlerAllOf,
    )
class WorkflowErrorResponseHandler(ModelComposed):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
('class_id',): {
'WORKFLOW.ERRORRESPONSEHANDLER': "workflow.ErrorResponseHandler",
},
('object_type',): {
'WORKFLOW.ERRORRESPONSEHANDLER': "workflow.ErrorResponseHandler",
},
('platform_type',): {
'EMPTY': "",
'APIC': "APIC",
'DCNM': "DCNM",
'UCSFI': "UCSFI",
'UCSFIISM': "UCSFIISM",
'IMC': "IMC",
'IMCM4': "IMCM4",
'IMCM5': "IMCM5",
'IMCRACK': "IMCRack",
'UCSIOM': "UCSIOM",
'HX': "HX",
'HYPERFLEXAP': "HyperFlexAP",
'IWE': "IWE",
'UCSD': "UCSD",
'INTERSIGHTAPPLIANCE': "IntersightAppliance",
'INTERSIGHTASSIST': "IntersightAssist",
'PURESTORAGEFLASHARRAY': "PureStorageFlashArray",
'NEXUSDEVICE': "NexusDevice",
'UCSC890': "UCSC890",
'NETAPPONTAP': "NetAppOntap",
'NETAPPACTIVEIQUNIFIEDMANAGER': "NetAppActiveIqUnifiedManager",
'EMCSCALEIO': "EmcScaleIo",
'EMCVMAX': "EmcVmax",
'EMCVPLEX': "EmcVplex",
'EMCXTREMIO': "EmcXtremIo",
'VMWAREVCENTER': "VmwareVcenter",
'MICROSOFTHYPERV': "MicrosoftHyperV",
'APPDYNAMICS': "AppDynamics",
'DYNATRACE': "Dynatrace",
'NEWRELIC': "NewRelic",
'SERVICENOW': "ServiceNow",
'READHATOPENSTACK': "ReadHatOpenStack",
'CLOUDFOUNDRY': "CloudFoundry",
'MICROSOFTAZUREAPPLICATIONINSIGHTS': "MicrosoftAzureApplicationInsights",
'OPENSTACK': "OpenStack",
'MICROSOFTSQLSERVER': "MicrosoftSqlServer",
'KUBERNETES': "Kubernetes",
'AMAZONWEBSERVICE': "AmazonWebService",
'AMAZONWEBSERVICEBILLING': "AmazonWebServiceBilling",
'MICROSOFTAZURESERVICEPRINCIPAL': "MicrosoftAzureServicePrincipal",
'MICROSOFTAZUREENTERPRISEAGREEMENT': "MicrosoftAzureEnterpriseAgreement",
'DELLCOMPELLENT': "DellCompellent",
'HPE3PAR': "HPE3Par",
'REDHATENTERPRISEVIRTUALIZATION': "RedHatEnterpriseVirtualization",
'NUTANIXACROPOLIS': "NutanixAcropolis",
'HPEONEVIEW': "HPEOneView",
'SERVICEENGINE': "ServiceEngine",
'HITACHIVIRTUALSTORAGEPLATFORM': "HitachiVirtualStoragePlatform",
'IMCBLADE': "IMCBlade",
'TERRAFORMCLOUD': "TerraformCloud",
'TERRAFORMAGENT': "TerraformAgent",
'CUSTOMTARGET': "CustomTarget",
'ANSIBLEENDPOINT': "AnsibleEndpoint",
'HTTPENDPOINT': "HTTPEndpoint",
'SSHENDPOINT': "SSHEndpoint",
'CISCOCATALYST': "CiscoCatalyst",
'POWERSHELLENDPOINT': "PowerShellEndpoint",
},
}
validations = {
('name',): {
'regex': {
'pattern': r'^[a-zA-Z0-9_.:-]{1,64}$', # noqa: E501
},
},
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'class_id': (str,), # noqa: E501
'object_type': (str,), # noqa: E501
'description': (str,), # noqa: E501
'name': (str,), # noqa: E501
'parameters': ([ContentParameter], none_type,), # noqa: E501
'platform_type': (str,), # noqa: E501
'types': ([ContentComplexType], none_type,), # noqa: E501
'catalog': (WorkflowCatalogRelationship,), # noqa: E501
'account_moid': (str,), # noqa: E501
'create_time': (datetime,), # noqa: E501
'domain_group_moid': (str,), # noqa: E501
'mod_time': (datetime,), # noqa: E501
'moid': (str,), # noqa: E501
'owners': ([str], none_type,), # noqa: E501
'shared_scope': (str,), # noqa: E501
'tags': ([MoTag], none_type,), # noqa: E501
'version_context': (MoVersionContext,), # noqa: E501
'ancestors': ([MoBaseMoRelationship], none_type,), # noqa: E501
'parent': (MoBaseMoRelationship,), # noqa: E501
'permission_resources': ([MoBaseMoRelationship], none_type,), # noqa: E501
'display_names': (DisplayNames,), # noqa: E501
}
@cached_property
def discriminator():
val = {
}
if not val:
return None
return {'class_id': val}
attribute_map = {
'class_id': 'ClassId', # noqa: E501
'object_type': 'ObjectType', # noqa: E501
'description': 'Description', # noqa: E501
'name': 'Name', # noqa: E501
'parameters': 'Parameters', # noqa: E501
'platform_type': 'PlatformType', # noqa: E501
'types': 'Types', # noqa: E501
'catalog': 'Catalog', # noqa: E501
'account_moid': 'AccountMoid', # noqa: E501
'create_time': 'CreateTime', # noqa: E501
'domain_group_moid': 'DomainGroupMoid', # noqa: E501
'mod_time': 'ModTime', # noqa: E501
'moid': 'Moid', # noqa: E501
'owners': 'Owners', # noqa: E501
'shared_scope': 'SharedScope', # noqa: E501
'tags': 'Tags', # noqa: E501
'version_context': 'VersionContext', # noqa: E501
'ancestors': 'Ancestors', # noqa: E501
'parent': 'Parent', # noqa: E501
'permission_resources': 'PermissionResources', # noqa: E501
'display_names': 'DisplayNames', # noqa: E501
}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
'_composed_instances',
'_var_name_to_model_instances',
'_additional_properties_model_instances',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs):  # noqa: E501
    """WorkflowErrorResponseHandler - a model defined in OpenAPI.

    Only keyword arguments are accepted. Besides the API properties
    (description, name, parameters, platform_type, types, catalog, and the
    inherited MoBaseMo properties), the recognised control arguments are
    _check_type, _spec_property_naming, _path_to_item, _configuration and
    _visited_composed_classes.

    Keyword Args:
        class_id (str): discriminator value; defaults to
            "workflow.ErrorResponseHandler".
        object_type (str): must mirror class_id; defaults to
            "workflow.ErrorResponseHandler".
    """
    # The two discriminator fields keep their defaults unless overridden;
    # they intentionally stay inside ``kwargs`` (``get``, not ``pop``).
    class_id = kwargs.get('class_id', "workflow.ErrorResponseHandler")
    object_type = kwargs.get('object_type', "workflow.ErrorResponseHandler")

    # Strip the control/bookkeeping options so the loops below never treat
    # them as model properties.
    _check_type = kwargs.pop('_check_type', True)
    _spec_property_naming = kwargs.pop('_spec_property_naming', False)
    _path_to_item = kwargs.pop('_path_to_item', ())
    _configuration = kwargs.pop('_configuration', None)
    _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

    if args:
        raise ApiTypeError(
            "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                args,
                self.__class__.__name__,
            ),
            path_to_item=_path_to_item,
            valid_classes=(self.__class__,),
        )

    self._data_store = {}
    self._check_type = _check_type
    self._spec_property_naming = _spec_property_naming
    self._path_to_item = _path_to_item
    self._configuration = _configuration
    self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

    shared_kwargs = {
        '_check_type': _check_type,
        '_path_to_item': _path_to_item,
        '_spec_property_naming': _spec_property_naming,
        '_configuration': _configuration,
        '_visited_composed_classes': self._visited_composed_classes,
    }
    required_args = {
        'class_id': class_id,
        'object_type': object_type,
    }
    model_args = {**required_args, **kwargs}

    composed_info = validate_get_composed_info(shared_kwargs, model_args, self)
    self._composed_instances = composed_info[0]
    self._var_name_to_model_instances = composed_info[1]
    self._additional_properties_model_instances = composed_info[2]
    unused_args = composed_info[3]

    for var_name, var_value in required_args.items():
        setattr(self, var_name, var_value)

    # The "drop unknown keys" decision is loop-invariant, so compute it once.
    discard_unknown = (
        self._configuration is not None
        and self._configuration.discard_unknown_keys
        and not self._additional_properties_model_instances
    )
    for var_name, var_value in kwargs.items():
        if discard_unknown and var_name in unused_args:
            continue  # discard variable not known to the schema
        setattr(self, var_name, var_value)
@cached_property
def _composed_schemas():
    """Return the composed-schema classes (anyOf/allOf/oneOf) for this model.

    The imports are deferred via ``lazy_import()`` because the composed
    classes live in modules that have not finished loading when this module
    is first imported; computing the dict here (and caching it) avoids the
    class-level import cycle.
    """
    lazy_import()
    return {
        'anyOf': [],
        'allOf': [
            MoBaseMo,
            WorkflowErrorResponseHandlerAllOf,
        ],
        'oneOf': [],
    }
| 67.12963
| 6,023
| 0.66668
|
acfdddf906d230121acbf361d085c5d10d244cd9
| 2,766
|
py
|
Python
|
data-preprocessor/BJUT_100/preprocessor.py
|
hkbonychen/3D-Morphable-Model-training
|
fa86d7e62f3dfaf20f312d22fa5013d9328f56f8
|
[
"BSD-3-Clause"
] | 3
|
2021-10-03T19:49:04.000Z
|
2022-02-11T10:48:05.000Z
|
data-preprocessor/BJUT_100/preprocessor.py
|
hkbonychen/3D-Morphable-Model-training
|
fa86d7e62f3dfaf20f312d22fa5013d9328f56f8
|
[
"BSD-3-Clause"
] | null | null | null |
data-preprocessor/BJUT_100/preprocessor.py
|
hkbonychen/3D-Morphable-Model-training
|
fa86d7e62f3dfaf20f312d22fa5013d9328f56f8
|
[
"BSD-3-Clause"
] | 1
|
2021-12-21T01:13:24.000Z
|
2021-12-21T01:13:24.000Z
|
import csv
import sys
import os
import subprocess
import numpy as np
from pathlib import Path
def walklevel(some_dir, level=0):
    """Yield ``os.walk()`` triples for *some_dir*, descending at most *level* levels.

    Args:
        some_dir: Directory to walk; must exist.
        level: Maximum depth below *some_dir* to descend
            (0 = yield only the top directory itself).

    Yields:
        ``(root, dirs, files)`` tuples, exactly as ``os.walk()`` produces them.

    Raises:
        NotADirectoryError: If *some_dir* is not an existing directory.
            (Was previously an ``assert``, which is silently stripped when
            Python runs with ``-O``.)
    """
    some_dir = some_dir.rstrip(os.path.sep)
    if not os.path.isdir(some_dir):
        raise NotADirectoryError(some_dir)
    num_sep = some_dir.count(os.path.sep)
    for root, dirs, files in os.walk(some_dir):
        yield root, dirs, files
        # os.walk() walks top-down by default, so emptying `dirs` in place
        # prunes the tree and prevents descent below the requested level.
        num_sep_this = root.count(os.path.sep)
        if num_sep + level <= num_sep_this:
            del dirs[:]
# Project-local helper module lives outside the default path.
sys.path.insert(0, '/home/u/workspace/python-utility')
import hashtable

if __name__ == '__main__':
    # example usage:
    #   python3 preprocessor.py folder_A folder_B lsfm_input_dir
    # Merge the landmark file from folder_B into folder_A for all the
    # objects that exist in folder_A, then stage 100-point meshes for LSFM.
    lsfm_inputDir = sys.argv[3]
    directory_list_1 = list()
    directory_list_2 = list()

    # Hash table records which object names exist under folder_B.
    ht = hashtable.HashTable(10)

    # Scan the two input directories (top level only) to collect object folders.
    for r, d, f in walklevel(sys.argv[1]):
        for folder in d:
            directory_list_1.append(os.path.join(r, folder))
    for r, d, f in walklevel(sys.argv[2]):
        for folder in d:
            directory_list_2.append(os.path.join(r, folder))
            obj_name = os.path.basename(os.path.normpath(os.path.join(r, folder)))
            ht.set(obj_name, True)

    for path in directory_list_1:
        obj_name = os.path.basename(os.path.normpath(path))
        # target files are the sources to be copied
        target_obj_file = sys.argv[2] + obj_name + '/output/' + obj_name + '.obj'
        target_ply_file = sys.argv[2] + obj_name + '/output/' + obj_name + '.ply'
        target_landmark_file_1to68 = sys.argv[2] + obj_name + '/output/' + obj_name + '.obj.landmark'
        target_landmark_file_69to100 = sys.argv[1] + obj_name + '/output/' + obj_name + '.obj.landmark'
        # final files are the destinations of the files being copied
        final_obj_file = './' + obj_name + '/' + obj_name + '.obj'
        final_landmark_file = './' + obj_name + '/' + obj_name + '.obj.landmark'
        try:
            if ht.get(obj_name):
                os.system('mkdir ' + obj_name)
                os.system('cp ' + target_obj_file + ' ' + './' + obj_name)
                # BUG FIX: this second copy previously duplicated the .obj file;
                # target_ply_file was defined but never used.
                os.system('cp ' + target_ply_file + ' ' + './' + obj_name)
                os.system('cat ' + target_landmark_file_1to68 + ' ' + target_landmark_file_69to100 + ' > ' + final_landmark_file)
        except KeyError:
            print("folder" + obj_name + " is not found in " + sys.argv[2])
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            print('Unknow error in hash table search!')

        # Copy the final files into the LSFM input directory only when the
        # merged landmark file contains the full 100 annotation points.
        # NOTE(review): original dump lost indentation; this stanza is assumed
        # to run once per object inside the loop — confirm against history.
        landmark_count = 0
        with open(final_landmark_file) as final_lm_file:
            for row in final_lm_file:
                landmark_count = landmark_count + 1
        if landmark_count == 100:
            result = subprocess.call(['cp', final_obj_file, lsfm_inputDir])
            result = subprocess.call(['cp', final_landmark_file, lsfm_inputDir])
| 37.378378
| 117
| 0.691612
|
acfde0147f717f3f29f809dda562af712ce0dfed
| 243
|
py
|
Python
|
notifications/urls.py
|
jeffsimp88/twitterclone
|
696aa05da4feae15d7a0c2296a8d74be4ee32286
|
[
"MIT"
] | null | null | null |
notifications/urls.py
|
jeffsimp88/twitterclone
|
696aa05da4feae15d7a0c2296a8d74be4ee32286
|
[
"MIT"
] | null | null | null |
notifications/urls.py
|
jeffsimp88/twitterclone
|
696aa05da4feae15d7a0c2296a8d74be4ee32286
|
[
"MIT"
] | null | null | null |
from django.urls import path
from notifications import views
# URL routes for the notifications app:
#   notifications/      -> current notifications view
#   notifications/old/  -> previously seen notifications view
urlpatterns = [
    path("notifications/", views.notifications_view, name="notifications"),
    path("notifications/old/", views.old_notification_view, name="old notifications"),
]
| 30.375
| 86
| 0.765432
|
acfde0af0b894a86b7290f45b786502178567283
| 273
|
py
|
Python
|
tests/artificial/transf_Integration/trend_ConstantTrend/cycle_30/ar_/test_artificial_1024_Integration_ConstantTrend_30__100.py
|
shaido987/pyaf
|
b9afd089557bed6b90b246d3712c481ae26a1957
|
[
"BSD-3-Clause"
] | 377
|
2016-10-13T20:52:44.000Z
|
2022-03-29T18:04:14.000Z
|
tests/artificial/transf_Integration/trend_ConstantTrend/cycle_30/ar_/test_artificial_1024_Integration_ConstantTrend_30__100.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 160
|
2016-10-13T16:11:53.000Z
|
2022-03-28T04:21:34.000Z
|
tests/artificial/transf_Integration/trend_ConstantTrend/cycle_30/ar_/test_artificial_1024_Integration_ConstantTrend_30__100.py
|
ysdede/pyaf
|
b5541b8249d5a1cfdc01f27fdfd99b6580ed680b
|
[
"BSD-3-Clause"
] | 63
|
2017-03-09T14:51:18.000Z
|
2022-03-27T20:52:57.000Z
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Auto-generated benchmark case: a 1024-point daily series with a constant
# trend, a 30-period cycle, no AR component (ar_order = 0), an "Integration"
# transform, zero noise (sigma = 0.0) and 100 exogenous variables.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 30, transform = "Integration", sigma = 0.0, exog_count = 100, ar_order = 0);
| 39
| 173
| 0.739927
|
acfde165ec504124a9c5c7d52ae7e87b011e9c16
| 7,748
|
py
|
Python
|
packages/syft/src/syft/core/tensor/autograd/tensor.py
|
eelcovdw/PySyft
|
7eff8e9ad3fffe792ac85b9f38391b7ec0e51391
|
[
"Apache-1.1"
] | 1
|
2019-02-10T13:22:14.000Z
|
2019-02-10T13:22:14.000Z
|
packages/syft/src/syft/core/tensor/autograd/tensor.py
|
dylan-fan/PySyft
|
c10b0e70a4a7f06eb9e01e6b98f0ff8856d7d62c
|
[
"Apache-1.1"
] | null | null | null |
packages/syft/src/syft/core/tensor/autograd/tensor.py
|
dylan-fan/PySyft
|
c10b0e70a4a7f06eb9e01e6b98f0ff8856d7d62c
|
[
"Apache-1.1"
] | 1
|
2021-07-12T09:15:44.000Z
|
2021-07-12T09:15:44.000Z
|
# future
from __future__ import annotations
# stdlib
from typing import Any
from typing import Dict as TypeDict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Type
from typing import Union
import uuid
# third party
import numpy as np
# relative
from .. import autograd
from ....core.common.serde.recursive import RecursiveSerde
from ....lib.python.collections.collections import DefaultDict
from ....lib.python.collections.collections import SerializableCounter
from ...common.serde.serializable import bind_protobuf
from ..ancestors import AutogradTensorAncestor
from ..ancestors import PhiTensorAncestor
from ..passthrough import AcceptableSimpleType # type: ignore
from ..passthrough import PassthroughTensor # type: ignore
from ..passthrough import is_acceptable_simple_type # type: ignore
@bind_protobuf
class AutogradTensor(PassthroughTensor, PhiTensorAncestor, RecursiveSerde):
    """Tensor that records the ops applied to it, building a backward graph
    so gradients can be propagated in reverse via :meth:`backward`.
    """

    # Attributes that participate in recursive (de)serialization.
    __attr_allowlist__ = [
        "child",
        "requires_grad",
        "_grad",
        "_grad_fn",
        "ops",
        "backprop_id",
        "n_backwards",
    ]

    def __init__(
        self,
        child: Union[Type[AutogradTensor], AcceptableSimpleType],
        requires_grad: bool = False,
    ) -> None:
        super().__init__(child)

        # whether to run backpropagation or not
        self.requires_grad = requires_grad

        # per-pass tensor gradient, keyed by backprop_id (None when absent)
        self._grad: TypeDict = DefaultDict(lambda: None)

        # operation used to create this tensor (if any)
        self._grad_fn: Optional[Type[autograd.backward_ops.Op]] = None

        # list of ops which use this tensor as an input
        self.ops: List = list()

        self.backprop_id: Optional[uuid.UUID] = None
        # counts backward() visits per backprop pass
        self.n_backwards: SerializableCounter = (
            SerializableCounter()
        )  # may have to add [uuid.UUID] for type annotation

    @property
    def grad(self) -> Optional[np.ndarray]:
        """Gradient accumulated for the current backprop pass, if any."""
        if self.backprop_id not in self._grad:
            return None
        return self._grad[self.backprop_id]

    @property
    def grad_fn(
        self,
    ) -> Optional[Type[autograd.backward_ops.Op]]:
        """The op that produced this tensor; raises if grads are disabled."""
        if not self.requires_grad:
            raise Exception("This tensor is not backpropagated")
        return self._grad_fn

    # --- Autograd Tensor Operations -------------------------------------
    # Note: return-type incompatibilities are ignored because
    # AutogradTensorAncestor does not inherit from PassthroughTensor.

    def __abs__(self) -> AutogradTensorAncestor:
        op = autograd.backward_ops.AbsOp()
        return op(self)

    def __add__(self, other: AutogradTensor) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.AddOp()
        return op(self, other)

    def __sub__(self, other: AutogradTensor) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.SubOp()
        return op(self, other)

    def __mul__(self, other: AutogradTensor) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.MulOp()
        return op(self, other)

    def __rmul__(self, other: AutogradTensor) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.MulOp()
        return op(self, other)

    def __truediv__(self, other: AutogradTensor) -> AutogradTensorAncestor:  # type: ignore
        # Division is only supported by a scalar/array divisor, implemented
        # as multiplication by the reciprocal.
        if is_acceptable_simple_type(other):
            # Ignoring type annotation error because only int, floats,
            # np.ndarrays will be parsed
            return self * (1 / other)  # type: ignore
        return NotImplemented

    def __pow__(self, other: Any) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.PowOp()
        return op(self, other)

    def __rpow__(self, other: Any) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.RPowOp()
        return op(self, other)

    def reshape(self, *shape: Tuple[int]) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.ReshapeOp()
        return op(self, *shape)

    def repeat(self, *args: Tuple[Any, ...], **kwargs: Any) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.RepeatOp()
        return op(self, *args, **kwargs)

    def copy(self) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.CopyOp()
        return op(self)

    def sum(self, *args: int, **kwargs: int) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.SumOp()
        return op(self, *args, **kwargs)

    def transpose(self, *dims: tuple) -> AutogradTensorAncestor:  # type: ignore
        op = autograd.backward_ops.TransposeOp()
        return op(self, *dims)

    # --- End Autograd Tensor Operations ----------------------------------

    def add_grad(self, grad: np.ndarray) -> None:
        """Accumulate *grad* into the gradient slot of the current pass."""
        if self._grad[self.backprop_id] is None:
            self._grad[self.backprop_id] = grad
        else:
            self._grad[self.backprop_id] = self._grad[self.backprop_id] + grad

    def backward(
        self,
        grad: Optional[np.ndarray] = None,
        backprop_id: Optional[uuid.UUID] = None,
    ) -> bool:
        """Propagate *grad* backwards through the compute graph.

        Returns:
            False when this tensor has no grad_fn (nothing to backprop),
            True otherwise.

        Raises:
            Exception: If this tensor has requires_grad=False.
        """
        if backprop_id is None:
            backprop_id = uuid.uuid4()

        self.n_backwards[backprop_id] += 1
        self.backprop_id = backprop_id

        if not self.grad_fn:
            return False

        if grad is None and self._grad[self.backprop_id] is None:
            # in case this is the last (loss) tensor, seed with ones
            grad = np.ones(self.shape)
        elif self.grad is not None:
            grad = self._grad[self.backprop_id]

        if not self.requires_grad:
            raise Exception("This tensor is not backpropagated")

        # if all gradients are accounted for - backprop
        if self.n_backwards[backprop_id] >= len(self.ops):
            self.grad_fn.backward(grad, backprop_id=backprop_id)  # type: ignore
        # if some gradients appear to be missing - parse forward in
        # the graph to double check
        else:
            # investigate whether any of the missing ops are actually
            # going to get used.
            found_id = False

            n_direct_ops = 0
            for op in self.ops:
                if op.backprop_id is not None and op.backprop_id == backprop_id:
                    n_direct_ops += 1

            # if the number of operations we know will be backpropagating
            # gradients to us exceeds the number of times we've been
            # backpropagated into - then we know we need to wait.
            if n_direct_ops > self.n_backwards[backprop_id]:
                found_id = True
            else:
                for op in self.ops:
                    if op.backprop_id is None:
                        if op.out.find_backprop_id(backprop_id):
                            found_id = True
                            break

            if found_id:
                # do nothing - we're going to get another gradient
                pass
            else:
                # backprop anyway - we've got all the grads we're gonna get
                self.grad_fn.backward(grad, backprop_id=backprop_id)  # type: ignore

        return True

    def find_backprop_id(self, backprop_id: Optional[uuid.UUID]) -> bool:
        """Return True if any op downstream of this tensor participates in
        the backprop pass identified by *backprop_id*."""
        found_id = False

        for op in self.ops:
            if op.backprop_id is not None and op.backprop_id == backprop_id:
                return True
            # BUG FIX: recurse with the id being searched for; the original
            # passed self.backprop_id, which may belong to a different pass.
            if op.out.find_backprop_id(backprop_id):
                found_id = True
                break

        return found_id
| 33.253219
| 102
| 0.630356
|
acfde1a681e6156a8e6835d21209dde939aeaa94
| 174
|
py
|
Python
|
tempCodeRunnerFile.py
|
jasDestiny/Reddit_EngDict_Bot
|
a2c81ddf87ab9023647d740112edec3ba47cdd8a
|
[
"MIT"
] | 1
|
2021-05-28T17:31:05.000Z
|
2021-05-28T17:31:05.000Z
|
tempCodeRunnerFile.py
|
jasDestiny/Reddit_EngDict_Bot
|
a2c81ddf87ab9023647d740112edec3ba47cdd8a
|
[
"MIT"
] | null | null | null |
tempCodeRunnerFile.py
|
jasDestiny/Reddit_EngDict_Bot
|
a2c81ddf87ab9023647d740112edec3ba47cdd8a
|
[
"MIT"
] | null | null | null |
# Leftover scratch file from a code-runner session. The original `if` had no
# executable body (only comments), which is a SyntaxError; `pass` makes the
# file parseable again. NOTE(review): `comment` is not defined in this file —
# presumably it came from the surrounding editing session; confirm before reuse.
if comment != "":
    # print("reply sent")
    # submission.reply("[A Real user's application that autogenerates synonyms of some words] \n\n"+comment)
    pass
| 58
| 120
| 0.597701
|
acfde2ed648321118a2bfe045b19a2a852f2d03e
| 554
|
py
|
Python
|
manage.py
|
couldandblow/Intelligent-QA-in-medicine
|
a067a7fe85c7ec034c627082e2ea28f11ee06797
|
[
"MIT"
] | null | null | null |
manage.py
|
couldandblow/Intelligent-QA-in-medicine
|
a067a7fe85c7ec034c627082e2ea28f11ee06797
|
[
"MIT"
] | null | null | null |
manage.py
|
couldandblow/Intelligent-QA-in-medicine
|
a067a7fe85c7ec034c627082e2ea28f11ee06797
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before Django imports it.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "KGQA_Based_On_medicine.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; `from exc` keeps the original cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to Django's command-line interface (runserver, migrate, ...).
    execute_from_command_line(sys.argv)
| 34.625
| 86
| 0.694946
|
acfde30aead8d293d45d795d536f193bae0c89c9
| 21,917
|
py
|
Python
|
google/cloud/aiplatform/utils/__init__.py
|
morgandu/python-aiplatform
|
96ce7387ac58e0ec7cb6a7f6d6a6e422eae5da96
|
[
"Apache-2.0"
] | 1
|
2021-09-07T23:11:11.000Z
|
2021-09-07T23:11:11.000Z
|
google/cloud/aiplatform/utils/__init__.py
|
morgandu/python-aiplatform
|
96ce7387ac58e0ec7cb6a7f6d6a6e422eae5da96
|
[
"Apache-2.0"
] | null | null | null |
google/cloud/aiplatform/utils/__init__.py
|
morgandu/python-aiplatform
|
96ce7387ac58e0ec7cb6a7f6d6a6e422eae5da96
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import datetime
import pathlib
import logging
import re
from typing import Any, Callable, Dict, Optional, Type, TypeVar, Tuple
from google.protobuf import timestamp_pb2
from google.api_core import client_options
from google.api_core import gapic_v1
from google.auth import credentials as auth_credentials
from google.cloud import storage
from google.cloud.aiplatform import compat
from google.cloud.aiplatform.constants import base as constants
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform.compat.services import (
dataset_service_client_v1beta1,
endpoint_service_client_v1beta1,
featurestore_online_serving_service_client_v1beta1,
featurestore_service_client_v1beta1,
job_service_client_v1beta1,
metadata_service_client_v1beta1,
model_service_client_v1beta1,
pipeline_service_client_v1beta1,
prediction_service_client_v1beta1,
tensorboard_service_client_v1beta1,
)
from google.cloud.aiplatform.compat.services import (
dataset_service_client_v1,
endpoint_service_client_v1,
featurestore_online_serving_service_client_v1,
featurestore_service_client_v1,
job_service_client_v1,
metadata_service_client_v1,
model_service_client_v1,
pipeline_service_client_v1,
prediction_service_client_v1,
tensorboard_service_client_v1,
)
from google.cloud.aiplatform.compat.types import (
accelerator_type as gca_accelerator_type,
)
# Constrained TypeVar covering every GAPIC service client (v1 and v1beta1)
# that helpers in this module may accept or return; used for annotations only.
VertexAiServiceClient = TypeVar(
    "VertexAiServiceClient",
    # v1beta1
    dataset_service_client_v1beta1.DatasetServiceClient,
    endpoint_service_client_v1beta1.EndpointServiceClient,
    featurestore_online_serving_service_client_v1beta1.FeaturestoreOnlineServingServiceClient,
    featurestore_service_client_v1beta1.FeaturestoreServiceClient,
    model_service_client_v1beta1.ModelServiceClient,
    prediction_service_client_v1beta1.PredictionServiceClient,
    pipeline_service_client_v1beta1.PipelineServiceClient,
    job_service_client_v1beta1.JobServiceClient,
    metadata_service_client_v1beta1.MetadataServiceClient,
    tensorboard_service_client_v1beta1.TensorboardServiceClient,
    # v1
    dataset_service_client_v1.DatasetServiceClient,
    endpoint_service_client_v1.EndpointServiceClient,
    featurestore_online_serving_service_client_v1.FeaturestoreOnlineServingServiceClient,
    featurestore_service_client_v1.FeaturestoreServiceClient,
    metadata_service_client_v1.MetadataServiceClient,
    model_service_client_v1.ModelServiceClient,
    prediction_service_client_v1.PredictionServiceClient,
    pipeline_service_client_v1.PipelineServiceClient,
    job_service_client_v1.JobServiceClient,
    tensorboard_service_client_v1.TensorboardServiceClient,
)
RESOURCE_ID_PATTERN = re.compile(r"^[\w-]+$")


def validate_id(resource_id: str):
    """Validate resource ID.

    Args:
        resource_id (str): Resource id.
    Raises:
        ValueError: If resource id is not a valid format.
    """
    # fullmatch instead of match: with match(), the trailing "$" also
    # accepts an id ending in a newline (e.g. "abc\n"), which is invalid.
    if not RESOURCE_ID_PATTERN.fullmatch(resource_id):
        raise ValueError(f"Resource {resource_id} is not a valid resource id.")
def full_resource_name(
    resource_name: str,
    resource_noun: str,
    parse_resource_name_method: Callable[[str], Dict[str, str]],
    format_resource_name_method: Callable[..., str],
    parent_resource_name_fields: Optional[Dict[str, str]] = None,
    project: Optional[str] = None,
    location: Optional[str] = None,
    resource_id_validator: Optional[Callable[[str], None]] = None,
) -> str:
    """Returns the fully qualified resource name for a resource ID.

    Args:
        resource_name (str): A fully-qualified Vertex AI resource name or a
            bare resource ID.
        resource_noun (str): Resource noun to validate against, e.g.
            "datasets" for "projects/123/locations/us-central1/datasets/456".
        parse_resource_name_method: GAPIC helper that splits a resource name
            into its segment parts.
        format_resource_name_method: GAPIC helper that assembles segment
            parts into a resource name.
        parent_resource_name_fields: Optional mapping of parent resource
            nouns to their resource ids, e.g. {"metadataStores": "123"}.
        project (str): Optional project; falls back to aiplatform.init.
        location (str): Optional location; falls back to aiplatform.init.
        resource_id_validator: Optional override for the default resource-id
            validator (validate_id); raises ValueError when invalid.

    Returns:
        str: A fully-qualified Vertex AI resource name.
    """
    # Already fully qualified (parses into segments)? Return it untouched.
    if parse_resource_name_method(resource_name):
        return resource_name

    validator = resource_id_validator or validate_id
    resolved_project = project or initializer.global_config.project
    resolved_location = location or initializer.global_config.location

    validate_region(resolved_location)
    validator(resource_name)

    segments = {
        "project": resolved_project,
        "location": resolved_location,
        convert_camel_case_resource_noun_to_snake_case(resource_noun): resource_name,
    }
    # Parent segments (if any) are merged in after the primary segments.
    for noun, resource_id in (parent_resource_name_fields or {}).items():
        segments[convert_camel_case_resource_noun_to_snake_case(noun)] = resource_id

    return format_resource_name_method(**segments)
# Resource nouns that are not plural in their resource names.
# Used below to skip the plural-to-singular conversion.
_SINGULAR_RESOURCE_NOUNS = {"time_series"}


def convert_camel_case_resource_noun_to_snake_case(resource_noun: str) -> str:
    """Converts a camelCase resource noun to its singular snake_case form.

    Maps resource-name parts (e.g. "metadataStores") to GAPIC parameter
    names (e.g. "metadata_store").

    Args:
        resource_noun (str): The resource noun in camel case to convert.

    Returns:
        str: Singular snake case resource noun.
    """
    converted = re.sub("([A-Z]+)", r"_\1", resource_noun).lower()
    # Strip the plural "s" unless the noun is inherently singular.
    needs_singularizing = (
        converted.endswith("s") and converted not in _SINGULAR_RESOURCE_NOUNS
    )
    return converted[:-1] if needs_singularizing else converted
def validate_display_name(display_name: str):
    """Verify a display name is at most 128 characters.

    Args:
        display_name: Display name to verify.

    Raises:
        ValueError: If the display name is longer than 128 characters.
    """
    max_length = 128
    if len(display_name) > max_length:
        raise ValueError("Display name needs to be less than 128 characters.")
def validate_labels(labels: Dict[str, str]):
    """Validate that labels is a mapping of string keys to string values.

    Args:
        labels: Labels mapping to verify.

    Raises:
        ValueError: If any key or value is not a string.
    """
    if any(
        not (isinstance(key, str) and isinstance(value, str))
        for key, value in labels.items()
    ):
        raise ValueError(
            "Expect labels to be a mapping of string key value pairs. "
            'Got "{}".'.format(labels)
        )
def validate_region(region: str) -> bool:
    """Validates a region string against the supported regions.

    Args:
        region: Region to validate.

    Returns:
        bool: True if no errors raised.

    Raises:
        ValueError: If region is falsy or not in the supported regions.
    """
    # Reject empty/None before attempting to normalize.
    if not region:
        raise ValueError(
            f"Please provide a region, select from {constants.SUPPORTED_REGIONS}"
        )
    if region.lower() not in constants.SUPPORTED_REGIONS:
        raise ValueError(
            f"Unsupported region for Vertex AI, select from {constants.SUPPORTED_REGIONS}"
        )
    return True
def validate_accelerator_type(accelerator_type: str) -> bool:
    """Validates a user-provided accelerator_type string.

    Args:
        accelerator_type (str):
            Represents a hardware accelerator type.

    Returns:
        bool: True if valid accelerator_type.

    Raises:
        ValueError: If accelerator type is not a member of AcceleratorType.
    """
    valid_names = gca_accelerator_type.AcceleratorType._member_names_
    if accelerator_type in valid_names:
        return True
    raise ValueError(
        f"Given accelerator_type `{accelerator_type}` invalid. "
        f"Choose one of {gca_accelerator_type.AcceleratorType._member_names_}"
    )
def extract_bucket_and_prefix_from_gcs_path(gcs_path: str) -> Tuple[str, Optional[str]]:
    """Given a complete GCS path, return the bucket name and prefix as a tuple.

    Example Usage:

        bucket, prefix = extract_bucket_and_prefix_from_gcs_path(
            "gs://example-bucket/path/to/folder"
        )
        # bucket = "example-bucket"
        # prefix = "path/to/folder"

    Args:
        gcs_path (str):
            Required. A full path to a Google Cloud Storage folder or resource.
            Can optionally include "gs://" prefix or end in a trailing slash "/".

    Returns:
        Tuple[str, Optional[str]]
            A (bucket, prefix) pair from provided GCS path. If a prefix is not
            present, a None will be returned in its place.
    """
    path = gcs_path[5:] if gcs_path.startswith("gs://") else gcs_path
    # Drop at most one trailing slash, e.g. "gs://bucket/prefix/".
    if path.endswith("/"):
        path = path[:-1]
    bucket, separator, prefix = path.partition("/")
    return (bucket, prefix if separator else None)
class ClientWithOverride:
    # Base class bundling one GAPIC client per API version; attribute access is
    # forwarded to the client for ``_default_version``, and ``select_version``
    # exposes the others. Subclasses supply ``_is_temporary``,
    # ``_default_version`` and ``_version_map`` as plain class attributes.
    class WrappedClient:
        """Wrapper class for client that creates client at API invocation
        time."""
        def __init__(
            self,
            client_class: Type[VertexAiServiceClient],
            client_options: client_options.ClientOptions,
            client_info: gapic_v1.client_info.ClientInfo,
            credentials: Optional[auth_credentials.Credentials] = None,
        ):
            """Stores parameters needed to instantiate client.

            Args:
                client_class (VertexAiServiceClient):
                    Required. Class of the client to use.
                client_options (client_options.ClientOptions):
                    Required. Client options to pass to client.
                client_info (gapic_v1.client_info.ClientInfo):
                    Required. Client info to pass to client.
                credentials (auth_credentials.credentials):
                    Optional. Client credentials to pass to client.
            """
            self._client_class = client_class
            self._credentials = credentials
            self._client_options = client_options
            self._client_info = client_info
        def __getattr__(self, name: str) -> Any:
            """Instantiates client and returns attribute of the client."""
            # Note: a brand-new client is constructed on every attribute
            # access; nothing is cached on this wrapper.
            temporary_client = self._client_class(
                credentials=self._credentials,
                client_options=self._client_options,
                client_info=self._client_info,
            )
            return getattr(temporary_client, name)
    # Abstract class-level knobs, overridden as plain class attributes in the
    # concrete subclasses below.
    # NOTE(review): stacking @property/@classmethod with @abc.abstractmethod is
    # only an abstract placeholder here; chaining classmethod and property is
    # unsupported on newer Python versions -- confirm these descriptors are
    # never invoked directly.
    @property
    @abc.abstractmethod
    def _is_temporary(self) -> bool:
        pass
    @property
    @classmethod
    @abc.abstractmethod
    def _default_version(self) -> str:
        pass
    @property
    @classmethod
    @abc.abstractmethod
    def _version_map(self) -> Tuple:
        pass
    def __init__(
        self,
        client_options: client_options.ClientOptions,
        client_info: gapic_v1.client_info.ClientInfo,
        credentials: Optional[auth_credentials.Credentials] = None,
    ):
        """Stores parameters needed to instantiate client.

        Args:
            client_options (client_options.ClientOptions):
                Required. Client options to pass to client.
            client_info (gapic_v1.client_info.ClientInfo):
                Required. Client info to pass to client.
            credentials (auth_credentials.credentials):
                Optional. Client credentials to pass to client.
        """
        # Temporary clients are wrapped so a fresh client is created per call;
        # otherwise a single persistent client is instantiated per version.
        self._clients = {
            version: self.WrappedClient(
                client_class=client_class,
                client_options=client_options,
                client_info=client_info,
                credentials=credentials,
            )
            if self._is_temporary
            else client_class(
                client_options=client_options,
                client_info=client_info,
                credentials=credentials,
            )
            for version, client_class in self._version_map
        }
    def __getattr__(self, name: str) -> Any:
        """Instantiates client and returns attribute of the client."""
        # Delegates to the client for the default API version.
        return getattr(self._clients[self._default_version], name)
    def select_version(self, version: str) -> VertexAiServiceClient:
        # Returns the (possibly wrapped) client for an explicit API version.
        return self._clients[version]
    @classmethod
    def get_gapic_client_class(
        cls, version: Optional[str] = None
    ) -> Type[VertexAiServiceClient]:
        """Gets the underlying GAPIC client class.

        Used to access class and static methods without instantiating.

        Args:
            version (str):
                Optional. Version of client to retrieve, otherwise the default
                version's class is returned.

        Returns:
            Underlying GAPIC client class for this wrapper and version.
        """
        return dict(cls._version_map)[version or cls._default_version]
# Concrete per-service wrappers. ``_is_temporary`` controls whether a fresh
# client is built per call (see ClientWithOverride.__init__); ``_version_map``
# pairs each compat version with its GAPIC client class.
class DatasetClientWithOverride(ClientWithOverride):
    """Versioned wrapper for the Dataset service GAPIC clients."""
    _is_temporary = True
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (compat.V1, dataset_service_client_v1.DatasetServiceClient),
        (compat.V1BETA1, dataset_service_client_v1beta1.DatasetServiceClient),
    )
class EndpointClientWithOverride(ClientWithOverride):
    """Versioned wrapper for the Endpoint service GAPIC clients."""
    _is_temporary = True
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (compat.V1, endpoint_service_client_v1.EndpointServiceClient),
        (compat.V1BETA1, endpoint_service_client_v1beta1.EndpointServiceClient),
    )
class FeaturestoreClientWithOverride(ClientWithOverride):
    """Versioned wrapper for the Featurestore service GAPIC clients."""
    _is_temporary = True
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (compat.V1, featurestore_service_client_v1.FeaturestoreServiceClient),
        (compat.V1BETA1, featurestore_service_client_v1beta1.FeaturestoreServiceClient),
    )
class FeaturestoreOnlineServingClientWithOverride(ClientWithOverride):
    """Versioned wrapper for the Featurestore online-serving GAPIC clients.

    Persistent (``_is_temporary = False``): one client instance per version.
    """
    _is_temporary = False
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (
            compat.V1,
            featurestore_online_serving_service_client_v1.FeaturestoreOnlineServingServiceClient,
        ),
        (
            compat.V1BETA1,
            featurestore_online_serving_service_client_v1beta1.FeaturestoreOnlineServingServiceClient,
        ),
    )
class JobClientWithOverride(ClientWithOverride):
    """Versioned wrapper for the Job service GAPIC clients."""
    _is_temporary = True
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (compat.V1, job_service_client_v1.JobServiceClient),
        (compat.V1BETA1, job_service_client_v1beta1.JobServiceClient),
    )
class ModelClientWithOverride(ClientWithOverride):
    """Versioned wrapper for the Model service GAPIC clients."""
    _is_temporary = True
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (compat.V1, model_service_client_v1.ModelServiceClient),
        (compat.V1BETA1, model_service_client_v1beta1.ModelServiceClient),
    )
class PipelineClientWithOverride(ClientWithOverride):
    """Versioned wrapper for the Pipeline service GAPIC clients."""
    _is_temporary = True
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (compat.V1, pipeline_service_client_v1.PipelineServiceClient),
        (compat.V1BETA1, pipeline_service_client_v1beta1.PipelineServiceClient),
    )
class PipelineJobClientWithOverride(ClientWithOverride):
    """Versioned wrapper for PipelineJob calls (same service as pipelines)."""
    _is_temporary = True
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (compat.V1, pipeline_service_client_v1.PipelineServiceClient),
        (compat.V1BETA1, pipeline_service_client_v1beta1.PipelineServiceClient),
    )
class PredictionClientWithOverride(ClientWithOverride):
    """Versioned wrapper for the Prediction service GAPIC clients.

    Persistent (``_is_temporary = False``): one client instance per version.
    """
    _is_temporary = False
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (compat.V1, prediction_service_client_v1.PredictionServiceClient),
        (compat.V1BETA1, prediction_service_client_v1beta1.PredictionServiceClient),
    )
class MetadataClientWithOverride(ClientWithOverride):
    """Versioned wrapper for the Metadata service GAPIC clients."""
    _is_temporary = True
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (compat.V1, metadata_service_client_v1.MetadataServiceClient),
        (compat.V1BETA1, metadata_service_client_v1beta1.MetadataServiceClient),
    )
class TensorboardClientWithOverride(ClientWithOverride):
    """Versioned wrapper for the Tensorboard service GAPIC clients.

    Persistent (``_is_temporary = False``): one client instance per version.
    """
    _is_temporary = False
    _default_version = compat.DEFAULT_VERSION
    _version_map = (
        (compat.V1, tensorboard_service_client_v1.TensorboardServiceClient),
        (compat.V1BETA1, tensorboard_service_client_v1beta1.TensorboardServiceClient),
    )
# Constrained TypeVar covering the wrapper client types, for annotating
# helpers that accept any versioned Vertex AI service client wrapper.
# NOTE(review): FeaturestoreOnlineServingClientWithOverride is not listed
# here -- confirm whether that omission is intentional.
VertexAiServiceClientWithOverride = TypeVar(
    "VertexAiServiceClientWithOverride",
    DatasetClientWithOverride,
    EndpointClientWithOverride,
    FeaturestoreClientWithOverride,
    JobClientWithOverride,
    ModelClientWithOverride,
    PipelineClientWithOverride,
    PipelineJobClientWithOverride,
    PredictionClientWithOverride,
    MetadataClientWithOverride,
    TensorboardClientWithOverride,
)
class LoggingFilter(logging.Filter):
    """Filter that keeps only records whose levelname equals the given value.

    NOTE(review): ``filter`` compares ``record.levelname`` (a string such as
    "WARNING") against ``_warning_level``, yet the parameter is annotated
    ``int``. Callers presumably pass the level *name* string -- confirm; a
    numeric level (e.g. ``logging.WARNING``) would never match.
    """
    def __init__(self, warning_level: int):
        self._warning_level = warning_level
    def filter(self, record):
        # Keep the record only on an exact level-name match.
        return record.levelname == self._warning_level
def _timestamped_gcs_dir(root_gcs_path: str, dir_name_prefix: str) -> str:
"""Composes a timestamped GCS directory.
Args:
root_gcs_path: GCS path to put the timestamped directory.
dir_name_prefix: Prefix to add the timestamped directory.
Returns:
Timestamped gcs directory path in root_gcs_path.
"""
timestamp = datetime.datetime.now().isoformat(sep="-", timespec="milliseconds")
dir_name = "-".join([dir_name_prefix, timestamp])
if root_gcs_path.endswith("/"):
root_gcs_path = root_gcs_path[:-1]
gcs_path = "/".join([root_gcs_path, dir_name])
if not gcs_path.startswith("gs://"):
return "gs://" + gcs_path
return gcs_path
def _timestamped_copy_to_gcs(
    local_file_path: str,
    gcs_dir: str,
    project: Optional[str] = None,
    credentials: Optional[auth_credentials.Credentials] = None,
) -> str:
    """Copies a local file to a GCS path.

    The file copied to GCS is the name of the local file prepended with an
    "aiplatform-{timestamp}-" string.

    Args:
        local_file_path (str): Required. Local file to copy to GCS.
        gcs_dir (str):
            Required. The GCS directory to copy to.
        project (str):
            Project that contains the staging bucket. Default will be used if not
            provided. Model Builder callers should pass this in.
        credentials (auth_credentials.Credentials):
            Custom credentials to use with bucket. Model Builder callers should pass
            this in.

    Returns:
        gcs_path (str): The path of the copied file in gcs.
    """
    gcs_bucket, gcs_blob_prefix = extract_bucket_and_prefix_from_gcs_path(gcs_dir)
    local_file_name = pathlib.Path(local_file_path).name
    # Timestamp disambiguates repeated uploads of the same local file name.
    timestamp = datetime.datetime.now().isoformat(sep="-", timespec="milliseconds")
    blob_path = "-".join(["aiplatform", timestamp, local_file_name])
    if gcs_blob_prefix:
        blob_path = "/".join([gcs_blob_prefix, blob_path])
    # TODO(b/171202993) add user agent
    client = storage.Client(project=project, credentials=credentials)
    bucket = client.bucket(gcs_bucket)
    blob = bucket.blob(blob_path)
    blob.upload_from_filename(local_file_path)
    # Reassemble the fully-qualified gs:// URI of the uploaded object.
    gcs_path = "".join(["gs://", "/".join([blob.bucket.name, blob.name])])
    return gcs_path
def get_timestamp_proto(
    time: Optional[datetime.datetime] = None,
) -> timestamp_pb2.Timestamp:
    """Gets timestamp proto of a given time.

    Args:
        time (datetime.datetime):
            Optional. A user provided time. Defaults to datetime.datetime.now()
            (evaluated at call time) if not given.

    Returns:
        timestamp_pb2.Timestamp - timestamp proto of the given time, truncated
        so it does not have higher than millisecond precision.
    """
    # Bug fix: the previous signature used ``datetime.datetime.now()`` as the
    # default value, which is evaluated once at import time and then reused
    # for every call. Compute "now" lazily instead.
    if time is None:
        time = datetime.datetime.now()
    t = time.timestamp()
    seconds = int(t)
    # Truncate the fractional second to milliseconds, then scale to
    # nanoseconds; the proto must not carry higher than millisecond precision.
    nanos = int(t % 1 * 1e3) * int(1e6)
    return timestamp_pb2.Timestamp(seconds=seconds, nanos=nanos)
| 34.514961
| 112
| 0.696491
|
acfde3123065225740dbe1eceeb0c518a06b8555
| 13,407
|
py
|
Python
|
pychron/pipeline/plot/plotter/references_series.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 31
|
2016-03-07T02:38:17.000Z
|
2022-02-14T18:23:43.000Z
|
pychron/pipeline/plot/plotter/references_series.py
|
ASUPychron/pychron
|
dfe551bdeb4ff8b8ba5cdea0edab336025e8cc76
|
[
"Apache-2.0"
] | 1,626
|
2015-01-07T04:52:35.000Z
|
2022-03-25T19:15:59.000Z
|
pychron/pipeline/plot/plotter/references_series.py
|
UIllinoisHALPychron/pychron
|
f21b79f4592a9fb9dc9a4cb2e4e943a3885ededc
|
[
"Apache-2.0"
] | 26
|
2015-05-23T00:10:06.000Z
|
2022-03-07T16:51:57.000Z
|
# ===============================================================================
# Copyright 2015 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from chaco.array_data_source import ArrayDataSource
from numpy import zeros_like, array, asarray, isinf, isnan
from pyface.message_dialog import warning
from pyface.timer.do_later import do_later
from traits.api import Property, on_trait_change, List, Array
from uncertainties import nominal_value, std_dev
from pychron.core.helpers.formatting import floatfmt
from pychron.core.regression.base_regressor import BaseRegressor
from pychron.core.regression.interpolation_regressor import InterpolationRegressor
from pychron.graph.explicit_legend import ExplicitLegend
from pychron.graph.offset_plot_label import OffsetPlotLabel
from pychron.pipeline.plot.plotter.series import BaseSeries
from pychron.pychron_constants import PLUSMINUS
def calc_limits(ys, ye, n):
    """Return (ymin, ymax) spanning ``ys`` +/- ``n`` * ``ye``.

    Args:
        ys: Array-like of values.
        ye: Array-like of errors (same shape as ``ys``).
        n: Sigma multiplier applied to the errors.

    Returns:
        Tuple (ymin, ymax); a bound that cannot be computed (e.g. empty or
        invalid arrays) falls back to 0.
    """
    # Catch Exception, not BaseException: the original bare BaseException also
    # swallowed KeyboardInterrupt/SystemExit, making the app hard to stop.
    try:
        ymi = (ys - (ye * n)).min()
    except Exception:
        ymi = 0
    try:
        yma = (ys + (ye * n)).max()
    except Exception:
        yma = 0
    return ymi, yma
def unzip_data(data):
    """Split *data* into (nominal_values, std_devs) numpy arrays.

    Returns None (after printing the error) when the values cannot be
    coerced -- callers treat that as "no usable data".
    """
    try:
        nominals = array([nominal_value(v) for v in data])
        errors = array([std_dev(v) for v in data])
        return nominals, errors
    except ValueError as exc:
        print(exc)
class ReferencesSeries(BaseSeries):
    """Series plotter that fits reference analyses and interpolates predicted
    values for unknown analyses at their acquisition times.

    Time values on the x-axis are normalized to hours relative to the latest
    analysis (see ``_normalization_factor``).
    """
    # Reference analyses used to build the fit.
    references = List
    # References sorted with the same comparator as the unknowns (cached trait).
    sorted_references = Property(depends_on="references")
    # Whether to plot the unknowns' current (measured) values.
    show_current = True
    # X values (relative hours) for the references.
    rxs = Array
    references_name = "References"
    xtitle = "Time (hrs)"
    # Seconds-per-hour; timestamps are divided by this for the x-axis.
    _normalization_factor = 3600.0
    def set_interpolated_values(self, iso, reg, fit):
        """Predict values/errors for the unknowns from regressor *reg* and
        push them into the analyses; returns (predicted_ys, predicted_errors)."""
        mi, ma = self._get_min_max()
        # mi is currently unused; x values are zeroed on the max timestamp.
        ans = self.sorted_analyses
        xs = [(ai.timestamp - ma) / self._normalization_factor for ai in ans]
        p_uys = reg.predict(xs)
        p_ues = reg.predict_error(xs)
        # Guard against regressors returning None/NaN/inf predictions.
        if p_ues is None or any(isnan(p_ues)) or any(isinf(p_ues)):
            p_ues = zeros_like(xs)
        if p_uys is None or any(isnan(p_uys)) or any(isinf(p_uys)):
            p_uys = zeros_like(xs)
        self._set_interpolated_values(iso, fit, ans, p_uys, p_ues)
        return asarray(p_uys), asarray(p_ues)
    def post_make(self):
        # Defer the refresh so it runs on the GUI thread after construction.
        self._fix_log_axes()
        do_later(self.graph.refresh)
    def plot(self, plots, legend=None):
        """Build one fit series per plot option and attach an explicit legend."""
        if plots:
            _, mx = self._get_min_max()
            self.xs = self._get_xs(plots, self.sorted_analyses, tzero=mx)
            self.rxs = self._get_xs(plots, self.sorted_references, tzero=mx)
            graph = self.graph
            for i, p in enumerate(plots):
                self._new_fit_series(i, p)
                self._add_plot_label(i, p)
                if self.options.show_statistics:
                    graph.add_statistics(plotid=i)
            mi, ma = self._get_min_max()
            # X range runs from the earliest analysis (negative hours) to 0.
            self.xmi, self.xma = (mi - ma) / 3600.0, 0
            self.xpad = "0.1"
            legend = ExplicitLegend(
                plots=self.graph.plots[0].plots,
                labels=[
                    ("plot1", self.references_name),
                    ("data0", self.references_name),
                    ("plot0", "Unk. Current"),
                    ("Unknowns-predicted0", "Unk. Predicted"),
                ],
            )
            self.graph.plots[-1].overlays.append(legend)
    # private
    @on_trait_change("graph:regression_results")
    def _update_regression(self, new):
        # When a regression is recomputed, propagate the excluded points back
        # onto the references and refresh the predicted series.
        key = "Unknowns-predicted{}"
        key = key.format(0)
        for plotobj, reg in new:
            if isinstance(reg, BaseRegressor):
                excluded = reg.get_excluded()
                for i, r in enumerate(self.sorted_references):
                    r.set_temp_status("omit" if i in excluded else "ok")
                self._set_values(plotobj, reg, key)
    def _get_signal_intensity(self, po, analysis):
        # Returns (value, error) of the isotope intensity, or (0, 0) when the
        # isotope is not present on the analysis.
        v, e = 0, 0
        iso = self._get_isotope(po, analysis)
        if iso:
            i = iso.get_intensity()
            v, e = nominal_value(i), std_dev(i)
        return v, e
    def _get_isotope(self, po, analysis):
        return analysis.get_isotope(po.name)
    def _calc_limits(self, ys, ye):
        # Y limits at the configured n-sigma band.
        return calc_limits(ys, ye, self.options.nsigma)
    def _add_plot_label(
        self, pid, po, overlay_position="inside top", hjustify="left", **kw
    ):
        # Attach an optional text overlay; subclasses provide the text.
        txt = self._get_plot_label_text(po)
        if txt:
            comp = self.graph.plots[pid]
            pl = OffsetPlotLabel(
                txt,
                component=comp,
                overlay_position=overlay_position,
                hjustify=hjustify,
                **kw
            )
            comp.overlays.append(pl)
    def _get_plot_label_text(self, po):
        # Hook for subclasses; no label by default.
        pass
    def _new_fit_series(self, pid, po):
        # Compose the three series for one plot: current unknowns, reference
        # fit, and interpolated predictions; then set combined y limits.
        ymi, yma = self._plot_unknowns_current(pid, po)
        args = self._plot_references(pid, po)
        if args:
            reg, a, b = args
            ymi = min(ymi, a)
            yma = max(yma, b)
            if reg:
                a, b = self._plot_interpolated(pid, po, reg)
                ymi = min(ymi, a)
                yma = max(yma, b)
            self.graph.set_y_limits(ymi, yma, pad="0.05", plotid=pid)
        else:
            warning(
                None, "Invalid Detector choices for these analyses. {}".format(po.name)
            )
    def _get_min_max(self):
        # Earliest and latest timestamps across references and unknowns.
        mi = min(self.sorted_references[0].timestamp, self.sorted_analyses[0].timestamp)
        ma = max(
            self.sorted_references[-1].timestamp, self.sorted_analyses[-1].timestamp
        )
        return mi, ma
    def _get_sorted_references(self):
        # Property getter for ``sorted_references``; same ordering rules as
        # the unknown analyses.
        return sorted(
            self.references,
            key=self._cmp_analyses,
            reverse=self._reverse_sorted_analyses,
        )
    def _set_values(self, plotobj, reg, key):
        # Refresh the predicted scatter's data from a (re)computed regressor.
        iso = plotobj.isotope
        fit = plotobj.fit
        if key in plotobj.plots:
            scatter = plotobj.plots[key][0]
            p_uys, p_ues = self.set_interpolated_values(iso, reg, fit)
            scatter.value.set_data(p_uys)
            scatter.yerror.set_data(p_ues)
            scatter._layout_needed = True
    def reference_data(self, po):
        """Return (analyses, xs, nominal_ys, y_errors) for the references, or
        None when no reference data is available."""
        data = self._get_reference_data(po)
        if data:
            ans, xs, ys = data
            return (
                ans,
                array(xs),
                array([nominal_value(ri) for ri in ys]),
                array([std_dev(ri) for ri in ys]),
            )
    def current_data(self, po):
        """Return (nominal_values, std_devs) arrays for the unknowns."""
        data = self._get_current_data(po)
        return array([nominal_value(ri) for ri in data]), array(
            [std_dev(ri) for ri in data]
        )
    def _get_current_data(self, po):
        return self._unpack_attr(po.name)
    def _get_reference_data(self, po):
        # Subclasses must supply the reference data extraction.
        raise NotImplementedError
    # plotting
    def _plot_unknowns_current(self, pid, po):
        # Scatter of the unknowns' measured values; returns the y limits used.
        ymi, yma = 0, 0
        if self.analyses and self.show_current:
            graph = self.graph
            n = [ai.record_id for ai in self.sorted_analyses]
            ys, ye = self.current_data(po)
            ymi, yma = self._calc_limits(ys, ye)
            scatter, plot = graph.new_series(
                x=self.xs,
                y=ys,
                yerror=ye,
                type="scatter",
                display_index=ArrayDataSource(data=n),
                fit=False,
                plotid=pid,
                bind_id=-2,
                add_tools=False,
                add_inspector=False,
                marker=po.marker,
                marker_size=po.marker_size,
            )
            def af(i, x, y, analysis):
                # Extra inspector lines: interpolated value, run date,
                # relative time and signal intensity.
                v, e = self._get_interpolated_value(po, analysis)
                s, se = self._get_signal_intensity(po, analysis)
                return (
                    u"Interpolated: {} {} {}".format(
                        floatfmt(v), PLUSMINUS, floatfmt(e)
                    ),
                    "Run Date: {}".format(analysis.rundate.strftime("%m-%d-%Y %H:%M")),
                    "Rel. Time: {:0.4f}".format(x),
                    "Signal: {} {} {}".format(floatfmt(s), PLUSMINUS, floatfmt(se)),
                )
            self._add_error_bars(scatter, ye, "y", self.options.nsigma, True)
            self._add_scatter_inspector(
                scatter, add_selection=False, additional_info=af
            )
        return ymi, yma
    def _plot_interpolated(self, pid, po, reg, series_id=0):
        # Scatter of the values predicted for the unknowns from *reg*.
        iso = po.name
        p_uys, p_ues = self.set_interpolated_values(iso, reg, po.fit)
        ymi, yma = 0, 0
        if len(p_uys):
            ymi, yma = self._calc_limits(p_uys, p_ues)
            graph = self.graph
            # display the predicted values
            s, p = graph.new_series(
                self.xs,
                p_uys,
                isotope=iso,
                yerror=ArrayDataSource(p_ues),
                fit=False,
                add_tools=False,
                add_inspector=False,
                type="scatter",
                marker=po.marker,
                marker_size=po.marker_size,
                plotid=pid,
                bind_id=-1,
            )
            series = len(p.plots) - 1
            # Name the series so the legend and _update_regression can find it.
            graph.set_series_label(
                "Unknowns-predicted{}".format(series_id), plotid=pid, series=series
            )
            self._add_error_bars(s, p_ues, "y", self.options.nsigma, True)
        return ymi, yma
    def _plot_references(self, pid, po):
        # Plot the reference points and build the regressor used for
        # interpolation; returns (regressor, ymin, ymax) or None.
        graph = self.graph
        efit = po.fit.lower()
        # r_xs = self.rxs
        data = self.reference_data(po)
        if data:
            refs, r_xs, r_ys, r_es = data
            ymi, yma = self._calc_limits(r_ys, r_es)
            reg = None
            kw = dict(
                add_tools=True,
                add_inspector=True,
                add_point_inspector=False,
                add_selection=False,
                # color='red',
                plotid=pid,
                selection_marker=po.marker,
                marker=po.marker,
                marker_size=po.marker_size,
            )
            update_meta_func = None
            if efit in [
                "preceding",
                "bracketing interpolate",
                "bracketing average",
                "succeeding",
            ]:
                # Non-parametric interpolation fits: plot a plain scatter and
                # use an InterpolationRegressor instead of a curve fit.
                reg = InterpolationRegressor(xs=r_xs, ys=r_ys, yserr=r_es, kind=efit)
                kw["add_tools"] = False
                scatter, _p = graph.new_series(
                    r_xs, r_ys, yerror=r_es, type="scatter", fit=False, **kw
                )
                def update_meta_func(obj, b, c, d):
                    # Re-apply user point selections to the regressor.
                    self.update_interpolation_regressor(po.name, reg, obj, refs)
                self._add_error_bars(scatter, r_es, "y", self.options.nsigma, True)
                ffit = po.fit
            else:
                # Parametric fit: let the graph create the fit line and pull
                # the regressor off the returned line renderer.
                bind_id = None
                if self.options.link_plots:
                    bind_id = hash(tuple([r.uuid for r in refs]))
                ffit = "{}_{}".format(po.fit, po.error_type)
                _, scatter, l = graph.new_series(
                    r_xs,
                    r_ys,
                    yerror=ArrayDataSource(data=r_es),
                    fit=ffit,
                    bind_id=bind_id,
                    **kw
                )
                if hasattr(l, "regressor"):
                    reg = l.regressor
                self._add_error_bars(scatter, r_es, "y", self.options.nsigma, True)
            def af(i, x, y, analysis):
                return (
                    "Run Date: {}".format(analysis.rundate.strftime("%m-%d-%Y %H:%M")),
                    "Rel. Time: {:0.4f}".format(x),
                )
            self._add_scatter_inspector(
                scatter,
                update_meta_func=update_meta_func,
                add_selection=True,
                additional_info=af,
                items=refs,
            )
            plot = graph.plots[pid]
            plot.isotope = po.name
            plot.fit = ffit
            # Restore any previously-selected (omitted) reference points.
            scatter.index.metadata["selections"] = [
                i for i, r in enumerate(refs) if r.temp_selected
            ]
            return reg, ymi, yma
    def _set_interpolated_values(self, iso, fit, ans, p_uys, p_ues):
        # Hook for subclasses to persist interpolated values on the analyses.
        pass
    def update_interpolation_regressor(self, isotope, reg, obj, references):
        """Apply user point selections from the scatter metadata to *reg* and
        refresh every plot showing *isotope*."""
        sel = self._filter_metadata_changes(obj, references)
        reg.user_excluded = sel
        key = "Unknowns-predicted0"
        for plotobj in self.graph.plots:
            if hasattr(plotobj, "isotope"):
                if plotobj.isotope == isotope:
                    self._set_values(plotobj, reg, key)
# ============= EOF =============================================
| 33.68593
| 88
| 0.533154
|
acfde3abd51479d957eaa9d75211fadeea8d7784
| 2,135
|
py
|
Python
|
src/optimctrltf/torch/obj.py
|
alucantonio/nabla
|
d24d8611178ae54c952a253612c0e3ae7ca25a21
|
[
"MIT"
] | null | null | null |
src/optimctrltf/torch/obj.py
|
alucantonio/nabla
|
d24d8611178ae54c952a253612c0e3ae7ca25a21
|
[
"MIT"
] | null | null | null |
src/optimctrltf/torch/obj.py
|
alucantonio/nabla
|
d24d8611178ae54c952a253612c0e3ae7ca25a21
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
class PyTorchObjective(object):
    """PyTorch objective function, wrapped to be called by scipy.optimize.

    The objective value, Jacobian and Hessian are each cached together with
    the input they were computed at, so repeated calls from scipy.optimize
    with the same ``x`` do not recompute.

    Bug fix: the original implementation never assigned ``cached_x``, so
    ``is_new`` always returned True and the caching never took effect. Each
    quantity now records its own input vector, which also avoids returning a
    stale Jacobian/Hessian after the objective moved to a new point.
    """
    def __init__(self, objfunc, x0):
        # Objective function: callable with arguments x, params; must return
        # a single scalar or tensor with one element.
        self.f = objfunc
        # Initial guess vector (torch.Tensor)
        self.x0 = x0
        # Default data type for tensors
        self.dtype = torch.get_default_dtype()
    def is_new(self, x):
        """Return True if ``x`` differs from the most recently evaluated input."""
        if not hasattr(self, 'cached_x'):
            return True
        error = np.abs(np.asarray(x) - np.asarray(self.cached_x))
        return error.max() > 1e-8
    def _remember(self, input_attr, x):
        # Record the input a cached quantity corresponds to, plus the most
        # recent input overall (consumed by the public ``is_new``).
        xx = np.array(x, copy=True)
        setattr(self, input_attr, xx)
        self.cached_x = xx
    def _is_cached(self, value_attr, input_attr, x):
        # True when *value_attr* exists and was computed at (approximately) x.
        if not hasattr(self, value_attr) or not hasattr(self, input_attr):
            return False
        error = np.abs(np.asarray(x) - getattr(self, input_attr))
        return error.max() <= 1e-8
    def cache_fun(self, x, params=None):
        """Evaluates objective function and caches its value.
        """
        xx = torch.as_tensor(x, dtype=self.dtype)
        if params is not None:
            y = self.f(xx, params)
        else:
            y = self.f(xx)
        self.cached_f = y.detach().numpy()
        self._remember('cached_x_f', x)
    def cache_jac(self, x, params=None):
        # FIXME: Passing parameters to jacobian calculation NOT supported
        xx = torch.as_tensor(x, dtype=self.dtype)
        xx.requires_grad_()
        self.cached_jac = torch.autograd.functional.jacobian(
            self.f, xx).numpy()
        self._remember('cached_x_jac', x)
    def cache_hess(self, x, params=None):
        # FIXME: Passing parameters NOT supported
        xx = torch.as_tensor(x, dtype=self.dtype)
        xx.requires_grad_()
        self.cached_hess = torch.autograd.functional.hessian(
            self.f, xx).numpy()
        self._remember('cached_x_hess', x)
    def fun(self, x, params=None):
        """Objective value at ``x`` (cached per input)."""
        if not self._is_cached('cached_f', 'cached_x_f', x):
            self.cache_fun(x, params)
        return self.cached_f
    def jac(self, x, params=None):
        """Jacobian at ``x`` (cached per input)."""
        if not self._is_cached('cached_jac', 'cached_x_jac', x):
            self.cache_jac(x, params)
        return self.cached_jac
    def hess(self, x, params=None):
        """Hessian at ``x`` (cached per input)."""
        if not self._is_cached('cached_hess', 'cached_x_hess', x):
            self.cache_hess(x, params)
        return self.cached_hess
| 32.846154
| 119
| 0.601874
|
acfde3fa4edc67f26e7f41cce9c2b25fba87f4aa
| 4,114
|
py
|
Python
|
test/test_shell_script.py
|
shah-newaz/vaxrank
|
65832878f28ce44ccaaf47be3e0c6d38a1743988
|
[
"Apache-2.0"
] | null | null | null |
test/test_shell_script.py
|
shah-newaz/vaxrank
|
65832878f28ce44ccaaf47be3e0c6d38a1743988
|
[
"Apache-2.0"
] | null | null | null |
test/test_shell_script.py
|
shah-newaz/vaxrank
|
65832878f28ce44ccaaf47be3e0c6d38a1743988
|
[
"Apache-2.0"
] | null | null | null |
from os.path import getsize
from mock import patch
from nose.plugins.attrib import attr
from tempfile import NamedTemporaryFile
import pandas as pd
from xlrd import open_workbook
from vaxrank.cli import main as run_shell_script
from .testing_helpers import data_path
# Common CLI arguments exercising the bundled b16.f10 mouse dataset with the
# fast "random" MHC predictor (no external prediction tool required).
cli_args_for_b16_seqdata = [
    "--vcf", data_path("b16.f10/b16.vcf"),
    "--bam", data_path("b16.f10/b16.combined.bam"),
    "--vaccine-peptide-length", "25",
    "--mhc-predictor", "random",
    "--mhc-alleles", "H2-Kb,H2-Db",
    "--padding-around-mutation", "5",
    "--include-mismatches-after-variant"
]
# Same dataset but with the real netmhcpan predictor (presumably requires a
# local netMHCpan installation -- confirm before running in CI).
cli_args_for_b16_seqdata_real_predictor = [
    "--vcf", data_path("b16.f10/b16.vcf"),
    "--bam", data_path("b16.f10/b16.combined.bam"),
    "--vaccine-peptide-length", "25",
    "--mhc-predictor", "netmhcpan",
    "--mhc-alleles", "H2-Kb,H2-Db",
    "--mhc-epitope-lengths", "8",
    "--padding-around-mutation", "5",
    "--include-mismatches-after-variant"
]
def test_ascii_report():
    """End-to-end: the CLI writes a non-empty ASCII report."""
    with NamedTemporaryFile(mode="r") as report_file:
        args = cli_args_for_b16_seqdata + ["--output-ascii-report", report_file.name]
        run_shell_script(args)
        report_text = report_file.read()
        assert len(report_text.split("\n")) > 0
def test_ascii_report_real_netmhc_predictor():
    """ASCII report via the real netmhcpan predictor reports actual variants."""
    with NamedTemporaryFile(mode="r") as report_file:
        args = cli_args_for_b16_seqdata_real_predictor + [
            "--output-ascii-report", report_file.name]
        run_shell_script(args)
        report_text = report_file.read()
        assert len(report_text.split("\n")) > 0
        # The report must not claim that no variants were found.
        assert 'No variants' not in report_text
def test_json_report():
    """End-to-end: the CLI writes a non-empty JSON report."""
    with NamedTemporaryFile(mode="r") as report_file:
        args = cli_args_for_b16_seqdata + ["--output-json-file", report_file.name]
        run_shell_script(args)
        report_text = report_file.read()
        assert len(report_text.split("\n")) > 0
def test_csv_report():
    """End-to-end: the CLI writes a non-empty CSV report."""
    with NamedTemporaryFile(mode="r") as report_file:
        args = cli_args_for_b16_seqdata + ["--output-csv", report_file.name]
        run_shell_script(args)
        report_text = report_file.read()
        assert len(report_text.split("\n")) > 0
def test_all_variant_csv_report():
    """--output-passing-variants-csv yields a parseable, non-empty CSV."""
    with NamedTemporaryFile(mode="r") as csv_file:
        args = cli_args_for_b16_seqdata + [
            "--output-passing-variants-csv", csv_file.name,
            "--output-csv", csv_file.name + "ignored"]
        run_shell_script(args)
        text = csv_file.read()
        assert len(text.split("\n")) > 0
        # Round-trip through pandas to confirm the CSV is well-formed.
        csv_file.seek(0)
        frame = pd.read_csv(csv_file)
        assert len(frame) > 0
def test_xlsx_report():
    """The xlsx report opens as a workbook with at least one sheet."""
    with NamedTemporaryFile(mode="r") as xlsx_file:
        run_shell_script(
            cli_args_for_b16_seqdata + ["--output-xlsx-report", xlsx_file.name])
        workbook = open_workbook(xlsx_file.name)
        assert workbook.nsheets > 0
def test_html_report():
    """End-to-end: the CLI writes a non-empty HTML report."""
    with NamedTemporaryFile(mode="r") as report_file:
        args = cli_args_for_b16_seqdata + ["--output-html", report_file.name]
        run_shell_script(args)
        report_text = report_file.read()
        assert len(report_text.split("\n")) > 0
@attr('skip')  # want the ability to skip this test on some machines
def test_pdf_report():
    """The PDF report is written and non-empty."""
    with NamedTemporaryFile(mode="rb") as pdf_file:
        run_shell_script(
            cli_args_for_b16_seqdata + ["--output-pdf-report", pdf_file.name])
        assert getsize(pdf_file.name) > 0
@patch('vaxrank.core_logic.VaxrankCoreLogic.vaccine_peptides_for_variant')
def test_report_no_peptides(mock_vaccine_peptides_for_variant):
    """With no vaccine peptides for any variant, the CSV output is empty."""
    # Simulate the case where no variant yields any epitopes.
    mock_vaccine_peptides_for_variant.return_value = []
    with NamedTemporaryFile(mode="r") as csv_file:
        args = cli_args_for_b16_seqdata + ["--output-csv", csv_file.name]
        # Must not crash, and must write nothing to the CSV.
        run_shell_script(args)
        assert csv_file.read() == ''
if __name__ == "__main__":
    # Allow running a quick subset of the suite directly, without pytest/nose.
    test_csv_report()
    test_html_report()
| 32.140625
| 88
| 0.654351
|
acfde407712bbffb4844301748be7abe6897ea92
| 16,785
|
py
|
Python
|
astropy/modeling/tests/test_core.py
|
mehrdad-shokri/astropy
|
abd73b51277694338c8eca7639da956dcd06f207
|
[
"BSD-3-Clause"
] | 4
|
2021-03-25T15:49:56.000Z
|
2021-12-15T09:10:04.000Z
|
astropy/modeling/tests/test_core.py
|
mehrdad-shokri/astropy
|
abd73b51277694338c8eca7639da956dcd06f207
|
[
"BSD-3-Clause"
] | 20
|
2021-05-03T18:02:23.000Z
|
2022-03-12T12:01:04.000Z
|
astropy/modeling/tests/test_core.py
|
mehrdad-shokri/astropy
|
abd73b51277694338c8eca7639da956dcd06f207
|
[
"BSD-3-Clause"
] | 3
|
2021-03-28T16:13:00.000Z
|
2021-07-16T10:27:25.000Z
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# pylint: disable=invalid-name
import os
import sys
import subprocess
import pytest
import numpy as np
from inspect import signature
from numpy.testing import assert_allclose
import astropy
from astropy.modeling.core import Model, custom_model
from astropy.modeling.parameters import Parameter
from astropy.modeling import models
import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose
# Optional-dependency probe: HAS_SCIPY lets scipy-dependent tests be skipped
# cleanly when scipy is not installed.
try:
    import scipy # pylint: disable=W0611 # noqa
except ImportError:
    HAS_SCIPY = False
else:
    HAS_SCIPY = True
class NonFittableModel(Model):
    """An example class directly subclassing Model for testing."""
    a = Parameter()
    def __init__(self, a, model_set_axis=None):
        super().__init__(a, model_set_axis=model_set_axis)
    @staticmethod
    def evaluate():
        # Intentionally a no-op: this model only exists to exercise the Model
        # machinery (repr/str, parameters), not evaluation.
        pass
def test_Model_instance_repr_and_str():
    """repr/str of a Model instance include the class name and parameters."""
    model = NonFittableModel(42.5)
    assert repr(model) == "<NonFittableModel(a=42.5)>"
    expected_str = (
        "Model: NonFittableModel\n"
        "Inputs: ()\n"
        "Outputs: ()\n"
        "Model set size: 1\n"
        "Parameters:\n"
        " a \n"
        " ----\n"
        " 42.5")
    assert str(model) == expected_str
    assert len(model) == 1
def test_Model_array_parameter():
    """param_sets exposes each scalar parameter as a length-1 column."""
    gauss = models.Gaussian1D(4, 2, 1)
    assert_allclose(gauss.param_sets, [[4], [2], [1]])
def test_inputless_model():
    """
    Regression test for
    https://github.com/astropy/astropy/pull/3772#issuecomment-101821641
    """
    class TestModel(Model):
        n_outputs = 1
        a = Parameter()
        @staticmethod
        def evaluate(a):
            return a
    # Scalar parameter: calling the model returns the parameter value.
    model = TestModel(1)
    assert model.a == 1
    assert model() == 1
    # Array-like parameter treated as a single model (model_set_axis=False).
    model = TestModel([1, 2, 3], model_set_axis=False)
    assert len(model) == 1
    assert np.all(model() == [1, 2, 3])
    # One model per element along axis 0.
    model = TestModel(a=[1, 2, 3], model_set_axis=0)
    assert len(model) == 3
    assert np.all(model() == [1, 2, 3])
    # Two models, each with an array-valued parameter.
    model = TestModel(a=[[1, 2, 3], [4, 5, 6]], model_set_axis=0)
    assert len(model) == 2
    assert np.all(model() == [[1, 2, 3], [4, 5, 6]])
def test_ParametericModel():
    # An unrecognized keyword argument to a model constructor raises TypeError.
    with pytest.raises(TypeError):
        models.Gaussian1D(1, 2, 3, wrong=4)
def test_custom_model_signature():
    """
    Tests that the signatures for the __init__ and __call__
    methods of custom models are useful.
    """
    # __call__ always exposes the same machinery-provided parameter list.
    expected_call_params = ['self', 'inputs', 'model_set_axis',
                            'with_bounding_box', 'fill_value',
                            'equivalencies', 'inputs_map', 'new_inputs']
    @custom_model
    def model_a(x):
        return x
    assert model_a.param_names == ()
    assert model_a.n_inputs == 1
    init_sig = signature(model_a.__init__)
    assert list(init_sig.parameters.keys()) == ['self', 'args', 'meta', 'name', 'kwargs']
    assert list(signature(model_a.__call__).parameters.keys()) == expected_call_params
    @custom_model
    def model_b(x, a=1, b=2):
        return x + a + b
    assert model_b.param_names == ('a', 'b')
    assert model_b.n_inputs == 1
    init_sig = signature(model_b.__init__)
    assert list(init_sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
    assert [p.default for p in init_sig.parameters.values()] == [init_sig.empty, 1, 2, init_sig.empty]
    assert list(signature(model_b.__call__).parameters.keys()) == expected_call_params
    @custom_model
    def model_c(x, y, a=1, b=2):
        return x + y + a + b
    assert model_c.param_names == ('a', 'b')
    assert model_c.n_inputs == 2
    init_sig = signature(model_c.__init__)
    assert list(init_sig.parameters.keys()) == ['self', 'a', 'b', 'kwargs']
    assert [p.default for p in init_sig.parameters.values()] == [init_sig.empty, 1, 2, init_sig.empty]
    assert list(signature(model_c.__call__).parameters.keys()) == expected_call_params
def test_custom_model_subclass():
    """Test that custom models can be subclassed."""

    @custom_model
    def model_a(x, a=1):
        return x * a

    class model_b(model_a):
        # Override the evaluate from model_a
        @classmethod
        def evaluate(cls, x, a):
            return -super().evaluate(x, a)

    inst = model_b()
    assert inst.param_names == ('a',)
    assert inst.a == 1
    # The overridden evaluate negates the parent result: 1 * 1 -> -1.
    assert inst(1) == -1

    init_keys = list(signature(model_b.__init__).parameters.keys())
    assert init_keys == ['self', 'a', 'kwargs']
    call_keys = list(signature(model_b.__call__).parameters.keys())
    assert call_keys == ['self', 'inputs', 'model_set_axis',
                         'with_bounding_box', 'fill_value',
                         'equivalencies', 'inputs_map', 'new_inputs']
def test_custom_model_parametrized_decorator():
    """Tests using custom_model as a decorator with parameters."""

    def cosine(x, amplitude=1):
        # Derivative of amplitude*sin(x) w.r.t. x, wrapped in a list.
        return [amplitude * np.cos(x)]

    @custom_model(fit_deriv=cosine)
    def sine(x, amplitude=1):
        return amplitude * np.sin(x)

    assert issubclass(sine, Model)
    model = sine(2)
    assert_allclose(model(np.pi / 2), 2)
    assert_allclose(model.fit_deriv(0, 2), 2)
def test_custom_inverse():
    """Test setting a custom inverse on a model."""
    poly = models.Polynomial1D(1, c0=-2, c1=3)
    # Inverse of y = 3x - 2 is x = (y + 2) / 3.
    inverse_poly = models.Polynomial1D(1, c0=(2./3.), c1=(1./3.))

    # No inverse is defined until one is assigned.
    with pytest.raises(NotImplementedError):
        poly.inverse

    poly.inverse = inverse_poly
    xs = np.arange(100)
    assert_allclose(xs, poly(poly.inverse(xs)))
    assert_allclose(xs, poly.inverse(poly(xs)))

    # Assigning None removes the custom inverse again.
    poly.inverse = None
    with pytest.raises(NotImplementedError):
        poly.inverse
def test_custom_inverse_reset():
    """Test resetting a custom inverse to the model's default inverse."""
    class TestModel(Model):
        n_inputs = 0
        outputs = ('y',)
        @property
        def inverse(self):
            # Class-level default inverse for this dummy model.
            return models.Shift()
        @staticmethod
        def evaluate():
            return 0
    # The above test model has no meaning, nor does its inverse--this just
    # tests that setting an inverse and resetting to the default inverse works
    m = TestModel()
    assert isinstance(m.inverse, models.Shift)
    m.inverse = models.Scale()
    assert isinstance(m.inverse, models.Scale)
    # Deleting the instance-level inverse restores the class property.
    del m.inverse
    assert isinstance(m.inverse, models.Shift)
def test_render_model_2d():
    """Render a Gaussian2D at edge and sub-pixel positions and compare
    against direct evaluation over the grid; also check that the bounding
    box captures the flux and that a too-small output array is rejected."""
    imshape = (71, 141)
    image = np.zeros(imshape)
    coords = y, x = np.indices(imshape)
    model = models.Gaussian2D(x_stddev=6.1, y_stddev=3.9, theta=np.pi / 3)
    # test points for edges
    ye, xe = [0, 35, 70], [0, 70, 140]
    # test points for floating point positions
    yf, xf = [35.1, 35.5, 35.9], [70.1, 70.5, 70.9]
    test_pts = [(a, b) for a in xe for b in ye]
    test_pts += [(a, b) for a in xf for b in yf]
    for x0, y0 in test_pts:
        model.x_mean = x0
        model.y_mean = y0
        expected = model(x, y)
        for xy in [coords, None]:
            for im in [image.copy(), None]:
                if im is None and xy is None:
                    # this case is tested in Fittable2DModelTester
                    continue
                actual = model.render(out=im, coords=xy)
                if im is None:
                    assert_allclose(actual, model.render(coords=xy))
                # assert images match
                assert_allclose(expected, actual, atol=3e-7)
                # assert model fully captured
                if (x0, y0) == (70, 35):
                    boxed = model.render()
                    flux = np.sum(expected)
                    assert ((flux - np.sum(boxed)) / flux) < 1e-7
    # Rendering into an array smaller than the model's bounding box must
    # raise.  The original ``try/except ValueError: pass`` would succeed
    # silently if no error were raised; pytest.raises asserts it happens.
    with pytest.raises(ValueError):
        model.render(out=np.zeros((1, 1)))
def test_render_model_1d():
    # Render a Gaussian1D at several positions and widths and compare
    # against direct evaluation over the coordinate grid.
    npix = 101
    image = np.zeros(npix)
    coords = np.arange(npix)
    model = models.Gaussian1D()
    # test points
    test_pts = [0, 49.1, 49.5, 49.9, 100]
    # test widths
    test_stdv = np.arange(5.5, 6.7, .2)
    for x0, stdv in [(p, s) for p in test_pts for s in test_stdv]:
        model.mean = x0
        model.stddev = stdv
        expected = model(coords)
        for x in [coords, None]:
            for im in [image.copy(), None]:
                if (im is None) & (x is None):
                    # this case is tested in Fittable1DModelTester
                    continue
                actual = model.render(out=im, coords=x)
                # assert images match
                assert_allclose(expected, actual, atol=3e-7)
                # assert model fully captured
                if (x0, stdv) == (49.5, 5.5):
                    boxed = model.render()
                    flux = np.sum(expected)
                    assert ((flux - np.sum(boxed)) / flux) < 1e-7
@pytest.mark.filterwarnings('ignore:invalid value encountered in less')
def test_render_model_3d():
    """Render a custom 3D ellipsoid model over a grid and compare against
    direct evaluation; check the bounding box captures the full model."""
    imshape = (17, 21, 27)
    image = np.zeros(imshape)
    coords = np.indices(imshape)

    def ellipsoid(x, y, z, x0=13., y0=10., z0=8., a=4., b=3., c=2., amp=1.):
        # Indicator of the interior of an axis-aligned ellipsoid, scaled
        # by amp.
        rsq = ((x - x0) / a) ** 2 + ((y - y0) / b) ** 2 + ((z - z0) / c) ** 2
        val = (rsq < 1) * amp
        return val

    class Ellipsoid3D(custom_model(ellipsoid)):
        @property
        def bounding_box(self):
            return ((self.z0 - self.c, self.z0 + self.c),
                    (self.y0 - self.b, self.y0 + self.b),
                    (self.x0 - self.a, self.x0 + self.a))

    model = Ellipsoid3D()
    # test points for edges
    ze, ye, xe = [0, 8, 16], [0, 10, 20], [0, 13, 26]
    # test points for floating point positions
    zf, yf, xf = [8.1, 8.5, 8.9], [10.1, 10.5, 10.9], [13.1, 13.5, 13.9]
    test_pts = [(x, y, z) for x in xe for y in ye for z in ze]
    test_pts += [(x, y, z) for x in xf for y in yf for z in zf]
    for x0, y0, z0 in test_pts:
        model.x0 = x0
        model.y0 = y0
        model.z0 = z0
        expected = model(*coords[::-1])
        for c in [coords, None]:
            for im in [image.copy(), None]:
                if im is None and c is None:
                    continue
                # NOTE: the original computed an unused ``model.render()``
                # here on every iteration; that redundant full render has
                # been removed (it was recomputed below before use).
                actual = model.render(out=im, coords=c)
                # assert images match
                assert_allclose(expected, actual)
                # assert model fully captured
                if (z0, y0, x0) == (8, 10, 13):
                    boxed = model.render()
                    assert (np.sum(expected) - np.sum(boxed)) == 0
def test_render_model_out_dtype():
    """Test different out.dtype for model.render."""
    test_models = [models.Gaussian2D(),
                   models.Gaussian2D() + models.Planar2D()]
    float_like = (np.float64, np.float32, np.complex64)
    for model in test_models:
        for dtype in float_like:
            out = np.zeros((40, 40), dtype=dtype)
            rendered = model.render(out=out)
            # render() writes in place and returns the same array.
            assert rendered is out
            assert rendered.sum() != 0
        # Integer output arrays are rejected.
        with pytest.raises(TypeError):
            model.render(out=np.zeros((40, 40), dtype=np.int32))
def test_custom_bounding_box_1d():
    """
    Tests that the bounding_box setter works.
    """
    # Same check for a 1D and a 2D model: re-assigning a model's own
    # bounding box through the setter must not change its rendering.
    for model in (models.Gaussian1D(), models.Gaussian2D()):
        bbox = model.bounding_box
        expected = model.render()
        model.bounding_box = bbox
        assert_allclose(model.render(), expected)
def test_n_submodels_in_single_models():
    """A non-compound model reports exactly one submodel."""
    for single in (models.Gaussian1D(), models.Gaussian2D()):
        assert single.n_submodels == 1
def test_compound_deepcopy():
    """deepcopy of a compound model copies the tree and every leaf."""
    original = (models.Gaussian1D(10, 2, 3) | models.Shift(2)) & models.Rotation2D(21.3)
    copied = original.deepcopy()
    assert copied is not original
    assert copied._leaflist is not original._leaflist
    # Each of the three leaf models must be a distinct object too.
    for idx in range(3):
        assert copied[idx] is not original[idx]
@pytest.mark.skipif('not HAS_SCIPY')
def test_units_with_bounding_box():
    """Tabular1D with a Quantity lookup table returns Quantities both
    with and without bounding-box evaluation, and the values agree."""
    xs = np.arange(10, 20)
    lookup = np.arange(10) * u.Angstrom
    tab = models.Tabular1D(xs, lookup_table=lookup)
    plain = tab(10)
    boxed = tab(10, with_bounding_box=True)
    assert isinstance(plain, u.Quantity)
    assert isinstance(boxed, u.Quantity)
    assert_quantity_allclose(plain, boxed)
# A renamed copy of Gaussian1D, used by test_rename_path below.
RENAMED_MODEL = models.Gaussian1D.rename('CustomGaussian')
# Script source executed in a subprocess by test_rename_path; the
# literal must stay verbatim -- its output is compared byte-for-byte.
MODEL_RENAME_CODE = """
from astropy.modeling.models import Gaussian1D
print(repr(Gaussian1D))
print(repr(Gaussian1D.rename('CustomGaussian')))
""".strip()
# Expected stdout (bytes) of MODEL_RENAME_CODE when run via __main__.
MODEL_RENAME_EXPECTED = b"""
<class 'astropy.modeling.functional_models.Gaussian1D'>
Name: Gaussian1D
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
<class '__main__.CustomGaussian'>
Name: CustomGaussian (Gaussian1D)
N_inputs: 1
N_outputs: 1
Fittable parameters: ('amplitude', 'mean', 'stddev')
""".strip()
def test_rename_path(tmpdir):
    """Renamed model classes report the module they were created in."""
    # Regression test for a bug that caused the path to the class to be
    # incorrect in a renamed model's __repr__.
    assert repr(RENAMED_MODEL).splitlines()[0] == "<class 'astropy.modeling.tests.test_core.CustomGaussian'>"
    # Make sure that when called from a user script, the class name includes
    # __main__.
    env = os.environ.copy()
    # Prepend this astropy checkout so the subprocess imports the same copy.
    paths = [os.path.dirname(astropy.__path__[0])] + sys.path
    env['PYTHONPATH'] = os.pathsep.join(paths)
    script = tmpdir.join('rename.py').strpath
    with open(script, 'w') as f:
        f.write(MODEL_RENAME_CODE)
    output = subprocess.check_output([sys.executable, script], env=env)
    assert output.splitlines() == MODEL_RENAME_EXPECTED.splitlines()
@pytest.mark.parametrize('model_class',
                         [models.Gaussian1D, models.Polynomial1D,
                          models.Shift, models.Tabular1D])
def test_rename_1d(model_class):
    """rename() on a 1D model class stores the supplied name."""
    renamed = model_class.rename(name='Test1D')
    assert renamed.name == 'Test1D'
@pytest.mark.parametrize('model_class',
                         [models.Gaussian2D, models.Polynomial2D, models.Tabular2D])
def test_rename_2d(model_class):
    """rename() on a 2D model class stores the supplied name."""
    renamed = model_class.rename(name='Test2D')
    assert renamed.name == 'Test2D'
def test_rename_inputs_outputs():
    """inputs/outputs are readable, but cannot be reassigned with the
    wrong number of names."""
    gauss = models.Gaussian2D(10, 2, 3, 1, 2)
    assert gauss.inputs == ("x", "y")
    assert gauss.outputs == ("z",)
    # Wrong arity in either direction raises ValueError.
    for attr, bad_value in (("inputs", ("w",)), ("outputs", ("w", "e"))):
        with pytest.raises(ValueError):
            setattr(gauss, attr, bad_value)
def test_coerce_units():
    """coerce_units() wraps a unitless model so it accepts/returns
    Quantities, via dicts or positional tuples, and validates its args."""
    model = models.Polynomial1D(1, c0=1, c1=2)
    # The bare model rejects Quantity input.
    with pytest.raises(u.UnitsError):
        model(u.Quantity(10, u.m))
    # Input units via a {input_name: unit} mapping.
    with_input_units = model.coerce_units({"x": u.m})
    result = with_input_units(u.Quantity(10, u.m))
    assert np.isclose(result, 21.0)
    # Input units via a positional tuple.
    with_input_units_tuple = model.coerce_units((u.m,))
    result = with_input_units_tuple(u.Quantity(10, u.m))
    assert np.isclose(result, 21.0)
    # Return units via a {output_name: unit} mapping.
    with_return_units = model.coerce_units(return_units={"y": u.s})
    result = with_return_units(10)
    assert np.isclose(result.value, 21.0)
    assert result.unit == u.s
    # Return units via a positional tuple.
    with_return_units_tuple = model.coerce_units(return_units=(u.s,))
    result = with_return_units_tuple(10)
    assert np.isclose(result.value, 21.0)
    assert result.unit == u.s
    # Both input and return units together.
    with_both = model.coerce_units({"x": u.m}, {"y": u.s})
    result = with_both(u.Quantity(10, u.m))
    assert np.isclose(result.value, 21.0)
    assert result.unit == u.s
    # Validation errors: wrong keys, wrong lengths, pre-existing units.
    with pytest.raises(ValueError, match=r"input_units keys.*do not match model inputs"):
        model.coerce_units({"q": u.m})
    with pytest.raises(ValueError, match=r"input_units length does not match n_inputs"):
        model.coerce_units((u.m, u.s))
    model_with_existing_input_units = models.BlackBody()
    with pytest.raises(ValueError, match=r"Cannot specify input_units for model with existing input units"):
        model_with_existing_input_units.coerce_units({"x": u.m})
    with pytest.raises(ValueError, match=r"return_units keys.*do not match model outputs"):
        model.coerce_units(return_units={"q": u.m})
    with pytest.raises(ValueError, match=r"return_units length does not match n_outputs"):
        model.coerce_units(return_units=(u.m, u.s))
| 31.025878
| 109
| 0.601966
|
acfde44cc5bc119f04426ab7364a49efd6811caa
| 3,237
|
py
|
Python
|
profiles_project/settings.py
|
AngusData/profiles-rest-api
|
41eda7366824c5be4b99f0186902fc38c090cd7c
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
AngusData/profiles-rest-api
|
41eda7366824c5be4b99f0186902fc38c090cd7c
|
[
"MIT"
] | null | null | null |
profiles_project/settings.py
|
AngusData/profiles-rest-api
|
41eda7366824c5be4b99f0186902fc38c090cd7c
|
[
"MIT"
] | null | null | null |
"""
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'r!__%cdchs(meg!l^ob677vuf-v(k6shn3#zctxf70xfwu$jkz'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 25.488189
| 91
| 0.700649
|
acfde48d7edfcf775a0170688977782e1d61b5da
| 460
|
py
|
Python
|
env/Lib/site-packages/plotly/validators/barpolar/marker/colorbar/_tickvalssrc.py
|
andresgreen-byte/Laboratorio-1--Inversion-de-Capital
|
8a4707301d19c3826c31026c4077930bcd6a8182
|
[
"MIT"
] | 11,750
|
2015-10-12T07:03:39.000Z
|
2022-03-31T20:43:15.000Z
|
venv/Lib/site-packages/plotly/validators/barpolar/marker/colorbar/_tickvalssrc.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,951
|
2015-10-12T00:41:25.000Z
|
2022-03-31T22:19:26.000Z
|
venv/Lib/site-packages/plotly/validators/barpolar/marker/colorbar/_tickvalssrc.py
|
wakisalvador/constructed-misdirection
|
74779e9ec640a11bc08d5d1967c85ac4fa44ea5e
|
[
"Unlicense"
] | 2,623
|
2015-10-15T14:40:27.000Z
|
2022-03-28T16:05:50.000Z
|
import _plotly_utils.basevalidators


class TickvalssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``tickvalssrc`` property of
    ``barpolar.marker.colorbar``."""

    def __init__(
        self,
        plotly_name="tickvalssrc",
        parent_name="barpolar.marker.colorbar",
        **kwargs
    ):
        # Default edit_type is "none"; an explicit caller value wins.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs
        )
| 27.058824
| 70
| 0.632609
|
acfde4dbad16b80f8f955eb5733f70c211b77e52
| 3,010
|
py
|
Python
|
examples/pybullet/examples/userData.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 9,136
|
2015-01-02T00:41:45.000Z
|
2022-03-31T15:30:02.000Z
|
examples/pybullet/examples/userData.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 2,424
|
2015-01-05T08:55:58.000Z
|
2022-03-30T19:34:55.000Z
|
examples/pybullet/examples/userData.py
|
felipeek/bullet3
|
6a59241074720e9df119f2f86bc01765917feb1e
|
[
"Zlib"
] | 2,921
|
2015-01-02T10:19:30.000Z
|
2022-03-31T02:48:42.000Z
|
# Demonstrates the pybullet user-data API: adding, reading, overriding,
# removing, and syncing per-body key/value entries across client
# reconnects to a shared-memory physics server.
import pybullet as pb
import time
from pybullet_utils import bullet_client
# NOTE(review): keeping this reference presumably keeps the in-process
# shared-memory server alive for the script's lifetime -- confirm.
server = bullet_client.BulletClient(connection_mode=pb.SHARED_MEMORY_SERVER)
print("Connecting to bullet server")
CONNECTION_METHOD = pb.SHARED_MEMORY
client = bullet_client.BulletClient(connection_mode=CONNECTION_METHOD)
PLANE_PATH = "plane.urdf"
client.loadURDF(PLANE_PATH)
client.setGravity(0, 0, -10)
print("Adding plane object")
plane_id = client.loadURDF(PLANE_PATH)
print("Plane ID: %s" % plane_id)
print("Adding user data to plane")
# addUserData returns an integer ID used for later lookups.
MyKey1 = client.addUserData(plane_id, "MyKey1", "MyValue1")
MyKey2 = client.addUserData(plane_id, "MyKey2", "MyValue2")
MyKey3 = client.addUserData(plane_id, "MyKey3", "MyValue3")
MyKey4 = client.addUserData(plane_id, "MyKey4", "MyValue4")
print("Retrieving cached user data")
print(client.getUserData(MyKey1))
print(client.getUserData(MyKey2))
print(client.getUserData(MyKey3))
print(client.getUserData(MyKey4))
print("Disconnecting")
del client
print("Reconnecting")
client = bullet_client.BulletClient(connection_mode=CONNECTION_METHOD)
# After a reconnect the entries are read back from the server.
print("Retrieving synced user data")
print(client.getUserData(MyKey1))
print(client.getUserData(MyKey2))
print(client.getUserData(MyKey3))
print(client.getUserData(MyKey4))
print("Number of user data entries: %s" % client.getNumUserData(plane_id))
print("Overriding user data")
# Re-adding an existing key overwrites its value.
client.addUserData(plane_id, "MyKey1", "MyNewValue")
print("Cached overridden data")
print(client.getUserData(MyKey1))
print("Disconnecting")
del client
print("Reconnecting")
client = bullet_client.BulletClient(connection_mode=CONNECTION_METHOD)
print("Synced overridden data")
print(client.getUserData(MyKey1))
print("Getting user data ID")
print("Retrieved ID: %s, ID retrieved from addUserData: %s" %
      (client.getUserDataId(plane_id, "MyKey2"), MyKey2))
print("Removing user data")
client.removeUserData(MyKey2)
print("Retrieving cached removed data")
print(client.getUserData(MyKey2))
print("Syncing")
client.syncUserData()
print("Retrieving removed removed data")
print(client.getUserData(MyKey2))
print("Iterating over all user data entries and printing results")
for i in range(client.getNumUserData(plane_id)):
  userDataId, key, bodyId, linkIndex, visualShapeIndex = client.getUserDataInfo(plane_id, i)
  print("Info: (%s, %s, %s, %s, %s)" % (userDataId, key, bodyId, linkIndex, visualShapeIndex))
  print("Value: %s" % client.getUserData(userDataId))
print("Removing body")
# Removing the body also invalidates its user-data entries (after sync).
client.removeBody(plane_id)
print("Retrieving user data")
print(client.getUserData(MyKey1))
print(client.getUserData(MyKey3))
print(client.getUserData(MyKey4))
print("Syncing")
client.syncUserData()
print("Retrieving user data")
print(client.getUserData(MyKey1))
print(client.getUserData(MyKey3))
print(client.getUserData(MyKey4))
plane_id2 = client.loadURDF(PLANE_PATH)
print("Plane1: %s, plane2: %s" % (plane_id, plane_id2))
print("Retrieving user data")
print(client.getUserData(MyKey1))
print(client.getUserData(MyKey3))
print(client.getUserData(MyKey4))
| 28.396226
| 94
| 0.781063
|
acfde72122b2d6224a834cb20ab88b5b97809ef6
| 3,905
|
py
|
Python
|
2.0/main.py
|
Felipe2102/Assistente-Escolar-setup-model
|
6181232b52f412581461ee1089fb068b85d7d28e
|
[
"Apache-2.0"
] | null | null | null |
2.0/main.py
|
Felipe2102/Assistente-Escolar-setup-model
|
6181232b52f412581461ee1089fb068b85d7d28e
|
[
"Apache-2.0"
] | null | null | null |
2.0/main.py
|
Felipe2102/Assistente-Escolar-setup-model
|
6181232b52f412581461ee1089fb068b85d7d28e
|
[
"Apache-2.0"
] | null | null | null |
from datetime import datetime
import PySimpleGUI as sg
import os
Dia = datetime.now().weekday()
def setup():
    """First-run configuration: create the data folders and ask the user
    for each weekday's lessons, storing one text file per day.

    Fixes over the original: files were opened with 'w', closed, and
    reopened with 'r+' before writing (redundant), and the handles leaked
    if a popup raised; a single 'w' open inside a context manager per day
    replaces the five duplicated blocks.
    """
    # Keep the UI theme consistent with the rest of the application.
    sg.theme('DarkGrey13')
    # Create the data folders (original behavior: raises if they exist).
    os.mkdir('./Data')
    os.mkdir('./Data/Lessons')
    sg.popup("parece que você não tem nenhuma aula configurada ;-;")
    day_prompts = [
        ('Data/Lessons/Aulas_seg.txt', 'Quais aulas você tem na segunda?'),
        ('Data/Lessons/Aulas_ter.txt', 'Quais aulas você tem na terça?'),
        ('Data/Lessons/Aulas_qua.txt', 'Quais aulas você tem na quarta?'),
        ('Data/Lessons/Aulas_qui.txt', 'Quais aulas você tem na quinta?'),
        ('Data/Lessons/Aulas_sex.txt', 'Quais aulas você tem na sexta?'),
    ]
    for path, prompt in day_prompts:
        # 'w' creates/truncates the file; the context manager guarantees
        # it is closed even if the dialog raises.
        with open(path, 'w') as day_file:
            day_file.write(sg.PopupGetText(prompt))
    sg.popup('configuração finalizada!')
def Redefinir():
    """Ask the user again for every weekday's lessons, overwrite the
    stored files, and close the main window.

    Fixes over the original: each file was removed, recreated with 'w',
    closed, reopened with 'r+' and only then written -- opening once with
    'w' after the remove is equivalent and leak-free.

    NOTE(review): relies on the module-level ``window`` created after
    this definition; it must only be called once the main window exists.
    """
    # Keep the UI theme consistent with the rest of the application.
    sg.theme('DarkGrey13')
    day_prompts = [
        ('Data/Lessons/Aulas_seg.txt', 'Quais aulas você tem na segunda?'),
        ('Data/Lessons/Aulas_ter.txt', 'Quais aulas você tem na terça?'),
        ('Data/Lessons/Aulas_qua.txt', 'Quais aulas você tem na quarta?'),
        ('Data/Lessons/Aulas_qui.txt', 'Quais aulas você tem na quinta?'),
        ('Data/Lessons/Aulas_sex.txt', 'Quais aulas você tem na sexta?'),
    ]
    for path, prompt in day_prompts:
        # Preserve the original remove-then-recreate behavior (raises
        # FileNotFoundError if a day file is missing, as before).
        os.remove(path)
        with open(path, 'w') as day_file:
            day_file.write(sg.PopupGetText(prompt))
    sg.popup('configuração finalizada, reinicie o programa para aplicar as novas configurações.')
    window.close()
# Relative paths of the per-weekday lesson files (Mon..Fri).
DAY_FILES = [
    'Data/Lessons/Aulas_seg.txt',
    'Data/Lessons/Aulas_ter.txt',
    'Data/Lessons/Aulas_qua.txt',
    'Data/Lessons/Aulas_qui.txt',
    'Data/Lessons/Aulas_sex.txt',
]


def _read_lessons():
    """Read the five weekday files, running first-time setup() if absent.

    The original code called setup() on FileNotFoundError but never
    (re)assigned the day variables, crashing with NameError on first run;
    re-reading after setup() fixes that.  Files are also closed properly.
    """
    def read(path):
        with open(path, 'r') as day_file:
            return day_file.read()
    try:
        return [read(p) for p in DAY_FILES]
    except FileNotFoundError:
        setup()
        return [read(p) for p in DAY_FILES]


# Keep the original module-level names for compatibility.
ASEG, ATER, AQUA, AQUI, ASEX = _read_lessons()
# Monday..Friday pick the matching day; on weekends (Dia >= 5) the
# original left ``Aulas`` undefined and crashed -- show a message instead.
if Dia < 5:
    Aulas = [ASEG, ATER, AQUA, AQUI, ASEX][Dia]
else:
    Aulas = 'Hoje não tem aula!'
sg.theme('DarkGrey13')
layout = [
    [sg.Text('Suas aulas são:')],
    [sg.Text(Aulas)],
    [sg.Button('Redefinir'), sg.Exit()]
]
window = sg.Window('Teste', layout, size=(150, 100), element_justification='center', finalize=True)
while True:
    event, values = window.read()
    if event == 'Redefinir':
        window.hide()
        Redefinir()
    if event == sg.WIN_CLOSED:
        break
    if event == 'Exit':
        break
| 30.271318
| 98
| 0.702433
|
acfde886521b26470efdf1680611c904f6027b07
| 20,470
|
py
|
Python
|
vspk/v4_0/nuredundantport.py
|
mohaimenhasan/vspk-python
|
4c7b297427048340b250cc3c74d9214dc0d4bde1
|
[
"BSD-3-Clause"
] | null | null | null |
vspk/v4_0/nuredundantport.py
|
mohaimenhasan/vspk-python
|
4c7b297427048340b250cc3c74d9214dc0d4bde1
|
[
"BSD-3-Clause"
] | null | null | null |
vspk/v4_0/nuredundantport.py
|
mohaimenhasan/vspk-python
|
4c7b297427048340b250cc3c74d9214dc0d4bde1
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUVLANsFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUNSPortsFetcher
from bambou import NURESTObject
class NURedundantPort(NURESTObject):
    """ Represents a RedundantPort in the VSD
        Notes:
            Represents a Port under a particular gateway object or redundant group object.
    """
    # REST resource names used by bambou to build endpoint URLs.
    __rest_name__ = "nsredundantport"
    __resource_name__ = "nsredundantports"
    ## Constants
    # Allowed values for portType, permittedAction, speed, status and
    # entityScope attributes (mirrored in the expose_attribute choices).
    CONST_PORT_TYPE_NETWORK = "NETWORK"
    CONST_PERMITTED_ACTION_USE = "USE"
    CONST_SPEED_BASETX100 = "BASETX100"
    CONST_PERMITTED_ACTION_READ = "READ"
    CONST_STATUS_INITIALIZED = "INITIALIZED"
    CONST_PERMITTED_ACTION_ALL = "ALL"
    CONST_PERMITTED_ACTION_DEPLOY = "DEPLOY"
    CONST_PERMITTED_ACTION_EXTEND = "EXTEND"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    CONST_PERMITTED_ACTION_INSTANTIATE = "INSTANTIATE"
    CONST_SPEED_BASET1000 = "BASET1000"
    CONST_SPEED_BASE10 = "BASE10"
    CONST_STATUS_MISMATCH = "MISMATCH"
    CONST_STATUS_READY = "READY"
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_PORT_TYPE_ACCESS = "ACCESS"
    CONST_STATUS_ORPHAN = "ORPHAN"
    CONST_SPEED_AUTONEGOTIATE = "AUTONEGOTIATE"
    CONST_SPEED_BASEX10G = "BASEX10G"
    def __init__(self, **kwargs):
        """ Initializes a RedundantPort instance
            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary
            Examples:
                >>> redundantport = NURedundantPort(id=u'xxxx-xxx-xxx-xxx', name=u'RedundantPort')
                >>> redundantport = NURedundantPort(data=my_dict)
        """
        super(NURedundantPort, self).__init__()
        # Read/Write Attributes
        # Local storage for every exposed REST attribute (see the
        # expose_attribute calls below for the remote names and types).
        self._vlan_range = None
        self._mtu = None
        self._name = None
        self._last_updated_by = None
        self._permitted_action = None
        self._description = None
        self._physical_name = None
        self._infrastructure_profile_id = None
        self._entity_scope = None
        self._port_peer1_id = None
        self._port_peer2_id = None
        self._port_type = None
        self._speed = None
        self._use_untagged_heartbeat_vlan = None
        self._use_user_mnemonic = None
        self._user_mnemonic = None
        self._associated_egress_qos_policy_id = None
        self._status = None
        self._external_id = None
        # Map each local attribute to its REST name, type and constraints.
        self.expose_attribute(local_name="vlan_range", remote_name="VLANRange", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="mtu", remote_name="MTU", attribute_type=int, is_required=False, is_unique=False)
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="permitted_action", remote_name="permittedAction", attribute_type=str, is_required=False, is_unique=False, choices=[u'ALL', u'DEPLOY', u'EXTEND', u'INSTANTIATE', u'READ', u'USE'])
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="physical_name", remote_name="physicalName", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="infrastructure_profile_id", remote_name="infrastructureProfileID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="port_peer1_id", remote_name="portPeer1ID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="port_peer2_id", remote_name="portPeer2ID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="port_type", remote_name="portType", attribute_type=str, is_required=True, is_unique=False, choices=[u'ACCESS', u'NETWORK'])
        self.expose_attribute(local_name="speed", remote_name="speed", attribute_type=str, is_required=False, is_unique=False, choices=[u'AUTONEGOTIATE', u'BASE10', u'BASET1000', u'BASETX100', u'BASEX10G'])
        self.expose_attribute(local_name="use_untagged_heartbeat_vlan", remote_name="useUntaggedHeartbeatVlan", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="use_user_mnemonic", remote_name="useUserMnemonic", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="user_mnemonic", remote_name="userMnemonic", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="associated_egress_qos_policy_id", remote_name="associatedEgressQOSPolicyID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="status", remote_name="status", attribute_type=str, is_required=False, is_unique=False, choices=[u'INITIALIZED', u'MISMATCH', u'ORPHAN', u'READY'])
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        # Fetchers
        # Child-object fetchers for related REST resources.
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.vlans = NUVLANsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.ns_ports = NUNSPortsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        # Apply any keyword arguments (including `data`) to the instance.
        self._compute_args(**kwargs)
# Properties
    @property
    def vlan_range(self):
        """ Get vlan_range value.
            Notes:
                VLAN Range of the Port. Format must conform to a-b,c,d-f where a,b,c,d,f are integers between 0 and 4095.
                This attribute is named `VLANRange` in VSD API.
        """
        return self._vlan_range
    @vlan_range.setter
    def vlan_range(self, value):
        """ Set vlan_range value.
            Notes:
                VLAN Range of the Port. Format must conform to a-b,c,d-f where a,b,c,d,f are integers between 0 and 4095.
                This attribute is named `VLANRange` in VSD API.
        """
        self._vlan_range = value
    @property
    def mtu(self):
        """ Get mtu value.
            Notes:
                Port MTU (Maximum Transmission Unit) : The size in octets of the largest protocol data unit (PDU) that the layer can pass on. The default value is normally 1500 octets for Ethernet v2 and can go up to 9198 for Jumbo Frames.
                This attribute is named `MTU` in VSD API.
        """
        return self._mtu
    @mtu.setter
    def mtu(self, value):
        """ Set mtu value.
            Notes:
                Port MTU (Maximum Transmission Unit) : The size in octets of the largest protocol data unit (PDU) that the layer can pass on. The default value is normally 1500 octets for Ethernet v2 and can go up to 9198 for Jumbo Frames.
                This attribute is named `MTU` in VSD API.
        """
        self._mtu = value
    @property
    def name(self):
        """ Get name value.
            Notes:
                Name of the Port
        """
        return self._name
    @name.setter
    def name(self, value):
        """ Set name value.
            Notes:
                Name of the Port
        """
        self._name = value
    @property
    def last_updated_by(self):
        """ Get last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by
    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.
            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value
    @property
    def permitted_action(self):
        """ Get permitted_action value.
            Notes:
                The permitted action to USE/EXTEND this Gateway.
                This attribute is named `permittedAction` in VSD API.
        """
        return self._permitted_action
    @permitted_action.setter
    def permitted_action(self, value):
        """ Set permitted_action value.
            Notes:
                The permitted action to USE/EXTEND this Gateway.
                This attribute is named `permittedAction` in VSD API.
        """
        self._permitted_action = value
    @property
    def description(self):
        """ Get description value.
            Notes:
                A description of the Port
        """
        return self._description
    @description.setter
    def description(self, value):
        """ Set description value.
            Notes:
                A description of the Port
        """
        self._description = value
@property
def physical_name(self):
""" Get physical_name value.
Notes:
Identifier of the Port
This attribute is named `physicalName` in VSD API.
"""
return self._physical_name
@physical_name.setter
def physical_name(self, value):
""" Set physical_name value.
Notes:
Identifier of the Port
This attribute is named `physicalName` in VSD API.
"""
self._physical_name = value
@property
def infrastructure_profile_id(self):
""" Get infrastructure_profile_id value.
Notes:
The ID of the infrastructure profile this instance is associated with.
This attribute is named `infrastructureProfileID` in VSD API.
"""
return self._infrastructure_profile_id
@infrastructure_profile_id.setter
def infrastructure_profile_id(self, value):
""" Set infrastructure_profile_id value.
Notes:
The ID of the infrastructure profile this instance is associated with.
This attribute is named `infrastructureProfileID` in VSD API.
"""
self._infrastructure_profile_id = value
@property
def entity_scope(self):
""" Get entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
""" Set entity_scope value.
Notes:
Specify if scope of entity is Data center or Enterprise level
This attribute is named `entityScope` in VSD API.
"""
self._entity_scope = value
@property
def port_peer1_id(self):
""" Get port_peer1_id value.
Notes:
The master gateway peer port id.
This attribute is named `portPeer1ID` in VSD API.
"""
return self._port_peer1_id
@port_peer1_id.setter
def port_peer1_id(self, value):
""" Set port_peer1_id value.
Notes:
The master gateway peer port id.
This attribute is named `portPeer1ID` in VSD API.
"""
self._port_peer1_id = value
@property
def port_peer2_id(self):
""" Get port_peer2_id value.
Notes:
The slave gateway peer port id.
This attribute is named `portPeer2ID` in VSD API.
"""
return self._port_peer2_id
@port_peer2_id.setter
def port_peer2_id(self, value):
""" Set port_peer2_id value.
Notes:
The slave gateway peer port id.
This attribute is named `portPeer2ID` in VSD API.
"""
self._port_peer2_id = value
@property
def port_type(self):
""" Get port_type value.
Notes:
Type of the Port.
This attribute is named `portType` in VSD API.
"""
return self._port_type
@port_type.setter
def port_type(self, value):
""" Set port_type value.
Notes:
Type of the Port.
This attribute is named `portType` in VSD API.
"""
self._port_type = value
@property
def speed(self):
""" Get speed value.
Notes:
Port Speed in Mb/s : Supported Ethernet speeds are 10 (10Base-T), 100 (Fast-ethernet 100Base-TX), 1000 (Gigabit Ethernet 1000Base-T), 10 000 (10 Gigabit Ethernet 10GBase-X), and Auto-Negotiate.
"""
return self._speed
@speed.setter
def speed(self, value):
""" Set speed value.
Notes:
Port Speed in Mb/s : Supported Ethernet speeds are 10 (10Base-T), 100 (Fast-ethernet 100Base-TX), 1000 (Gigabit Ethernet 1000Base-T), 10 000 (10 Gigabit Ethernet 10GBase-X), and Auto-Negotiate.
"""
self._speed = value
@property
def use_untagged_heartbeat_vlan(self):
""" Get use_untagged_heartbeat_vlan value.
Notes:
A flag to indicate if for this redundant port an untagged heartbeat VLAN is to be used. If this is not set then will use the heartbeat VLAN set by the NS redundant group
This attribute is named `useUntaggedHeartbeatVlan` in VSD API.
"""
return self._use_untagged_heartbeat_vlan
@use_untagged_heartbeat_vlan.setter
def use_untagged_heartbeat_vlan(self, value):
""" Set use_untagged_heartbeat_vlan value.
Notes:
A flag to indicate if for this redundant port an untagged heartbeat VLAN is to be used. If this is not set then will use the heartbeat VLAN set by the NS redundant group
This attribute is named `useUntaggedHeartbeatVlan` in VSD API.
"""
self._use_untagged_heartbeat_vlan = value
@property
def use_user_mnemonic(self):
""" Get use_user_mnemonic value.
Notes:
determines whether to use user mnemonic of the Port
This attribute is named `useUserMnemonic` in VSD API.
"""
return self._use_user_mnemonic
@use_user_mnemonic.setter
def use_user_mnemonic(self, value):
""" Set use_user_mnemonic value.
Notes:
determines whether to use user mnemonic of the Port
This attribute is named `useUserMnemonic` in VSD API.
"""
self._use_user_mnemonic = value
@property
def user_mnemonic(self):
""" Get user_mnemonic value.
Notes:
user mnemonic of the Port
This attribute is named `userMnemonic` in VSD API.
"""
return self._user_mnemonic
@user_mnemonic.setter
def user_mnemonic(self, value):
""" Set user_mnemonic value.
Notes:
user mnemonic of the Port
This attribute is named `userMnemonic` in VSD API.
"""
self._user_mnemonic = value
@property
def associated_egress_qos_policy_id(self):
""" Get associated_egress_qos_policy_id value.
Notes:
ID of the Egress QOS Policy associated with this Vlan.
This attribute is named `associatedEgressQOSPolicyID` in VSD API.
"""
return self._associated_egress_qos_policy_id
@associated_egress_qos_policy_id.setter
def associated_egress_qos_policy_id(self, value):
""" Set associated_egress_qos_policy_id value.
Notes:
ID of the Egress QOS Policy associated with this Vlan.
This attribute is named `associatedEgressQOSPolicyID` in VSD API.
"""
self._associated_egress_qos_policy_id = value
@property
def status(self):
""" Get status value.
Notes:
Status of the port.
"""
return self._status
@status.setter
def status(self, value):
""" Set status value.
Notes:
Status of the port.
"""
self._status = value
@property
def external_id(self):
""" Get external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
return self._external_id
@external_id.setter
def external_id(self, value):
""" Set external_id value.
Notes:
External object ID. Used for integration with third party systems
This attribute is named `externalID` in VSD API.
"""
self._external_id = value
| 30.416048
| 241
| 0.599365
|
acfde9036c39d84bc896f058fbfe7b992e26113b
| 6,443
|
py
|
Python
|
experiments/vss/export-latex.py
|
ibalajiarun/libpolycrypto
|
89a69ed90ee4e9287222cc5781ff11562286f454
|
[
"MIT"
] | 25
|
2020-01-29T19:33:48.000Z
|
2022-03-28T16:45:51.000Z
|
experiments/vss/export-latex.py
|
ibalajiarun/libpolycrypto
|
89a69ed90ee4e9287222cc5781ff11562286f454
|
[
"MIT"
] | 2
|
2020-03-18T12:33:27.000Z
|
2020-03-18T18:30:55.000Z
|
experiments/vss/export-latex.py
|
ibalajiarun/libpolycrypto
|
89a69ed90ee4e9287222cc5781ff11562286f454
|
[
"MIT"
] | 8
|
2020-07-09T01:35:42.000Z
|
2021-07-20T04:54:47.000Z
|
#!/usr/bin/env python2.7
import matplotlib
matplotlib.use('Agg') # otherwise script does not work when invoked over SSH
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.dates import MonthLocator, DateFormatter, DayLocator, epoch2num, num2date
import itertools
import pandas
import sys
import os
import time
# LaTeX symbol appended to "N times" improvement factors in generated macros.
improvLatexSymb='\\texttimes'
# Usage: export-latex.py <output.tex> <input.csv> [<input.csv> ...]
if len(sys.argv) < 3:
    print "Usage:", sys.argv[0], "<output-latex> <csv-file> [<csv_file> ...]"
    sys.exit(0)
def humanizeMicroseconds(mus, precision = 2):
    """Render a microsecond count as a human-readable '<value> <unit>' string.

    Scales through mus -> ms -> secs -> mins -> hrs -> days -> years and
    formats the remaining value with *precision* decimal places.
    """
    units = [ "mus", "ms", "secs", "mins", "hrs", "days", "years" ]
    value = float(mus)
    unit = 0
    # metric steps: mus -> ms -> secs
    while value >= 1000.0 and unit < 2:
        value /= 1000.0
        unit += 1
    # sexagesimal steps: secs -> mins -> hrs
    while value >= 60.0 and 2 <= unit < 4:
        value /= 60.0
        unit += 1
    # hrs -> days
    if unit == 4 and value >= 24.0:
        value /= 24.0
        unit += 1
    # days -> years (Julian year)
    if unit == 5 and value >= 365.25:
        value /= 365.25
        unit += 1
    assert unit < len(units)
    return ("{:." + str(precision) + "f}").format(value) + " " + units[unit]
def add_hum_col(csv_data, usec_col, hum_col):
    """Add column *hum_col* holding a human-readable rendering of the
    microsecond column *usec_col*, skipping 'todo' and NaN entries."""
    for idx, r in csv_data.iterrows():
        if r[usec_col] != 'todo' and str(r[usec_col]) != 'nan':
            # NOTE(review): DataFrame.ix is long-removed from modern pandas
            # (.loc is the replacement); kept because this script targets
            # python2-era pandas.
            csv_data.ix[idx, hum_col] = humanizeMicroseconds(int(r[usec_col]))
# First positional argument: output LaTeX file; remaining arguments: input CSVs.
del sys.argv[0]
out_tex_file = sys.argv[0]
del sys.argv[0]
if not out_tex_file.endswith('.tex'):
    print "ERROR: Expected .tex file as first argument"
    sys.exit(1)
data_files = [f for f in sys.argv]
print "Reading CSV files:", data_files, "..."
# Concatenate all benchmark CSVs into one frame.
csv_data = pandas.concat((pandas.read_csv(f) for f in data_files), ignore_index=True)
#print "Raw:"
#print csv_data.to_string()
#print csv_data.columns
#print csv_data['dictSize'].values
#print "Averaged:"
minN = csv_data.n.unique().min();
maxN = csv_data.n.unique().max();
print "min N:", minN
print "max N:", maxN
# we specify the VSSs here in a specific order so they are plotted with the right colors
vsss_known = [ 'jf', 'ejf', 'amt' ]
# Normalize scheme names used by older CSVs.
csv_data.vss.replace('feld', 'jf', inplace=True)
csv_data.vss.replace('kate', 'evss', inplace=True)
vsss_file = csv_data.vss.unique()
# End-to-end time = deal + verify + reconstruction (best case / worst case).
csv_data['end_to_end_bc_usec'] = csv_data.avg_deal_usec + csv_data.avg_verify_usec + csv_data.avg_reconstr_bc_usec
csv_data['end_to_end_wc_usec'] = csv_data.avg_deal_usec + csv_data.avg_verify_usec + csv_data.avg_reconstr_wc_usec
add_hum_col(csv_data, 'end_to_end_bc_usec', 'end_to_end_bc_hum')
add_hum_col(csv_data, 'end_to_end_wc_usec', 'end_to_end_wc_hum')
#print csv_data.to_string() # print all data
print csv_data[['t','n','vss','avg_deal_hum', 'avg_verify_hum', 'avg_reconstr_bc_hum', 'avg_reconstr_wc_hum', 'end_to_end_bc_hum', 'end_to_end_wc_hum']].to_string()
print "VSSs in file:", vsss_file
print "VSSs known: ", vsss_known
# open the file in append mode, and truncate it to zero bytes if it has data
f = open(out_tex_file, "a+")
isEmpty = os.fstat(f.fileno()).st_size == 0
if not isEmpty:
    f.truncate(0)
def write_latex_case_macro(f, data, macroName, col1, col2):
    """Emit a LaTeX \\newcommand named *macroName* that maps a value of
    column *col1* (the macro's single argument) to the matching value of
    column *col2*, via an \\IfStrEqCase over all rows of *data*.
    Arguments with no matching row expand to a red NODATA marker."""
    f.write("\\newcommand{\\" + macroName + "}[1]{%\n")
    f.write("  \IfStrEqCase{#1}{")
    for _, r in data.iterrows():
        f.write("\n    {" + str(r[col1]).strip() + "}{" + str(r[col2]).strip() + "\\xspace}")
    f.write("}[\\textcolor{red}{\\textbf{NODATA}}]}\n\n")
def write_columns(f, csv_data, usec_col, hum_col, latex_col, min_improv_pct=1.2, min_improv_n=16):
f.write("%\n")
f.write("% Data for column '" + usec_col + "'\n")
f.write("% (improvements must be better than " + str(min_improv_pct) + " and occur after n > " + str(min_improv_n) + ")\n")
f.write("%\n")
vsss = csv_data.vss.unique()
for vss in vsss:
write_latex_case_macro(f, csv_data[csv_data.vss == vss], vss + latex_col, 'n', hum_col)
for vss, otherVss in itertools.combinations(vsss, 2):
assert vss != otherVss
# assume 'vss' beats 'otherVss' and compute the improvement factor
usec1 = csv_data[csv_data.vss == vss][usec_col]
usec2 = csv_data[csv_data.vss == otherVss][usec_col]
# check if actually 'otherVss' beats 'vss'
if usec2.sum() < usec1.sum():
tmp = usec1
usec1 = usec2
usec2 = tmp
tmp = vss;
vss = otherVss;
otherVss = tmp;
improv = usec2.values / usec1.values.astype(float)
improv_data = pandas.concat(
[
pandas.DataFrame(csv_data.n.unique(), columns=["n"]),
pandas.DataFrame(improv, columns=["improv"])
],
axis=1)
improv_data['improv'] = improv_data['improv'].round(decimals=2)
# extract the threshold # of players n at which 'vss' beats 'otherVss'
# we might beat the naive scheme at small thresholds too, but then later on we don't beat it anymore
outperform = improv_data[(improv_data.improv > 1.2) & (improv_data.n > min_improv_n)].copy()
outperform.reset_index(drop=True, inplace=True) # because copy() does not renumber the rows of the dataframe
outperform.sort_values(by='improv', ascending=True)
outperform_num = int(outperform.ix[0]['n'])
improv_data['improv'] = improv_data['improv'].astype(str) + improvLatexSymb
#print improv_data
write_latex_case_macro(f, improv_data, vss + latex_col + 'ImprovOver' + otherVss, 'n', 'improv')
print vss, "starts outperforming", otherVss, "for", latex_col, "at:", outperform_num
f.write("\\newcommand{\\" + vss + latex_col + "OutperformN" + otherVss + "}{" + str(outperform_num) + "}\n")
f.write("\n\n")
# Emit macros for every timing column; verification/reconstruction may already
# win at very small n, hence the lower min_improv_n there.
write_columns(f, csv_data, 'avg_deal_usec', 'avg_deal_hum', 'VssDealTime')
write_columns(f, csv_data, 'avg_verify_usec', 'avg_verify_hum', 'VssVerifyTime', min_improv_n=3)
write_columns(f, csv_data, 'avg_reconstr_bc_usec', 'avg_reconstr_bc_hum', 'VssReconstrBcTime', min_improv_n=3)
write_columns(f, csv_data, 'avg_reconstr_wc_usec', 'avg_reconstr_wc_hum', 'VssReconstrWcTime', min_improv_n=3)
# TODO: should compute ratios between bc/wc reconstr times
write_columns(f, csv_data, 'end_to_end_bc_usec', 'end_to_end_bc_hum', 'VssEndToEndBcTime')
write_columns(f, csv_data, 'end_to_end_wc_usec', 'end_to_end_wc_hum', 'VssEndToEndWcTime')
# TODO: should compute ratios between bc/wc e2e times
| 36.40113
| 164
| 0.652646
|
acfde92d5518e7beb962b7cee980f0c01ffeee3b
| 2,080
|
py
|
Python
|
mps_database/models/threshold_fault.py
|
slaclab/mps_database
|
023ed9bb3b333e382cc612f816c3f4b295b66a4c
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
mps_database/models/threshold_fault.py
|
slaclab/mps_database
|
023ed9bb3b333e382cc612f816c3f4b295b66a4c
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2017-07-07T21:31:59.000Z
|
2017-07-07T21:31:59.000Z
|
mps_database/models/threshold_fault.py
|
slaclab/mps_database
|
023ed9bb3b333e382cc612f816c3f4b295b66a4c
|
[
"BSD-3-Clause-LBNL"
] | 4
|
2017-07-07T20:10:54.000Z
|
2020-12-13T00:03:37.000Z
|
from sqlalchemy import Column, Integer, Float, String, Boolean, ForeignKey
from sqlalchemy.orm import relationship, backref
from mps_database.models import Base
class ThresholdFault(Base):
    """
    ThresholdFault class (threshold_faults table)

    Describe an analog fault, which is generated by an AnalogDevice.
    The AnalogDevice provides a compressed analog value from the device,
    the compressed value is expressed a reduced number of bits (e.g. 12).
    The value read from the device is compared to the threshold stored
    here. The conversion from the threshold to analog value is done
    via the threshold_values_map and threshold_values tables.

    Properties:
      name: short fault description
      greater_than: if true, if the AnalogDevice value is larger than the
                    compressed_threshold then a ThresholdFault is generated
                    if false, if the AnalogDevice value is smaller than the
                    compressed threshold then a ThresholdFault is generated

    References:
      analog_device_id: defines the type of analog device related to this
                        fault
      threshold_value_id: defines which threshold value is used when calculating
                          if a fault happened

    Relationships:
      threshold_fault_state: through the ThresholdFaultStates this
                             ThresholdFault is linked to an AllowedClass (allowed beam class)
    """
    __tablename__ = 'threshold_faults'
    id = Column(Integer, primary_key=True)
    name = Column(String, nullable=False)
    analog_device_id = Column(Integer, ForeignKey('analog_devices.id'), nullable=False)
    #If greater_than is true, a value larger than the threshold will generate a fault.
    #If greater_than is false, a value smaller than the threshold will generate a fault.
    greater_than = Column(Boolean, nullable=False)
    threshold_fault_state = relationship("ThresholdFaultState", uselist=False, backref="threshold_fault")
    threshold_value_id = Column(Integer, ForeignKey('threshold_values.id'), nullable=False)

    @property
    def less_than(self):
        # Convenience inverse of greater_than: True when a value *below*
        # the threshold triggers the fault.
        return not self.greater_than
| 45.217391
| 103
| 0.751923
|
acfde9453ce0c9c058b5aee09f9c5b14eefe5196
| 2,234
|
py
|
Python
|
geekup/models/participant.py
|
hasgeek/geekup
|
4e9b83f63203ae15d11a3e2e679e8a86ae02e545
|
[
"CC-BY-3.0"
] | 1
|
2020-06-26T17:10:37.000Z
|
2020-06-26T17:10:37.000Z
|
geekup/models/participant.py
|
hasgeek/geekup
|
4e9b83f63203ae15d11a3e2e679e8a86ae02e545
|
[
"CC-BY-3.0"
] | 5
|
2017-05-04T06:24:17.000Z
|
2019-05-08T00:16:46.000Z
|
geekup/models/participant.py
|
hasgeek/geekup
|
4e9b83f63203ae15d11a3e2e679e8a86ae02e545
|
[
"CC-BY-3.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from geekup.models import db, BaseMixin
from datetime import datetime
class Participant(BaseMixin, db.Model):
    """
    Participant data, as submitted from the registration form.
    """
    __tablename__ = 'participant'
    #: User's full name
    fullname = db.Column(db.Unicode(80), nullable=False)
    #: User's email address
    email = db.Column(db.Unicode(80), nullable=False)
    #: User's company name
    company = db.Column(db.Unicode(80), nullable=False)
    #: User's job title
    jobtitle = db.Column(db.Unicode(80), nullable=False)
    #: User's twitter id (optional)
    twitter = db.Column(db.Unicode(80), nullable=True)
    #: How did the user hear about this event?
    referrer = db.Column(db.Integer, nullable=False, default=0)
    #: User category, defined by a reviewer
    category = db.Column(db.Integer, nullable=False, default=0)
    #: User agent with which the user registered
    useragent = db.Column(db.Unicode(250), nullable=True)
    #: Date the user registered
    regdate = db.Column(db.DateTime, default=datetime.utcnow, nullable=False)
    #: Submitter's IP address, for logging
    #: (45 chars to accommodate an IPv6 address)
    ipaddr = db.Column(db.String(45), nullable=False)
    #: Has the user's application been approved?
    approved = db.Column(db.Boolean, default=False, nullable=False)
    #: RSVP status codes:
    #: 0 = Awaiting Response
    #: 1 = Yes, Attending
    #: 2 = Maybe Attending
    #: 3 = Not Attending
    rsvp = db.Column(db.Integer, default=0, nullable=False)
    #: Did the participant attend the event?
    attended = db.Column(db.Boolean, default=False, nullable=False)
    #: Datetime the participant showed up
    attenddate = db.Column(db.DateTime, nullable=True)
    #: Have we sent this user an email
    email_sent = db.Column(db.Boolean, default=False, nullable=False)
    #: Key created with coaster.secretkey
    email_key = db.Column(db.Unicode(44), nullable=True)
    #: Is it confirmed or not
    email_status = db.Column(db.Boolean, default=False, nullable=False)
    #: Event they'd like to attend
    event_id = db.Column(db.Integer, db.ForeignKey('event.id'))

    def __repr__(self):
        # Human-friendly representation: just the participant's name.
        return self.fullname
| 39.192982
| 77
| 0.682632
|
acfdebb27e82073412a74171aa132600d05a2dd6
| 862
|
py
|
Python
|
Taller_Estructuras_de _Control_Selectivas/Ejercicio_16.py
|
Mariajosedibo19/Talleres_de_Algoritmos
|
db8f1eecc345be1877d9d7a62a3fa8cf3af2df7d
|
[
"MIT"
] | null | null | null |
Taller_Estructuras_de _Control_Selectivas/Ejercicio_16.py
|
Mariajosedibo19/Talleres_de_Algoritmos
|
db8f1eecc345be1877d9d7a62a3fa8cf3af2df7d
|
[
"MIT"
] | null | null | null |
Taller_Estructuras_de _Control_Selectivas/Ejercicio_16.py
|
Mariajosedibo19/Talleres_de_Algoritmos
|
db8f1eecc345be1877d9d7a62a3fa8cf3af2df7d
|
[
"MIT"
] | null | null | null |
"""
Datos de entrada
valor para A en la formula cuadratica = a= float
valor para B en la formula cuadratica = b = float
valor para C en la formula cuadratica = c = float
Datos de salida
Resultado de la ecuacion = A X**2 +BX + C =0
"""
# Entradas
a=float(input("valor para A en la formula cuadratica "))
b=float(input("valor para B en la formula cuadratica "))
c=float(input("valor para C en la formula cuadratica "))
# Caja Negra
from cmath import sqrt
x1=(-b-sqrt(b**2-4*a*c))/(2*a)
x2=(-b+sqrt(b**2-4*a*c))/(2*a)
discriminante=b**2-4*a*c # discriminante
solucion=""
if (discriminante==0):
solucion= -b/(2*a) # Tiene 1 solucion real
elif (discriminante>0):
solucion= x1,x2
elif (discriminante<0):
solucion= "No tiene solucion en los reales"
# Salidas
print(f" La solucion o soluciones de la ecuacion de segundo grado son {solucion}")
| 22.684211
| 82
| 0.687935
|
acfdebe951a5af3b6dd3848c882863048fc9e0c6
| 1,076
|
py
|
Python
|
dependencies/scons-config/build/lib.linux-x86_64-2.7/sconsconfig/packages/sqlite3.py
|
maierbn/opendihu
|
577650e2f6b36a7306766b0f4176f8124458cbf0
|
[
"MIT"
] | 17
|
2018-11-25T19:29:34.000Z
|
2021-09-20T04:46:22.000Z
|
dependencies/scons-config/build/lib.linux-x86_64-2.7/sconsconfig/packages/sqlite3.py
|
maierbn/opendihu
|
577650e2f6b36a7306766b0f4176f8124458cbf0
|
[
"MIT"
] | 1
|
2020-11-12T15:15:58.000Z
|
2020-12-29T15:29:24.000Z
|
dependencies/scons-config/build/lib.linux-x86_64-2.7/sconsconfig/packages/sqlite3.py
|
maierbn/opendihu
|
577650e2f6b36a7306766b0f4176f8124458cbf0
|
[
"MIT"
] | 4
|
2018-10-17T12:18:10.000Z
|
2021-05-28T13:24:20.000Z
|
import sys, os
from Package import Package
##
##
##
class sqlite3(Package):
    """scons-config package descriptor for SQLite3.

    NOTE(review): the class name shadows the stdlib ``sqlite3`` module
    within this file; kept as-is because scons-config looks packages up
    by class name.
    """
    def __init__(self, **kwargs):
        # Caller-supplied options override the default download location.
        defaults = {
            'download_url': 'http://github.com/furious-luke/sqlite3-ext/tarball/master',
        }
        defaults.update(kwargs)
        super(sqlite3, self).__init__(**defaults)
        self.ext = '.c'
        self.libs=[
            ['sqlite3'],
        ]
        self.extra_libs=[
            [],
        ]
        # Minimal C program used to probe that the header and library link.
        self.check_text = r'''
#include <stdlib.h>
#include <stdio.h>
#include <sqlite3.h>
int main(int argc, char* argv[]) {
   return EXIT_SUCCESS;
}
'''
        # Setup the build handler. I'm going to assume this will work for all architectures.
        self.set_build_handler([
            './configure --prefix=${PREFIX}',
            'make install',
        ])

    def check(self, ctx):
        """Run the scons configure check for sqlite3 and report the result."""
        env = ctx.env
        ctx.Message('Checking for sqlite3 ... ')
        self.check_options(env)
        res = super(sqlite3, self).check(ctx)
        self.check_required(res[0])
        ctx.Result(res[0])
        return res[0]
| 22.893617
| 92
| 0.549257
|
acfdee6a35e1886193846182859f207bee72621f
| 401
|
py
|
Python
|
src/draggle_blog/wsgi.py
|
dipto0321/draggle_blog
|
c19f96aa1d4d2fb8b4b901e33c38e92602ef1fcb
|
[
"MIT"
] | null | null | null |
src/draggle_blog/wsgi.py
|
dipto0321/draggle_blog
|
c19f96aa1d4d2fb8b4b901e33c38e92602ef1fcb
|
[
"MIT"
] | 27
|
2019-11-12T17:04:02.000Z
|
2020-06-08T23:31:19.000Z
|
src/draggle_blog/wsgi.py
|
dipto0321/draggle_blog
|
c19f96aa1d4d2fb8b4b901e33c38e92602ef1fcb
|
[
"MIT"
] | null | null | null |
"""
WSGI config for draggle_blog project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'draggle_blog.settings')
application = get_wsgi_application()
| 23.588235
| 78
| 0.790524
|
acfdf0b15fd7d353b2668624f2976019bb1c5593
| 802
|
py
|
Python
|
opentech/apply/funds/templatetags/submission_tags.py
|
JakabGy/hypha
|
32634080ba1cb369f07f27f6616041e4eca8dbf2
|
[
"BSD-3-Clause"
] | null | null | null |
opentech/apply/funds/templatetags/submission_tags.py
|
JakabGy/hypha
|
32634080ba1cb369f07f27f6616041e4eca8dbf2
|
[
"BSD-3-Clause"
] | null | null | null |
opentech/apply/funds/templatetags/submission_tags.py
|
JakabGy/hypha
|
32634080ba1cb369f07f27f6616041e4eca8dbf2
|
[
"BSD-3-Clause"
] | null | null | null |
import re
from django import template
from django.utils.safestring import mark_safe
from opentech.apply.funds.models import ApplicationSubmission
register = template.Library()
@register.filter
def submission_links(value):
    """Template filter: turn plain-text references like ``#123`` into links
    to the matching ApplicationSubmission; unknown ids are left untouched."""
    # Match tags in the format #123 that are not preceded and/or followed by a
    # word character.  BUGFIX: use raw strings -- the previous plain strings
    # relied on invalid escape sequences such as '\#' and '\w' (a
    # DeprecationWarning today, a SyntaxError in future Pythons).
    matches = re.findall(r'(?<![\w\&])\#(\d+)(?!\w)', value)
    links = {}
    if matches:
        for submission in ApplicationSubmission.objects.filter(id__in=matches):
            links[rf'\#{submission.id}'] = f'<a href="{submission.get_absolute_url()}">{submission.title} <span class="mid-grey-text">#{submission.id}</span></a>'
    if links:
        for sid, link in links.items():
            value = re.sub(rf'(?<!\w){sid}(?!\w)', link, value)
    return mark_safe(value)
| 32.08
| 161
| 0.665835
|
acfdf147d28c7124b641a7334212c00dec2b4ece
| 10,409
|
py
|
Python
|
tests/conftest.py
|
Alenush/datasets
|
8342de4864ce255e802c0d15b14921029002befa
|
[
"Apache-2.0"
] | 1
|
2021-11-21T18:37:28.000Z
|
2021-11-21T18:37:28.000Z
|
tests/conftest.py
|
Ishan-Kumar2/datasets
|
ba831e4bcd175ae3d52afbf7d12c4f625bf541b0
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
Ishan-Kumar2/datasets
|
ba831e4bcd175ae3d52afbf7d12c4f625bf541b0
|
[
"Apache-2.0"
] | null | null | null |
import csv
import json
import lzma
import os
import textwrap
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets import config
from datasets.arrow_dataset import Dataset
from datasets.features import ClassLabel, Features, Sequence, Value
from .s3_fixtures import * # noqa: load s3 fixtures
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    """Redirect every `datasets` cache directory into pytest's temp dir so
    tests never read from or pollute the real user cache."""
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
# Two-line text payload written by the compressed-file fixtures below.
FILE_CONTENT = """\
Text data.
Second line of data."""
@pytest.fixture(scope="session")
def dataset():
n = 10
features = Features(
{
"tokens": Sequence(Value("string")),
"labels": Sequence(ClassLabel(names=["negative", "positive"])),
"answers": Sequence(
{
"text": Value("string"),
"answer_start": Value("int32"),
}
),
"id": Value("int64"),
}
)
dataset = Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(n)),
},
features=features,
)
return dataset
@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
dataset.map(cache_file_name=filename)
return filename
@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.txt"
data = FILE_CONTENT
with open(filename, "w") as f:
f.write(data)
return filename
@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.txt.xz"
data = bytes(FILE_CONTENT, "utf-8")
with lzma.open(filename, "wb") as f:
f.write(data)
return filename
@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
import gzip
path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
data = bytes(FILE_CONTENT, "utf-8")
with gzip.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
import bz2
path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
if config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
path = tmp_path_factory.mktemp("data") / "file.txt.zst"
data = bytes(FILE_CONTENT, "utf-8")
with zstd.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
if config.LZ4_AVAILABLE:
import lz4.frame
path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
data = bytes(FILE_CONTENT, "utf-8")
with lz4.frame.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
filename = tmp_path_factory.mktemp("data") / "file.xml"
data = textwrap.dedent(
"""\
<?xml version="1.0" encoding="UTF-8" ?>
<tmx version="1.4">
<header segtype="sentence" srclang="ca" />
<body>
<tu>
<tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
<tuv xml:lang="en"><seg>Content 1</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
<tuv xml:lang="en"><seg>Content 2</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
<tuv xml:lang="en"><seg>Content 3</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
<tuv xml:lang="en"><seg>Content 4</seg></tuv>
</tu>
<tu>
<tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
<tuv xml:lang="en"><seg>Content 5</seg></tuv>
</tu>
</body>
</tmx>"""
)
with open(filename, "w") as f:
f.write(data)
return filename
# Row-oriented sample records used by the file-format fixtures below.
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
# A second, disjoint batch of rows (written alongside DATA by zip_csv_path).
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
# Same records as DATA, but column-oriented.
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
# Rows with keys in 3-1-2 order (presumably to exercise key-order handling).
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
# Variant with string-valued col_1 ("s0".."s3").
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
dataset = Dataset.from_dict(DATA_DICT_OF_LISTS)
path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
dataset.map(cache_file_name=path)
return path
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
with open(path, "w") as f:
writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(item)
return path
@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
with open(path, "w") as f:
writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
writer.writeheader()
for item in DATA:
writer.writerow(item)
return path
@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
import bz2
path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
with open(csv_path, "rb") as f:
data = f.read()
# data = bytes(FILE_CONTENT, "utf-8")
with bz2.open(path, "wb") as f:
f.write(data)
return path
@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
import zipfile
path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
with zipfile.ZipFile(path, "w") as f:
f.write(csv_path, arcname=os.path.basename(csv_path))
f.write(csv2_path, arcname=os.path.basename(csv2_path))
return path
@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    """Write DATA to a Parquet file with an explicit schema; return its path.

    Fix: the original opened the file handle with a bare ``open(path, "wb")``
    and never closed it (and the ParquetWriter was not closed on error);
    both are now managed with context managers so they are always released.
    """
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    # Pivot the row-oriented DATA into columns keyed like the first row.
    pa_table = pa.Table.from_pydict(
        {k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema
    )
    with open(path, "wb") as f, pq.ParquetWriter(f, schema=schema) as writer:
        writer.write_table(pa_table)
    return path
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    """Write {"data": DATA} (rows as a list of dicts) to a JSON file."""
    out = str(tmp_path_factory.mktemp("data") / "dataset.json")
    with open(out, "w") as fh:
        json.dump({"data": DATA}, fh)
    return out
@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    """Write {"data": DATA_DICT_OF_LISTS} (columnar payload) to a JSON file."""
    out = str(tmp_path_factory.mktemp("data") / "dataset.json")
    with open(out, "w") as fh:
        json.dump({"data": DATA_DICT_OF_LISTS}, fh)
    return out
@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    """Write DATA as JSON Lines (one object per line); return the file path."""
    out = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    lines = [json.dumps(row) + "\n" for row in DATA]
    with open(out, "w") as fh:
        fh.writelines(lines)
    return out
@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    """Write DATA_312 (scrambled key order) as JSON Lines; return the path."""
    out = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    lines = [json.dumps(row) + "\n" for row in DATA_312]
    with open(out, "w") as fh:
        fh.writelines(lines)
    return out
@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    """Write DATA_STR (string-valued col_1) as JSON Lines; return the path."""
    out = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    lines = [json.dumps(row) + "\n" for row in DATA_STR]
    with open(out, "w") as fh:
        fh.writelines(lines)
    return out
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    """Write the lines "0".."3" to a plain-text file; return its path."""
    out = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(out, "w") as fh:
        fh.writelines(value + "\n" for value in ["0", "1", "2", "3"])
    return out
@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    """gzip-compress the text_path fixture file; return the .gz path (a str)."""
    import gzip

    out = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as src, gzip.open(out, "wb") as dst:
        dst.writelines(src)
    return out
@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    """gzip-compress the jsonl_path fixture file; return the .gz path (a str)."""
    import gzip

    out = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as src, gzip.open(out, "wb") as dst:
        dst.writelines(src)
    return out
| 29.238764
| 117
| 0.619176
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.