id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1776364 | <reponame>lumichatbot/experiment
# Endpoint of the local intent-translation webhook.
TRANSLATE_URL = 'http://0.0.0.0:5000/webhook'
# Endpoint of the deployment service (172.17.0.2 is a Docker-bridge address
# -- presumably the deploy container; TODO confirm).
DEPLOY_URL = 'http://172.17.0.2:5000/deploy'
# Request-body template for the translate API. The #INTENT_TEXT, #ORIGIN,
# #DESTINATION and #MIDDLEBOXES tokens are placeholders substituted by the
# caller before the request is sent. The payload shape looks like a
# Dialogflow/API.AI v1 response -- TODO confirm against the webhook.
TRANSLATE_API_TEMPLATE = '''{
"id": "28419e8b-2ce2-4587-84b2-98be5c49739d",
"timestamp": "2018-05-29T18:39:06.145Z",
"lang": "en",
"result": {
"source": "agent",
"resolvedQuery": "#INTENT_TEXT",
"action": "input.nile",
"actionIncomplete": false,
"parameters": {
"origin": "#ORIGIN",
"destination": "#DESTINATION",
"policy-target": [],
"security-level": "",
"middlebox": [
#MIDDLEBOXES
]
},
"contexts": [],
"metadata": {
"intentId": "64cdfdeb-18dd-4c76-be0a-4b55021ad1eb",
"webhookUsed": "true",
"webhookForSlotFillingUsed": "false",
"endConversation": true,
"webhookResponseTime": 203,
"intentName": "Waypoint Intent"
},
"fulfillment": {
"speech": "",
"messages": [
{
"type": 0,
"speech": ""
}
]
},
"score": 0.7900000214576721
},
"status": {
"code": 200,
"webhookTimedOut": false
},
"sessionId": "493f83f0-6fab-429b-ba95-104eeb316cd9"
}'''
# Request-body template for the deploy API; #INTENT_NILE is replaced with
# the translated intent before sending.
DEPLOY_API_TEMPLATE = '''{
"id": "28419e8b-2ce2-4587-84b2-98be5c49739d",
"timestamp": "2018-02-28T18:39:06.145Z",
"intent": "#INTENT_NILE"
}'''
| StarcoderdataPython |
64200 | '''
Author: <NAME>
Description:
Autocolorization
'''
import cv2
image = cv2.imread('data/original.png')
cv2.imshow('original',image)
cv2.waitKey(0)
cv2.destroyAllWindows()
image = cv2.cvtColor(image, cv2.COLOR_BGR2LAB)
cv2.imshow('lab',image)
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.imshow('lab2bgr',cv2.cvtColor(image, cv2.COLOR_LAB2BGR))
cv2.waitKey(0)
cv2.destroyAllWindows() | StarcoderdataPython |
99580 | ## Hash table
# Idea: A smaller dynamic direct access array
# Reference implementation:
#MIT Introduction to Algorithms, Recitation 4
# https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-006-introduction-to-algorithms-spring-2020/lecture-notes/MIT6_006S20_r04.pdf
from random import randint
# Placeholders carried over from the MIT reference code, where Set_from_Seq
# turns a sequence class into a set class and Linked_List_Seq is a linked
# list. Linked_List_Seq is not provided here (it is None), so these stubs
# are not usable as-is.
Set_from_Seq = lambda X: set(X)
Linked_List_Seq = None
class Hash_Table_Set:
    """Set backed by a dynamically resized hash table with chaining.

    Keys are hashed with the universal family h(k) = ((a*k) % p) % m, so
    keys must be integers. Stored items must expose a ``.key`` attribute.
    The table is rebuilt whenever the item count leaves the current
    [lower, upper] bounds, keeping the slot count proportional to size.

    Adapted from MIT 6.006 (Spring 2020), Recitation 4.
    """

    class _Chain:
        """Minimal collision chain: a tiny set of items keyed by ``.key``."""

        def __init__(self):
            self._items = []

        def __iter__(self):
            yield from self._items

        def insert(self, x):
            """Add x, replacing any item with the same key.

            Returns True if a new key was added, False on replacement.
            """
            for i, y in enumerate(self._items):
                if y.key == x.key:
                    self._items[i] = x
                    return False
            self._items.append(x)
            return True

        def find(self, k):
            """Return the item with key k, or None if absent."""
            for y in self._items:
                if y.key == k:
                    return y
            return None

        def delete(self, k):
            """Remove and return the item with key k (KeyError if absent)."""
            for i, y in enumerate(self._items):
                if y.key == k:
                    return self._items.pop(i)
            raise KeyError(k)

    def __init__(self, r=200):
        # Chain factory. Each slot must get its OWN chain instance; the
        # original code crashed building a single shared object (set(None))
        # and then reused it for every slot.
        self.chain_set = Hash_Table_Set._Chain
        self.A = []                       # the table: a list of chains
        self.size = 0                     # number of stored items
        self.r = r                        # fill ratio in percent (r/100 slots per item)
        self.p = 2 ** 31 - 1              # Mersenne prime for universal hashing
        self.a = randint(1, self.p - 1)   # random hash multiplier in [1, p)
        self._compute_bounds()
        self._resize(0)

    def __len__(self):
        return self.size

    def __iter__(self):
        # Yield every item of every chain, in table order.
        for chain in self.A:
            yield from chain

    def build(self, X):
        """Insert every item of iterable X."""
        for x in X:
            self.insert(x)

    def _hash(self, k, m):
        # Universal hash of integer key k into m slots.
        return ((self.a * k) % self.p) % m

    def _compute_bounds(self):
        # Rebuild is triggered when size leaves [lower, upper].
        self.upper = len(self.A)
        self.lower = len(self.A) * 100 * 100 // (self.r * self.r)

    def _resize(self, n):
        """Rebuild the table, if needed, so it comfortably holds n items."""
        if (self.lower >= n) or (n >= self.upper):
            f = self.r // 100
            if self.r % 100:
                f += 1                    # f = ceil(r / 100)
            m = max(n, 1) * f
            # BUG FIX: instantiate a fresh chain per slot; the original put
            # the same object in every slot.
            new_table = [self.chain_set() for _ in range(m)]
            for x in self:
                h = self._hash(x.key, m)
                new_table[h].insert(x)
            self.A = new_table
            self._compute_bounds()

    def find(self, k):
        """Return the stored item with key k, or None."""
        h = self._hash(k, len(self.A))
        return self.A[h].find(k)

    def insert(self, x):
        """Insert item x, replacing any stored item with the same key.

        Returns True if a new key was added, False on replacement.
        """
        self._resize(self.size + 1)
        h = self._hash(x.key, len(self.A))
        added = self.A[h].insert(x)
        if added:
            self.size += 1
        return added

    def delete(self, k):
        """Remove and return the item with key k (KeyError if absent)."""
        assert len(self) > 0
        h = self._hash(k, len(self.A))
        x = self.A[h].delete(k)
        self.size -= 1
        self._resize(self.size)
        return x

    def find_min(self):
        """Return the item with the smallest key, or None if empty."""
        out = None
        for x in self:
            if (out is None) or (x.key < out.key):
                out = x
        return out

    def find_max(self):
        """Return the item with the largest key, or None if empty."""
        out = None
        for x in self:
            if (out is None) or (x.key > out.key):
                out = x
        return out  # BUG FIX: the original never returned the result

    def find_next(self, k):
        """Return the item with the smallest key strictly greater than k."""
        out = None
        for x in self:
            if x.key > k:
                if (out is None) or (x.key < out.key):
                    out = x
        return out

    def find_prev(self, k):
        """Return the item with the largest key strictly smaller than k."""
        out = None
        for x in self:
            if x.key < k:
                # BUG FIX: the original was missing the ':' here (SyntaxError)
                if (out is None) or (x.key > out.key):
                    out = x
        return out

    def iter_order(self):
        """Yield the stored items in increasing key order (O(n^2) scan)."""
        x = self.find_min()
        while x:
            yield x
            x = self.find_next(x.key)
# Reference implementation:
#MIT Introduction to Algorithms, Recitation 4
# https://ocw.mit.edu/courses/electrical-engineering-and-computer-science/6-006-introduction-to-algorithms-spring-2020/lecture-notes/MIT6_006S20_r04.pdf
| StarcoderdataPython |
119479 | <filename>testSpace.py<gh_stars>0
#working
# Binary classification (malignant vs benign) on the breast-cancer CSV with
# a small TF1 multilayer perceptron. Presumably the Wisconsin Breast Cancer
# dataset (30 feature columns) -- TODO confirm the CSV schema.
from __future__ import print_function
import tensorflow as tf
import pandas as pd
import numpy as np
data=pd.read_csv("/home/ecotine/Desktop/presentDS/BreastCancer.csv")
# Encode the label: M (malignant) -> 1, B (benign) -> 0.
data["diagnosis"]= data["diagnosis"].map({'M':1,'B':0})
data.drop('id',axis=1,inplace=True)
# Shuffle the rows (reindexing by a random permutation, three passes).
data=data.reindex(np.random.permutation(data.index))
data=data.reindex(np.random.permutation(data.index))
data=data.reindex(np.random.permutation(data.index))
# 30/70 test/train split by row count.
testD=int(data.shape[0]*0.3)
testData=data[:testD]
trainingData=data[testD:]
trainingoutput=trainingData["diagnosis"]
testoutput=testData["diagnosis"]
# Drop the label column from the feature frames.
trainingData=trainingData.drop('diagnosis',axis=1,inplace=False)
testData=testData.drop('diagnosis',axis=1,inplace=False)
#parameters
learningRate=0.001
epoch=20
batchSize=10
display_step = 1
#networkConfig: 30 inputs -> 20 -> 10 -> 8 -> 1 output unit
n_input=30
n_layer1=20
n_layer2=10
n_layer3=8
n_output=1
#io placeholders (TF1-style feed_dict inputs)
x=tf.placeholder("float",[None,n_input])
y=tf.placeholder("float",[None,n_output])
def multilayer_perceptron(x, weights, biases):
    """Forward pass of the 4-layer perceptron.

    Applies three hidden layers (tanh, tanh, sigmoid) followed by a
    sigmoid output layer. ``weights``/``biases`` are dicts keyed by
    'w1'..'w3'/'b1'..'b3' and 'out'.
    """
    hidden1 = tf.nn.tanh(tf.add(tf.matmul(x, weights['w1']), biases['b1']))
    hidden2 = tf.nn.tanh(tf.add(tf.matmul(hidden1, weights['w2']), biases['b2']))
    hidden3 = tf.nn.sigmoid(tf.add(tf.matmul(hidden2, weights['w3']), biases['b3']))
    return tf.nn.sigmoid(tf.add(tf.matmul(hidden3, weights['out']), biases['out']))
#weights: randomly initialized per layer
weights={
'w1':tf.Variable(tf.random_normal([n_input,n_layer1])),
'w2':tf.Variable(tf.random_normal([n_layer1,n_layer2])),
'w3':tf.Variable(tf.random_normal([n_layer2,n_layer3])),
'out':tf.Variable(tf.random_normal([n_layer3,n_output]))
}
biases={
'b1':tf.Variable(tf.random_normal([n_layer1])),
'b2':tf.Variable(tf.random_normal([n_layer2])),
'b3':tf.Variable(tf.random_normal([n_layer3])),
'out':tf.Variable(tf.random_normal([n_output]))
}
#construct model
pred=multilayer_perceptron(x,weights,biases)
# NOTE(review): softmax cross-entropy over a single sigmoid output unit is
# degenerate (softmax of one logit is always 1); sigmoid_cross_entropy on
# raw logits would be the usual choice here.
cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(pred,y))
train=tf.train.GradientDescentOptimizer(learningRate).minimize(cost)
init = tf.initialize_all_variables()
with tf.Session() as sess:
    sess.run(init)
    for i in range(epoch):
        # Hard-coded batch count; assumes >= 400 training rows -- TODO confirm.
        totalBatch=40
        for z in range(totalBatch):
            x_batch=trainingData[z*batchSize:(z+1)*batchSize]
            y_batch=trainingoutput[z*batchSize:(z+1)*batchSize]
            # Reshape labels from (batch,) to (batch, 1) to match placeholder y.
            temp=y_batch.shape
            y_batch=y_batch.values.reshape(temp[0],1)
            c=sess.run([train,cost], feed_dict={x:x_batch,y:y_batch})
            if(z%10==0):
                # Debug: dump the layer-2 weight matrix every 10 batches.
                print(weights['w2'].eval())
                print("batch")
    # NOTE(review): pred and y each have a single column, so argmax along
    # axis 1 is always 0 for both -- this "accuracy" is always 1.0 and does
    # not measure classification accuracy. Thresholding pred at 0.5 would.
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
    print(testData.shape)
    print(testoutput.shape)
    # Labels reshaped to (n, 1) for the feed_dict, mirroring training.
    Yte = np.reshape(testoutput, (testoutput.shape[0], 1))
    print("Accuracy:", accuracy.eval({x: testData, y: Yte}))
    asz = sess.run(pred, feed_dict={x: testData})
    print(asz)
    print("-==-=-=--=-=-==-=-=-=-=-=--=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-==-=-==================================")
    print(testoutput)
| StarcoderdataPython |
110329 | <filename>quickJSON.py
"""
quickJSON
A basic, simple Python module that interfaces with JSON files.
"""
# Imports
import json
class JSONManager:
    """Manages JSON files: load, edit in memory, and save.

    The current document is held in ``rawJSON`` (a plain dict). If a
    filename is given at construction it is loaded immediately; otherwise
    the manager starts with an empty document.
    """

    def __init__(self, filename = None):
        self.filename = filename
        # Idiom fix: identity comparison with None (PEP 8), not !=/==.
        if self.filename is not None:
            self.loadJSON(self.filename)
        else:
            self.rawJSON = {}

    def loadJSON(self, filename = None):
        """Load a specified JSON file (defaults to self.filename)."""
        if filename is None:
            filename = self.filename
        # Open read-only: loading never writes (the original used "r+",
        # which needlessly required write permission).
        with open(filename, "r") as f:
            self.rawJSON = json.load(f)

    def createKey(self, name, value = None):
        """Create a key for the JSON file to later save. If you want to append a dictionary, use createKeysFromDict() instead"""
        self.rawJSON.update({name : value})

    def createKeysFromDict(self, dictionary):
        """Appends a Python dictionary to the JSON file"""
        self.rawJSON.update(dictionary)

    def updateKey(self, name, value):
        """Update a value of a certain JSON key"""
        self.rawJSON[name] = value

    def removeKey(self, name):
        """Remove a key from the JSON file (KeyError if absent)."""
        self.rawJSON.pop(name)

    def saveJSON(self, filename = None, formatJSON = True):
        """Save the document as JSON to the specified location.

        When formatJSON is True the output is pretty-printed with a
        4-space indent; otherwise it is written compactly.
        """
        if filename is None:
            filename = self.filename
        with open(filename, "w+") as f:
            if formatJSON:
                json.dump(self.rawJSON, f, indent = 4)
            else:
                json.dump(self.rawJSON, f)
| StarcoderdataPython |
#!/usr/bin/python3
# Bootstrap loop for a Lambda-style handler: resolves the handler from
# environment variables, then serves JSON events read line-by-line from
# stdin, writing responses to file descriptor 3 and a log file.
import sys
import os
import json
from importlib import import_module
# e.g. "module.submodule.handler" -- everything before the last dot is the
# module path (under the "code" package), the last element is the function.
handler_function = os.getenv("LAMBDA_HANDLER_FUNCTION")
function_name = os.getenv("LAMBDA_FUNCTION_NAME")
logfile = os.getenv("BOOTSTRAP_LOG_FILE")
log = open(logfile, 'w')
handler_file_name = "code." + ".".join(handler_function.split(".")[:-1])
handler_function_name = handler_function.split(".")[-1]
log.write(json.dumps({'module':handler_file_name, 'method': handler_function_name, 'name':function_name}) + "\n")
# Responses go to fd 3, which the parent process is assumed to have opened
# for us -- TODO confirm against the launcher.
output = os.fdopen(3, 'w')
module = import_module(handler_file_name)
handler_method = getattr(module, handler_function_name)
# One JSON event per stdin line; errors are logged, never re-raised, so a
# bad event does not kill the loop.
for event in sys.stdin:
    response = {}
    try:
        eventData = json.loads(event)['eventData']
        # NOTE(review): the event is re-serialized and passed as a JSON
        # string (plus an empty context) -- confirm handlers expect a string.
        response = handler_method(json.dumps(eventData, indent=4), "")
        output.write(json.dumps(response) + "\n")
    except Exception as e:
        log.write(json.dumps({'error': str(e)}) + "\n")
    finally:
        # Always record the event/response pair, even on failure.
        log.write(json.dumps({'event': event, 'response': response}) + "\n")
output.close()
| StarcoderdataPython |
129791 | <filename>vwoptimizelib/third_party/networkx/classes/digraph.py
"""Base class for directed graphs."""
# Copyright (C) 2004-2015 by
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# All rights reserved.
# BSD license.
from copy import deepcopy
from ...networkx.classes.graph import Graph
from ...networkx.exception import NetworkXError
from ...networkx import convert
__author__ = """\n""".join(['<NAME> (<EMAIL>)',
'<NAME> (<EMAIL>)',
'<NAME>(<EMAIL>)'])
class DiGraph(Graph):
"""
Base class for directed graphs.
A DiGraph stores nodes and edges with optional data, or attributes.
DiGraphs hold directed edges. Self loops are allowed but multiple
(parallel) edges are not.
Nodes can be arbitrary (hashable) Python objects with optional
key/value attributes.
Edges are represented as links between nodes with optional
key/value attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
Graph
MultiGraph
MultiDiGraph
Examples
--------
Create an empty graph structure (a "null graph") with no nodes and
no edges.
>>> G = nx.DiGraph()
G can be grown in several ways.
**Nodes:**
Add one node at a time:
>>> G.add_node(1)
Add the nodes from any container (a list, dict, set or
even the lines from a file or the nodes from another graph).
>>> G.add_nodes_from([2,3])
>>> G.add_nodes_from(range(100,110))
>>> H=nx.Graph()
>>> H.add_path([0,1,2,3,4,5,6,7,8,9])
>>> G.add_nodes_from(H)
In addition to strings and integers any hashable Python object
(except None) can represent a node, e.g. a customized node object,
or even another Graph.
>>> G.add_node(H)
**Edges:**
G can also be grown by adding edges.
Add one edge,
>>> G.add_edge(1, 2)
a list of edges,
>>> G.add_edges_from([(1,2),(1,3)])
or a collection of edges,
>>> G.add_edges_from(H.edges())
If some edges connect nodes not yet in the graph, the nodes
are added automatically. There are no errors when adding
nodes or edges that already exist.
**Attributes:**
Each graph, node, and edge can hold key/value attribute pairs
in an associated attribute dictionary (the keys must be hashable).
By default these are empty, but can be added or changed using
add_edge, add_node or direct manipulation of the attribute
dictionaries named graph, node and edge respectively.
>>> G = nx.DiGraph(day="Friday")
>>> G.graph
{'day': 'Friday'}
Add node attributes using add_node(), add_nodes_from() or G.node
>>> G.add_node(1, time='5pm')
>>> G.add_nodes_from([3], time='2pm')
>>> G.node[1]
{'time': '5pm'}
>>> G.node[1]['room'] = 714
>>> del G.node[1]['room'] # remove attribute
>>> G.nodes(data=True)
[(1, {'time': '5pm'}), (3, {'time': '2pm'})]
Warning: adding a node to G.node does not add it to the graph.
Add edge attributes using add_edge(), add_edges_from(), subscript
notation, or G.edge.
>>> G.add_edge(1, 2, weight=4.7 )
>>> G.add_edges_from([(3,4),(4,5)], color='red')
>>> G.add_edges_from([(1,2,{'color':'blue'}), (2,3,{'weight':8})])
>>> G[1][2]['weight'] = 4.7
>>> G.edge[1][2]['weight'] = 4
**Shortcuts:**
Many common graph features allow python syntax to speed reporting.
>>> 1 in G # check if node in graph
True
>>> [n for n in G if n<3] # iterate through nodes
[1, 2]
>>> len(G) # number of nodes in graph
5
The fastest way to traverse all edges of a graph is via
adjacency_iter(), but the edges() method is often more convenient.
>>> for n,nbrsdict in G.adjacency_iter():
... for nbr,eattr in nbrsdict.items():
... if 'weight' in eattr:
... (n,nbr,eattr['weight'])
(1, 2, 4)
(2, 3, 8)
>>> G.edges(data='weight')
[(1, 2, 4), (2, 3, 8), (3, 4, None), (4, 5, None)]
**Reporting:**
Simple graph information is obtained using methods.
Iterator versions of many reporting methods exist for efficiency.
Methods exist for reporting nodes(), edges(), neighbors() and degree()
as well as the number of nodes and edges.
For details on these and other miscellaneous methods, see below.
**Subclasses (Advanced):**
The Graph class uses a dict-of-dict-of-dict data structure.
The outer dict (node_dict) holds adjacency lists keyed by node.
The next dict (adjlist) represents the adjacency list and holds
edge data keyed by neighbor. The inner dict (edge_attr) represents
the edge data and holds edge attribute values keyed by attribute names.
Each of these three dicts can be replaced by a user defined
dict-like object. In general, the dict-like features should be
maintained but extra features can be added. To replace one of the
dicts create a new graph class by changing the class(!) variable
holding the factory for that dict-like structure. The variable names
are node_dict_factory, adjlist_dict_factory and edge_attr_dict_factory.
node_dict_factory : function, optional (default: dict)
Factory function to be used to create the outer-most dict
in the data structure that holds adjacency lists keyed by node.
It should require no arguments and return a dict-like object.
adjlist_dict_factory : function, optional (default: dict)
Factory function to be used to create the adjacency list
dict which holds edge data keyed by neighbor.
It should require no arguments and return a dict-like object
edge_attr_dict_factory : function, optional (default: dict)
Factory function to be used to create the edge attribute
dict which holds attrbute values keyed by attribute name.
It should require no arguments and return a dict-like object.
Examples
--------
Create a graph object that tracks the order nodes are added.
>>> from collections import OrderedDict
>>> class OrderedNodeGraph(nx.Graph):
... node_dict_factory=OrderedDict
>>> G=OrderedNodeGraph()
>>> G.add_nodes_from( (2,1) )
>>> G.nodes()
[2, 1]
>>> G.add_edges_from( ((2,2), (2,1), (1,1)) )
>>> G.edges()
[(2, 1), (2, 2), (1, 1)]
Create a graph object that tracks the order nodes are added
and for each node track the order that neighbors are added.
>>> class OrderedGraph(nx.Graph):
... node_dict_factory = OrderedDict
... adjlist_dict_factory = OrderedDict
>>> G = OrderedGraph()
>>> G.add_nodes_from( (2,1) )
>>> G.nodes()
[2, 1]
>>> G.add_edges_from( ((2,2), (2,1), (1,1)) )
>>> G.edges()
[(2, 2), (2, 1), (1, 1)]
Create a low memory graph class that effectively disallows edge
attributes by using a single attribute dict for all edges.
This reduces the memory used, but you lose edge attributes.
>>> class ThinGraph(nx.Graph):
... all_edge_dict = {'weight': 1}
... def single_edge_dict(self):
... return self.all_edge_dict
... edge_attr_dict_factory = single_edge_dict
>>> G = ThinGraph()
>>> G.add_edge(2,1)
>>> G.edges(data= True)
[(1, 2, {'weight': 1})]
>>> G.add_edge(2,2)
>>> G[2][1] is G[2][2]
True
"""
def __init__(self, data=None, **attr):
"""Initialize a graph with edges, name, graph attributes.
Parameters
----------
data : input graph
Data to initialize graph. If data=None (default) an empty
graph is created. The data can be an edge list, or any
NetworkX graph object. If the corresponding optional Python
packages are installed the data can also be a NumPy matrix
or 2d ndarray, a SciPy sparse matrix, or a PyGraphviz graph.
name : string, optional (default='')
An optional name for the graph.
attr : keyword arguments, optional (default= no attributes)
Attributes to add to graph as key=value pairs.
See Also
--------
convert
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G = nx.Graph(name='my graph')
>>> e = [(1,2),(2,3),(3,4)] # list of edges
>>> G = nx.Graph(e)
Arbitrary graph attribute pairs (key=value) may be assigned
>>> G=nx.Graph(e, day="Friday")
>>> G.graph
{'day': 'Friday'}
"""
self.node_dict_factory = ndf = self.node_dict_factory
self.adjlist_dict_factory = self.adjlist_dict_factory
self.edge_attr_dict_factory = self.edge_attr_dict_factory
self.graph = {} # dictionary for graph attributes
self.node = ndf() # dictionary for node attributes
# We store two adjacency lists:
# the predecessors of node n are stored in the dict self.pred
# the successors of node n are stored in the dict self.succ=self.adj
self.adj = ndf() # empty adjacency dictionary
self.pred = ndf() # predecessor
self.succ = self.adj # successor
# attempt to load graph with data
if data is not None:
convert.to_networkx_graph(data,create_using=self)
# load graph attributes (must be after convert)
self.graph.update(attr)
self.edge=self.adj
def add_node(self, n, attr_dict=None, **attr):
"""Add a single node n and update node attributes.
Parameters
----------
n : node
A node can be any hashable Python object except None.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of node attributes. Key/value pairs will
update existing data associated with the node.
attr : keyword arguments, optional
Set or change attributes using key=value.
See Also
--------
add_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_node(1)
>>> G.add_node('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_node(K3)
>>> G.number_of_nodes()
3
Use keywords set/change node attributes:
>>> G.add_node(1,size=10)
>>> G.add_node(3,weight=0.4,UTM=('13S',382871,3972649))
Notes
-----
A hashable object is one that can be used as a key in a Python
dictionary. This includes strings, numbers, tuples of strings
and numbers, etc.
On many platforms hashable items also include mutables such as
NetworkX Graphs, though one should be careful that the hash
doesn't change on mutables.
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
if n not in self.succ:
self.succ[n] = self.adjlist_dict_factory()
self.pred[n] = self.adjlist_dict_factory()
self.node[n] = attr_dict
else: # update attr even if node already exists
self.node[n].update(attr_dict)
def add_nodes_from(self, nodes, **attr):
"""Add multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.).
OR
A container of (node, attribute dict) tuples.
Node attributes are updated using the attribute dict.
attr : keyword arguments, optional (default= no attributes)
Update attributes for all nodes in nodes.
Node attributes specified in nodes as a tuple
take precedence over attributes specified generally.
See Also
--------
add_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_nodes_from('Hello')
>>> K3 = nx.Graph([(0,1),(1,2),(2,0)])
>>> G.add_nodes_from(K3)
>>> sorted(G.nodes(),key=str)
[0, 1, 2, 'H', 'e', 'l', 'o']
Use keywords to update specific node attributes for every node.
>>> G.add_nodes_from([1,2], size=10)
>>> G.add_nodes_from([3,4], weight=0.4)
Use (node, attrdict) tuples to update attributes for specific
nodes.
>>> G.add_nodes_from([(1,dict(size=11)), (2,{'color':'blue'})])
>>> G.node[1]['size']
11
>>> H = nx.Graph()
>>> H.add_nodes_from(G.nodes(data=True))
>>> H.node[1]['size']
11
"""
for n in nodes:
# keep all this inside try/except because
# CPython throws TypeError on n not in self.succ,
# while pre-2.7.5 ironpython throws on self.succ[n]
try:
if n not in self.succ:
self.succ[n] = self.adjlist_dict_factory()
self.pred[n] = self.adjlist_dict_factory()
self.node[n] = attr.copy()
else:
self.node[n].update(attr)
except TypeError:
nn,ndict = n
if nn not in self.succ:
self.succ[nn] = self.adjlist_dict_factory()
self.pred[nn] = self.adjlist_dict_factory()
newdict = attr.copy()
newdict.update(ndict)
self.node[nn] = newdict
else:
olddict = self.node[nn]
olddict.update(attr)
olddict.update(ndict)
def remove_node(self, n):
"""Remove node n.
Removes the node n and all adjacent edges.
Attempting to remove a non-existent node will raise an exception.
Parameters
----------
n : node
A node in the graph
Raises
-------
NetworkXError
If n is not in the graph.
See Also
--------
remove_nodes_from
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.edges()
[(0, 1), (1, 2)]
>>> G.remove_node(1)
>>> G.edges()
[]
"""
try:
nbrs=self.succ[n]
del self.node[n]
except KeyError: # NetworkXError if n not in self
raise NetworkXError("The node %s is not in the digraph."%(n,))
for u in nbrs:
del self.pred[u][n] # remove all edges n-u in digraph
del self.succ[n] # remove node from succ
for u in self.pred[n]:
del self.succ[u][n] # remove all edges n-u in digraph
del self.pred[n] # remove node from pred
def remove_nodes_from(self, nbunch):
"""Remove multiple nodes.
Parameters
----------
nodes : iterable container
A container of nodes (list, dict, set, etc.). If a node
in the container is not in the graph it is silently
ignored.
See Also
--------
remove_node
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> e = G.nodes()
>>> e
[0, 1, 2]
>>> G.remove_nodes_from(e)
>>> G.nodes()
[]
"""
for n in nbunch:
try:
succs=self.succ[n]
del self.node[n]
for u in succs:
del self.pred[u][n] # remove all edges n-u in digraph
del self.succ[n] # now remove node
for u in self.pred[n]:
del self.succ[u][n] # remove all edges n-u in digraph
del self.pred[n] # now remove node
except KeyError:
pass # silent failure on remove
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between u and v.
The nodes u and v will be automatically added if they are
not already in the graph.
Edge attributes can be specified with keywords or by providing
a dictionary with key/value pairs. See examples below.
Parameters
----------
u, v : nodes
Nodes can be, for example, strings or numbers.
Nodes must be hashable (and not None) Python objects.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with the edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edges_from : add a collection of edges
Notes
-----
Adding an edge that already exists updates the edge data.
Many NetworkX algorithms designed for weighted graphs use as
the edge weight a numerical value assigned to a keyword
which by default is 'weight'.
Examples
--------
The following all add the edge e=(1,2) to graph G:
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> e = (1,2)
>>> G.add_edge(1, 2) # explicit two-node form
>>> G.add_edge(*e) # single edge as tuple of two nodes
>>> G.add_edges_from( [(1,2)] ) # add edges from iterable container
Associate data to edges using keywords:
>>> G.add_edge(1, 2, weight=3)
>>> G.add_edge(1, 3, weight=7, capacity=15, length=342.7)
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dictionary.")
# add nodes
if u not in self.succ:
self.succ[u]= self.adjlist_dict_factory()
self.pred[u]= self.adjlist_dict_factory()
self.node[u] = {}
if v not in self.succ:
self.succ[v]= self.adjlist_dict_factory()
self.pred[v]= self.adjlist_dict_factory()
self.node[v] = {}
# add the edge
datadict=self.adj[u].get(v,self.edge_attr_dict_factory())
datadict.update(attr_dict)
self.succ[u][v]=datadict
self.pred[v][u]=datadict
def add_edges_from(self, ebunch, attr_dict=None, **attr):
"""Add all the edges in ebunch.
Parameters
----------
ebunch : container of edges
Each edge given in the container will be added to the
graph. The edges must be given as as 2-tuples (u,v) or
3-tuples (u,v,d) where d is a dictionary containing edge
data.
attr_dict : dictionary, optional (default= no attributes)
Dictionary of edge attributes. Key/value pairs will
update existing data associated with each edge.
attr : keyword arguments, optional
Edge data (or labels or objects) can be assigned using
keyword arguments.
See Also
--------
add_edge : add a single edge
add_weighted_edges_from : convenient way to add weighted edges
Notes
-----
Adding the same edge twice has no effect but any edge data
will be updated when each duplicate edge is added.
Edge attributes specified in edges take precedence
over attributes specified generally.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_edges_from([(0,1),(1,2)]) # using a list of edge tuples
>>> e = zip(range(0,3),range(1,4))
>>> G.add_edges_from(e) # Add the path graph 0-1-2-3
Associate data to edges
>>> G.add_edges_from([(1,2),(2,3)], weight=3)
>>> G.add_edges_from([(3,4),(1,4)], label='WN2898')
"""
# set up attribute dict
if attr_dict is None:
attr_dict=attr
else:
try:
attr_dict.update(attr)
except AttributeError:
raise NetworkXError(\
"The attr_dict argument must be a dict.")
# process ebunch
for e in ebunch:
ne = len(e)
if ne==3:
u,v,dd = e
assert hasattr(dd,"update")
elif ne==2:
u,v = e
dd = {}
else:
raise NetworkXError(\
"Edge tuple %s must be a 2-tuple or 3-tuple."%(e,))
if u not in self.succ:
self.succ[u] = self.adjlist_dict_factory()
self.pred[u] = self.adjlist_dict_factory()
self.node[u] = {}
if v not in self.succ:
self.succ[v] = self.adjlist_dict_factory()
self.pred[v] = self.adjlist_dict_factory()
self.node[v] = {}
datadict=self.adj[u].get(v,self.edge_attr_dict_factory())
datadict.update(attr_dict)
datadict.update(dd)
self.succ[u][v] = datadict
self.pred[v][u] = datadict
def remove_edge(self, u, v):
"""Remove the edge between u and v.
Parameters
----------
u, v : nodes
Remove the edge between nodes u and v.
Raises
------
NetworkXError
If there is not an edge between u and v.
See Also
--------
remove_edges_from : remove a collection of edges
Examples
--------
>>> G = nx.Graph() # or DiGraph, etc
>>> G.add_path([0,1,2,3])
>>> G.remove_edge(0,1)
>>> e = (1,2)
>>> G.remove_edge(*e) # unpacks e from an edge tuple
>>> e = (2,3,{'weight':7}) # an edge with attribute data
>>> G.remove_edge(*e[:2]) # select first part of edge tuple
"""
try:
del self.succ[u][v]
del self.pred[v][u]
except KeyError:
raise NetworkXError("The edge %s-%s not in graph."%(u,v))
def remove_edges_from(self, ebunch):
"""Remove all edges specified in ebunch.
Parameters
----------
ebunch: list or container of edge tuples
Each edge given in the list or container will be removed
from the graph. The edges can be:
- 2-tuples (u,v) edge between u and v.
- 3-tuples (u,v,k) where k is ignored.
See Also
--------
remove_edge : remove a single edge
Notes
-----
Will fail silently if an edge in ebunch is not in the graph.
Examples
--------
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_path([0,1,2,3])
>>> ebunch=[(1,2),(2,3)]
>>> G.remove_edges_from(ebunch)
"""
for e in ebunch:
(u,v)=e[:2] # ignore edge data
if u in self.succ and v in self.succ[u]:
del self.succ[u][v]
del self.pred[v][u]
def has_successor(self, u, v):
"""Return True if node u has successor v.
This is true if graph has the edge u->v.
"""
return (u in self.succ and v in self.succ[u])
def has_predecessor(self, u, v):
"""Return True if node u has predecessor v.
This is true if graph has the edge u<-v.
"""
return (u in self.pred and v in self.pred[u])
def successors_iter(self,n):
"""Return an iterator over successor nodes of n.
neighbors_iter() and successors_iter() are the same.
"""
try:
return iter(self.succ[n])
except KeyError:
raise NetworkXError("The node %s is not in the digraph."%(n,))
def predecessors_iter(self,n):
"""Return an iterator over predecessor nodes of n."""
try:
return iter(self.pred[n])
except KeyError:
raise NetworkXError("The node %s is not in the digraph."%(n,))
def successors(self, n):
"""Return a list of successor nodes of n.
neighbors() and successors() are the same function.
"""
return list(self.successors_iter(n))
def predecessors(self, n):
"""Return a list of predecessor nodes of n."""
return list(self.predecessors_iter(n))
# digraph definitions
neighbors = successors
neighbors_iter = successors_iter
def edges_iter(self, nbunch=None, data=False, default=None):
"""Return an iterator over the edges.
Edges are returned as tuples with optional data
in the order (node, neighbor, data).
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : string or bool, optional (default=False)
The edge attribute returned in 3-tuple (u,v,ddict[data]).
If True, return edge attribute dict in 3-tuple (u,v,ddict).
If False, return 2-tuple (u,v).
default : value, optional (default=None)
Value used for edges that dont have the requested attribute.
Only relevant if data is not True or False.
Returns
-------
edge_iter : iterator
An iterator of (u,v) or (u,v,d) tuples of edges.
See Also
--------
edges : return a list of edges
Notes
-----
Nodes in nbunch that are not in the graph will be (quietly) ignored.
For directed graphs this returns the out-edges.
Examples
--------
>>> G = nx.DiGraph() # or MultiDiGraph, etc
>>> G.add_path([0,1,2])
>>> G.add_edge(2,3,weight=5)
>>> [e for e in G.edges_iter()]
[(0, 1), (1, 2), (2, 3)]
>>> list(G.edges_iter(data=True)) # default data is {} (empty dict)
[(0, 1, {}), (1, 2, {}), (2, 3, {'weight': 5})]
>>> list(G.edges_iter(data='weight', default=1))
[(0, 1, 1), (1, 2, 1), (2, 3, 5)]
>>> list(G.edges_iter([0,2]))
[(0, 1), (2, 3)]
>>> list(G.edges_iter(0))
[(0, 1)]
"""
if nbunch is None:
nodes_nbrs=self.adj.items()
else:
nodes_nbrs=((n,self.adj[n]) for n in self.nbunch_iter(nbunch))
if data is True:
for n,nbrs in nodes_nbrs:
for nbr,ddict in nbrs.items():
yield (n,nbr,ddict)
elif data is not False:
for n,nbrs in nodes_nbrs:
for nbr,ddict in nbrs.items():
d=ddict[data] if data in ddict else default
yield (n,nbr,d)
else:
for n,nbrs in nodes_nbrs:
for nbr in nbrs:
yield (n,nbr)
# alias out_edges to edges
out_edges_iter=edges_iter
out_edges=Graph.edges
def in_edges_iter(self, nbunch=None, data=False):
"""Return an iterator over the incoming edges.
Parameters
----------
nbunch : iterable container, optional (default= all nodes)
A container of nodes. The container will be iterated
through once.
data : bool, optional (default=False)
If True, return edge attribute dict in 3-tuple (u,v,data).
Returns
-------
in_edge_iter : iterator
An iterator of (u,v) or (u,v,d) tuples of incoming edges.
See Also
--------
edges_iter : return an iterator of edges
"""
if nbunch is None:
nodes_nbrs=self.pred.items()
else:
nodes_nbrs=((n,self.pred[n]) for n in self.nbunch_iter(nbunch))
if data:
for n,nbrs in nodes_nbrs:
for nbr,data in nbrs.items():
yield (nbr,n,data)
else:
for n,nbrs in nodes_nbrs:
for nbr in nbrs:
yield (nbr,n)
def in_edges(self, nbunch=None, data=False):
    """Return a list of the incoming edges.

    See Also
    --------
    in_edges_iter : the lazy equivalent of this accessor
    """
    return [edge for edge in self.in_edges_iter(nbunch, data)]
def degree_iter(self, nbunch=None, weight=None):
    """Iterate over (node, degree) pairs.

    Degree is the number of incoming plus outgoing edges.  If ``weight``
    names an edge attribute, the attribute values are summed instead
    (edges missing the attribute count as 1).

    Parameters
    ----------
    nbunch : iterable container, optional (default=all nodes)
        A container of nodes, iterated through once.
    weight : string or None, optional (default=None)
        Edge attribute holding the numerical weight.
    """
    if nbunch is None:
        nodes = self.succ.keys()
    else:
        nodes = self.nbunch_iter(nbunch)
    for node in nodes:
        succs = self.succ[node]
        preds = self.pred[node]
        if weight is None:
            yield (node, len(succs) + len(preds))
        else:
            weighted = sum(d.get(weight, 1) for d in succs.values())
            weighted += sum(d.get(weight, 1) for d in preds.values())
            yield (node, weighted)
def in_degree_iter(self, nbunch=None, weight=None):
    """Iterate over (node, in-degree) pairs.

    In-degree is the number of edges pointing into the node; with
    ``weight`` set, the named edge attribute is summed instead (missing
    values count as 1).

    Parameters
    ----------
    nbunch : iterable container, optional (default=all nodes)
        A container of nodes, iterated through once.
    weight : string or None, optional (default=None)
        Edge attribute holding the numerical weight.
    """
    if nbunch is None:
        source = self.pred.items()
    else:
        source = ((node, self.pred[node]) for node in self.nbunch_iter(nbunch))
    for node, preds in source:
        if weight is None:
            yield (node, len(preds))
        else:
            yield (node, sum(d.get(weight, 1) for d in preds.values()))
def out_degree_iter(self, nbunch=None, weight=None):
    """Iterate over (node, out-degree) pairs.

    Out-degree is the number of edges pointing out of the node; with
    ``weight`` set, the named edge attribute is summed instead (missing
    values count as 1).

    Parameters
    ----------
    nbunch : iterable container, optional (default=all nodes)
        A container of nodes, iterated through once.
    weight : string or None, optional (default=None)
        Edge attribute holding the numerical weight.
    """
    if nbunch is None:
        source = self.succ.items()
    else:
        source = ((node, self.succ[node]) for node in self.nbunch_iter(nbunch))
    for node, succs in source:
        if weight is None:
            yield (node, len(succs))
        else:
            yield (node, sum(d.get(weight, 1) for d in succs.values()))
def in_degree(self, nbunch=None, weight=None):
    """Return the in-degree of a node or of several nodes.

    If ``nbunch`` is a single node in the graph, a bare number is
    returned; otherwise a dict mapping node -> in-degree.

    See Also
    --------
    degree, out_degree, in_degree_iter
    """
    pairs = self.in_degree_iter(nbunch, weight)
    if nbunch in self:
        # single node: take the one (node, degree) pair
        return next(pairs)[1]
    return dict(pairs)
def out_degree(self, nbunch=None, weight=None):
    """Return the out-degree of a node or of several nodes.

    If ``nbunch`` is a single node in the graph, a bare number is
    returned; otherwise a dict mapping node -> out-degree.

    See Also
    --------
    degree, in_degree, out_degree_iter
    """
    pairs = self.out_degree_iter(nbunch, weight)
    if nbunch in self:
        # single node: take the one (node, degree) pair
        return next(pairs)[1]
    return dict(pairs)
def clear(self):
    """Remove all nodes and edges from the graph.

    This also clears the graph, node, and edge attribute dictionaries.
    """
    for table in (self.succ, self.pred, self.node, self.graph):
        table.clear()
def is_multigraph(self):
    """Return False: a DiGraph stores at most one edge per (u, v) pair."""
    return False
def is_directed(self):
    """Return True: edges in a DiGraph are directed."""
    return True
def to_directed(self):
    """Return a directed copy of the graph.

    Returns a deepcopy of this DiGraph: all graph, node, and edge
    attributes are fully copied, in contrast to ``DiGraph(G)`` which
    makes a shallow copy.  See the ``copy`` module documentation for
    the shallow/deep distinction.
    """
    return deepcopy(self)
def to_undirected(self, reciprocal=False):
    """Return an undirected representation of the digraph.

    Parameters
    ----------
    reciprocal : bool (optional)
        If True, keep only edges that appear in both directions in the
        original digraph.

    Returns
    -------
    G : Graph
        An undirected graph with the same name and nodes.  If both
        (u, v) and (v, u) exist with different data, one edge dict wins
        arbitrarily (whichever is encountered last).  Graph and node
        attributes are deep-copied.
    """
    undirected = Graph()
    undirected.name = self.name
    undirected.add_nodes_from(self)

    def _edge_stream():
        # yield every directed edge, deep-copying its data dict;
        # in reciprocal mode, skip edges lacking a reverse counterpart
        for u, nbrs in self.adjacency_iter():
            for v, d in nbrs.items():
                if reciprocal is True and v not in self.pred[u]:
                    continue
                yield (u, v, deepcopy(d))

    undirected.add_edges_from(_edge_stream())
    undirected.graph = deepcopy(self.graph)
    undirected.node = deepcopy(self.node)
    return undirected
def reverse(self, copy=True):
    """Return the graph with every edge direction flipped.

    Parameters
    ----------
    copy : bool, optional (default=True)
        If True, return a new DiGraph holding the reversed edges.
        If False, reverse in place (this mutates the original graph)
        and return the graph itself.
    """
    if not copy:
        # in-place: swap the successor/predecessor tables
        self.pred, self.succ = self.succ, self.pred
        self.adj = self.succ
        return self
    reversed_graph = self.__class__(name="Reverse of (%s)" % self.name)
    reversed_graph.add_nodes_from(self)
    reversed_graph.add_edges_from(
        (v, u, deepcopy(d)) for u, v, d in self.edges(data=True))
    reversed_graph.graph = deepcopy(self.graph)
    reversed_graph.node = deepcopy(self.node)
    return reversed_graph
def subgraph(self, nbunch):
    """Return the subgraph induced on the nodes in ``nbunch``.

    The result contains the nodes of ``nbunch`` and every edge between
    them.  Graph, node, and edge attribute dicts are shared with the
    original graph: structural changes to the subgraph do not propagate
    back, but attribute mutations do.  Use ``nx.Graph(G.subgraph(n))``
    or ``.copy()`` for independent attributes.
    """
    sub = self.__class__()
    # register the requested nodes, sharing their attribute dicts
    for node in self.nbunch_iter(nbunch):
        sub.node[node] = self.node[node]
    succ_table = sub.succ
    pred_table = sub.pred
    for node in sub:
        succ_table[node] = sub.adjlist_dict_factory()
        pred_table[node] = sub.adjlist_dict_factory()
    # keep only edges whose both endpoints survived, storing each edge
    # in both the successor and predecessor tables
    for u in succ_table:
        for v, datadict in self.succ[u].items():
            if v in succ_table:
                succ_table[u][v] = datadict
                pred_table[v][u] = datadict
    sub.graph = self.graph
    return sub
| StarcoderdataPython |
3281382 | #request
class MediaRequest(object):
    """Builds and sends Discord ``VOICE_STATE_UPDATE`` gateway payloads."""

    __slots__ = ['gatewayobj']

    def __init__(self, gatewayobj):
        # gateway connection object exposing .send() and .OPCODE
        self.gatewayobj = gatewayobj

    def call(self, channelID, guildID=None, mute=False, deaf=False, video=False):
        """Join (or update self state in) a voice channel.

        guildID of None denotes a DM call; mute/deaf/video set the
        corresponding self_* flags in the payload.
        """
        self.gatewayobj.send(
            {
                "op": self.gatewayobj.OPCODE.VOICE_STATE_UPDATE,
                "d": {
                    "guild_id": guildID,
                    "channel_id": channelID,
                    "self_mute": mute,
                    "self_deaf": deaf,
                    "self_video": video,
                },
            }
        )

    def endCall(self):
        """Leave the current voice channel.

        A null channel_id is how the gateway signals a disconnect.
        Previously this duplicated the whole payload literal from
        call(); it now delegates, producing the identical payload.
        """
        self.call(None)
| StarcoderdataPython |
1776817 | from kv1_811 import *
from inserter import insert,version_imported,reject
from bs4 import BeautifulSoup
import urllib2
from datetime import datetime,timedelta
from htm import setLineColors,cleanDest,generatePool
import logging
from settings.const import *
logger = logging.getLogger("importer")
def getDataSource():
    """Describe the HTMbuzz KV1 data source, keyed by datasource id."""
    source = {
        'operator_id': 'HTMBUZZ',
        'name': 'HTMbuzz KV1',
        'description': 'HTMbuzz KV1 leveringen',
        'email': None,
        'url': None,
    }
    return {'1': source}
def getOperator():
    """Static operator records for HTM and its HTMbuzz brand."""
    shared = {
        'phone': '0900-4864636',
        'timezone': 'Europe/Amsterdam',
        'language': 'nl',
    }
    htm = dict(shared, privatecode='HTM', operator_id='HTM',
               name='HTM', url='http://www.htm.net')
    htmbuzz = dict(shared, privatecode='HTMBUZZ', operator_id='HTMBUZZ',
                   name='HTMbuzz', url='http://www.htmbuzz.nl')
    return {'HTM': htm, 'HTMBUZZ': htmbuzz}
def getMergeStrategies(conn):
    """Fetch the DATASOURCE merge-strategy row(s) spanning all schedule
    versions (min validfrom .. max validthru), one per data owner."""
    query = """
SELECT 'DATASOURCE' as type,'1' as datasourceref,min(validfrom) as fromdate,max(validthru) as todate FROM schedvers GROUP BY dataownercode
"""
    cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    cur.execute(query)
    rows = cur.fetchall()
    cur.close()
    return rows
def import_zip(path, filename, version):
    """Import one KV1 delivery zip: register a rejection when the delivery
    is already expired, otherwise transform and insert it.

    Parameters: `path`/`filename` locate the zip; `version` is accepted
    for interface compatibility but unused here.
    """
    meta, conn = load(path, filename)
    # Deliveries whose validity ended more than a day ago are rejected.
    if datetime.strptime(meta['enddate'].replace('-', ''), '%Y%m%d') < (datetime.now() - timedelta(days=1)):
        # BUG FIX: `data` was referenced here without ever being assigned,
        # so expired deliveries raised NameError instead of being rejected.
        data = {}
        data['DATASOURCE'] = getDataSource()
        data['VERSION'] = {}
        data['VERSION']['1'] = {'privatecode': 'HTMBUZZ:' + filename,
                                'datasourceref': '1',
                                'operator_id': 'HTMBUZZ:' + filename,
                                'startdate': meta['startdate'],
                                'enddate': meta['enddate'],
                                'error': 'ALREADY_EXPIRED',
                                'description': filename}
        logger.info('Reject ' + filename + '\n' + str(data['VERSION']['1']))
        reject(data)
        conn.commit()
        conn.close()
        return
    # The original wrapped everything below in `try: ... except: raise`,
    # which is a no-op; dropped.  NOTE(review): conn is not closed when an
    # exception escapes -- confirm whether callers rely on that.
    data = {}
    cleanDest(conn)
    if pool_generation_enabled:
        generatePool(conn)
    data['OPERATOR'] = getOperator()
    data['MERGESTRATEGY'] = getMergeStrategies(conn)
    data['DATASOURCE'] = getDataSource()
    data['VERSION'] = {}
    data['VERSION']['1'] = {'privatecode': 'HTMBUZZ:' + filename,
                            'datasourceref': '1',
                            'operator_id': 'HTMBUZZ:' + filename,
                            'startdate': meta['startdate'],
                            'enddate': meta['enddate'],
                            'description': filename}
    data['DESTINATIONDISPLAY'] = getDestinationDisplays(conn)
    data['LINE'] = getLineWithGeneratedNames(conn)
    data['STOPPOINT'] = getStopPoints(conn)
    data['STOPAREA'] = getStopAreas(conn)
    data['AVAILABILITYCONDITION'] = getAvailabilityConditionsUsingOperday(conn)
    data['PRODUCTCATEGORY'] = getBISONproductcategories()
    data['ADMINISTRATIVEZONE'] = getAdministrativeZones(conn)
    timedemandGroupRefForJourney, data['TIMEDEMANDGROUP'] = calculateTimeDemandGroups(conn)
    routeRefForPattern, data['ROUTE'] = clusterPatternsIntoRoute(conn, getPool811)
    data['JOURNEYPATTERN'] = getJourneyPatterns(routeRefForPattern, conn, data['ROUTE'])
    data['JOURNEY'] = getJourneys(timedemandGroupRefForJourney, conn)
    data['NOTICEASSIGNMENT'] = {}
    data['NOTICE'] = {}
    data['NOTICEGROUP'] = {}
    insert(data)
    conn.close()
    setLineColors()
def download(url, filename):
    """Stream *url* to /tmp/<filename> with a console progress readout,
    then hand the file to import_zip().  Python 2 code (urllib2, print
    statements)."""
    u = urllib2.urlopen(url)
    f = open('/tmp/'+filename, 'wb')
    meta = u.info()
    # Content-Length is required; missing header raises IndexError here
    file_size = int(meta.getheaders("Content-Length")[0])
    print "Downloading: %s Bytes: %s" % (filename, file_size)
    file_size_dl = 0
    block_sz = 8192
    while True:
        buffer = u.read(block_sz)
        if not buffer:
            break
        file_size_dl += len(buffer)
        f.write(buffer)
        status = r"%10d  [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
        # chr(8) backspaces rewrite the same console line in place
        status = status + chr(8)*(len(status)+1)
        print status,
    print
    f.close()
    import_zip('/tmp', filename, None)
url = 'http://data.ndovloket.nl/htmbuzz/'
def sync():
    """Scrape the NDOV directory listing (newest first) and import every
    zip that has not been imported yet.  Failures are logged and the loop
    continues with the next file."""
    f = urllib2.urlopen(url+'?order=d')
    soup = BeautifulSoup(f.read())
    for link in soup.find_all('a'):
        link = link.get('href')
        filename = urllib2.unquote(link)
        if '.zip' in link.lower():
            # version key must match the privatecode written by import_zip
            if not version_imported('HTMBUZZ:'+filename):
                try:
                    logger.info('Importing :'+filename)
                    download(url+link, filename)
                except Exception as e:
                    logger.exception(filename, exc_info=True)
                    pass
| StarcoderdataPython |
114658 | #!/usr/bin/env python3
"""
A script to monitor folders and subdirectories for file movement, creation
or modification so that files are automatically converted from predefined
filetypes to target filetypes set by the user.
Zamzar API keys can be obtained by registering at: https://developers.zamzar.com/pricing
"""
# Imports
import time
import json
from Watch import Watch
import os
import sys
# Load the config file
print("Starting HotFolders.py")

# Load and parse the config.  FIX: the original opened the file and never
# closed it; a context manager releases the handle deterministically.
try:
    with open('hotfolders_config.json', 'r') as config_file:
        config_info = json.load(config_file)
except FileNotFoundError:
    print("ERROR: Could not find JSON config file - ensure current working directory contains hotfolders_config.json")
    sys.exit(0)
except json.decoder.JSONDecodeError as err:
    print("ERROR: Could not parse 'hotfolders_config.json' - invalid JSON found:")
    print(err)
    sys.exit(0)

api_key = config_info['api_key'][0]
config_info = config_info["conversions"]

# One Watch object per monitored directory: each path carries its own
# source/target formats and options (auto-extract, subdirectory search, ...).
watch_list = []
for key in config_info:
    # A missing path aborts the program.  NOTE(review): the original
    # comment said "skip it", but the code exits; behaviour preserved.
    if not os.path.exists(key):
        print("ERROR: " + key + " does not exist - cannot monitor\n")
        sys.exit(0)
    else:
        print("Monitoring: " + key)
    # Each key is a directory path.
    watch_list.append(Watch(key, config_info[key]['to'], config_info[key]['from'],
                            config_info[key]['options'], config_info[key]['ignore'],
                            api_key))

# Idle until keyboard interrupt, then clean up the Watch objects.
try:
    while True:
        time.sleep(0.5)
except KeyboardInterrupt:
    for w in watch_list:
        del w
# Upon program termination remake the config file with the new ignore
| StarcoderdataPython |
62629 | <filename>lib/logger.py
from os import environ
from loguru import logger
from sentry_sdk import capture_exception
def info(msg: str) -> None:
    """Log *msg* at INFO level via loguru."""
    logger.info(msg)
def error(exception: Exception) -> None:
    """Log *exception* with traceback; in prod, also forward to Sentry."""
    logger.exception(exception)
    # forward to Sentry only in production so error alerts are triggered
    if environ.get("STAGE") == "prod":
        capture_exception(exception)
| StarcoderdataPython |
127374 | <filename>src/teamboard/migrations/0002_auto_20180822_0812.py
# Generated by Django 2.1 on 2018-08-22 08:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make Profile.team an optional
    (blank/nullable) foreign key to teamboard.Team."""
    dependencies = [
        ('teamboard', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='team',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='teamboard.Team'),
        ),
    ]
| StarcoderdataPython |
4803350 | import pigpio
import time
pi = pigpio.pi()  # connect to the local pigpio daemon
# SPI channel 0, 5 MHz, mode 0 -- presumably a thermocouple/temperature
# ADC returning a 16-bit word; confirm against the wiring and datasheet.
h = pi.spi_open(0, 5000000, 0)
while True:
    val = pi.spi_read(h, 2)  # returns (count, bytes); val[1] is the raw 2 bytes
    # drop the 2 low bits, remaining LSB assumed to be 0.25 degC -- TODO confirm
    temp = (int.from_bytes(val[1], 'big', signed = True) >> 2) / 4
    print('temp = {0:.1f}'.format(temp))
    time.sleep(1)
| StarcoderdataPython |
1703971 | <reponame>haowsun/xgboost_py<filename>compute_fill_acc_by_threshold.py<gh_stars>0
#%%
#coding:utf-8
import os
import pandas as pd
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif']=['Arial Unicode MS'] # render CJK labels correctly
plt.rcParams['axes.unicode_minus']=False # render minus signs correctly
# Parameters
version = 'v1_0'
date = '20210408-20210414'
dataset = 'all'
# Paths
BASE_PATH = '/Users/sunhaowen/Documents/data/merge_relay_info'
FILE_NAME = 'merge_relay_info_flag_{}_95172.xlsx'.format(date)
BASE_PATH_pred = '/Users/sunhaowen/Documents/data/3.ivr_log/final_data/{}'.format(version)
FILE_NAME_pred = 'compute_fill_acc_rate_{}_{}.xlsx'.format(date, dataset)
OUT_BASE_PATH = '/Users/sunhaowen/Documents/data/3.ivr_log/final_data/{}/fill_acc'.format(version)
OUT_FILE_NAME_data = 'fill_acc_{}.xlsx'.format(date)
OUT_FILE_NAME_img = 'fill_acc_{}.png'.format(date)
# Load the merge_relay_info workbook
FILE_PATH = os.path.join(BASE_PATH, FILE_NAME)
merge_relay_info = pd.read_excel(FILE_PATH)
# Keep only voice-bot rows and the columns needed downstream
merge_relay_info_bot = merge_relay_info.loc[merge_relay_info['按键类型'] == '语音机器人']
merge_relay_info_bot = merge_relay_info_bot.loc[:, ['session_id','predict_bu_id', '一级FAQID', '是否路由准确']]
merge_relay_info_bot = merge_relay_info_bot.set_index('session_id')
# Load and pre-process the prediction workbook: split "label_score" pairs
FILE_PATH_pred = os.path.join(BASE_PATH_pred, FILE_NAME_pred)
data_frame_pred = pd.read_excel(FILE_PATH_pred)
data_frame_pred = data_frame_pred.loc[:, ['sessionid', '一级FAQID','pred']]
data_frame_pred['pred_label'], data_frame_pred['pred_val'] = data_frame_pred['pred'].str.split('_').str
data_frame_pred = data_frame_pred.loc[:, ['sessionid', '一级FAQID', 'pred_label', 'pred_val']]
# model class index -> business-unit id; index '12' maps to no BU
label2buid_dict = {'0': 52, '1': 73, '2': 97, '3': 88, '4': 96, '5': 39, '6': 114, '7': 64, '8': 104, '9': 53, '10': 115, '11': 139, '12': None}
data_frame_pred['pred_label'].replace(label2buid_dict, inplace=True)
data_frame_pred = data_frame_pred.set_index('sessionid')
# Compute business slot-filling accuracy
def compute_fill_acc(my_df):
    """Compute business slot-filling accuracy.

    A row counts as correct when predict_bu_id equals the string form of
    一级FAQID.  Returns (correct_count, total_count, accuracy).

    Improvements over the original: an empty frame returns (0, 0, 0.0)
    instead of raising ZeroDivisionError, and the per-row iterrows()
    loop is replaced by a vectorised elementwise comparison
    (Series.astype(str) applies str() per element, matching the old
    str(row['一级FAQID']) exactly).
    """
    all_num_bot = len(my_df)
    if all_num_bot == 0:
        return 0, 0, 0.0
    matches = my_df['predict_bu_id'] == my_df['一级FAQID'].astype(str)
    fill_correct_num_bot = int(matches.sum())
    return fill_correct_num_bot, all_num_bot, fill_correct_num_bot / all_num_bot
#%%
# Baseline: slot-filling accuracy of the original routing, before any
# predictions are substituted
_,_, ori_fill_acc = compute_fill_acc(merge_relay_info_bot)
print(ori_fill_acc)
#%%
# 按阈值计算业务填槽准确率
# Sweep the confidence threshold: at each step, substitute predictions whose
# score exceeds the threshold and record the accuracy delta vs the baseline.
threshold_start = 0.0
threshold_end = 1.0
threshold_step_len = 0.01
fill_acc_x = []
fill_acc_y = []
threshold_tmp = threshold_start
# NOTE(review): repeated float addition accumulates rounding error near the
# end of the range, and pred_correct / pred_bigthanthreshold are never used.
while threshold_tmp <= threshold_end:
    pred_correct = 0
    pred_bigthanthreshold = 0
    tmp_data_frame = merge_relay_info_bot.copy(deep=True)
    for index, row in data_frame_pred.iterrows():
        if float(row['pred_val']) > threshold_tmp:
            tmp_data_frame.loc[index,'predict_bu_id'] = str(row['pred_label'])
    tmp_fill_acc = compute_fill_acc(tmp_data_frame)[2]
    fill_acc_x.append(threshold_tmp)
    fill_acc_y.append(tmp_fill_acc-ori_fill_acc)
    threshold_tmp = threshold_tmp + threshold_step_len
#%%
#计算某一阈值下的填槽情况
def get_fill_condition(t=0.59):
    """Snapshot the per-session fill decisions at threshold *t*.

    Works on the module-level frames merge_relay_info_bot and
    data_frame_pred; returns a copy with is_fill, fill_predict_bu_id,
    and replace_predict_bu_id columns added.
    """
    snapshot = merge_relay_info_bot.copy(deep=True)
    snapshot['fill_predict_bu_id'] = '*'
    snapshot['replace_predict_bu_id'] = snapshot['predict_bu_id']
    for session_id, record in data_frame_pred.iterrows():
        filled = float(record['pred_val']) > t
        snapshot.loc[session_id, 'is_fill'] = 1 if filled else 0
        if filled:
            label = str(record['pred_label'])
            snapshot.loc[session_id, 'fill_predict_bu_id'] = label
            snapshot.loc[session_id, 'replace_predict_bu_id'] = label
    return snapshot
fill_condition_df = get_fill_condition()
fill_condition_df.to_excel('111.xlsx',index=True)
#%%
# Plot the accuracy delta (converted to percentage points) per threshold
fill_acc_y = [i*100 for i in fill_acc_y]
plt.plot(fill_acc_x, fill_acc_y)
plt.xlabel('阈值')
plt.ylabel('业务填槽准确率影响(pp)')
# plt.show()
plt.savefig(os.path.join(OUT_BASE_PATH, OUT_FILE_NAME_img), bbox_inches='tight')
# %%
# Export the sweep data
res_df = pd.DataFrame({'threshold': fill_acc_x, 'sub': fill_acc_y})
res_df.to_excel(os.path.join(OUT_BASE_PATH, OUT_FILE_NAME_data), index=False)
1690484 | <gh_stars>100-1000
from exploits.hashes.collisions import python2_32
from test.exploits.dummy_output import DummyOutput
from input.chars import CharGenerator
import pytest
def test_run_small_collision_output():
    """run() must emit exactly n_collisions outputs, each hashing to target."""
    output = DummyOutput()
    target = '42'
    settings = {'n_collisions': 2, 'length': 7, 'substring_length': 3,
                'target': target, 'target_type': 'image'}
    python2_32.options.update(settings)
    python2_32.run(CharGenerator(), output)
    assert output.count() == settings['n_collisions']
    for candidate in output:
        assert python2_32.hash(candidate) == int(target)
def test_run_more_collision_output():
    """Same as the small case but with more, longer collisions requested."""
    output = DummyOutput()
    target = '42'
    settings = {'n_collisions': 10, 'length': 8, 'substring_length': 4,
                'target': target, 'target_type': 'image'}
    python2_32.options.update(settings)
    python2_32.run(CharGenerator(), output)
    assert output.count() == settings['n_collisions']
    for candidate in output:
        assert python2_32.hash(candidate) == int(target)
def test_preimage():
    """Preimage mode: every output must collide with hash('hello')."""
    output = DummyOutput()
    target = 'hello'
    settings = {'n_collisions': 2, 'length': 8, 'substring_length': 4,
                'target': target, 'target_type': 'preimage'}
    python2_32.options.update(settings)
    python2_32.run(CharGenerator(), output)
    assert output.count() == settings['n_collisions']
    expected = python2_32.hash(target)
    for candidate in output:
        assert python2_32.hash(candidate) == expected
def test_equal_length_and_substring_length():
    """length == substring_length is invalid and must raise ValueError."""
    output = DummyOutput()
    python2_32.options.update({'length': 3, 'substring_length': 3,
                               'target': '42'})
    with pytest.raises(ValueError):
        python2_32.run(CharGenerator(), output)
def test_smaller_length_than_substring_length():
    """length < substring_length is invalid and must raise ValueError."""
    output = DummyOutput()
    python2_32.options.update({'length': 2, 'substring_length': 3,
                               'target': '42'})
    with pytest.raises(ValueError):
        python2_32.run(CharGenerator(), output)
| StarcoderdataPython |
34022 | from tensorpy import image_base
# Classify every image under ./images and print one result per line.
classifications = image_base.classify_folder_images('./images')
print("*** Displaying Image Classification Results as a list: ***")
for classification in classifications:
    print(classification)
| StarcoderdataPython |
# 3x3 Gaussian smoothing kernel.  BUG FIX: the middle row was [2,4,1],
# which is asymmetric and sums with the rest to 15/16; a Gaussian kernel
# must be symmetric and normalised so its weights sum to 1.
gauss_kernel = numpy.array([[1, 2, 1],
                            [2, 4, 2],
                            [1, 2, 1]]) * 1.0/16
def blur_naive_version(image, districts, scale):
    """Apply a naive Gaussian blur to each district of a 3-channel image.

    BUG FIX: the parameter was misspelled ``iamge`` while the body used
    ``image``, so every call raised NameError.
    NOTE(review): exit(0) on bad input kills the interpreter -- consider
    raising ValueError instead; behaviour kept for compatibility.
    """
    if(len(image.shape)!=3):
        print("error")
        exit(0)
    new_image = image.copy()
    for district in districts:
        new_image = gauss_blur_naive_version(new_image, district, scale)
    return new_image
def
| StarcoderdataPython |
3320932 | <filename>nemcore/types/get_song_detail_resp.py
from typing import List, Any
from .easy_access import EasyAccessDict
class ChargeInfoList(EasyAccessDict):
    """Per-bitrate charge/paywall entry from the song-detail response."""
    rate: int
    charge_url: None
    charge_message: None
    charge_type: int
class FreeTrialPrivilege(EasyAccessDict):
    """Free-trial flags attached to a privilege record."""
    res_consumable: bool
    user_consumable: bool
class Privilege(EasyAccessDict):
    """Playback/download privilege record for one song (short field names
    mirror the upstream API payload verbatim)."""
    id: int
    fee: int
    payed: int
    st: int
    pl: int
    dl: int
    sp: int
    cp: int
    subp: int
    cs: bool
    maxbr: int
    fl: int
    toast: bool
    flag: int
    pre_sell: bool
    play_maxbr: int
    download_maxbr: int
    free_trial_privilege: FreeTrialPrivilege
    charge_info_list: List[ChargeInfoList]
class Al(EasyAccessDict):
    """Album summary embedded in a song record."""
    id: int
    name: str
    pic_url: str
    tns: List[Any]
    pic_str: str
    pic: float
class Ar(EasyAccessDict):
    """Artist summary embedded in a song record."""
    id: int
    name: str
    tns: List[Any]
    alias: List[Any]
class H(EasyAccessDict):
    """Audio quality descriptor (used for the h/m/l quality tiers)."""
    br: int
    fid: int
    size: int
    vd: int
class Song(EasyAccessDict):
    """One song record from the song-detail response; field names mirror
    the upstream API payload verbatim."""
    name: str
    id: int
    pst: int
    t: int
    ar: List[Ar]
    alia: List[Any]
    pop: int
    st: int
    rt: str
    fee: int
    v: int
    crbt: None
    cf: str
    al: Al
    dt: int
    h: H
    m: H
    l: H
    a: None
    cd: str
    no: int
    rt_url: None
    ftype: int
    rt_urls: List[Any]
    dj_id: int
    copyright: int
    s_id: int
    mark: int
    origin_cover_type: int
    origin_song_simple_data: None
    single: int
    no_copyright_rcmd: None
    mv: int
    rtype: int
    rurl: None
    mst: int
    cp: int
    publish_time: int
class GetSongDetailResp(EasyAccessDict):
    """Top-level song-detail response: parallel songs/privileges lists
    plus the API status code."""
    songs: List[Song]
    privileges: List[Privilege]
    code: int
| StarcoderdataPython |
3343940 | <filename>setup.py
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='samopy',
    version='0.2',
    description="Access 5620 SAM 13.0.R7",
    classifiers=[],
    keywords='alcatel alcatel-lucent nokia 5620 5620sam sam',
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/nlgotz/samopy',
    license='Apache 2.0',
    # NOTE(review): long_description is read above but never passed to
    # setup() -- confirm whether it should be wired in.
    packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'requests',
        'jinja2',
        'MarkupSafe'
    ],
    setup_requires=[],
    namespace_packages=[],
)
| StarcoderdataPython |
1765795 | # -*- coding:utf-8 -*-
import datetime
from flask import current_app
from app.libs.datetime_helper import strptime_to_str
# menu
class MenuViewModel:
def __init__(self,menu):
from app.block.permission.model import Menu
self.id = menu['id']
self.name=menu['name']
current_rules=[]
for rule in menu['rules'].all():
current_rules.append(dict(rule))
self.rules=current_rules
self.create_time=strptime_to_str(menu['create_time'])
self.remark=menu['remark']
self.hide_children = menu['hide_children']
self.hide_self = menu['hide_self']
self.icon = menu['icon']
self.path = menu['path']
self.component = menu['component']
self.sort = menu['sort']
self.parent_id = menu['parent_id']
if menu['parent_id'] != None:
self.parent_name = Menu.query.filter_by(id=menu['parent_id']).first_or_404().name
class MenuCollection:
    """Paged list of MenuViewModel plus paging metadata for the API layer."""

    def __init__(self):
        # defaults used before fill() is called
        self.total = 0
        self.data = []
        self.success = False
        self.pageNo = 1
        self.pageSize = current_app.config['PER_PAGE']

    def fill(self, original):
        """Populate from a raw service-layer result dict."""
        self.total = original['total']
        self.data = [MenuViewModel(menu) for menu in original['menus']]
        self.success = original['success']
        # falsy page values fall back to defaults
        self.pageNo = original['pageNo'] or 1
        self.pageSize = original['pageSize'] or current_app.config['PER_PAGE']
# rule
class RuleViewModel:
    """Read-only presentation wrapper around one permission rule."""

    def __init__(self, rule):
        # local import avoids a circular dependency with the model module
        from app.block.permission.model import Menu
        self.id = rule['id']
        self.name = rule['name']
        self.create_time = strptime_to_str(rule['create_time'])
        self.remark = rule['remark']
        self.api = rule['api']
        self.method = rule['method']
        self.func = rule['func']
        menu_id = rule['menu_id']
        self.menu_id = menu_id
        if menu_id is not None:
            # resolve the owning menu's display name (404s if stale)
            self.menu_name = Menu.query.filter_by(id=menu_id).first_or_404().name
        self.groups = [dict(group) for group in rule['groups'].all()]
class RuleCollection:
    """Paged list of RuleViewModel plus paging metadata for the API layer."""

    def __init__(self):
        # defaults used before fill() is called
        self.total = 0
        self.data = []
        self.success = False
        self.pageNo = 1
        self.pageSize = current_app.config['PER_PAGE']

    def fill(self, original):
        """Populate from a raw service-layer result dict."""
        self.total = original['total'] or 0
        self.data = [RuleViewModel(rule) for rule in original['rules']]
        self.success = original['success']
        # falsy page values fall back to defaults
        self.pageNo = original['pageNo'] or 1
        self.pageSize = original['pageSize'] or current_app.config['PER_PAGE']
# group
class GroupViewModel:
    """Read-only presentation wrapper for a permission group and its rules."""

    def __init__(self, group):
        self.id = group['id']
        self.name = group['name']
        self.create_time = strptime_to_str(group['create_time'])
        self.remark = group['remark']
        rules = group['rules'].all()  # run the relationship query once
        self.rules = [dict(rule) for rule in rules]
        self.ruleids = [rule.id for rule in rules]
class GroupCollection:
    """Paged list of GroupViewModel plus paging metadata for the API layer."""

    def __init__(self):
        # defaults used before fill() is called
        self.total = 0
        self.data = []
        self.success = False
        self.pageNo = 1
        self.pageSize = current_app.config['PER_PAGE']

    def fill(self, original):
        """Populate from a raw service-layer result dict."""
        self.total = original['total']
        self.data = [GroupViewModel(group) for group in original['groups']]
        self.success = original['success']
        # falsy page values fall back to defaults
        self.pageNo = original['pageNo'] or 1
        self.pageSize = original['pageSize'] or current_app.config['PER_PAGE']
#user
class UserViewModel:
    """Read-only presentation wrapper for a user, flattening the user's
    group and department relationships into dicts plus id lists."""

    def __init__(self, user):
        self.create_time = strptime_to_str(user['create_time'])
        self.last_login_time = strptime_to_str(user['last_login_time'])
        self.id = user['id']
        self.email = user['email']
        self.mobile = user['mobile']
        self.nickname = user['nickname']
        self.realname = user['realname']
        self.gender = user['gender']
        self.id_number = user['id_number']
        self.avatar = user['avatar']
        groups = user['groups'].all()  # run each relationship query once
        self.groups = [dict(group) for group in groups]
        self.groupids = [group.id for group in groups]
        departments = user['departments'].all()
        self.departments = [dict(dep) for dep in departments]
        self.departmentids = [dep.id for dep in departments]
class UserCollection:
    """Paged API payload for user listings."""
    def __init__(self):
        # Defaults describing an empty first page.
        self.total = 0
        self.data = []
        self.success = False
        self.pageNo = 1
        self.pageSize = current_app.config['PER_PAGE']
    # original: the raw model/query-result data
    def fill(self, original):
        """Populate this page from a raw query-result dict.

        Consistency fix: guard `total` against None with `or 0`, the same
        way RuleCollection.fill already does for its counters.
        """
        self.total = original['total'] or 0
        self.data = [UserViewModel(user) for user in original['users']]
        self.success = original['success']
        self.pageNo = original['pageNo'] or 1
        self.pageSize = original['pageSize'] or current_app.config['PER_PAGE']
| StarcoderdataPython |
3255543 | <filename>python_da/dsfs/mycode/ch10.py
from numpy.random import binomial
import pandas as pd
# Marginal probabilities: P(person is named Luke) and P(person has leukemia).
P_LUKE = 0.005
P_LEUKEMIA = 0.014
# Simulate one million independent people; each event is a Bernoulli draw.
N = int(1e6)
df = pd.DataFrame({"lukes": [binomial(1, P_LUKE) for _ in range(N)],
                   "leukemia": [binomial(1, P_LEUKEMIA) for _ in range(N)]})
# Contingency table of the two indicator variables (result only shown in a REPL).
pd.crosstab(df["lukes"], df["leukemia"], margins=True)
def fit_scores(tp=None, fp=None, tn=None, fn=None):
    """Print accuracy, precision, recall and F1 from confusion-matrix counts.

    Parameters are the four confusion-matrix cells (true/false positives and
    negatives); all four are required. Returns None (output goes to stdout).

    Fixes: the original raised ZeroDivisionError whenever precision + recall
    was 0 (e.g. tp == 0), and crashed with a cryptic TypeError when a count
    was omitted.
    """
    if None in (tp, fp, tn, fn):
        raise ValueError("all four counts (tp, fp, tn, fn) are required")
    accuracy = (tp + tn) / (tp + fp + tn + fn)
    # Degenerate denominators yield 0.0 instead of raising.
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    f1 = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    print(f"Accuracy: {accuracy}\nPrecision: {precision}\nRecall: {recall}\n" +
          f"F1: {f1}")
    return None
fit_scores(tp=60, fp=5013, tn=981017, fn=13910) | StarcoderdataPython |
3241525 | """
Created on Sat Dec 10 12:40:17 2017
@author: <NAME>
"""
def simHeatpump(T_cold, T_hot=50.0, efficiency=0.45, T_limit=-20.0, COP_limit=7.0):
    """
    Creates a timedependent Coefficient of Performance (COP) based on the
    potential Carnot efficiency and a quality grade/efficiency of the system.

    Parameters
    -----------
    T_cold: float, np.array or pd.Series, required
        Outside temperature (or ground temperature for a ground source heat
        pump) in degree C.
    T_hot: float, optional (default: 50.0)
        Temperature level in degree C at which the heat pump provides heat.
    efficiency: float, optional (default: 0.45) (Lauinger, 2016)
        Factor to multiply the Carnot efficiency with.
    T_limit: float, optional (default: -20.0)
        Temperature at or below which the heat pump is shut off (COP = 0).
    COP_limit: float, optional (default: 7.0)
        Maximum COP reachable due to technical limitations of the system.

    Returns
    ----------
    cop: same type as T_cold (array-like or float)
        Coefficient of Performance.

    Fixes: the original called len() on the COP, which raised TypeError for
    the documented scalar-float input, and the scalar fallback ignored the
    T_limit cut-off entirely; length-1 arrays also fell into the scalar path.
    """
    if hasattr(T_cold, '__len__'):
        # Array-like input (numpy array / pandas Series): vectorized rules,
        # applied in the same order as the original implementation.
        cop = efficiency * (T_hot + 273.15) / (T_hot - T_cold)
        cop[cop > COP_limit] = COP_limit    # limit too high COPs
        cop[T_cold < T_limit] = 0.0         # cut-off temperatures
        cop[T_cold > T_hot] = COP_limit
    else:
        # Scalar input: apply the cut-offs first so T_cold == T_hot cannot
        # divide by zero.
        if T_cold < T_limit:
            cop = 0.0
        elif T_cold >= T_hot:
            cop = COP_limit
        else:
            cop = min(efficiency * (T_hot + 273.15) / (T_hot - T_cold), COP_limit)
    return cop
| StarcoderdataPython |
4802176 | <reponame>erialc-cal/NLP-FOMC
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 6 22:48:18 2021
@author: <NAME>
One thing you can do (in the meanwhile, before we get more into the LDA) is to see if it is doable to put in an excel or txt file the desired fund rates of FOMC members from the 70s to 1996. There is a book (attached) that reports the preferred rates of each committee members (according to their interpretation of the members' speeches)
1) The idea is to create a file with 4 columns (see screenshot uno.png)
2) First column is the member number (see file with numbers associated to the names)
3) Second column is the meeting number (see file as well)
4) Third column is the desired rate for all the members (the voting members and those who do not vote, called the "alternates".
5) The 5th column is the bias. After the desired fund, there are some letters BS, BE, A, etc. This is the so-called " bias" of a member (the bias is intended to give an indication of likely future policy moves: for instance, BE means that in the future the member expected "easing" of interest rate, BS meant that the bias was symmetric (neither easing or tightening).
The book gives more information (summary statistics), which we do not need.
"""
from PyPDF2 import PdfFileReader
#from PyPDF2 import PdfFileWriter
import os
from tqdm import trange
# Base all input/output paths on the directory containing this script.
dir_name = os.path.dirname(__file__)
project_directory = dir_name
#%%
def pdf_to_txt(file):
    """Write a downloaded FOMC transcript PDF to disk, extract its text page
    by page into a .txt file, and record problematic/scraped meeting dates.

    NOTE(review): this function depends on names that are not defined in this
    module as shown — `date`, `r` (an HTTP response), `problem_date` and
    `l_dates` all look like globals set by surrounding scraping code; confirm
    before reuse. The `file` parameter is never used.
    """
    # Persist the raw PDF bytes (presumably from a requests response `r`).
    with open(project_directory+'/transcript_files_pdf/'+str(date)+'meeting.pdf', "wb") as code:
        code.write(r.content)
    pdf_file = open(project_directory+'/transcript_files_pdf/'+str(date)+'meeting.pdf', 'rb')
    try :
        pdfReader = PdfFileReader(pdf_file)
        count = pdfReader.numPages
        output = []
        for i in trange(count):
            page = pdfReader.getPage(i)
            output += page.extractText()
        # NOTE(review): files opened here are never closed explicitly.
        txtfile = open(project_directory+'/transcript_files_txt/'+str(date)+"meeting.txt","a")
        txtfile.writelines(output)
    except:
        # NOTE(review): bare except swallows everything (even KeyboardInterrupt);
        # any failure marks the date as problematic.
        problem_date.append(date)
    # Storing problematic dates
    txtfile = open(project_directory+'/transcript_files_txt/problematic_dates.txt', 'a')
    for elem in problem_date:
        txtfile.writelines("\n"+elem)
    # Storing non problematic dates
    txtfile = open(project_directory+'/transcript_files_txt/scrapped_dates.txt', 'a')
    date_new=[]
    for elem in l_dates:
        if elem not in problem_date:
            date_new.append(elem)
            txtfile.writelines("\n"+elem)
    return date_new
#%%
| StarcoderdataPython |
3364247 | <filename>dbaccess/test_spi.py
# Read all temp sensors and write their datas into the table.
# Note:
# Added a multi-threaded function, but the reading of the 1WD seems to
# still just be taking ~900ms between samples and so you might hit a
# 1 second delay. So it helps, but not much to use a thread.
#
import sqlite3 as sql
import datetime, time
import os
import sys
from pathlib import Path
sys.path.append(str(Path(os.getcwd()).parent))
from sensors import DbMAX6675Sensor, DbSensorDatabase, \
DbBMP280TemperatureSensor, DbBMP280HumiditySensor, DbBMP280PressureSensor
from shared.thread.sensorthread import ThreadWithReturnValue
from shared.sensorbase.sensorbase import TempSensor
from shared.MAX6675.MAX6675 import MAX6675
from shared.BMP280.BMP280 import BMP280, BMP280H, BMP280P
from shared.sensor_config import SystemConfig
# temporary
from flasktest.raspberry.raspberry import is_raspberrypi
if is_raspberrypi():
import board
import busio
if __name__ == "__main__":
    # Pick the database location: on a Raspberry Pi the live DB sits on a
    # ramdisk to spare the SD card; elsewhere use the default path.
    if is_raspberrypi():
        # use the "live" DB on a ramdisk to save SD card
        db = DbSensorDatabase(db_root_path=SystemConfig.ramdisk_path)
    else:
        db = DbSensorDatabase()
    # Parallel registries: hardware driver objects and, keyed by SPI address,
    # their database-writer counterparts.
    hw_sensors = []
    db_sensors = {}
    # start SPI
    print(f"Starting SPI, lock bus...'")
    spi_bus = busio.SPI(board.SCLK, board.MOSI, board.MISO)
    # Busy-wait until we own the bus; configure() requires the lock.
    while not spi_bus.try_lock():
        pass
    spi_bus.configure(baudrate=4000000)     # 4MHz, chip can do 5MHz normally
    # Note: will run fine at 100KHz
    spi_bus.unlock()
    thermocouple = MAX6675(spi_bus)         # default CS GPIO 5
    spi_addr = thermocouple.get_address()
    print(f"MAX6675 address   : {spi_addr}")
    hw_sensors.append(thermocouple)
    db_sensors[spi_addr] = DbMAX6675Sensor(db.get_connection(), spi_addr)
    # The BMP280 temperature/pressure/humidity sensors below are wired up the
    # same way; left commented out (presumably hardware not attached).
    # temp_sensor = BMP280(spi_bus)         # default CS GPIO 6
    # spi_addr = temp_sensor.get_address()
    # print(f"BMP280(T) address : {spi_addr}")
    # hw_sensors.append(temp_sensor)
    # db_sensors[spi_addr] = DbBMP280TemperatureSensor(db.get_connection(), spi_addr)
    # pressure_sensor = BMP280P(spi_bus)    # default CS GPIO 6
    # spi_addr = pressure_sensor.get_address()
    # print(f"BMP280(P) address : {spi_addr}")
    # hw_sensors.append(pressure_sensor)
    # db_sensors[spi_addr] = DbBMP280HumiditySensor(db.get_connection(), spi_addr)
    # humidity_sensor = BMP280H(spi_bus)    # default CS GPIO 6
    # spi_addr = humidity_sensor.get_address()
    # print(f"BMP280(P) address : {spi_addr}")
    # hw_sensors.append(humidity_sensor)
    # db_sensors[spi_addr] = DbBMP280PressureSensor(db.get_connection(), spi_addr)
    # Main sampling loop: read the first sensor synchronously forever.
    while True:
        ts = datetime.datetime.now().strftime("%Y/%m/%d %H:%M:%S")
        #print(ts)
        i=0
        value = hw_sensors[i].get_sensor_value()
        print(f"Sync read {hw_sensors[i].get_address():12} = {value}{hw_sensors[i].get_sensor_units()}")
        # Threaded (async) reads; kept commented out — the 1-wire/SPI read
        # itself still takes ~900ms, so threading gained little (see header).
        # th = []
        # for s in hw_sensors:
        #     th.append(ThreadWithReturnValue(target=s.get_sensor_value))
        #     th[len(th)-1].start()
        # for i in range(0, len(hw_sensors)):
        #     #
        #     start = time.time()
        #     value = th[i].wait_result()
        #     end = time.time()
        #     print(f"Async read {hw_sensors[i].get_address():12} = {value}{hw_sensors[i].get_sensor_units()}")
        #     # write live value to database
        #     db_sensors[hw_sensors[i].get_address()].set(value)
        #
| StarcoderdataPython |
1635042 | <gh_stars>0
#!/usr/bin/env python3
import sys
import shutil
import threading
import queue
from pathlib import Path
from pprint import pformat
from .manifest import Manifest
from .dependency import Dependency, sources_conflict_check
from .lock import LockFile
from .common import WitUserError, error
from .witlogger import getLogger
from .gitrepo import GitCommitNotFound
log = getLogger()
# TODO: hide the stacktrace when not debugging
class NotAncestorError(WitUserError):
    """Raised during resolution when two parents pin the same child package
    to revisions that are not on the same ancestry line, so the newer
    revision cannot be guaranteed to satisfy the older requirement."""
    def __init__(self, orig_child: Dependency, old_child: Dependency):
        # orig_child: the requirement seen first (later in time);
        # old_child: the conflicting, earlier requirement for the same package.
        self.orig_child = orig_child
        self.old_child = old_child
    def __str__(self):
        """Build the multi-line user-facing explanation of the conflict."""
        # orig is later in time because we traverse the queue backwards in time
        assert self.orig_child.name == self.old_child.name
        child_name = self.old_child.name
        # Walk two dependents up to name the packages that requested the child.
        orig_parent = self.orig_child.dependents[0].dependents[0]
        old_parent = self.old_child.dependents[0].dependents[0]
        return ("\n\nAncestry error:\n"
                "'{orig_parent_name}' and '{old_parent_name}' both depend on '{child_name}':\n"
                "    {orig_parent_tag} depends on "
                "{orig_child_tag}\n"
                "    {old_parent_tag} depends on "
                "{old_child_tag}\n\n"
                "Although {orig_child_tag} is newer than "
                "{old_child_tag},\n{orig_child_tag} is not "
                "a descendent of {old_child_tag}.\n\n"
                "Therefore, there is no guarantee that "
                "the dependee needed by {old_parent_tag} will be satisfied "
                "by the dependee needed by {orig_parent_tag}."
                "".format(
                    orig_parent_name=orig_parent.name,
                    old_parent_name=old_parent.name,
                    orig_parent_tag=orig_parent.id(),
                    old_parent_tag=old_parent.id(),
                    child_name=child_name,
                    orig_child_tag=self.orig_child.id(),
                    old_child_tag=self.old_child.id(),
                ))
class PackageNotInWorkspaceError(WitUserError):
    """Raised when an operation targets a package that has not been cloned
    into (or does not exist in) the current workspace."""
    pass
class WorkSpace:
    """Handle on a wit workspace directory: its manifest (wit-workspace.json),
    lock file (wit-lock.json), repo search paths and parallelism setting."""
    MANIFEST = "wit-workspace.json"
    LOCK = "wit-lock.json"
    def __init__(self, root, repo_paths, jobs=None):
        # root: Path of the workspace directory containing the json files.
        self.root = root
        self.repo_paths = repo_paths
        self.manifest = self._load_manifest()
        self.lock = self._load_lockfile()
        self.jobs = jobs
    def id(self):
        # Display id used in dependency-graph/user-facing output.
        return "[root]"
    def get_id(self):
        return "root"
    @classmethod
    def create(cls, name, repo_paths, jobs):
        """Create a wit workspace on disk with the appropriate json files"""
        root = Path.cwd() / name
        manifest_path = cls._manifest_path(root)
        if root.exists():
            log.info("Using existing directory [{}]".format(str(root)))
            # Refuse to clobber an existing workspace manifest.
            if manifest_path.exists():
                log.error("Manifest file [{}] already exists.".format(manifest_path))
                sys.exit(1)
        else:
            log.info("Creating new workspace [{}]".format(str(root)))
            try:
                root.mkdir()
            except Exception as e:
                log.error("Unable to create workspace [{}]: {}".format(str(root), e))
                sys.exit(1)
        dotwit = root/'.wit'
        if dotwit.exists():
            # we could keep the old cached repos, but if the user is explicitly re-initing,
            # they probably want a 100% clean slate
            shutil.rmtree(str(dotwit))
        dotwit.mkdir()
        # Start with an empty manifest and lock file on disk.
        manifest = Manifest([])
        manifest.write(manifest_path)
        lockfile = LockFile([])
        lockfile.write(cls._lockfile_path(root))
        return WorkSpace(root, repo_paths, jobs)
    @classmethod
    def restore(cls, root):
        """Re-clone and check out every package recorded in the lock file,
        one thread per package; exits on any clone failure."""
        # constructing WorkSpace will parse the lock file
        ws = WorkSpace(root, [])
        def do_clone(pkg, root, errors):
            # Worker: clone/checkout one package; report failures via the queue.
            try:
                pkg.load(root, True)
                pkg.checkout(root)
            except Exception as e:
                errors.put(e)
        errors = queue.Queue()
        threads = list()
        for pkg in ws.lock.packages:
            t = threading.Thread(target=do_clone, args=(pkg, root, errors))
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        # Drain and report every collected error before giving up.
        if not errors.empty():
            while not errors.empty():
                e = errors.get()
                log.error("Unable to create workspace [{}]: {}".format(str(root), e))
            sys.exit(1)
        return ws
    def _load_manifest(self):
        # Parse wit-workspace.json.
        return Manifest.read_manifest(self.manifest_path())
    def _load_lockfile(self):
        # Parse wit-lock.json.
        return LockFile.read(self.lockfile_path())
    @classmethod
    def _manifest_path(cls, root):
        return root / cls.MANIFEST
    def manifest_path(self):
        return WorkSpace._manifest_path(self.root)
    @classmethod
    def _lockfile_path(cls, path):
        return path / cls.LOCK
    def lockfile_path(self):
        return WorkSpace._lockfile_path(self.root)
    @staticmethod
    def find(start, repo_paths, jobs):
        """Walk up from *start* looking for a directory containing the
        workspace manifest; raise FileNotFoundError if none is found."""
        cwd = start.resolve()
        for p in ([cwd] + list(cwd.parents)):
            manifest_path = WorkSpace._manifest_path(p)
            log.debug("Checking [{}]".format(manifest_path))
            if Path(manifest_path).is_file():
                log.debug("Found workspace at [{}]".format(p))
                return WorkSpace(p, repo_paths, jobs)
        raise FileNotFoundError("Couldn't find workspace file")
    def resolve(self, download=False):
        """Resolve the full dependency graph starting from this workspace's
        manifest. Returns (packages, errors) where packages maps name ->
        resolved Package and errors collects ancestry conflicts."""
        source_map, packages, queue = \
            self.resolve_deps(self.root, self.repo_paths, download, {}, {}, [])
        errors = list()
        dep_errors = list()
        while queue:
            # queue is kept sorted oldest-first, so pop() processes the
            # newest commit first (see NotAncestorError's "backwards in time").
            commit_time, dep = queue.pop()
            log.debug("{} {}".format(commit_time, dep))
            name = dep.package.name
            if name in packages and packages[name].revision is not None:
                # Already pinned: the older requirement must be an ancestor
                # of the chosen (newer) revision, otherwise it's a conflict.
                package = packages[name]
                if not package.repo.is_ancestor(dep.specified_revision, package.revision):
                    errors.append(NotAncestorError(package.find_matching_dependent(), dep))
                continue
            packages[dep.name] = dep.package
            packages[dep.name].revision = dep.resolved_rev()
            packages[dep.name].set_source(dep.source)
            # Recurse into the dependency's own manifest, extending the queue.
            source_map, packages, queue, dep_errors = \
                dep.resolve_deps(self.root, self.repo_paths, download, source_map,
                                 packages, queue, self.jobs)
        # Warn when a checked-out repo on disk disagrees with what we resolved.
        for pkg in packages.values():
            if not pkg.repo or pkg.repo.path.parts[-2] == '.wit':
                continue
            used_commit = pkg.revision
            fs_commit = pkg.repo.get_commit('HEAD')
            if used_commit != fs_commit:
                log.warn("using '{}' manifest instead of checked-out version of '{}'".format(
                    pkg.id(), pkg.name))
                continue
            if pkg.repo.modified_manifest():
                log.warn("disregarding uncommitted changes to the '{}' manifest".format(pkg.name))
        return packages, errors + dep_errors
    def resolve_deps(self, wsroot, repo_paths, download, source_map, packages, queue):
        """Seed the resolution queue with this workspace's direct
        dependencies; returns updated (source_map, packages, queue)."""
        source_map = source_map.copy()
        queue = queue.copy()
        for dep in self.manifest.dependencies:
            dep.load(packages, repo_paths, wsroot, download)
            sources_conflict_check(dep, source_map)
            source_map[dep.name] = dep.source
            commit_time = dep.get_commit_time()
            queue.append((commit_time, dep))
        # Keep the queue ordered by commit time (oldest first).
        queue.sort(key=lambda tup: tup[0])
        return source_map, packages, queue
    def checkout(self, packages):
        """Check out every resolved package and rewrite the lock file."""
        lock_packages = []
        for name in packages:
            package = packages[name]
            package.checkout(self.root)
            lock_packages.append(package)
        new_lock = LockFile(lock_packages)
        new_lock_path = WorkSpace._lockfile_path(self.root)
        new_lock.write(new_lock_path)
        self.lock = new_lock
    def add_dependency(self, tag) -> None:
        """ Resolve a dependency then add it to the wit-workspace.json """
        from .main import dependency_from_tag
        dep = dependency_from_tag(self.root, tag)
        if self.manifest.contains_dependency(dep.name):
            error("Manifest already contains package {}".format(dep.name))
        packages = {pkg.name: pkg for pkg in self.lock.packages}
        dep.load(packages, self.repo_paths, self.root, True)
        try:
            dep.package.revision = dep.resolved_rev()
        except GitCommitNotFound:
            raise WitUserError("Could not find commit or reference '{}' in '{}'"
                               "".format(dep.specified_revision, dep.name))
        assert dep.package.repo is not None
        self.manifest.add_dependency(dep)
        log.debug('my manifest_path = {}'.format(self.manifest_path()))
        self.manifest.write(self.manifest_path())
        log.info("The workspace now depends on '{}'".format(dep.package.id()))
    def update_dependency(self, tag) -> None:
        """Point an existing wit-workspace.json dependency at a new revision,
        validating that the package exists in the manifest and on disk."""
        # init requested Dependency
        from .main import dependency_from_tag
        req_dep = dependency_from_tag(self.root, tag)
        manifest_dep = self.manifest.get_dependency(req_dep.name)
        # check if the package is missing from the wit-workspace.json
        if manifest_dep is None:
            log.error("Package {} not in wit-workspace.json".format(req_dep.name))
            log.error("Did you mean to run 'wit add-pkg' or 'wit update-dep'?")
            sys.exit(1)
        # load their Package
        packages = {pkg.name: pkg for pkg in self.lock.packages}
        req_dep.load(packages, self.repo_paths, self.root, True)
        manifest_dep.load(packages, self.repo_paths, self.root, True)
        # check if the dependency is missing from disk
        if req_dep.package.repo is None:
            msg = "Cannot update package '{}'".format(req_dep.name)
            if self.lock.contains_package(req_dep.name):
                msg += (":\nAlthough '{}' exists (according to the wit-lock.json), "
                        "it has not been cloned to the root workspace.").format(req_dep.name)
            else:
                msg += "because it does not exist in the workspace."
            raise PackageNotInWorkspaceError(msg)
        try:
            req_dep.package.revision = req_dep.resolved_rev()
        except GitCommitNotFound:
            raise WitUserError("Could not find commit or reference '{}' in '{}'"
                               "".format(req_dep.specified_revision, req_dep.name))
        # compare the requested revision to the revision in the wit-workspace.json
        if manifest_dep.resolved_rev() == req_dep.package.revision:
            log.warn("Updating '{}' to the same revision it already is!".format(req_dep.name))
        self.manifest.replace_dependency(req_dep)
        self.manifest.write(self.manifest_path())
        log.info("The workspace now depends on '{}'".format(req_dep.package.id()))
        # if we differ from the lockfile, tell the user to update
        if ((not self.lock.contains_package(req_dep.name) or
             not self.lock.get_package(req_dep.name).revision == req_dep.package.revision)):
            log.info("Don't forget to run 'wit update'!")
    # Enable prettyish-printing of the class
    def __repr__(self):
        return pformat(vars(self), indent=4, width=1)
if __name__ == '__main__':
    # Run the module's embedded doctests when executed directly.
    import doctest
    doctest.testmod()
| StarcoderdataPython |
1636094 | <reponame>STARS4ALL/zptess
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright (c) 2021
#
# See the LICENSE file for details
# see the AUTHORS file for authors
# ----------------------------------------------------------------------
#--------------------
# System wide imports
# -------------------
import csv
import os
import os.path
import datetime
import logging
import zipfile
import traceback
# --------------------
# Third party packages
# --------------------
import requests
# -------------
# Local imports
# -------------
from zptess import TSTAMP_SESSION_FMT
from zptool.utils import paging, read_property
from zptool.summary import summary_number_of_sessions, summary_export, summary_sessions_iterable, summary_get_info
from zptool.rounds import rounds_export
from zptool.samples import samples_export
from zptool.emailer import email_send
log = logging.getLogger("zptool")
# ----------------
# Helper functions
# ----------------
def get_paths(directory):
    """Collect the paths of all files under *directory* (recursively).

    Each returned path is joined to the basename of its containing directory
    only, so callers must chdir appropriately before using them (see pack()).
    """
    collected = []
    for dirpath, directories, filenames in os.walk(directory):
        base = os.path.basename(dirpath)  # Needs a change of cwd later on if we do this
        log.debug("Exploring directory '{0}'".format(base))
        collected.extend(os.path.join(base, fname) for fname in filenames)
    return collected
def pack(base_dir, zip_file):
    """Write every file found below *base_dir* into the archive *zip_file*."""
    members = get_paths(base_dir)
    log.info(f"Creating ZIP File: '{os.path.basename(zip_file)}'")
    with zipfile.ZipFile(zip_file, 'w') as archive:
        for member in members:
            archive.write(member)
def batch_view_iterable(connection):
    """Return a cursor over every batch row, newest first, for paged display."""
    cur = connection.cursor()
    cur.execute("SELECT begin_tstamp, end_tstamp, calibrations, email_sent FROM batch_t ORDER BY begin_tstamp DESC")
    return cur
def check_open_batch(connection):
    """Return True when the database contains an open batch (begin timestamp
    set, end timestamp still NULL), else False."""
    cursor = connection.cursor()
    cursor.execute("SELECT count(*) FROM batch_t WHERE begin_tstamp IS NOT NULL AND end_tstamp IS NULL")
    n = cursor.fetchone()[0]
    # Idiom fix: the comparison already yields the boolean result; no need
    # for the if/else flag the original used.
    return n > 0
def get_timestamp():
    """Current UTC time, truncated to whole seconds, formatted with the
    session timestamp format used throughout zptess."""
    now = datetime.datetime.now(datetime.timezone.utc).replace(microsecond=0)
    return now.strftime(TSTAMP_SESSION_FMT)
def get_open_batch(connection):
    """Return the begin timestamp of the (single) currently open batch."""
    cur = connection.cursor()
    cur.execute("SELECT begin_tstamp FROM batch_t WHERE end_tstamp IS NULL")
    row = cur.fetchone()
    return row[0]
def update_email_state(connection, tstamp, flag):
    """Persist the email-sent flag (1 ok / 0 failed) for the batch that
    began at *tstamp*, committing immediately."""
    params = {'tstamp': tstamp, 'flag': flag}
    connection.cursor().execute(
        "UPDATE batch_t SET email_sent = :flag WHERE begin_tstamp = :tstamp",
        params,
    )
    connection.commit()
def insert_begin(connection, tstamp):
    """Open a new batch row beginning at *tstamp* (end left NULL).

    The caller is responsible for committing the transaction.
    """
    connection.cursor().execute(
        "INSERT INTO batch_t(begin_tstamp, end_tstamp) VALUES(:tstamp, NULL)",
        {'tstamp': tstamp},
    )
def insert_end(connection, begin_tstamp, end_tstamp, N):
    """Close the open batch that began at *begin_tstamp*, recording its end
    timestamp and its number of calibration sessions *N*.

    Bug fix: the original ignored the begin_tstamp argument and relied on a
    malformed correlated subquery `(SELECT begin_tstamp WHERE end_tstamp IS
    NULL)` that only matched open rows by accident. The WHERE clause now
    targets the requested batch explicitly (and only if it is still open).
    The caller is responsible for committing.
    """
    row = {'begin_tstamp': begin_tstamp, 'end_tstamp': end_tstamp, 'N': N}
    cursor = connection.cursor()
    cursor.execute('''
        UPDATE batch_t
        SET end_tstamp = :end_tstamp, calibrations = :N
        WHERE begin_tstamp = :begin_tstamp AND end_tstamp IS NULL
        ''', row)
def batch_iterable(connection):
    """Return all closed batches as a list of rows, newest first."""
    cur = connection.cursor()
    cur.execute('''
        SELECT begin_tstamp, end_tstamp, email_sent, calibrations
        FROM batch_t
        WHERE end_tstamp IS NOT NULL
        ORDER BY begin_tstamp DESC
        ''')
    return cur.fetchall()
def batch_latest(connection):
    """Return the most recent batch row, or None.

    Note: MAX(begin_tstamp) ranges over *all* batches, so if the newest
    batch is still open the end_tstamp filter yields no row — callers use
    this to refuse exporting while a batch is open.
    """
    cur = connection.cursor()
    cur.execute('''
        SELECT begin_tstamp, end_tstamp, email_sent, calibrations
        FROM batch_t
        WHERE end_tstamp IS NOT NULL
        AND   begin_tstamp = (SELECT MAX(begin_tstamp) FROM batch_t)
        ORDER BY begin_tstamp DESC
        ''')
    return cur.fetchone()
def batch_specific(connection, tstamp):
    """Return the closed batch whose begin timestamp equals *tstamp*, or None.

    Bug fix: the original SQL was syntactically invalid — it contained two
    WHERE clauses and wrote the parameter as `=: begin_tstamp` (a space after
    the colon), so every call raised sqlite3.OperationalError. The intended
    filter is a single WHERE with both conditions ANDed.
    """
    cursor = connection.cursor()
    cursor.execute('''
        SELECT begin_tstamp, end_tstamp, email_sent, calibrations
        FROM batch_t
        WHERE begin_tstamp = :begin_tstamp
        AND   end_tstamp IS NOT NULL
        ''', {'begin_tstamp': tstamp})
    return cursor.fetchone()
def batch_delete(connection, rows):
    """Delete each batch identified by the dicts in *rows*; each dict must
    carry 'begin_tstamp' and 'end_tstamp' keys. Caller commits."""
    connection.cursor().executemany('''
        DELETE FROM batch_t
        WHERE begin_tstamp = :begin_tstamp
        AND   end_tstamp   = :end_tstamp
        ''', rows)
def batch_export(connection, batch, base_dir, updated, send_email):
    """Export one batch to CSV files under base_dir, pack them into a ZIP,
    and optionally email the archive to the configured SMTP receivers.

    batch is a (begin_tstamp, end_tstamp, email_sent, calibrations) row as
    returned by batch_latest()/batch_specific()/batch_iterable().
    """
    begin_tstamp, end_tstamp, email_sent, calibrations = batch
    log.info(f"(begin_tstamp, end_tstamp)= ({begin_tstamp}, {end_tstamp}, up to {calibrations} calibrations)")
    # Directory/file names derive from the timestamps with '-' and ':' stripped.
    suffix1 = f"from_{begin_tstamp}_to_{end_tstamp}".replace('-','').replace(':','')
    export_dir = os.path.join(base_dir, suffix1)
    os.makedirs(export_dir, exist_ok=True)
    csv_path = os.path.join(export_dir, f"summary_{suffix1}.csv")
    # One summary CSV for the whole batch ...
    summary_export(
        connection   = connection,
        extended     = False,
        updated      = updated,    # This should be true when sending email to people
        csv_path     = csv_path,
        begin_tstamp = begin_tstamp,
        end_tstamp   = end_tstamp,
    )
    # ... plus a rounds CSV and a samples CSV per calibration session.
    iterable = summary_sessions_iterable(connection, updated, begin_tstamp, end_tstamp)
    for i, (session,) in enumerate(iterable):
        log.info(f"Calibration {session} [{i+1}/{calibrations}] (updated = {bool(updated)})")
        _, name, _ = summary_get_info(connection, session, 'test')
        rounds_name  = f"{name}_rounds_{session}.csv".replace('-','').replace(':','')
        samples_name = f"{name}_samples_{session}.csv".replace('-','').replace(':','')
        rounds_export(
            connection   = connection,
            updated      = updated,    # This should be true when sending email to people
            csv_path     = os.path.join(export_dir, rounds_name),
            session      = session,
        )
        samples_export(
            connection   = connection,
            session      = session,
            roun         = None,   # None is a marker for all rounds
            also_ref     = True,   # Include reference photometer samples
            csv_path     = os.path.join(export_dir, samples_name),
        )
    # Prepare a ZIP File. get_paths()/pack() produce cwd-relative member
    # names, hence the temporary chdir into base_dir (restored in finally).
    try:
        prev_workdir = os.getcwd()
        zip_file = os.path.join(base_dir, suffix1 + '.zip' )
        os.chdir(base_dir)
        pack(export_dir, zip_file)
    except Exception as e:
        log.error(f"excepcion {e}")
    finally:
        os.chdir(prev_workdir)
    if not send_email:
        return
    # email_sent tri-state: None = never tried, 0 = tried and failed, 1 = sent.
    if email_sent is None:
        log.info("Never tried to send an email for this batch")
    elif email_sent == 0:
        log.info("Tried to send email for this batch previously but failed")
    else:
        log.info("Already sent an email for this batch")
    # Test internet connectivity
    try:
        request = requests.get("http://www.google.com", timeout=5)
        log.info("Connected to Internet")
    except (requests.ConnectionError, requests.Timeout) as exception:
        log.warning("No connection to internet. Stopping here")
        return
    # Check email configuration: all SMTP keys must be present to proceed.
    config = dict()
    missing = list()
    smtp_keys = ("host", "port", "sender", "password", "receivers")
    for key in smtp_keys:
        try:
            config[key] = read_property(connection, "smtp", key)
        except Exception as e:
            missing.append(key)
            continue
    if len(config) != len(smtp_keys):
        log.error(f"Missing configuration: {missing}")
        return
    # Email ZIP File
    # NOTE(review): if pack() failed above, the email still attaches the
    # zip_file path — confirm this is intended.
    try:
        email_sent = 1
        receivers = read_property(connection, "smtp","receivers")
        email_send(
            subject    = f"[STARS4ALL] TESS calibration data from {begin_tstamp} to {end_tstamp}",
            body       = "Find attached hereafter the summary, rounds and samples from this calibration batch",
            sender     = config["sender"],
            receivers  = config["receivers"],
            attachment = zip_file,
            host       = config["host"],
            port       = int(config["port"]),
            password   = config["password"],
        )
    except Exception as e:
        # Mark fail in database
        email_sent = 0
        log.error(f"Exception while sending email: {e}")
        print(traceback.format_exc())
    else:
        # Mark success in database
        log.info(f"Mail succesfully sent.")
    finally:
        # Record the outcome whatever happened above.
        update_email_state(connection, begin_tstamp, email_sent)
# ================
# 'batch' commands
# ================
def begin(connection, options):
    """'batch begin' command: open a new calibration batch unless one is
    already open. (The original docstring was a copy-paste from export.)"""
    already_open = check_open_batch(connection)
    if already_open:
        log.error("A batch is already open")
        return
    now = get_timestamp()
    insert_begin(connection, now)
    connection.commit()
    log.info("A new batch has been opened")
def end(connection, options):
    """'batch end' command: close the currently open batch, recording its
    end timestamp and how many calibration sessions it contains."""
    if not check_open_batch(connection):
        log.error("There is no open batch to close")
        return
    opened_at = get_open_batch(connection)
    closed_at = get_timestamp()
    n_sessions = summary_number_of_sessions(connection, opened_at, closed_at)
    insert_end(connection, opened_at, closed_at, n_sessions)
    connection.commit()
    log.info("Current open batch has been closed")
def view(connection, options):
    """'batch view' command: page through all recorded batches on stdout."""
    headers = ("Begin (UTC)", "End (UTC)", "# Sessions", "Emailed?")
    paging(batch_view_iterable(connection), headers, size=100)
def purge(connection, options):
    """'batch purge' command: delete closed batches that contain no
    calibration sessions."""
    batches = [
        {'begin_tstamp': begin_tstamp, 'end_tstamp': end_tstamp}
        for begin_tstamp, end_tstamp, _, _ in batch_iterable(connection)
        if summary_number_of_sessions(connection, begin_tstamp, end_tstamp) == 0
    ]
    log.info(f"purging {len(batches)} batches with unreferenced calibration sessions")
    batch_delete(connection, batches)
    connection.commit()
def export(connection, options):
    """'batch export' command: export the latest batch, a specific batch
    (by begin date), or — by default — every closed batch.

    Bug fix: the all-batches branch unpacked the 4-column rows from
    batch_iterable() into only two names (ValueError at runtime) and then
    passed an undefined/stale `batch` to batch_export(). It now iterates
    the full rows. Also fixed the 'staring date' typo in the error message.
    """
    if options.latest:
        if check_open_batch(connection):
            log.error("A batch is already open, close it first to export")
            return
        batch = batch_latest(connection)
        if not batch:
            log.error("No batches registered")
            return
        batch_export(connection, batch, options.base_dir, options.updated, options.email)
    elif options.begin_date:
        batch = batch_specific(connection, options.begin_date)
        if not batch:
            log.error(f"No batches registered with starting date {options.begin_date}")
            return
        batch_export(connection, batch, options.base_dir, options.updated, options.email)
    else:
        for batch in batch_iterable(connection):
            batch_export(connection, batch, options.base_dir, options.updated, options.email)
| StarcoderdataPython |
4831282 | # -*- coding: utf-8 -*-
import colorama
import os
from datetime import datetime
class Output(object):
    """ Manages the output, either to the stdout or the file. """
    def __init__(self, results: dict, no_colors: bool):
        # results maps symbol name -> list of addresses found for it.
        self.results = results
        self.no_colors = no_colors
        if not no_colors:
            colorama.init()
        # Fix duplicates
        for name in self.results:
            self.results[name] = list(set(self.results[name]))
    def output_results(self):
        """Print one line per symbol: either a 'not found' line or the list
        of addresses where it was found."""
        for name, values in self.results.items():
            if len(values) == 0:
                # BUG FIX: the original hard-coded a "[-] " prefix on top of
                # the one print_message adds, and then fell through and also
                # printed a bogus empty "Found ... at " line. Skip instead.
                self.print_message("Did not find %s" % name, False)
                continue
            self.print_message("Found %s at %s" % (
                name,
                ', '.join(hex(r) for r in values)
            ), True)
    def print_message(self, message: str, severity=None) -> None:
        """Print *message*. severity True => green '[+] ', False => red
        '[-] ', None => plain; --no-colors disables both prefix and color."""
        if severity is None or self.no_colors:
            prefix = ""
            color_code = ""
        elif severity:
            prefix = "[+] "
            color_code = colorama.Fore.LIGHTGREEN_EX
        else:
            prefix = "[-] "
            color_code = colorama.Fore.LIGHTRED_EX
        # BUG FIX: don't emit the ANSI reset sequence when colors are off.
        reset = "" if self.no_colors else colorama.Style.RESET_ALL
        print(color_code + "%s%s" % (prefix, message) + reset)
    def save_results(self, config: dict, file: object):
        """Write the found symbols to *file* using config['pattern'] (with
        {name}/{value} placeholders), honouring the mismatch options."""
        if len(self.results) == 0:
            return
        lines = []
        for name, values in self.results.items():
            # Do not output not-found results
            if len(values) == 0:
                continue
            for index, value in enumerate(values):
                line = config["pattern"]
                line = line.replace("{name}", name)
                line = line.replace("{value}", str(hex(value)))
                # Multiple addresses for one symbol => possible mismatch.
                if index == 0 and len(values) > 1 and config["warn_on_mismatch"]:
                    line += " %s %s" % (config["comment_prefix"], "Warning: possible mismatch")
                if index > 0 and config["output_mismatch"]:
                    line = ("%s " % config["comment_prefix"]) + line
                lines.append(line)
        # Write to file
        file.write("%s Generated by straverse at %s\n" % (
            config["comment_prefix"],
            datetime.now().isoformat()
        ))
        for line in lines:
            file.write(line + '\n')
| StarcoderdataPython |
3327198 | <reponame>rootart/innerpoint
from rest_framework import serializers
from rest_framework_gis.fields import GeometryField
class RandomPointSerializer(serializers.Serializer):
    """DRF serializer for a country plus a random interior point.

    `point` is serialized as GeoJSON from the model's
    `get_random_geometry_point` attribute/method.
    """
    name = serializers.CharField()
    iso_2_digit = serializers.CharField()
    iso_3_digit = serializers.CharField()
    point = GeometryField(source='get_random_geometry_point')
| StarcoderdataPython |
4815039 | <reponame>johntiger1/blog-posts<filename>scripts/utils.py
"""
Plots Bandit Algorithms performance.
"""
import matplotlib.pyplot as plt
import numpy as np
from bandit_algorithms.epsilon_greedy.epsilon_greedy_algorithm import (
EpsilonGreedy,
AnnealingEpsilonGreedy
)
from bandit_algorithms.softmax.softmax_algorithm import (Softmax,
AnnealingSoftmax)
from bandit_algorithms.upper_confidence_bound.ucb import UCB
from testing.test_bandit_algorithms import BernoulliArm, test_algorithm
# Registry mapping display names to bandit-algorithm classes; the plotting
# helpers below look algorithms up here by name.
ALGORITHMS = {
    "epsilon-Greedy": EpsilonGreedy,
    "Softmax": Softmax,
    "Annealing epsilon-Greedy": AnnealingEpsilonGreedy,
    "Annealing Softmax": AnnealingSoftmax,
    "UCB": UCB
}
def plot_algorithm(
        alg_name="epsilon-Greedy", arms=None, best_arm_index=None,
        hyper_params=None, num_simulations=1000, horizon=100, label=None,
        fig_size=(18, 6)):
    """Run one bandit algorithm (optionally once per hyper-parameter value)
    against the given Bernoulli arms and plot three metrics side by side:
    probability of selecting the best arm, average reward, and cumulative
    reward. `label` names the hyper-parameter in the legend.
    """
    # Check if the algorithm doesn't have hyperparameter
    if hyper_params is None:
        # Run the algorithm
        algo = ALGORITHMS[alg_name]()
        chosen_arms, average_rewards, cum_rewards = test_algorithm(
            algo, arms, num_simulations, horizon)
        # Fraction of simulations that picked the best arm at each time step.
        average_probs = np.where(chosen_arms == best_arm_index, 1, 0).sum(
            axis=0) / num_simulations
        # Plot the 3 metrics of the algorithm
        fig, axes = plt.subplots(1, 3, figsize=fig_size)
        axes[0].plot(average_probs)
        axes[0].set_xlabel("Time", fontsize=14)
        axes[0].set_ylabel("Probability of Selecting Best Arm", fontsize=14)
        axes[0].set_title(
            f"Accuray of {alg_name} alg.", y=1.05, fontsize=16)
        axes[0].set_ylim([0, 1.05])
        axes[1].plot(average_rewards)
        axes[1].set_xlabel("Time", fontsize=14)
        axes[1].set_ylabel("Average Reward", fontsize=14)
        axes[1].set_title(
            f"Avg. Rewards of {alg_name} alg.", y=1.05, fontsize=16)
        axes[1].set_ylim([0, 1.0])
        axes[2].plot(cum_rewards)
        axes[2].set_xlabel("Time", fontsize=14)
        axes[2].set_ylabel("Cumulative Rewards of Chosen Arm", fontsize=14)
        axes[2].set_title(
            f"Cumulative Rewards of {alg_name} alg.", y=1.05, fontsize=16)
        plt.tight_layout()
    else:
        # One run per hyper-parameter value, all drawn on the same three axes.
        fig, axes = plt.subplots(1, 3, figsize=fig_size)
        for hyper_param in hyper_params:
            # Run the algorithm
            algo = ALGORITHMS[alg_name](hyper_param)
            chosen_arms, average_rewards, cum_rewards = test_algorithm(
                algo, arms, num_simulations, horizon)
            average_probs = np.where(chosen_arms == best_arm_index, 1, 0).sum(
                axis=0) / num_simulations
            # Plot the 3 metrics of the algorithm
            axes[0].plot(average_probs, label=f"{label} = {hyper_param}")
            axes[0].set_xlabel("Time", fontsize=14)
            axes[0].set_ylabel(
                "Probability of Selecting Best Arm", fontsize=14)
            axes[0].set_title(
                f"Accuray of {alg_name} alg.", y=1.05, fontsize=16)
            axes[0].legend()
            axes[0].set_ylim([0, 1.05])
            axes[1].plot(average_rewards, label=f"{label} = {hyper_param}")
            axes[1].set_xlabel("Time", fontsize=14)
            axes[1].set_ylabel("Average Reward", fontsize=14)
            axes[1].set_title(
                f"Avg. Rewards of {alg_name} alg.", y=1.05, fontsize=16)
            axes[1].legend()
            axes[1].set_ylim([0, 1.0])
            axes[2].plot(cum_rewards, label=f"{label} = {hyper_param}")
            axes[2].set_xlabel("Time", fontsize=14)
            axes[2].set_ylabel("Cumulative Rewards of Chosen Arm", fontsize=14)
            axes[2].set_title(
                f"Cumulative Rewards of {alg_name} alg.", y=1.05, fontsize=16)
            axes[2].legend(loc="lower right")
        plt.tight_layout()
def compare_algorithms(
        algorithms=None, arms=None, best_arm_index=None, num_simulations=1000,
        horizon=100, fig_size=(18, 6)):
    """Run several bandit algorithms on the same arms and plot them side by side.

    For every algorithm name in ``algorithms`` (a key of the module-level
    ``ALGORITHMS`` registry) the algorithm is simulated ``num_simulations``
    times for ``horizon`` steps via ``test_algorithm``, and three metrics are
    drawn on a shared 1x3 figure: probability of selecting the best arm,
    average reward per step, and cumulative reward of the chosen arm.

    Parameters
    ----------
    algorithms : iterable of str
        Names of the algorithms to compare (keys of ``ALGORITHMS``).
    arms : sequence
        Arm objects passed through to ``test_algorithm``.
    best_arm_index : int
        Index of the arm with the highest expected reward.
    num_simulations : int
        Number of independent simulation runs per algorithm.
    horizon : int
        Number of time steps per simulation run.
    fig_size : tuple of (float, float)
        Matplotlib figure size in inches.
    """
    # Bug fix: honour the fig_size argument (it was previously ignored in
    # favour of a hard-coded (16, 6)).
    fig, axes = plt.subplots(1, 3, figsize=fig_size)
    # Loop over all algorithms
    for algorithm in algorithms:
        # Instantiate and simulate the algorithm.
        algo = ALGORITHMS[algorithm]
        chosen_arms, average_rewards, cum_rewards = test_algorithm(
            algo(), arms, num_simulations, horizon)
        # Fraction of simulations that picked the best arm at each time step.
        average_probs = np.where(chosen_arms == best_arm_index, 1, 0).sum(
            axis=0) / num_simulations
        # Metric 1: accuracy (probability of selecting the best arm).
        axes[0].plot(average_probs, label=algo.__name__)
        axes[0].set_xlabel("Time", fontsize=12)
        axes[0].set_ylabel("Probability of Selecting Best Arm", fontsize=12)
        axes[0].set_title(
            "Accuracy of Different Algorithms", y=1.05, fontsize=14)
        axes[0].set_ylim([0, 1.05])
        axes[0].legend(loc="lower right")
        # Metric 2: average reward per time step.
        axes[1].plot(average_rewards, label=algo.__name__)
        axes[1].set_xlabel("Time", fontsize=12)
        axes[1].set_ylabel("Average Reward", fontsize=12)
        axes[1].set_title(
            "Average Rewards of Different Algorithms", y=1.05, fontsize=14)
        axes[1].set_ylim([0, 1.0])
        axes[1].legend(loc="lower right")
        # Metric 3: cumulative reward of the chosen arms.
        axes[2].plot(cum_rewards, label=algo.__name__)
        axes[2].set_xlabel("Time", fontsize=12)
        axes[2].set_ylabel("Cumulative Rewards of Chosen Arm", fontsize=12)
        axes[2].set_title(
            "Cumulative Rewards of Different Algorithms", y=1.05, fontsize=14)
        axes[2].legend(loc="lower right")
    plt.tight_layout()
| StarcoderdataPython |
3324126 | <filename>f_TIC_TAC_TOE/c_human_agent.py
class Human_Agent:
    """Interactive tic-tac-toe agent that reads its moves from stdin."""

    def __init__(self, name, env):
        # Display name of the player and the game environment it plays in.
        self.name = name
        self.env = env

    def get_action(self, current_state):
        """Prompt the user until a valid cell id for ``current_state`` is entered.

        The board cells are addressed numeric-keypad style (7,8,9 / 4,5,6 /
        1,2,3). Returns the chosen action id.
        """
        available_actions_ids = current_state.get_available_actions()
        valid_action_id = False
        action_id = None
        while not valid_action_id:
            raw_value = input(
                "9개 셀 중 하나를 선택하세요 (보드 상단부터 숫자 키패드와 매칭하여 "
                "[7,8,9, 4,5,6, 1,2,3] 숫자 중 하나를 선택하고 엔터를 누르세요)"
            )
            # Robustness fix: a non-numeric entry used to crash with an
            # unhandled ValueError; treat it like any other invalid input.
            try:
                action_id = int(raw_value)
            except ValueError:
                print("[입력 오류: {0}] 1부터 9사이의 숫자 값을 입력하세요.".format(
                    raw_value
                ))
                continue
            if action_id > 9 or action_id < 0:
                print("[입력 오류: {0}] 1부터 9사이의 숫자 값을 입력하세요.".format(
                    action_id
                ))
                continue
            if action_id not in available_actions_ids:
                print("[입력 오류: {0}] 유효한 셀을 선택하세요.".format(action_id))
            else:
                valid_action_id = True
        return action_id
61248 | <filename>trainingset_tools.py
import random
import pickle
import pymongo
##############################################################
def get_gabra_word_groups():
    '''
    Create a list of words obtained from a loaded Gabra MongoDB database and group them by lemma. Caches result into a pickle to avoid using the MongoDB database again.
    If you already have gabra.pkl available then you do not need to load the MongoDB database.
    Word groups list consists of the following tuples:
    [
        (
            lemma e.g. "kiser",
            root e.g. "k-s-r",
            wordforms e.g. [ "ksirt", "kiser", "kisret", ... ]
        ),
        ...
    ]
    '''
    try:
        with open("gabra.pkl", "rb") as f:
            return pickle.load(f)
    except Exception:
        # Cache file missing or unreadable: fall through and rebuild from
        # MongoDB. (Bug fix: the previous bare ``except:`` also swallowed
        # KeyboardInterrupt/SystemExit.)
        pass
    #To create a MongoDB instance with the Gabra dump:
    #download tar file from http://mlrs.research.um.edu.mt/resources/gabra-api/download and extract it into a folder X
    #in X create a folder called "data" next to "tmp"
    #open a cmd, change directory to X and load a mongodb instance using mongod --dbpath data
    #open another cmd, change directory to X\tmp and restore the dump to the database in "data" using mongorestore -d gabra --port 27017 gabra
    db = pymongo.MongoClient()
    # Vowel digraphs that never occur inside a valid (Maltese) word form,
    # except the whitelisted ones subtracted here.
    invalid_vowel_pairs = { x+y for x in "aeiou" for y in "aeiou" } - { "ie", "oe", "ea", "ao", "oa", "eo" }
    is_valid_word = lambda word:not any(word[i:i+2] in invalid_vowel_pairs for i in range(len(word)-1)) and word.islower() and word.isalpha()
    is_valid_lexeme_doc = lambda lexeme:"lemma" in lexeme and not ("pending" in lexeme and lexeme["pending"]) and is_valid_word(lexeme["lemma"])
    added_roots = set()
    word_groups = []
    for lexeme in db["gabra"]["lexemes"].find():
        if not is_valid_lexeme_doc(lexeme):
            continue
        lexeme_id = lexeme["_id"]
        lemma = lexeme["lemma"]
        if "root" in lexeme and lexeme["root"] is not None and "radicals" in lexeme["root"]:
            root = lexeme["root"]["radicals"]
            # Emit each root only once: all lemmas sharing it are merged below.
            if root in added_roots:
                continue
            else:
                added_roots.add(root)
            alternative_lemmas = { #all lemmas with same root
                (alt_lexeme["_id"], alt_lexeme["lemma"])
                for alt_lexeme in db["gabra"]["lexemes"].find({"root.radicals":root})
                if is_valid_lexeme_doc(alt_lexeme)
            }
            (lexeme_id, lemma) = min(alternative_lemmas, key=lambda x:len(x[1])) #use shortest lemma of alternatives to represent all lemmas
            wordforms = { #unify all word forms of all alternative lemmas
                wordform["surface_form"]
                for (alt_lexeme_id, alt_lemma) in alternative_lemmas
                for wordform in db["gabra"]["wordforms"].find({"lexeme_id":alt_lexeme_id})
                if is_valid_word(wordform["surface_form"])
            }
        else:
            root = ""
            wordforms = { #get all word forms of lemma
                wordform["surface_form"]
                for wordform in db["gabra"]["wordforms"].find({"lexeme_id":lexeme_id})
                if is_valid_word(wordform["surface_form"])
            }
        # Groups with fewer than 3 word forms are too small to be useful.
        if len(wordforms) < 3:
            continue
        word_groups.append((lemma, root, sorted(wordforms)))
    word_groups.sort()
    with open("gabra.pkl", "wb") as f:
        pickle.dump(word_groups, f)
    return word_groups
##############################################################
def create_raw_trainingset():
    '''
    Write two disjoint random samples of 100 word groups each to
    "trainingset1.txt" and "trainingset2.txt".
    Each file lists the word forms one per line with a blank line between
    groups, ready to be segmented by hand in a text editor. The two files
    serve as training and validation sets; sampling is deterministic for a
    fixed module-level ``seed``.
    '''
    random.seed(seed)
    sampled_groups = random.sample(word_groups, 200)
    random.shuffle(sampled_groups)
    for file_number in range(1, 3):
        # Take one sorted half of the shuffled sample per output file.
        half = sampled_groups[100*(file_number - 1):100*file_number]
        half.sort()
        with open("trainingset%s.txt"%(file_number,), "w", encoding="utf-8") as f:
            for (_lemma, _root, wordforms) in half:
                for wordform in wordforms:
                    print(wordform, file=f)
                print("", file=f)
##############################################################
def get_trainingset_roots():
    '''
    Print the lemma and root of every word group in the two training sets.
    Re-creates exactly the same deterministic sample as
    create_raw_trainingset() (same seed and call order), so the printed
    roots line up with the generated files and help decide where each word
    should be segmented.
    '''
    random.seed(seed)
    sampled_groups = random.sample(word_groups, 200)
    random.shuffle(sampled_groups)
    for file_number in range(1, 3):
        print("trainingset%s.txt"%(file_number,))
        half = sampled_groups[100*(file_number - 1):100*file_number]
        half.sort()
        for (lemma, root, _wordforms) in half:
            print(lemma, root)
        print()
##############################################################
def validate_trainingset():
    '''
    Validate the manually segmented words in trainingset1.txt and trainingset2.txt.
    The following validations are applied:
    Check that, ignoring segmentation, the files still contain the same words generated in create_raw_trainingset().
    Check that there are exactly 3 segments (separated by a "-") in each word.
    Check that the stem (middle segment) does not end in a vowel as it was observed that no stem in Maltese ends in a vowel.
    '''
    # Re-create the exact deterministic sample used by create_raw_trainingset()
    # (same seed and same sequence of random calls).
    random.seed(seed)
    pre_selected_word_groups = random.sample(word_groups, 200)
    random.shuffle(pre_selected_word_groups)
    for i in range(2):
        print("trainingset%s.txt"%(i+1,))
        selected_word_groups = pre_selected_word_groups[100*(i+0):100*(i+1)]
        selected_word_groups.sort()
        # Expected plain words, with a "" sentinel after each group to match
        # the blank separator line written between groups in the file.
        originals = [ word for (lemma, root, wordforms) in selected_word_groups for word in wordforms+[ "" ] ]
        with open("trainingset%s.txt"%(i+1,), "r", encoding="utf-8") as f:
            for (line, original) in zip(f, originals):
                line = line.strip("\r\n")
                # Removing the "-" markers must give back the original word.
                if line.replace("-", "") != original:
                    print("corrupted word", line, "should be", original)
                    break #break as a corrupted word might be caused by a missing or extra line which would shift all following words making them all appear corrupted.
                elif line != "":
                    # Exactly two "-" separators => prefix-stem-suffix.
                    if line.count("-") != 2:
                        print("segments", line)
                    else:
                        (prefix, stem, suffix) = line.split("-")
                        # Maltese stems were observed never to end in a vowel.
                        if stem[-1] in { "a", "e", "i", "o", "u" }:
                            print("vowel", line)
        print()
##############################################################
#obtain word groups from MongoDB database or cached gabra.pkl (slow on first run)
word_groups = get_gabra_word_groups()
# Fixed RNG seed: keeps the sampled 200 word groups identical across runs so
# the generated, hand-segmented and validated files all line up.
seed = 1
#uncomment the function call you want to execute
#create_raw_trainingset()
#get_trainingset_roots()
validate_trainingset()
| StarcoderdataPython |
19238 | from __future__ import absolute_import
import six
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.organization import OrganizationEndpoint
from sentry.api.serializers import serialize
from sentry.models import Project, Team
from sentry.utils.apidocs import scenario, attach_scenarios
@scenario('ListOrganizationProjects')
def list_organization_projects_scenario(runner):
    # Example API call recorded by the documentation scenario runner:
    # list the projects of the runner's sample organization.
    runner.request(
        method='GET',
        path='/organizations/%s/projects/' % runner.org.slug
    )
class OrganizationProjectsEndpoint(OrganizationEndpoint):
    # Section under which this endpoint appears in the generated API docs.
    doc_section = DocSection.ORGANIZATIONS
    @attach_scenarios([list_organization_projects_scenario])
    def get(self, request, organization):
        """
        List an Organization's Projects
        ```````````````````````````````
        Return a list of projects bound to a organization.
        :pparam string organization_slug: the slug of the organization for
                                          which the projects should be listed.
        :auth: required
        """
        # Key/token-based access without a logged-in user: derive the
        # team/project scope from the auth object itself.
        if request.auth and not request.user.is_authenticated():
            # TODO: remove this, no longer supported probably
            if hasattr(request.auth, 'project'):
                # Project-scoped key: exactly one project and its team.
                team_list = [request.auth.project.team]
                project_list = [request.auth.project]
            elif request.auth.organization is not None:
                # Organization-scoped key: every team/project in the org.
                org = request.auth.organization
                team_list = list(Team.objects.filter(
                    organization=org,
                ))
                project_list = list(Project.objects.filter(
                    team__in=team_list,
                ).order_by('name'))
            else:
                return Response({'detail': 'Current access does not point to '
                                 'organization.'}, status=400)
        else:
            # Normal user access: scope by the teams the user can see.
            team_list = list(request.access.teams)
            project_list = list(Project.objects.filter(
                team__in=team_list,
            ).order_by('name'))
        # Serialize the teams once and index them by id so each project's
        # payload can embed its team without re-serializing.
        team_map = {
            d['id']: d
            for d in serialize(team_list, request.user)
        }
        context = []
        for project, pdata in zip(project_list, serialize(project_list, request.user)):
            # serialize() preserves input order, so project and pdata match.
            assert six.text_type(project.id) == pdata['id']
            pdata['team'] = team_map[six.text_type(project.team_id)]
            context.append(pdata)
        return Response(context)
| StarcoderdataPython |
75262 | import json
import logging
import re
import sys
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.openapi.utils import get_openapi
from fastapi.responses import JSONResponse
from timvt.db.catalog import table_index
from timvt.db.events import close_db_connection, connect_to_db
from timvt.endpoints import tiles, demo, index
from timvt.models.metadata import TableMetadata
from .routers.titiler_router import router as cogrouter
from .routers.attribute_router import router as attribute_router
from typing import List
# Log everything to stdout at DEBUG level (container-friendly logging).
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
# Interactive docs are served at the application root.
app = FastAPI(docs_url="/")
# CORS is wide open, and every response is gzip-compressed
# (minimum_size=0 compresses even tiny payloads).
app.add_middleware(CORSMiddleware, allow_origins=["*"])
app.add_middleware(GZipMiddleware, minimum_size=0)
@app.on_event("startup")
async def startup_event():
"""
Application startup:
register the database connection and create table list.
"""
await connect_to_db(app)
# Fetch database table list
app.state.Catalog = await table_index(app.state.pool)
@app.on_event("shutdown")
async def shutdown_event():
"""Application shutdown: de-register the database connection."""
await close_db_connection(app)
# Vector-tile endpoints (attribute filtering, demo viewer, tiles, index)
# are mounted under /vector; raster COG endpoints are mounted under /cog.
app.include_router(
    attribute_router, prefix="/vector", tags=['Tiles']
)
app.include_router(
    demo.router, prefix="/vector",
)
app.include_router(
    tiles.router, prefix="/vector",
)
app.include_router(
    index.router, prefix="/vector",
)
app.include_router(
    cogrouter, prefix="/cog", tags=['Raster Tiles (COG)']
)
@app.get(
    "/vector/index",
    response_model=List[TableMetadata],
    tags=["Tiles"],
    description="Reindex the Vector Data Catalog.",
)
async def reindex(
    request: Request
):
    """Force a reindex of the catalog."""
    # Rebuild the cached table catalog from the live database.
    request.app.state.Catalog = await table_index(request.app.state.pool)
    # Describe every table, attaching a link to its TileJSON endpoint.
    return [
        TableMetadata(
            **table,
            link=request.url_for("tilejson", table=table["id"])
        )
        for table in request.app.state.Catalog
    ]
# remove "/tiles/{identifier}/{table}/{z}/{x}/{y}.pbf" endpoint
for r in app.routes:
if r.path == "/vector/":
app.routes.remove(r)
# TODO: remove when https://github.com/developmentseed/titiler/pull/46 is merged
@app.middleware("http")
async def remove_memcached_middleware(request: Request, call_next):
    """
    Remove memcached layer from titiler (quick and dirty approach)
    Note: This could affect any other routes that happen to use state.cache,
    which could be bad. timvt does not reference a cache state.
    """
    # Blank out the cache handle before any route handler runs, so titiler's
    # endpoints see "no cache configured".
    request.state.cache = None
    return await call_next(request)
@app.get("/RiskSchema.json", tags=["Risk Schema"], summary="Risk Schema")
async def root(request: Request) -> JSONResponse:
with open("app/templates/RiskSchema.json", "r") as f:
content = json.loads(f.read())
response = JSONResponse(content=content, status_code=200)
return response
def custom_openapi(openapi_prefix: str):
    """Build (and memoize on ``app.openapi_schema``) a customized OpenAPI schema.

    Post-processes the auto-generated schema: renames and re-tags the demo
    and index endpoints, and replaces every "Table Name" path parameter's
    schema with an enum of the tables currently in the catalog.
    """
    if app.openapi_schema:
        return app.openapi_schema
    o = get_openapi(
        title="World Bank Covid API",
        version="0.1",
        description="API for World Bank Covid 19 Project",
        routes=app.routes,
        openapi_prefix=openapi_prefix,
    )
    cat = app.state.Catalog
    # JSON-schema enum restricting table parameters to known catalog tables.
    tables_schema = {"title": "Table", "enum": [r["table"] for r in cat]}
    for path in o["paths"].values():
        get = path.get("get")
        if get is not None:
            summary = get.get("summary", None)
            tags = get.get("tags", None)
            parameters = get.get("parameters", None)
            # Rebrand the generic timvt endpoints for this API's docs.
            if summary == "Demo":
                get["summary"] = "Vector Tile Simple Viewer"
                get["tags"] = ["Vector Tile API"]
            if summary == "Display Index":
                get["summary"] = "Available Layer Metadata"
                get["tags"] = ["Vector Tile API"]
            # NOTE(review): this raises TypeError if a GET operation has no
            # tags ("Tiles" in None) — confirm every operation is tagged.
            if "Tiles" in tags:
                get["tags"] = ["Vector Tile API"]
            if parameters is not None:
                for param in parameters:
                    if param.get("description") == "Table Name":
                        param["schema"] = tables_schema
    app.openapi_schema = o
    return app.openapi_schema
# Replace FastAPI's default schema builder with the customized one.
app.openapi = custom_openapi
| StarcoderdataPython |
1736161 | <gh_stars>0
import logging
from typing import Iterable, Optional
from dvc.exceptions import InvalidArgumentError
from dvc.repo import locked
from dvc.repo.experiments.base import UnchangedExperimentError
logger = logging.getLogger(__name__)
def _parse_params(path_params: Iterable):
    """Parse ``path:key=value,...`` strings into ``{path: {key: value}}``.

    Each entry may be prefixed with a params-file path
    (``file.yaml:x=1,y=2``); when the path is omitted,
    ``ParamsDependency.DEFAULT_PARAMS_FILE`` is used. Values are
    interpreted using YAML rules.

    Raises:
        InvalidArgumentError: if a pair is not of the form ``key=value``
            or the value is not valid YAML.
    """
    from ruamel.yaml import YAMLError
    from dvc.dependency.param import ParamsDependency
    from dvc.utils.serialize import loads_yaml
    ret = {}
    for path_param in path_params:
        path, _, params_str = path_param.rpartition(":")
        # remove empty strings from params, on condition such as `-p "file1:"`
        params = {}
        for param_str in filter(bool, params_str.split(",")):
            try:
                # interpret value strings using YAML rules
                key, value = param_str.split("=")
                params[key] = loads_yaml(value)
            except (ValueError, YAMLError):
                raise InvalidArgumentError(
                    f"Invalid param/value pair '{param_str}'"
                )
        if not path:
            path = ParamsDependency.DEFAULT_PARAMS_FILE
        # Bug fix: merge params when the same file is specified more than
        # once (previously `-p a.yaml:x=1 -p a.yaml:y=2` kept only the last).
        ret.setdefault(path, {}).update(params)
    return ret
@locked
def run(
    repo,
    target: Optional[str] = None,
    params: Optional[Iterable] = None,
    run_all: Optional[bool] = False,
    jobs: Optional[int] = 1,
    **kwargs,
) -> dict:
    """Reproduce the specified target as an experiment.
    Accepts the same additional kwargs as Repo.reproduce.
    Returns a dict mapping new experiment SHAs to the results
    of `repro` for that experiment.
    """
    # --run-all: execute everything already sitting in the experiments queue.
    if run_all:
        return repo.experiments.reproduce_queued(jobs=jobs)
    if params:
        params = _parse_params(params)
    else:
        # NOTE(review): falsy params become a list here while parsed params
        # are a dict — confirm reproduce_one accepts both shapes.
        params = []
    try:
        return repo.experiments.reproduce_one(
            target=target, params=params, **kwargs
        )
    except UnchangedExperimentError:
        # If experiment contains no changes, just run regular repro
        # Queue/checkpoint options are meaningless for a plain repro.
        kwargs.pop("queue", None)
        kwargs.pop("checkpoint_resume", None)
        return {None: repo.reproduce(target=target, **kwargs)}
| StarcoderdataPython |
# Stat multiplier applied at each in-battle stat stage:
# 2/(2+|n|) for drops, (2+n)/2 for boosts; stage 0 is the unmodified stat.
STAT_STAGE_MULTIPLIERS = {
    -6: 2 / 8,
    -5: 2 / 7,
    -4: 2 / 6,
    -3: 2 / 5,
    -2: 2 / 4,
    -1: 2 / 3,
    0: 2 / 2,
    1: 3 / 2,
    2: 4 / 2,
    3: 5 / 2,
    4: 6 / 2,
    5: 7 / 2,
    6: 8 / 2,
}
# Human-readable move meta-category descriptions (presumably indexed by the
# meta-category id used by the move data — confirm against consumers).
MOVE_META_CATEGORIES = [
    "Inflicts damage",
    "No damage; inflicts status ailment",
    "No damage; lowers target's stats or raises user's stats",
    "No damage; heals the user",
    "Inflicts damage; inflicts status ailment",
    "No damage; inflicts status ailment; raises target's stats",
    "Inflicts damage; lowers target's stats",
    "Inflicts damage; raises user's stats",
    "Inflicts damage; absorbs damage done to heal the user",
    "One-hit KO",
    "Effect on the whole field",
    "Effect on one side of the field",
    "Forces target to switch out",
    "Unique effect",
]
# Status ailment names keyed by ailment id (-1 = unknown, 0 = none;
# the id space is sparse — some ids are intentionally absent).
MOVE_AILMENTS = {
    -1: "????",
    0: "none",
    1: "Paralysis",
    2: "Sleep",
    3: "Freeze",
    4: "Burn",
    5: "Poison",
    6: "Confusion",
    7: "Infatuation",
    8: "Trap",
    9: "Nightmare",
    12: "Torment",
    13: "Disable",
    14: "Yawn",
    15: "Heal Block",
    17: "No type immunity",
    18: "Leech Seed",
    19: "Embargo",
    20: "Perish Song",
    21: "Ingrain",
    24: "Silence",
}
# Type names indexed by type id; index 0 is unused padding (None) so the
# ids are effectively 1-based.
TYPES = [
    None,
    "Normal",
    "Fighting",
    "Flying",
    "Poison",
    "Ground",
    "Rock",
    "Bug",
    "Ghost",
    "Steel",
    "Fire",
    "Water",
    "Grass",
    "Electric",
    "Psychic",
    "Ice",
    "Dragon",
    "Dark",
    "Fairy",
    "???",
    "Shadow",
]
# Move-target descriptions indexed by target id; index 0 is unused (None).
MOVE_TARGETS = [
    None,
    "One specific move. How this move is chosen depends upon on the move being used.",
    "One other Pokémon on the field, selected by the trainer. Stolen moves reuse the same target.",
    "The user's ally (if any).",
    "The user's side of the field. Affects the user and its ally (if any).",
    "Either the user or its ally, selected by the trainer.",
    "The opposing side of the field. Affects opposing Pokémon.",
    "The user.",
    "One opposing Pokémon, selected at random.",
    "Every other Pokémon on the field.",
    "One other Pokémon on the field, selected by the trainer.",
    "All opposing Pokémon.",
    "The entire field. Affects all Pokémon.",
    "The user and its allies.",
    "Every Pokémon on the field.",
]
# Damage class names indexed by id; index 0 is unused (None).
DAMAGE_CLASSES = [None, "Status", "Physical", "Special"]
# Type-effectiveness chart: TYPE_EFFICACY[attacker][defender] is the damage
# multiplier for an attack of type id `attacker` hitting a Pokémon of type id
# `defender` (ids as in TYPES; index 0 is unused padding).
# Fix: removed stray non-Python text that had been appended after the
# closing bracket and broke the module.
TYPE_EFFICACY = [
    None,
    [None, 1, 1, 1, 1, 1, 0.5, 1, 0, 0.5, 1, 1, 1, 1, 1, 1, 1, 1, 1],  # Normal
    [None, 2, 1, 0.5, 0.5, 1, 2, 0.5, 0, 2, 1, 1, 1, 1, 0.5, 2, 1, 2, 0.5],  # Fighting
    [None, 1, 2, 1, 1, 1, 0.5, 2, 1, 0.5, 1, 1, 2, 0.5, 1, 1, 1, 1, 1],  # Flying
    [None, 1, 1, 1, 0.5, 0.5, 0.5, 1, 0.5, 0, 1, 1, 2, 1, 1, 1, 1, 1, 2],  # Poison
    [None, 1, 1, 0, 2, 1, 2, 0.5, 1, 2, 2, 1, 0.5, 2, 1, 1, 1, 1, 1],  # Ground
    [None, 1, 0.5, 2, 1, 0.5, 1, 2, 1, 0.5, 2, 1, 1, 1, 1, 2, 1, 1, 1],  # Rock
    [None, 1, 0.5, 0.5, 0.5, 1, 1, 1, 0.5, 0.5, 0.5, 1, 2, 1, 2, 1, 1, 2, 0.5],  # Bug
    [None, 0, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 0.5, 1],  # Ghost
    [None, 1, 1, 1, 1, 1, 2, 1, 1, 0.5, 0.5, 0.5, 1, 0.5, 1, 2, 1, 1, 2],  # Steel
    [None, 1, 1, 1, 1, 1, 0.5, 2, 1, 2, 0.5, 0.5, 2, 1, 1, 2, 0.5, 1, 1],  # Fire
    [None, 1, 1, 1, 1, 2, 2, 1, 1, 1, 2, 0.5, 0.5, 1, 1, 1, 0.5, 1, 1],  # Water
    [None, 1, 1, 0.5, 0.5, 2, 2, 0.5, 1, 0.5, 0.5, 2, 0.5, 1, 1, 1, 0.5, 1, 1],  # Grass
    [None, 1, 1, 2, 1, 0, 1, 1, 1, 1, 1, 2, 0.5, 0.5, 1, 1, 0.5, 1, 1],  # Electric
    [None, 1, 2, 1, 2, 1, 1, 1, 1, 0.5, 1, 1, 1, 1, 0.5, 1, 1, 0, 1],  # Psychic
    [None, 1, 1, 2, 1, 2, 1, 1, 1, 0.5, 0.5, 0.5, 2, 1, 1, 0.5, 2, 1, 1],  # Ice
    [None, 1, 1, 1, 1, 1, 1, 1, 1, 0.5, 1, 1, 1, 1, 1, 1, 2, 1, 0],  # Dragon
    [None, 1, 0.5, 1, 1, 1, 1, 1, 2, 1, 1, 1, 1, 1, 2, 1, 1, 0.5, 0.5],  # Dark
    [None, 1, 2, 1, 0.5, 1, 1, 1, 1, 0.5, 0.5, 1, 1, 1, 1, 1, 2, 2, 1],  # Fairy
]
3351574 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
# Account-verification URL: the captured activation key is the token that
# was e-mailed to the user; it is passed to the registration_activate view.
urlpatterns = patterns('userprofiles.contrib.accountverification.views',
    url(r'^(?P<activation_key>\w+)/$', 'registration_activate',
        name='userprofiles_registration_activate'),
)
| StarcoderdataPython |
4825002 | <reponame>imduffy15/python-androidtv<gh_stars>0
"""Constants used throughout the code.
**Links**
* `ADB key event codes <https://developer.android.com/reference/android/view/KeyEvent>`_
* `MediaSession PlaybackState property <https://developer.android.com/reference/android/media/session/PlaybackState.html>`_
"""
import re
# Intents
INTENT_LAUNCH = "android.intent.category.LAUNCHER"
INTENT_HOME = "android.intent.category.HOME"
# echo '1' if the previous shell command was successful
# (``echo -e '1\c'`` prints "1" with no trailing newline)
CMD_SUCCESS1 = r" && echo -e '1\c'"
# echo '1' if the previous shell command was successful, echo '0' if it was not
CMD_SUCCESS1_FAILURE0 = r" && echo -e '1\c' || echo -e '0\c'"
#: Get the audio state
CMD_AUDIO_STATE = r"dumpsys audio | grep paused | grep -qv 'Buffer Queue' && echo -e '1\c' || (dumpsys audio | grep started | grep -qv 'Buffer Queue' && echo '2\c' || echo '0\c')"
#: Determine whether the device is awake
CMD_AWAKE = "dumpsys power | grep mWakefulness | grep -q Awake"
#: Get the current app
CMD_CURRENT_APP = "CURRENT_APP=$(dumpsys window windows | grep mCurrentFocus) && CURRENT_APP=${CURRENT_APP#*{* * } && CURRENT_APP=${CURRENT_APP%%/*} && echo $CURRENT_APP"
#: Get the current app for a Google TV device
CMD_CURRENT_APP_GOOGLE_TV = "CURRENT_APP=$(dumpsys activity a . | grep -E 'mResumedActivity' | cut -d ' ' -f 8) && CURRENT_APP=${CURRENT_APP%%/*} && echo $CURRENT_APP"
#: Get the HDMI input
CMD_HDMI_INPUT = "dumpsys activity starter | grep -o 'HDMIInputService\\/HW[0-9]' -m 1 | grep -o 'HW[0-9]'"
# NOTE: the doubled braces in the two templates below survive str.format();
# '{0}' is intended to be replaced with the package name at call time.
#: Launch an app if it is not already the current app
CMD_LAUNCH_APP = "CURRENT_APP=$(dumpsys window windows | grep mCurrentFocus) && CURRENT_APP=${{CURRENT_APP#*{{* * }} && CURRENT_APP=${{CURRENT_APP%%/*}} && if [ $CURRENT_APP != '{0}' ]; then monkey -p {0} -c " + INTENT_LAUNCH + " --pct-syskeys 0 1; fi"
#: Launch an app if it is not already the current app (for Google TV devices)
CMD_LAUNCH_APP_GOOGLE_TV = "CURRENT_APP=$(dumpsys activity a . | grep -E 'mResumedActivity' | cut -d ' ' -f 8) && CURRENT_APP=${{CURRENT_APP%%/*}} && if [ $CURRENT_APP != '{0}' ]; then monkey -p {0} -c " + INTENT_LAUNCH + " --pct-syskeys 0 1; fi"
#: Get the state from ``dumpsys media_session``; this assumes that the variable ``CURRENT_APP`` has been defined
CMD_MEDIA_SESSION_STATE = "dumpsys media_session | grep -A 100 'Sessions Stack' | grep -A 100 $CURRENT_APP | grep -m 1 'state=PlaybackState {'"
#: Determine the current app and get the state from ``dumpsys media_session``
CMD_MEDIA_SESSION_STATE_FULL = CMD_CURRENT_APP + " && " + CMD_MEDIA_SESSION_STATE
#: Get the apps for an Android TV device
CMD_RUNNING_APPS_ANDROIDTV = "pm list packages | awk -F : '{print $2}'"
#: Get the apps for a Fire TV device
CMD_RUNNING_APPS_FIRETV = "pm list packages | awk -F : '{print $2}'"
#: Determine if the device is on
CMD_SCREEN_ON = "(dumpsys power | grep 'Display Power' | grep -q 'state=ON' || dumpsys power | grep -q 'mScreenOn=true')"
#: Get the "STREAM_MUSIC" block from ``dumpsys audio``
CMD_STREAM_MUSIC = r"dumpsys audio | grep '\- STREAM_MUSIC:' -A 11"
#: Get the wake lock size
CMD_WAKE_LOCK_SIZE = "dumpsys power | grep Locks | grep 'size='"
# The combined "get everything in one adb call" commands below are built by
# joining the single-purpose commands above with " && ", so a single shell
# invocation returns all of the state needed by get_properties().
#: Get the properties for an Android TV device (``lazy=True, get_apps=True``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_ANDROIDTV_PROPERTIES_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC + " && " + CMD_RUNNING_APPS_ANDROIDTV
#: Get the properties for an Android TV device (``lazy=True, get_apps=False``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_ANDROIDTV_PROPERTIES_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC
#: Get the properties for an Android TV device (``lazy=False, get_apps=True``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_ANDROIDTV_PROPERTIES_NOT_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC + " && " + CMD_RUNNING_APPS_ANDROIDTV
#: Get the properties for an Android TV device (``lazy=False, get_apps=False``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_ANDROIDTV_PROPERTIES_NOT_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC
#: Get the properties for a Google TV device (``lazy=True, get_apps=True``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_GOOGLE_TV_PROPERTIES_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP_GOOGLE_TV + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC + " && " + CMD_RUNNING_APPS_ANDROIDTV
#: Get the properties for a Google TV device (``lazy=True, get_apps=False``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_GOOGLE_TV_PROPERTIES_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP_GOOGLE_TV + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC
#: Get the properties for a Google TV device (``lazy=False, get_apps=True``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_GOOGLE_TV_PROPERTIES_NOT_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP_GOOGLE_TV + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC + " && " + CMD_RUNNING_APPS_ANDROIDTV
#: Get the properties for a Google TV device (``lazy=False, get_apps=False``); see :py:meth:`androidtv.androidtv.androidtv_sync.AndroidTVSync.get_properties` and :py:meth:`androidtv.androidtv.androidtv_async.AndroidTVAsync.get_properties`
CMD_GOOGLE_TV_PROPERTIES_NOT_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && (" + CMD_AUDIO_STATE + ") && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP_GOOGLE_TV + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_STREAM_MUSIC
#: Get the properties for a Fire TV device (``lazy=True, get_apps=True``); see :py:meth:`androidtv.firetv.firetv_sync.FireTVSync.get_properties` and :py:meth:`androidtv.firetv.firetv_async.FireTVAsync.get_properties`
CMD_FIRETV_PROPERTIES_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_RUNNING_APPS_FIRETV
#: Get the properties for a Fire TV device (``lazy=True, get_apps=False``); see :py:meth:`androidtv.firetv.firetv_sync.FireTVSync.get_properties` and :py:meth:`androidtv.firetv.firetv_async.FireTVAsync.get_properties`
CMD_FIRETV_PROPERTIES_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1 + " && " + CMD_AWAKE + CMD_SUCCESS1 + " && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo)"
#: Get the properties for a Fire TV device (``lazy=False, get_apps=True``); see :py:meth:`androidtv.firetv.firetv_sync.FireTVSync.get_properties` and :py:meth:`androidtv.firetv.firetv_async.FireTVAsync.get_properties`
CMD_FIRETV_PROPERTIES_NOT_LAZY_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo) && " + CMD_RUNNING_APPS_FIRETV
#: Get the properties for a Fire TV device (``lazy=False, get_apps=False``); see :py:meth:`androidtv.firetv.firetv_sync.FireTVSync.get_properties` and :py:meth:`androidtv.firetv.firetv_async.FireTVAsync.get_properties`
CMD_FIRETV_PROPERTIES_NOT_LAZY_NO_RUNNING_APPS = CMD_SCREEN_ON + CMD_SUCCESS1_FAILURE0 + " && " + CMD_AWAKE + CMD_SUCCESS1_FAILURE0 + " && " + CMD_WAKE_LOCK_SIZE + " && " + CMD_CURRENT_APP + " && (" + CMD_MEDIA_SESSION_STATE + " || echo) && (" + CMD_HDMI_INPUT + " || echo)"
# `getprop` commands
CMD_MANUFACTURER = "getprop ro.product.manufacturer"
CMD_MODEL = "getprop ro.product.model"
CMD_SERIALNO = "getprop ro.serialno"
CMD_VERSION = "getprop ro.build.version.release"
# Commands for getting the MAC address
CMD_MAC_WLAN0 = "ip addr show wlan0 | grep -m 1 ether"
CMD_MAC_ETH0 = "ip addr show eth0 | grep -m 1 ether"
# ADB key event codes
# https://developer.android.com/reference/android/view/KeyEvent
# NOTE: two pairs of names alias the same code:
# KEY_TOP == KEY_MOVE_HOME (122) and KEY_WAKEUP == KEY_RESUME (224).
KEY_BACK = 4
KEY_BLUE = 186
KEY_CENTER = 23
KEY_COMPONENT1 = 249
KEY_COMPONENT2 = 250
KEY_COMPOSITE1 = 247
KEY_COMPOSITE2 = 248
KEY_DOWN = 20
KEY_END = 123
KEY_ENTER = 66
KEY_ESCAPE = 111
KEY_FAST_FORWARD = 90
KEY_GREEN = 184
KEY_HDMI1 = 243
KEY_HDMI2 = 244
KEY_HDMI3 = 245
KEY_HDMI4 = 246
KEY_HOME = 3
KEY_INPUT = 178
KEY_LEFT = 21
KEY_MENU = 82
KEY_MOVE_HOME = 122
KEY_MUTE = 164
KEY_NEXT = 87
KEY_PAIRING = 225
KEY_PAUSE = 127
KEY_PLAY = 126
KEY_PLAY_PAUSE = 85
KEY_POWER = 26
KEY_PREVIOUS = 88
KEY_RED = 183
KEY_RESUME = 224
KEY_REWIND = 89
KEY_RIGHT = 22
KEY_SAT = 237
KEY_SEARCH = 84
KEY_SETTINGS = 176
KEY_SLEEP = 223
KEY_SPACE = 62
KEY_STOP = 86
KEY_SUSPEND = 276
KEY_SYSDOWN = 281
KEY_SYSLEFT = 282
KEY_SYSRIGHT = 283
KEY_SYSUP = 280
KEY_TEXT = 233
KEY_TOP = 122
KEY_UP = 19
KEY_VGA = 251
KEY_VOLUME_DOWN = 25
KEY_VOLUME_UP = 24
KEY_WAKEUP = 224
KEY_YELLOW = 185
# Alphanumeric key event codes
KEY_0 = 7
KEY_1 = 8
KEY_2 = 9
KEY_3 = 10
KEY_4 = 11
KEY_5 = 12
KEY_6 = 13
KEY_7 = 14
KEY_8 = 15
KEY_9 = 16
KEY_A = 29
KEY_B = 30
KEY_C = 31
KEY_D = 32
KEY_E = 33
KEY_F = 34
KEY_G = 35
KEY_H = 36
KEY_I = 37
KEY_J = 38
KEY_K = 39
KEY_L = 40
KEY_M = 41
KEY_N = 42
KEY_O = 43
KEY_P = 44
KEY_Q = 45
KEY_R = 46
KEY_S = 47
KEY_T = 48
KEY_U = 49
KEY_V = 50
KEY_W = 51
KEY_X = 52
KEY_Y = 53
KEY_Z = 54
# Android TV keys
# Map from human-readable key name to ADB keycode (a subset of the KEY_*
# constants above). NOTE(review): the media-transport codes defined above
# (NEXT, PAUSE, PLAY, PLAY_PAUSE, PREVIOUS, STOP) are absent from this map —
# confirm whether that is intentional.
KEYS = {"BACK": KEY_BACK,
        "BLUE": KEY_BLUE,
        "CENTER": KEY_CENTER,
        "COMPONENT1": KEY_COMPONENT1,
        "COMPONENT2": KEY_COMPONENT2,
        "COMPOSITE1": KEY_COMPOSITE1,
        "COMPOSITE2": KEY_COMPOSITE2,
        "DOWN": KEY_DOWN,
        "END": KEY_END,
        "ENTER": KEY_ENTER,
        "ESCAPE": KEY_ESCAPE,
        "FAST_FORWARD": KEY_FAST_FORWARD,
        "GREEN": KEY_GREEN,
        "HDMI1": KEY_HDMI1,
        "HDMI2": KEY_HDMI2,
        "HDMI3": KEY_HDMI3,
        "HDMI4": KEY_HDMI4,
        "HOME": KEY_HOME,
        "INPUT": KEY_INPUT,
        "LEFT": KEY_LEFT,
        "MENU": KEY_MENU,
        "MOVE_HOME": KEY_MOVE_HOME,
        "MUTE": KEY_MUTE,
        "PAIRING": KEY_PAIRING,
        "POWER": KEY_POWER,
        "RED": KEY_RED,
        "RESUME": KEY_RESUME,
        "REWIND": KEY_REWIND,
        "RIGHT": KEY_RIGHT,
        "SAT": KEY_SAT,
        "SEARCH": KEY_SEARCH,
        "SETTINGS": KEY_SETTINGS,
        "SLEEP": KEY_SLEEP,
        "SUSPEND": KEY_SUSPEND,
        "SYSDOWN": KEY_SYSDOWN,
        "SYSLEFT": KEY_SYSLEFT,
        "SYSRIGHT": KEY_SYSRIGHT,
        "SYSUP": KEY_SYSUP,
        "TEXT": KEY_TEXT,
        "TOP": KEY_TOP,
        "UP": KEY_UP,
        "VGA": KEY_VGA,
        "VOLUME_DOWN": KEY_VOLUME_DOWN,
        "VOLUME_UP": KEY_VOLUME_UP,
        "WAKEUP": KEY_WAKEUP,
        "YELLOW": KEY_YELLOW}
# Android TV / Fire TV states
# NOTE: STATE_ON and STATE_UNKNOWN are defined but are not members of
# VALID_STATES below.
STATE_ON = 'on'
STATE_IDLE = 'idle'
STATE_OFF = 'off'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
STATE_STANDBY = 'standby'
STATE_STOPPED = 'stopped'
STATE_UNKNOWN = 'unknown'
#: States that are valid (used by :func:`~androidtv.basetv.state_detection_rules_validator`)
VALID_STATES = (STATE_IDLE, STATE_OFF, STATE_PLAYING, STATE_PAUSED, STATE_STANDBY)
#: Properties that can be used to determine the current state (used by :func:`~androidtv.basetv.state_detection_rules_validator`)
VALID_STATE_PROPERTIES = ("audio_state", "media_session_state")
#: Properties that can be checked for custom state detection (used by :func:`~androidtv.basetv.state_detection_rules_validator`)
VALID_PROPERTIES = VALID_STATE_PROPERTIES + ("wake_lock_size",)
#: The required type for each entry in :py:const:`VALID_PROPERTIES` (used by :func:`~androidtv.basetv.state_detection_rules_validator`)
VALID_PROPERTIES_TYPES = {"audio_state": str,
                          "media_session_state": int,
                          "wake_lock_size": int}
# https://developer.android.com/reference/android/media/session/PlaybackState.html
#: States for the :attr:`~androidtv.basetv.basetv.BaseTV.media_session_state` property
MEDIA_SESSION_STATES = {0: None,
                        1: STATE_STOPPED,
                        2: STATE_PAUSED,
                        3: STATE_PLAYING}
# Apps
APP_AE_TV = 'com.aetn.aetv.watch'
APP_AMAZON_VIDEO = 'com.amazon.avod'
APP_AMZ_VIDEO = 'com.amazon.avod.thirdpartyclient'
APP_APPLE_TV = 'com.apple.atve.amazon.appletv'
APP_ATV_LAUNCHER = 'com.google.android.tvlauncher'
APP_BELL_FIBE = 'com.quickplay.android.bellmediaplayer'
APP_COMEDY_CENTRAL = 'com.vmn.android.comedycentral'
APP_DISNEY_PLUS = 'com.disney.disneyplus'
APP_ES_FILE_EXPLORER = 'com.estrongs.android.pop'
APP_FAWESOME = 'com.future.moviesByFawesomeAndroidTV'
APP_FIREFOX = 'org.mozilla.tv.firefox'
APP_FIRETV_PACKAGE_LAUNCHER = 'com.amazon.tv.launcher'
APP_FIRETV_PACKAGE_SETTINGS = 'com.amazon.tv.settings'
APP_FIRETV_STORE = 'com.amazon.venezia'
APP_FOOD_NETWORK_GO = 'tv.accedo.foodnetwork'
APP_FRANCE_TV = 'fr.francetv.pluzz'
APP_GOOGLE_CAST = 'com.google.android.apps.mediashell'
APP_HBO_GO = 'eu.hbogo.androidtv.production'
APP_HBO_GO_2 = 'com.HBO'
APP_HULU = 'com.hulu.plus'
APP_IMDB_TV = 'com.amazon.imdb.tv.android.app'
APP_IPTV_SMARTERS_PRO = 'com.nst.iptvsmarterstvbox'
APP_JELLYFIN_TV = 'org.jellyfin.androidtv'
APP_KODI = 'org.xbmc.kodi'
APP_MOLOTOV = 'tv.molotov.app'
APP_NETFLIX = 'com.netflix.ninja'
APP_OCS = 'com.orange.ocsgo'
APP_PLAY_GAMES = 'com.google.android.games'
APP_PLAY_MUSIC = 'com.google.android.music'
APP_PLAY_STORE = 'com.android.vending'
APP_PLAY_VIDEOS = 'com.android.videos'
APP_PLEX = 'com.plexapp.android'
APP_PRIME_VIDEO = 'com.amazon.amazonvideo.livingroom'
APP_PRIME_VIDEO_2 = 'com.amazon.firebat'
APP_SMART_YOUTUBE_TV = 'com.liskovsoft.videomanager'
APP_SPORT1 = 'de.sport1.firetv.video'
APP_SPOTIFY = 'com.spotify.tv.android'
APP_STEAM_LINK = 'com.valvesoftware.steamlink'
APP_SYFY = 'com.amazon.webapps.nbc.syfy'
APP_TVHEADEND = 'de.cyberdream.dreamepg.tvh.tv.player'
APP_TWITCH = 'tv.twitch.android.viewer'
APP_TWITCH_APP = 'tv.twitch.android.app'
APP_VH1 = 'com.mtvn.vh1android'
APP_VLC = 'org.videolan.vlc'
APP_VRV = 'com.ellation.vrv'
APP_WAIPU_TV = 'de.exaring.waipu.firetv.live'
APP_WATCH_TNT = 'com.turner.tnt.android.networkapp'
APP_YOUTUBE = 'com.google.android.youtube.tv'
APP_YOUTUBE_2 = 'com.amazon.firetv.youtube'
APP_ZIGGO_GO_TV = 'com.ziggo.tv'
APPS = {APP_AE_TV: 'A&E',
APP_AMAZON_VIDEO: 'Amazon Video',
APP_AMZ_VIDEO: 'Amz Video',
APP_APPLE_TV: 'Apple TV',
APP_ATV_LAUNCHER: 'Android TV Launcher',
APP_BELL_FIBE: 'Bell Fibe',
APP_COMEDY_CENTRAL: 'Comedy Central',
APP_DISNEY_PLUS: 'Disney+',
APP_ES_FILE_EXPLORER: 'ES File Explorer',
APP_FAWESOME: 'Fawsome',
APP_FIREFOX: 'Firefox',
APP_FIRETV_STORE: 'FireTV Store',
APP_FOOD_NETWORK_GO: 'Food Network GO',
APP_FRANCE_TV: 'France TV',
APP_GOOGLE_CAST: 'Google Cast',
APP_HBO_GO: 'HBO GO',
APP_HBO_GO_2: 'HBO GO (2)',
APP_HULU: 'Hulu',
APP_IMDB_TV: 'IMDb TV',
APP_IPTV_SMARTERS_PRO: 'IPTV Smarters Pro',
APP_JELLYFIN_TV: 'Jellyfin',
APP_KODI: 'Kodi',
APP_MOLOTOV: 'Molotov',
APP_NETFLIX: 'Netflix',
APP_OCS: 'OCS',
APP_PLAY_GAMES: 'Play Games',
APP_PLAY_MUSIC: 'Play Music',
APP_PLAY_STORE: 'Play Store',
APP_PLAY_VIDEOS: 'Play Videos',
APP_PLEX: 'Plex',
APP_PRIME_VIDEO: 'Prime Video',
APP_PRIME_VIDEO_2: 'Prime Video (2)',
APP_SMART_YOUTUBE_TV: 'Smart YouTube TV',
APP_SPORT1: 'Sport 1',
APP_SPOTIFY: 'Spotify',
APP_STEAM_LINK: 'Steam Link',
APP_SYFY: 'Syfy',
APP_TVHEADEND: 'DreamPLayer TVHeadend',
APP_TWITCH: 'Twitch',
APP_TWITCH_APP: 'Twitch App',
APP_VH1: 'VH1',
APP_VLC: 'VLC',
APP_VRV: 'VRV',
APP_WAIPU_TV: 'Waipu TV',
APP_WATCH_TNT: 'Watch TNT',
APP_YOUTUBE: 'YouTube',
APP_YOUTUBE_2: 'YouTube (FireTV)',
APP_ZIGGO_GO_TV: 'Ziggo GO TV'}
# Regular expressions
REGEX_MEDIA_SESSION_STATE = re.compile(r"state=(?P<state>[0-9]+)", re.MULTILINE)
REGEX_WAKE_LOCK_SIZE = re.compile(r"size=(?P<size>[0-9]+)")
# Regular expression patterns
DEVICE_REGEX_PATTERN = r"Devices: (.*?)\W"
MAC_REGEX_PATTERN = "ether (.*?) brd"
MAX_VOLUME_REGEX_PATTERN = r"Max: (\d{1,})"
MUTED_REGEX_PATTERN = r"Muted: (.*?)\W"
STREAM_MUSIC_REGEX_PATTERN = "STREAM_MUSIC(.*?)- STREAM"
VOLUME_REGEX_PATTERN = r"\): (\d{1,})"
#: Default authentication timeout (in s) for :meth:`adb_shell.handle.tcp_handle.TcpHandle.connect` and :meth:`adb_shell.handle.tcp_handle_async.TcpHandleAsync.connect`
DEFAULT_AUTH_TIMEOUT_S = 10.0
#: Default timeout (in s) for :class:`adb_shell.handle.tcp_handle.TcpHandle` and :class:`adb_shell.handle.tcp_handle_async.TcpHandleAsync`
DEFAULT_ADB_TIMEOUT_S = 9.0
#: Default timeout for acquiring the lock that protects ADB commands
DEFAULT_LOCK_TIMEOUT_S = 3.0
| StarcoderdataPython |
3352020 | from django.shortcuts import render
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from profile_api import serializers
from rest_framework import viewsets
from rest_framework import filters
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
from rest_framework.permissions import IsAuthenticated
from profile_api import models
from profile_api import permissions
from rest_framework.authentication import TokenAuthentication
class HelloApiView(APIView):
    """Demonstration APIView exposing handlers for every HTTP verb."""

    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """Return a list of APIView features."""
        an_apiview = [
            'Uses HTTP methods as function (get,post,patch,put,delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'Hello', 'an_apiview': an_apiview})

    def post(self, request, format=None):
        """Create a hello message carrying the submitted name."""
        serializer = self.serializer_class(data=request.data)
        # Guard clause: reject invalid payloads up front.
        if not serializer.is_valid():
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}'})

    def put(self, request, pk=None):
        """Handle updating an object."""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """Handle a partial update of an object."""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """Delete an object."""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
    """Demonstration ViewSet covering the standard ViewSet actions."""

    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Return a hello message plus a summary of ViewSet features."""
        # Typos fixed in the display strings ('creat', 'retrive', 'upddate',
        # 'Automativally' in the original).
        a_viewset = [
            'Uses actions(list,create,retrieve,update,partial update)',
            'Automatically maps to urls using router',
            'provides more functionality by less code'
        ]
        return Response({'message': 'Hello', 'a_viewset': a_viewset})

    def create(self, request):
        """Create a hello message from the posted name."""
        serializer = self.serializer_class(data=request.data)
        if serializer.is_valid():
            name = serializer.validated_data.get('name')
            message = f'Hello {name}'
            return Response({'message': message})
        else:
            return Response(
                serializer.errors,
                status=status.HTTP_400_BAD_REQUEST
            )

    def retrieve(self, request, pk=None):
        """Handle getting an object by its id.

        Renamed from the original misspelling ``retrive``: DRF routers
        dispatch detail GET requests to a method named ``retrieve``, so the
        misspelled method was never invoked.
        """
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Handle updating an object."""
        return Response({'http_method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Handle partially updating an object."""
        # Fixed response payload: was 'PUNCH', an obvious typo for 'PATCH'.
        return Response({'http_method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Handle the deleting of an object."""
        return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for user profiles.

    Token-authenticated; ``permissions.UpdateOwnProfile`` restricts updates
    to the owning user.  Supports ``?search=`` filtering on name and email.
    """
    serializer_class=serializers.UserProfileSerializer
    queryset=models.UserProfile.objects.all()
    authentication_classes=(TokenAuthentication,)
    permission_classes=(permissions.UpdateOwnProfile,)
    filter_backends=(filters.SearchFilter,)
    search_fields=('name','email',)
class UserLoginApiView(ObtainAuthToken):
    """Create user authentication tokens.

    Explicit renderer classes make the endpoint visible in the browsable API
    (ObtainAuthToken does not set them by default).
    """
    renderer_classes= api_settings.DEFAULT_RENDERER_CLASSES
class UserProfileFeedViewSet(viewsets.ModelViewSet):
    """Handle creating, reading and updating profile feed items.

    Requires token authentication; ``permissions.UpdateOwnStatus`` limits
    modifications to the item's author.
    """
    authentication_classes=(TokenAuthentication,)
    serializer_class=serializers.ProfileFeedItemSerializer
    queryset=models.UserFeedItem.objects.all()
    permission_classes=(
        permissions.UpdateOwnStatus,
        IsAuthenticated
    )
    def perform_create(self,serializer):
        """Attach the authenticated user's profile before saving the feed item."""
        # DRF create hook: the user never supplies user_profile themselves.
        serializer.save(user_profile=self.request.user)
# Create your views here.
| StarcoderdataPython |
100537 | """
Defines some useful utilities for plotting the evolution of a Resonator Network
"""
import copy
import numpy as np
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.lines import Line2D
from utils.encoding_decoding import cosine_sim
class LiveResonatorPlot(object):
    """
    A container for a Matplotlib plot we'll use for live visualization
    Parameters
    ----------
    plot_type : str
        One of {'vec_img_viz', 'sim_bar_plots'}. Specifies which plot to
        show. The 'vec_img_viz' plots just display the target and current states
        as images. The 'sim_bar_plots' plot shows the similarity between each of
        the factors and the codebook as well as displays the similarity between
        the model's estimate of the composite vector and the composite vector
        itself.
    target_vectors : dictionary
        Contains the target vectors for each factor.
    factor_ordering : list
        Order to display the factors in. Just nice for consistent plotting
    query_vec : ndarray, optional
        The original composite vector given as a query to the Resonator Network.
        It will usually be the product of target vectors, but in general can
        have some additional noise corruption so we provide this as an additional
        input. Only necessary for the 'sim_bar_plots' type plot.
    codebooks: dict, optional.
        Keys label the factors and values are the corresponsing codebooks.
        Only necessary for the 'sim_bar_plots' type plot.
    image_size : (int, int), optional
        If applicable, dimensions of image visualization for vectors (in pixels).
        Only necessary for the 'vec_img_viz' type plot.
    """
    def __init__(self, plot_type, target_vectors, factor_ordering,
                 query_vec=None, codebooks=None, image_size=None):
        assert plot_type in ['vec_img_viz', 'sim_bar_plots']
        # All factor vectors are assumed to have the same length; sample one.
        self.vec_size = len(
            target_vectors[np.random.choice(list(target_vectors.keys()))])
        self.factor_ordering = factor_ordering
        self.plot_type = plot_type
        plt.ion()
        if plot_type == 'sim_bar_plots':
            assert codebooks is not None, 'please provide the codebooks as input'
            assert query_vec is not None, 'please provide the query vec as input'
            self.codebooks = copy.deepcopy(codebooks)
            self.query_vec = copy.deepcopy(query_vec)
            self.target_vectors = copy.deepcopy(target_vectors)
            self.barplot_refs = []  # hold references to the BarContainer objects
            # some constants to get GridSpec working for us
            mjm = 0.075  # {m}a{j}or {m}argin
            mnm = 1.0  # {m}i{n}or {m}argin
            vert_margin = 0.08
            horz_margin = 0.1
            # Height of each factor's GridSpec row after removing margins.
            gs_height = (((1.0 - 2*vert_margin) -
                          (mjm * (len(self.factor_ordering) - 1))) /
                         len(self.factor_ordering))
            fig = plt.figure(figsize=(15, 12))
            # NOTE(review): plt.get_cmap is deprecated in newer matplotlib
            # (use matplotlib.colormaps) — confirm before upgrading.
            tab10colors = plt.get_cmap('tab10').colors
            for fac_idx in range(len(self.factor_ordering)):
                factor_label = self.factor_ordering[fac_idx]
                gs = GridSpec(6, 30)
                t = (1.0 - vert_margin) - fac_idx*gs_height - fac_idx*mjm
                gs.update(top=t, bottom=t - gs_height, left=horz_margin,
                          right=1.0-horz_margin, hspace=mnm, wspace=8*mnm)
                num_in_codebook = self.codebooks[factor_label].shape[1]
                # current states
                t_ax = plt.subplot(gs[:3, :18])
                self.barplot_refs.append(t_ax.bar(np.arange(num_in_codebook),
                                         np.zeros((num_in_codebook,)),
                                         color=tab10colors[fac_idx], width=1))
                t_ax.spines['top'].set_visible(False)
                t_ax.spines['right'].set_visible(False)
                t_ax.get_xaxis().set_ticks([])
                t_ax.get_yaxis().set_ticks([-1.0, 0.0, 1.0])
                t_ax.set_ylabel('Similarity', fontsize=12)
                t_ax.set_ylim(-1, 1)
                t_ax.yaxis.set_tick_params(labelsize=12)
                t_ax.text(0.02, 0.95, 'Current State', horizontalalignment='left',
                          verticalalignment='top', transform=t_ax.transAxes,
                          color='k', fontsize=14)
                if fac_idx == 0:
                    t_ax.set_title('Current state of each factor', fontsize=18)
                # target similarity plot
                t_ax = plt.subplot(gs[3:, :18])
                target_csim = cosine_sim(
                    target_vectors[factor_label],
                    self.codebooks[factor_label])
                t_ax.bar(np.arange(num_in_codebook), target_csim,
                         color=tab10colors[fac_idx], width=1)
                t_ax.spines['top'].set_visible(False)
                t_ax.spines['right'].set_visible(False)
                t_ax.set_xlabel('Index in codebook', fontsize=12)
                t_ax.set_ylabel('Similarity', fontsize=12)
                t_ax.get_yaxis().set_ticks([-1.0, 0.0, 1.0])
                t_ax.text(0.02, 0.95, 'Target State', horizontalalignment='left',
                          verticalalignment='top', transform=t_ax.transAxes,
                          color='k', fontsize=14)
                # Cap the number of x tick labels at roughly ten.
                if num_in_codebook > 10:
                    t_ax.get_xaxis().set_ticks(
                        np.arange(0, num_in_codebook, np.rint(num_in_codebook/10)))
                else:
                    t_ax.get_xaxis().set_ticks(np.arange(num_in_codebook))
                t_ax.xaxis.set_tick_params(labelsize=12)
                t_ax.yaxis.set_tick_params(labelsize=12)
            # similarity between query composite and current estimated composite
            gs = GridSpec(3, 30)
            t_ax = plt.subplot(gs[1:2, 22:])
            # similarities to target
            self.lineplot_ref = Line2D([], [], color='k', linewidth=3)
            self.total_sim_saved = []
            t_ax.add_line(self.lineplot_ref)
            t_ax.set_ylim(-1.25, 1.25)
            t_ax.set_xlim(0, 20)  # we'll have to update the axis every ten steps
            t_ax.set_title(r'Similarity between $\mathbf{c}$ and $\hat{\mathbf{c}}$',
                           fontsize=18)
            t_ax.set_xlabel('Iteration number', fontsize=14)
            t_ax.set_ylabel('Cosine Similarity', fontsize=14)
            t_ax.xaxis.set_tick_params(labelsize=12)
            t_ax.yaxis.set_tick_params(labelsize=12)
            t_ax.yaxis.set_ticks([-1, 0, 1])
            t_ax.spines['top'].set_visible(False)
            t_ax.spines['right'].set_visible(False)
            self.sim_plot_ax_ref = t_ax
        if plot_type == 'vec_img_viz':
            if image_size is None:
                # we assume that vector is a square number and we will display
                # as a square image
                assert np.sqrt(self.vec_size) % 1 == 0
                self.image_size = (int(np.sqrt(self.vec_size)),
                                   int(np.sqrt(self.vec_size)))
            else:
                self.image_size = image_size
            self.fig, self.axes = plt.subplots(
                len(factor_ordering), 2, figsize=(10, 15))
            self.fig.suptitle('Resonator State', fontsize='15')
            self.im_refs = []
            for fac_idx in range(len(self.factor_ordering)):
                factor_label = self.factor_ordering[fac_idx]
                self.im_refs.append([])
                maxval = np.max(target_vectors[factor_label])
                minval = np.min(target_vectors[factor_label])
                targ_im = self.axes[fac_idx][0].imshow(
                    np.reshape(target_vectors[factor_label],
                    self.image_size), cmap='gray', vmin=minval, vmax=maxval)
                self.axes[fac_idx][0].set_title(
                    'Target vector for ' + factor_label)
                self.axes[fac_idx][0].axis('off')
                self.im_refs[fac_idx].append(targ_im)
                res_im = self.axes[fac_idx][1].imshow(
                    np.zeros(self.image_size), cmap='gray', vmin=minval, vmax=maxval)
                self.axes[fac_idx][1].set_title(
                    'Current state for ' + factor_label)
                self.axes[fac_idx][1].axis('off')
                self.im_refs[fac_idx].append(res_im)
        plt.show(block=False)
        plt.draw()
    def UpdatePlot(self, current_state, wait_interval=0.001):
        """Redraw the figure to reflect ``current_state``.

        ``current_state`` maps factor labels to their current vectors.
        ``wait_interval`` is how long the GUI event loop runs per update.
        """
        if self.plot_type == 'sim_bar_plots':
            for f_idx in range(len(self.factor_ordering)):
                csim = cosine_sim(current_state[self.factor_ordering[f_idx]],
                                  self.codebooks[self.factor_ordering[f_idx]])
                # really slow, should find a faster visualization solution
                for rect, ht in zip(self.barplot_refs[f_idx], csim):
                    rect.set_height(ht)
            # np.prod replaces np.product, an alias that was removed in NumPy 2.0.
            composite_est = np.prod(np.array(
                [current_state[x] for x in current_state]), axis=0)
            self.total_sim_saved.append(cosine_sim(self.query_vec, composite_est))
            self.lineplot_ref.set_data(
                np.arange(len(self.total_sim_saved)), self.total_sim_saved)
            # Grow the x axis in increments of 20 iterations.
            if len(self.total_sim_saved) % 20 == 0:
                self.sim_plot_ax_ref.set_xlim(0, len(self.total_sim_saved) + 20)
            # Color the line red while the similarity is decreasing.
            if (len(self.total_sim_saved) > 1 and
                not np.isclose(self.total_sim_saved[-1], self.total_sim_saved[-2])):
                if self.total_sim_saved[-1] < self.total_sim_saved[-2]:
                    self.lineplot_ref.set_color('r')
                else:
                    self.lineplot_ref.set_color('k')
        else:
            for f_idx in range(len(self.factor_ordering)):
                self.im_refs[f_idx][-1].set_data(
                    np.reshape(current_state[self.factor_ordering[f_idx]],
                               self.image_size))
        pause_without_refocus(wait_interval)
        # ^ we could get a LOT faster plotting by using something other than
        # plt.pause-style waiting, but this is quick and dirty...
    def ClosePlot(self):
        """Close the figure window."""
        plt.close()
        # plus any other cleanup we may need
def pause_without_refocus(interval):
    """Pause like ``plt.pause`` but without raising the figure window.

    NOTE(review): relies on matplotlib private API
    (``matplotlib._pylab_helpers.Gcf``); behaviour may differ across
    matplotlib versions — confirm on upgrade.
    """
    backend = plt.rcParams['backend']
    # Only interactive backends have an event loop to spin.
    if backend in matplotlib.rcsetup.interactive_bk:
        figManager = matplotlib._pylab_helpers.Gcf.get_active()
        if figManager is not None:
            canvas = figManager.canvas
            if canvas.figure.stale:
                canvas.draw()
            # Run the GUI event loop directly so focus is not stolen.
            canvas.start_event_loop(interval)
            return
| StarcoderdataPython |
1684937 | import torch
import matplotlib.pyplot as plt
# Calculate total link length for given sample, it is assumed the base of robot is at (0,0). Input is the position of joints in one timestamp
def base_to_ee_distance(input):
    """Return the total link length for one timestamp of joint positions.

    ``input`` is a flat tensor of (x, y) joint coordinates; the robot base is
    assumed to sit at (0, 0).  The result is
    sqrt(|first_joint - base|^2 + |ee - first_joint|^2), a scalar tensor.
    """
    # Pair the flat coordinates up as (x, y) rows.
    different_view = input.view(-1, 2)
    first_joint = different_view[0]
    # Last row is the end effector.
    ee_pose = different_view[-1]
    # Allocate on the same device as the input instead of hard-coding .cuda(),
    # so the function also works on CPU-only machines.
    base = torch.zeros(1, 2, device=input.device)
    # Squared distance base->first joint plus squared distance first joint->ee.
    dist = ((first_joint - base)*(first_joint - base)) + ((ee_pose-first_joint)*(ee_pose-first_joint))
    dist = torch.sqrt(dist.sum())
    return dist
# First input is the data in "i"th time, second input is the "i+1" time data, and the last one is the total link length for the sampled robot
def past_to_present_dist(past, present, normalizer):
    """Return the L1 end-effector displacement between two timestamps,
    scaled by ``normalizer`` (the robot's total link length)."""
    # Joint coordinates arrive flattened; pair them up as (x, y) rows.
    # NOTE(review): row 1 is treated as the end effector here (a two-joint
    # robot), whereas base_to_ee_distance uses the last row — confirm.
    past_ee = past.view(-1, 2)[1]
    present_ee = present.view(-1, 2)[1]
    # Manhattan distance between the two end-effector positions,
    # expressed relative to the link length.
    return torch.abs(present_ee - past_ee).sum() / normalizer
# It takes one input and it is sample from one of the batches provided to similarity function
def ee_history(input):
    """Return per-step normalized end-effector displacements for one sample.

    ``input`` has shape (timesteps, joints*2); the result has shape
    (timesteps - 1, 1) where entry ``i`` is the L1 end-effector displacement
    from timestep ``i`` to ``i + 1`` divided by the robot's link length.
    """
    length = input.shape[0]
    # One entry per transition, hence length - 1 rows.  Allocate on the same
    # device as the input instead of hard-coding .cuda() so CPU tensors work.
    keeper = torch.zeros(length - 1, 1, device=input.device)
    for i in range(1, length):
        # Total link length for this timestamp, used to normalize step size.
        dist = base_to_ee_distance(input[i])
        # Normalized displacement from the previous timestep to this one.
        diff = past_to_present_dist(input[i-1], input[i], dist)
        keeper[i-1] = diff
    return keeper
# It takes two inputs. First one is the "real" which is one of the robots. Second one is the "fake" which is the outcome of generator given "real".
def similarity(real, fake):
    """Return the summed trajectory difference between two batches.

    ``real`` holds real robot samples and ``fake`` the generator output for
    them.  For each paired sample the end-effector trajectories (see
    ``ee_history``) are compared with an L1 distance; the distances are
    accumulated into a single-element tensor.
    """
    # Accumulate on the same device as the inputs instead of hard-coding
    # CUDA, so the function also runs on CPU-only machines.
    total_difference = torch.zeros(1, device=real.device)
    batch_size = real.shape[0]
    for i in range(batch_size):
        # Relative end-effector trajectory for each sample of the pair.
        real_hist = ee_history(real[i])
        fake_hist = ee_history(fake[i])
        # L1 distance between the trajectories, summed over timesteps.
        temp = torch.abs(real_hist - fake_hist).sum()
        total_difference += temp
    return total_difference
def write_to_file(m_data, des_file):
    """Flatten the 2-D iterable ``m_data`` and write it to ``des_file`` as a
    single comma-separated line.

    Elements are converted with ``str``; no trailing separator is written.
    """
    # Stream a generator into str.join instead of the original
    # element-by-element "," concatenation (which was O(n^2)) and drop the
    # commented-out debug prints.
    flat = (str(element) for row in m_data for element in row)
    with open(des_file, 'w') as f:
        f.write(",".join(flat))
def write_res(m_data, des_file):
    """Write the ``str`` representation of ``m_data`` to ``des_file``."""
    with open(des_file, 'w') as handle:
        handle.write(str(m_data))
def save_model(config, sensor1_gen, sensor2_gen, sensor1_dis, sensor2_dis,
               optimizer_sensor1_gen, optimizer_sensor2_gen, optimizer_sensor1_dis, optimizer_sensor2_dis, epoch):
    """Checkpoint both generator/discriminator pairs and their optimizers.

    Everything is saved as state dicts in one file at
    ``config.TRAIN.SAVE_WEIGHTS`` with the epoch number appended
    (note: no file extension is added here).
    """
    torch.save({
        'sensor1_gen': sensor1_gen.state_dict(),
        'sensor2_gen': sensor2_gen.state_dict(),
        'sensor1_dis': sensor1_dis.state_dict(),
        'sensor2_dis': sensor2_dis.state_dict(),
        'optimizer_sensor1_gen': optimizer_sensor1_gen.state_dict(),
        'optimizer_sensor2_gen': optimizer_sensor2_gen.state_dict(),
        'optimizer_sensor1_dis': optimizer_sensor1_dis.state_dict(),
        'optimizer_sensor2_dis': optimizer_sensor2_dis.state_dict()
    }, config.TRAIN.SAVE_WEIGHTS+str(epoch))
def save_vanilla_model(config, sensor1_gen, sensor1_dis, optimizer_sensor1_gen, optimizer_sensor1_dis, epoch):
    """Checkpoint a single generator/discriminator pair and their optimizers.

    NOTE(review): this variant appends a '.pth' extension while save_model
    and save_generator do not — confirm the inconsistency is intentional.
    """
    torch.save({
        'sensor1_gen': sensor1_gen.state_dict(),
        'sensor1_dis': sensor1_dis.state_dict(),
        'optimizer_sensor1_gen': optimizer_sensor1_gen.state_dict(),
        'optimizer_sensor1_dis': optimizer_sensor1_dis.state_dict()
    }, config.TRAIN.SAVE_WEIGHTS+str(epoch)+".pth")
def save_generator(config, sensor1_gen, optimizer_sensor1_gen, epoch):
    """Checkpoint only the generator and its optimizer (no discriminator)."""
    torch.save({
        'sensor1_gen': sensor1_gen.state_dict(),
        'optimizer_sensor1_gen': optimizer_sensor1_gen.state_dict()
    }, config.TRAIN.SAVE_WEIGHTS+str(epoch))
| StarcoderdataPython |
75261 | <gh_stars>1-10
import os
import time
from pathlib import Path
import torch
import numpy as np
import torch.backends.cudnn as cudnn
from argparse import ArgumentParser
# user
from builders.model_builder import build_model
from builders.dataset_builder import build_dataset_test, build_dataset_predict
from utils.utils import save_predict
from utils.convert_state import convert_state_dict
import cv2
def _str2bool(value):
    """Parse a command-line boolean: 'true'/'1'/'yes' (any case) -> True, else False."""
    return str(value).strip().lower() in ("true", "1", "yes", "y", "t")


def parse_args():
    """Parse command-line options for inference and return the namespace.

    Fix: the original declared ``--use_txt_list`` with ``type=bool`` and left
    ``--cuda`` untyped, so ``--use_txt_list False`` / ``--cuda False`` both
    evaluated truthy (``bool("False")`` is True, and an untyped value stays a
    non-empty string).  Both now go through ``_str2bool`` so existing
    ``True``/``False`` command lines keep working and mean what they say.
    """
    parser = ArgumentParser(description='Efficient semantic segmentation')
    # model and dataset
    parser.add_argument('--model', default="ENet", help="model name: (default ENet)")
    parser.add_argument('--dataset', default="custom_dataset", help="dataset: cityscapes, camvid or custom_dataset")
    parser.add_argument('--image_input_path', default="./inference_images/input_images", help="load predict_image")
    parser.add_argument('--num_workers', type=int, default=2, help="the number of parallel threads")
    parser.add_argument('--use_txt_list', type=_str2bool, default=False, help="Using txt list in dataset files")
    parser.add_argument('--batch_size', type=int, default=1,
                        help=" the batch_size is set to 1 when evaluating or testing")
    parser.add_argument('--checkpoint', type=str,
                        default=r"",
                        help="use the file to load the checkpoint for evaluating or testing ")
    parser.add_argument('--save_seg_dir', type=str, default="./inference_images/predict_output/",
                        help="saving path of prediction result")
    parser.add_argument('--cuda', type=_str2bool, default=True, help="run on CPU or GPU")
    parser.add_argument("--gpus", default="0", type=str, help="gpu ids (default: 0)")
    args = parser.parse_args()
    return args
def predict(args, test_loader, model):
    """Run inference over ``test_loader`` and save grey/color masks, overlay
    images, or annotated video, depending on the loader's ``mode``.

    args:
      args: parsed command-line namespace (save_seg_dir, dataset, ...)
      test_loader: iterable yielding (input, size, name, mode, frame_count,
                   img_original, vid_cap) tuples; no labels are expected
      model: the segmentation network
    return: None (results are written to ``args.save_seg_dir``)
    """
    # evaluation or test mode
    model.eval()
    total_batches = len(test_loader)
    vid_writer = None
    vid_path = None
    for i, (input, size, name, mode, frame_count, img_original, vid_cap) in enumerate(test_loader):
        with torch.no_grad():
            input = input[None, ...]  # add a batch dimension
            input = torch.tensor(input)  # [1, 3, 224, 224]
            input_var = input.cuda()
            start_time = time.time()
            output = model(input_var)
            # Wait for the GPU so the timing below is accurate.
            torch.cuda.synchronize()
            time_taken = time.time() - start_time
            print(f'[{i + 1}/{total_batches}] time: {time_taken * 1000:.4f} ms = {1 / time_taken:.1f} FPS')
            output = output.cpu().data[0].numpy()
            output = output.transpose(1, 2, 0)
            # Per-pixel argmax over the class dimension -> label mask.
            output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
        save_name = Path(name).stem + f'_predict'
        if mode == 'images':
            # save grey and color versions of the predicted mask
            save_predict(output, None, save_name, args.dataset, args.save_seg_dir,
                         output_grey=True, output_color=True, gt_color=False)
        # blend the prediction with the original image
        img = img_original
        mask = output
        # assumes binary segmentation where class 1 is foreground — TODO confirm
        mask[mask == 1] = 255  # turn mask value 1 into 255 so it shows as the red channel
        zeros = np.zeros(mask.shape[:2], dtype="uint8")  # all-zero matrix for the blue and green channels
        mask_final = cv2.merge([zeros, zeros, mask])  # merge into a 3-channel (BGR) image
        img = cv2.addWeighted(img, 1, mask_final, 1, 0)  # overlay mask onto the image
        if mode == 'images':
            # save the prediction-over-original overlay image
            cv2.imwrite(f"{os.path.join(args.save_seg_dir, save_name + '_img.png')}", img)
        else:
            # write annotated frames to an output video
            save_path = os.path.join(args.save_seg_dir, save_name + '_predict.mp4')
            if vid_path != save_path:  # new video
                vid_path = save_path
                if isinstance(vid_writer, cv2.VideoWriter):
                    vid_writer.release()  # release previous video writer
                fourcc = 'mp4v'  # output video codec
                fps = vid_cap.get(cv2.CAP_PROP_FPS)
                w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                vid_writer = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*fourcc), fps, (w, h))
            vid_writer.write(img)
def predict_model(args):
    """Build the model, load the checkpoint and run :func:`predict`.

    param args: global arguments (model, dataset, checkpoint, gpus, ...)
    return: None
    raises FileNotFoundError: if ``args.checkpoint`` does not exist
    """
    print(args)
    if args.cuda:
        print("=====> use gpu id: '{}'".format(args.gpus))
        # Restrict visible GPUs before any CUDA initialization.
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
        if not torch.cuda.is_available():
            raise Exception("no GPU found or wrong gpu id, please run without --cuda")
    # build the model
    model = build_model(args.model, num_classes=args.classes)
    if args.cuda:
        model = model.cuda()  # using GPU for inference
        # Enable cuDNN autotuning for fixed-size inputs.
        cudnn.benchmark = True
    if args.checkpoint:
        if os.path.isfile(args.checkpoint):
            print("=====> loading checkpoint '{}'".format(args.checkpoint))
            checkpoint = torch.load(args.checkpoint)
            model.load_state_dict(checkpoint['model'])
            # model.load_state_dict(convert_state_dict(checkpoint['model']))
        else:
            print("=====> no checkpoint found at '{}'".format(args.checkpoint))
            raise FileNotFoundError("no checkpoint found at '{}'".format(args.checkpoint))
    if not os.path.exists(args.save_seg_dir):
        os.makedirs(args.save_seg_dir)
    # load the test set
    if args.use_txt_list:
        _, testLoader = build_dataset_test(args.dataset, args.num_workers, none_gt=True)
    else:
        _, testLoader = build_dataset_predict(args.image_input_path, args.dataset, args.num_workers, none_gt=True)
    print("=====> beginning testing")
    print("test set length: ", len(testLoader))
    predict(args, testLoader, model)
if __name__ == '__main__':
    args = parse_args()
    # Results go under <save_seg_dir>/<dataset>/predict/<model>/.
    args.save_seg_dir = os.path.join(args.save_seg_dir, args.dataset, 'predict', args.model)
    # Number of segmentation classes per dataset.
    if args.dataset == 'cityscapes':
        args.classes = 19
    elif args.dataset == 'camvid':
        args.classes = 11
    elif args.dataset == 'custom_dataset':
        args.classes = 2
    else:
        # NOTE(review): the message says "two datasets" but three are handled
        # above — looks like it predates custom_dataset support.
        raise NotImplementedError(
            "This repository now supports two datasets: cityscapes and camvid, %s is not included" % args.dataset)
    predict_model(args)
| StarcoderdataPython |
3371877 | <gh_stars>0
# Class for storing and retrieving core D&D 5e reference data (SRD content).
class CoreData():
    """Static lookup tables for character generation: races, classes,
    languages, alignments, abilities, skills and related options.

    All attributes are class-level constants; the class is never instantiated
    with state.
    """
    # List of races
    races = [
        'dragonborn',
        'dwarf',
        'elf',
        'gnome',
        'half-elf',
        'half-orc',
        'halfling',
        'human',
        'tiefling'
    ]
    # Dict of subraces keyed by race; races without subraces map to [].
    subraces = {
        'dragonborn': [],
        'dwarf': [
            'hill dwarf'
        ],
        'elf': [
            'high elf'
        ],
        'gnome': [
            'rock gnome'
        ],
        'half-elf': [],
        'halfling': [
            'lightfoot'
        ],
        'half-orc': [],
        'human': [],
        'tiefling': []
    }
    # List of classes
    classes = [
        'barbarian',
        'bard',
        'cleric',
        'druid',
        'fighter',
        'monk',
        'paladin',
        'ranger',
        'rogue',
        'sorcerer',
        'warlock',
        'wizard'
    ]
    # List of languages
    languages = [
        'abyssal',
        'celestial',
        'common',
        'deep speech',
        'draconic',
        'dwarvish',
        'elvish',
        'giant',
        'gnomish',
        'goblin',
        'halfling',
        'infernal',
        'orc',
        'primordial',
        'sylvan',
        'undercommon'
    ]
    # Dict of alignments: each key is a tendency, mapping to the concrete
    # alignments compatible with it ('all' lists every alignment).
    alignments = {
        'lawful': [
            'lawful good',
            'lawful neutral',
            'lawful evil'
        ],
        'chaotic': [
            'chaotic good',
            'chaotic neutral',
            'chaotic evil'
        ],
        'good': [
            'lawful good',
            'chaotic good',
            'neutral good'
        ],
        'evil': [
            'lawful evil',
            'chaotic evil',
            'neutral evil'
        ],
        'neutral': [
            'neutral',
            'neutral good',
            'neutral evil'
        ],
        'all': [
            'lawful good',
            'lawful neutral',
            'lawful evil',
            'chaotic good',
            'chaotic neutral',
            'chaotic evil',
            'neutral',
            'neutral good',
            'neutral evil'
        ]
    }
    # List of abilities for reference
    ability_list = [
        'strength',
        'dexterity',
        'constitution',
        'intelligence',
        'wisdom',
        'charisma'
    ]
    # List of skills for reference
    skill_list = [
        'acrobatics',
        'animal_handling',
        'arcana',
        'athletics',
        'deception',
        'history',
        'insight',
        'intimidation',
        'investigation',
        'medicine',
        'nature',
        'perception',
        'performance',
        'persuasion',
        'religion',
        'sleight_of_hand',
        'stealth',
        'survival'
    ]
    # Dict of skills grouped by class, used for assigning proficiency.
    # NOTE(review): there is no 'bard' key — presumably bards use the 'all'
    # entry (they may pick any three skills); confirm in the consuming code.
    skill_prof_dict = {
        'all': [
            'acrobatics',
            'animal_handling',
            'arcana',
            'athletics',
            'deception',
            'history',
            'insight',
            'intimidation',
            'investigation',
            'medicine',
            'nature',
            'perception',
            'performance',
            'persuasion',
            'religion',
            'sleight_of_hand',
            'stealth',
            'survival'
        ],
        'barbarian': [
            'animal_handling',
            'athletics',
            'intimidation',
            'nature',
            'perception',
            'survival'
        ],
        'cleric': [
            'history',
            'insight',
            'medicine',
            'persuasion',
            'religion'
        ],
        'druid': [
            'arcana',
            'animal_handling',
            'insight',
            'medicine',
            'nature',
            'perception',
            'religion',
            'survival'
        ],
        'fighter': [
            'acrobatics',
            'animal_handling',
            'athletics',
            'history',
            'insight',
            'intimidation',
            'perception',
            'survival'
        ],
        'monk': [
            'acrobatics',
            'athletics',
            'history',
            'insight',
            'religion',
            'stealth'
        ],
        'paladin': [
            'athletics',
            'insight',
            'intimidation',
            'medicine',
            'persuasion',
            'religion'
        ],
        'ranger': [
            'animal_handling',
            'athletics',
            'insight',
            'investigation',
            'nature',
            'perception',
            'stealth',
            'survival'
        ],
        'rogue': [
            'acrobatics',
            'athletics',
            'deception',
            'insight',
            'intimidation',
            'investigation',
            'perception',
            'performance',
            'persuasion',
            'sleight_of_hand',
            'stealth'
        ],
        'sorcerer': [
            'arcana',
            'deception',
            'insight',
            'intimidation',
            'persuasion',
            'religion'
        ],
        'warlock': [
            'arcana',
            'deception',
            'history',
            'intimidation',
            'investigation',
            'nature',
            'religion'
        ],
        'wizard': [
            'arcana',
            'history',
            'insight',
            'investigation',
            'medicine',
            'religion'
        ]
    }
    # Dragonborn draconic ancestry options.
    draconic_ancestries = [
        'black',
        'blue',
        'brass',
        'bronze',
        'copper',
        'gold',
        'green',
        'red',
        'silver',
        'white'
    ]
    # Fighting style options (fighter/paladin/ranger class feature).
    fighting_styles = [
        'archery',
        'defense',
        'dueling',
        'great weapon fighting',
        'protection',
        'two-weapon fighting'
    ]
    # Creature types usable as a ranger's favored enemy.
    enemy_types = [
        'aberrations',
        'beasts',
        'celestials',
        'constructs',
        'dragons',
        'elementals',
        'fey',
        'fiends',
        'giants',
        'monstrosities',
        'oozes',
        'plants',
        'undead'
    ]
| StarcoderdataPython |
55257 | # -*- coding: utf-8 -*-
"""\
This is a python port of "Goose" orignialy licensed to Gravity.com
under one or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.
Python port was written by <NAME>
Gravity.com licenses this file
to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
try:
import urllib2 as request
except ImportError:
from urllib import request
class HtmlFetcher(object):
    """Fetch raw HTML for a URL using the configured browser user agent."""

    def __init__(self):
        pass

    def get_http_client(self):
        """Placeholder kept for API compatibility; always returns None."""
        pass

    def get_html(self, config, url):
        """Return the raw response body for `url`, or None on any failure.

        config : object exposing `browser_user_agent`, sent as the
            User-agent header so sites serve the full (non-bot) page.
        url : str
            Address to fetch.
        """
        headers = {'User-agent': config.browser_user_agent}
        req = request.Request(url, headers=headers)
        try:
            result = request.urlopen(req).read()
        # Catch Exception instead of the original bare `except:` so that
        # KeyboardInterrupt/SystemExit still propagate to the caller.
        except Exception:
            return None
        return result
| StarcoderdataPython |
3355745 |
import operator_benchmark as op_bench
import torch
import torch.nn as nn
"""
Microbenchmarks for the hardsigmoid operator.
"""
# Configs for hardsigmoid ops
# Short (CI-friendly) configs: two small NCHW shapes on CPU.
hardsigmoid_configs_short = op_bench.config_list(
    attr_names=[
        'N', 'C', 'H', 'W'
    ],
    attrs=[
        [1, 3, 256, 256],
        [4, 3, 256, 256],
    ],
    cross_product_configs={
        'device': ['cpu'],
    },
    tags=['short']
)
# Long configs: cross product of larger batch/spatial sizes on CPU.
hardsigmoid_configs_long = op_bench.cross_product_configs(
    N=[8, 16],
    C=[3],
    H=[256, 512],
    W=[256, 512],
    device=['cpu'],
    tags=['long']
)
# Operators under benchmark; only nn.Hardsigmoid for now.
hardsigmoid_ops_list = op_bench.op_list(
    attr_names=['op_name', 'op_func'],
    attrs=[
        ['Hardsigmoid', nn.Hardsigmoid],
    ],
)
class HardsigmoidBenchmark(op_bench.TorchBenchmarkBase):
    """Microbenchmark case: one Hardsigmoid forward pass on an NCHW tensor."""

    def init(self, N, C, H, W, device, op_func):
        """Prepare a random (N, C, H, W) input and instantiate the operator."""
        sample_input = torch.rand(N, C, H, W, device=device)
        self.input_one = sample_input
        self.op_func = op_func()

    def forward(self):
        """Apply the operator to the prepared input and return the result."""
        return self.op_func(self.input_one)
# Register one PyTorch benchmark test per (op, config) combination.
op_bench.generate_pt_tests_from_op_list(hardsigmoid_ops_list,
                                        hardsigmoid_configs_short + hardsigmoid_configs_long,
                                        HardsigmoidBenchmark)


if __name__ == "__main__":
    op_bench.benchmark_runner.main()
| StarcoderdataPython |
1620705 | <gh_stars>10-100
# Author: <NAME> <<EMAIL>>
# License: MIT
import copy
from functools import reduce
import math
import numpy as np
import operator as op
import pandas as pd
from random import shuffle, seed
from wittgenstein.base import Cond, Rule, Ruleset
from wittgenstein.check import (
_warn,
_warn_only_single_class,
_check_model_features_present,
)
from wittgenstein.utils import rnd
##########################
##### BASE FUNCTIONS #####
##########################
def grow_rule(
    pos_df,
    neg_df,
    possible_conds,
    initial_rule=None,
    max_rule_conds=None,
    verbosity=0,
):
    """Grow a new Rule by greedily adding the condition with best FOIL gain.

    Parameters
    ----------
    pos_df, neg_df : DataFrame
        Positive / negative training examples.
    possible_conds : list<Cond>
        Candidate conditions to draw from.
    initial_rule : Rule, default=None
        Starting rule; a fresh empty Rule is used when None. (A None default
        replaces the original mutable default ``initial_rule=Rule()``, which
        was a single shared object evaluated at definition time.)
    max_rule_conds : int, default=None
        Maximum number of conditions the grown rule may have.
    verbosity : int (0-5), default=0
        Output verbosity.

    Returns
    -------
    Rule
        The grown rule; may be empty if no condition produced positive gain.
    """
    rule0 = copy.deepcopy(initial_rule) if initial_rule is not None else Rule()
    if verbosity >= 4:
        print(f"growing rule from initial rule: {rule0}")
    rule1 = copy.deepcopy(rule0)
    # Keep refining while negative examples remain covered, the last
    # refinement succeeded, and the user-set condition cap is not reached.
    while (len(rule0.covers(neg_df)) > 0 and rule1 is not None) and (
        max_rule_conds is None or len(rule1.conds) < max_rule_conds
    ):
        rule1 = best_successor(
            rule0, possible_conds, pos_df, neg_df, verbosity=verbosity
        )
        if rule1 is not None:
            rule0 = rule1
            if verbosity >= 4:
                print(f"negs remaining {len(rule0.covers(neg_df))}")
    if not rule0.isempty() and verbosity >= 2:
        print(f"grew rule: {rule0}")
    # The original returned rule0 on both the empty and non-empty paths;
    # a single return keeps that behavior.
    return rule0
def grow_rule_cn(
    cn, pos_idx, neg_idx, initial_rule=None, max_rule_conds=None, verbosity=0
):
    """Grow a new Rule to add to a ruleset. (Optimized, index-based version.)

    Parameters
    ----------
    cn : CondCount-style object
        Provides `rule_covers` / `conds` for fast coverage lookups.
    pos_idx, neg_idx : set
        Indices of positive / negative training examples.
    initial_rule : Rule, default=None
        Starting rule; a fresh empty Rule is used when None. (Replaces the
        original shared mutable default ``initial_rule=Rule()``.)
    max_rule_conds : int, default=None
        Maximum number of conditions the grown rule may have.
    verbosity : int (0-5), default=0
        Output verbosity.
    """
    rule0 = copy.deepcopy(initial_rule) if initial_rule is not None else Rule()
    rule1 = copy.deepcopy(rule0)
    if verbosity >= 4:
        print(f"growing rule from initial rule: {rule0}")
    num_neg_covered = len(cn.rule_covers(rule0, subset=neg_idx))
    while num_neg_covered > 0:  # Stop refining once no negatives remain
        user_halt = max_rule_conds is not None and len(rule1.conds) >= max_rule_conds
        if user_halt:
            break
        rule1 = best_rule_successor_cn(cn, rule0, pos_idx, neg_idx, verbosity=verbosity)
        if rule1 is None:
            break
        rule0 = rule1
        num_neg_covered = len(cn.rule_covers(rule0, neg_idx))
        if verbosity >= 4:
            print(f"negs remaining: {num_neg_covered}")
    if not rule0.isempty() and verbosity >= 2:
        print(f"grew rule: {rule0}")
    # The original returned rule0 on both paths; a single return keeps that.
    return rule0
def prune_rule(
    rule,
    prune_metric,
    pos_pruneset,
    neg_pruneset,
    eval_index_on_ruleset=None,
    verbosity=0,
):
    """Return a pruned version of the Rule by removing Conds.

    Conds are removed greedily from the tail; the shortest prefix with the
    best prune-metric value (ties favor shorter rules) is returned.

    rule : Rule
        Rule to prune.
    prune_metric : function
        Function that returns a value to maximize.
    pos_pruneset : DataFrame
        Positive class examples.
    neg_pruneset : DataFrame
        Negative class examples.
    eval_index_on_ruleset : tuple(rule_index, Ruleset), default=None
        Pass the rest of the Rule's Ruleset (excluding the Rule in question),
        in order to prune the rule based on the performance of its entire Ruleset,
        rather than on the rule alone. Used during optimization stage of RIPPER.
    verbosity : int (0-5), default=0
        Output verbosity.
    """
    # Nothing to prune in an empty rule.
    if rule.isempty():
        # warning_str = f"can't prune empty rule: {rule} over {len(pos_idx)} pos and {len(neg_idx)} neg"
        # _warn(warning_str, RuntimeWarning, filename='base_functions', funcname='prune_rule')
        return rule
    if not eval_index_on_ruleset:
        # Currently-best pruned rule and its prune value
        best_rule = copy.deepcopy(rule)
        best_v = 0
        # Iterative test rule
        current_rule = copy.deepcopy(rule)
        while current_rule.conds:
            v = prune_metric(current_rule, pos_pruneset, neg_pruneset)
            if verbosity >= 5:
                print(f"prune value of {current_rule}: {rnd(v)}")
            if v is None:
                return None
            # >= (not >) so shorter candidates win ties, i.e. maximal pruning.
            if v >= best_v:
                best_v = v
                best_rule = copy.deepcopy(current_rule)
            current_rule.conds.pop(-1)
        if verbosity >= 2:
            if len(best_rule.conds) != len(rule.conds):
                print(f"pruned rule: {best_rule}")
            else:
                print(f"pruned rule unchanged")
        return best_rule
    else:
        # Check if index matches rule to prune
        rule_index, ruleset = eval_index_on_ruleset
        if ruleset.rules[rule_index] != rule:
            raise ValueError(
                f"rule mismatch: {rule} - {ruleset.rules[rule_index]} in {ruleset}"
            )
        current_ruleset = copy.deepcopy(ruleset)
        current_rule = current_ruleset.rules[rule_index]
        best_ruleset = copy.deepcopy(current_ruleset)
        best_v = 0
        # Iteratively prune and test rule over ruleset.
        # This is unfortunately expensive.
        while current_rule.conds:
            # Metric is evaluated on the WHOLE ruleset with the partially
            # pruned rule spliced in, not on the rule alone.
            v = prune_metric(current_ruleset, pos_pruneset, neg_pruneset)
            if verbosity >= 5:
                print(f"prune value of {current_rule}: {rnd(v)}")
            if v is None:
                return None
            if v >= best_v:
                best_v = v
                best_rule = copy.deepcopy(current_rule)
                # NOTE(review): best_ruleset is tracked but never returned —
                # presumably a leftover; confirm before removing.
                best_ruleset = copy.deepcopy(current_ruleset)
            current_rule.conds.pop(-1)
            current_ruleset.rules[rule_index] = current_rule
        return best_rule
def prune_rule_cn(
    cn, rule, prune_metric_cn, pos_idx, neg_idx, eval_index_on_ruleset=None, verbosity=0
):
    """Return a pruned version of the Rule by removing Conds. (Optimized version.)

    Conds are removed greedily from the tail; the shortest prefix with the
    best prune-metric value (ties favor shorter rules) is returned.

    rule : Rule
        Rule to prune.
    prune_metric : function
        Function that returns a value to maximize.
    pos_pruneset : DataFrame
        Positive class examples.
    neg_pruneset : DataFrame
        Negative class examples.
    eval_index_on_ruleset : tuple(rule_index, Ruleset), default=None
        Pass the rest of the Rule's Ruleset (excluding the Rule in question),
        in order to prune the rule based on the performance of its entire Ruleset,
        rather than on the rule alone. Used during optimization stage of RIPPER.
    verbosity : int (0-5), default=0
        Output verbosity.
    """
    # Nothing to prune in an empty rule.
    if rule.isempty():
        # warning_str = f"can't prune empty rule: {rule}"
        # _warn(warning_str, RuntimeWarning, filename='base_functions', funcname='prune_rule_cn')
        return rule
    if not eval_index_on_ruleset:
        # Currently-best pruned rule and its prune value
        best_rule = copy.deepcopy(rule)
        best_v = 0
        # Iterative test rule
        current_rule = copy.deepcopy(rule)
        while current_rule.conds:
            v = prune_metric_cn(cn, current_rule, pos_idx, neg_idx)
            if verbosity >= 5:
                print(f"prune value of {current_rule}: {rnd(v)}")
            if v is None:
                return None
            # >= (not >) so shorter candidates win ties, i.e. maximal pruning.
            if v >= best_v:
                best_v = v
                best_rule = copy.deepcopy(current_rule)
            current_rule.conds.pop(-1)
        if verbosity >= 2:
            if len(best_rule.conds) != len(rule.conds):
                print(f"pruned rule: {best_rule}")
            else:
                print(f"pruned rule unchanged")
        return best_rule
    # cn is Untouched below here
    else:
        # Check if index matches rule to prune
        rule_index, ruleset = eval_index_on_ruleset
        if ruleset.rules[rule_index] != rule:
            raise ValueError(
                f"rule mismatch: {rule} - {ruleset.rules[rule_index]} in {ruleset}"
            )
        current_ruleset = copy.deepcopy(ruleset)
        current_rule = current_ruleset.rules[rule_index]
        best_ruleset = copy.deepcopy(current_ruleset)
        best_v = 0
        # Iteratively prune and test rule over ruleset.
        while current_rule.conds:
            # NOTE(review): unlike prune_rule, the metric here is evaluated on
            # current_rule rather than current_ruleset — confirm whether the
            # whole-ruleset evaluation was intended in this optimized path.
            v = prune_metric_cn(cn, current_rule, pos_idx, neg_idx)
            if verbosity >= 5:
                print(f"prune value of {current_rule}: {rnd(v)}")
            if v is None:
                return None
            if v >= best_v:
                best_v = v
                best_rule = copy.deepcopy(current_rule)
                # NOTE(review): best_ruleset is tracked but never returned.
                best_ruleset = copy.deepcopy(current_ruleset)
            current_rule.conds.pop(-1)
            current_ruleset.rules[rule_index] = current_rule
        return best_rule
def recalibrate_proba(
    ruleset, Xy_df, class_feat, pos_class, min_samples=10, require_min_samples=True
):
    """Recalibrate a Ruleset's probability estimations using unseen labeled data without changing the underlying model. May improve .predict_proba generalizability.
    Does not affect the underlying model or which predictions it makes -- only probability estimates. Use params min_samples and require_min_samples to select desired behavior.
    Note1: RunTimeWarning will occur as a reminder when min_samples and require_min_samples params might result in unintended effects.
    Note2: It is possible recalibrating could result in some positive .predict predictions with <0.5 .predict_proba positive probability.
    ruleset : Ruleset
        Ruleset to recalibrate.
    Xy_df : DataFrame
        Labeled dataset.
    class_feat : str
        Name of class feature column in Xy_df.
    pos_class : value, typically str or int
        Positive class value.
    min_samples : int, default=10
        Required minimum number of samples per Rule. Regardless of min_samples, at least one sample of the correct class is always required.
    require_min_samples : bool, default=True
        Halt (with warning) if any Rule lacks the minimum number of samples.
        Setting to False will warn, but still replace Rules probabilities even if the minimum number of samples is not met.
    """
    _check_model_features_present(Xy_df, ruleset.get_selected_features())
    # At least this many samples per rule (or neg) must be of correct class
    required_correct_samples = 1
    # If not using min_samples, set it to 1
    if not min_samples or min_samples < 1:
        min_samples = 1
    # Collect each Rule's pos and neg frequencies in list "rule_class_freqs"
    # Store rules that lack enough samples in list "insufficient_rules"
    df = Xy_df
    rule_class_freqs = [None] * len(ruleset.rules)
    insufficient_rules = []
    for i, rule in enumerate(ruleset.rules):
        npos_pred = num_pos(rule.covers(df), class_feat=class_feat, pos_class=pos_class)
        nneg_pred = num_neg(rule.covers(df), class_feat=class_feat, pos_class=pos_class)
        # Stored as (neg, pos) — index 0 is the NEGATIVE count.
        neg_pos_pred = (nneg_pred, npos_pred)
        rule_class_freqs[i] = neg_pos_pred
        # Rule has insufficient samples if fewer than minsamples or lacks at least one correct sample
        if (
            sum(neg_pos_pred) < min_samples
            or sum(neg_pos_pred) < 1
            # NOTE(review): index 0 is the negative count, yet a rule predicts
            # the positive class — confirm whether index 1 (positives) was the
            # intended "correct class" check here.
            or neg_pos_pred[0] < required_correct_samples
        ):
            insufficient_rules.append(rule)
    # Collect class frequencies for negative predictions
    uncovered = df.drop(ruleset.covers(df).index)
    neg_freq = num_neg(uncovered, class_feat=class_feat, pos_class=pos_class)
    tn_fn = (neg_freq, len(uncovered) - neg_freq)
    # Issue warnings if trouble with sample size
    if require_min_samples:
        if insufficient_rules:  # WARN if/which rules lack enough samples
            pretty_insufficient_rules = "\n".join([str(r) for r in insufficient_rules])
            warning_str = f"param min_samples={min_samples}; insufficient number of samples or fewer than {required_correct_samples} correct samples for rules {pretty_insufficient_rules}"
            _warn(
                warning_str,
                RuntimeWarning,
                filename="base_functions",
                funcname="recalibrate_proba",
            )
        if neg_freq < min_samples or tn_fn[1] < 1:  # WARN if neg lacks enough samples
            warning_str = f"param min_samples={min_samples}; insufficient number of negatively labled samples"
            _warn(
                warning_str,
                RuntimeWarning,
                filename="base_functions",
                funcname="recalibrate_proba",
            )
    if insufficient_rules or sum(tn_fn) < min_samples:
        if (
            require_min_samples
        ):  # WARN if require_min_samples -> halting recalibration
            warning_str = f"Recalibrating halted. to recalibrate, try using more samples, lowering min_samples, or set require_min_samples to False"
            _warn(
                warning_str,
                RuntimeWarning,
                filename="base_functions",
                funcname="recalibrate_proba",
            )
            return
        else:  # GO AHEAD EVEN THOUGH NOT ENOUGH SAMPLES
            pass
            # warning_str = f'Because require_min_samples=False, recalibrating probabilities for any rules with enough samples min_samples>={min_samples} that have at least {required_correct_samples} correct samples even though not all rules have enough samples. Probabilities for any rules that lack enough samples will be retained.'
            # _warn(warning_str, RuntimeWarning, filename='base_functions', funcname='recalibrate_proba')
    # Assign collected frequencies to Rules
    for rule, freqs in zip(ruleset.rules, rule_class_freqs):
        if sum(freqs) >= min_samples and freqs[0] >= required_correct_samples:
            rule.class_freqs = freqs
        else:
            # Rules below the sample threshold keep no (i.e. None) freqs.
            rule.class_freqs = None
    # Assign Ruleset's uncovered frequencies
    if not hasattr(ruleset, "uncovered_class_freqs") or (
        neg_freq >= min_samples and tn_fn[1] >= required_correct_samples
    ):
        ruleset.uncovered_class_freqs = tn_fn
    # Warn if no neg samples
    if (
        sum([freqs[0] for freqs in rule_class_freqs]) + ruleset.uncovered_class_freqs[0]
        == 0
    ):
        _warn_only_single_class(
            only_value=1,
            pos_class=1,
            filename="base_functions",
            funcname="recalibrate_proba",
        )
    # Warn if no pos samples
    elif (
        sum([freqs[1] for freqs in rule_class_freqs]) + ruleset.uncovered_class_freqs[1]
        == 0
    ):
        _warn_only_single_class(
            only_value=0,
            pos_class=1,
            filename="base_functions",
            funcname="recalibrate_proba",
        )
###################
##### METRICS #####
###################
def gain(before, after, pos_df, neg_df):
    """Return the FOIL information gain of refining `before` into `after`.

    before, after : Rule-like objects exposing ``num_covered(df)``.
    pos_df, neg_df : positive / negative example collections.
    """
    tp_before = before.num_covered(pos_df)  # true positives before the step
    tp_after = after.num_covered(pos_df)  # true positives after the step
    fn_before = before.num_covered(neg_df)  # negatives still covered before
    fn_after = after.num_covered(neg_df)  # negatives still covered after
    # Laplace-smoothed (+1) log-precision difference, weighted by tp_after.
    return tp_after * (
        math.log2((tp_after + 1) / (tp_after + fn_after + 1))
        - math.log2((tp_before + 1) / (tp_before + fn_before + 1))
    )
def gain_cn(cn, cond_step, rule_covers_pos_idx, rule_covers_neg_idx):
    """Return the FOIL information gain of appending `cond_step` to a rule.

    Optimized variant: coverage is computed over index sets via `cn`.
    """
    tp_before = len(rule_covers_pos_idx)  # tp
    fn_before = len(rule_covers_neg_idx)  # fn
    tp_after = len(cn.cond_covers(cond_step, subset=rule_covers_pos_idx))
    fn_after = len(cn.cond_covers(cond_step, subset=rule_covers_neg_idx))
    # Laplace-smoothed (+1) log-precision difference, weighted by tp_after.
    return tp_after * (
        math.log2((tp_after + 1) / (tp_after + fn_after + 1))
        - math.log2((tp_before + 1) / (tp_before + fn_before + 1))
    )
def precision(object, pos_df, neg_df):
    """Return the precision of `object`'s coverage, or None if it covers nothing.

    object : Cond, Rule, or Ruleset
    """
    n_pos_covered = len(object.covers(pos_df))
    n_neg_covered = len(object.covers(neg_df))
    n_covered = n_pos_covered + n_neg_covered
    if n_covered == 0:
        # Precision is undefined when nothing is covered.
        return None
    return n_pos_covered / n_covered
def rule_precision_cn(cn, rule, pos_idx, neg_idx):
    """Return the precision of `rule`'s coverage, or None if it covers nothing.

    Optimized variant: coverage is computed over index sets via `cn`.
    """
    n_pos_covered = len(cn.rule_covers(rule, pos_idx))
    n_neg_covered = len(cn.rule_covers(rule, neg_idx))
    n_covered = n_pos_covered + n_neg_covered
    if n_covered == 0:
        # Precision is undefined when nothing is covered.
        return None
    return n_pos_covered / n_covered
def score_accuracy(predictions, actuals):
    """Return the accuracy of `predictions` against `actuals`.

    predictions : iterable<bool>
        True for predicted positive class, False otherwise.
    actuals : iterable<bool>
        True for actual positive class, False otherwise.
    """
    n_correct = sum(1 for pred, actual in zip(predictions, actuals) if pred == actual)
    return n_correct / len(predictions)
def _accuracy(object, pos_pruneset, neg_pruneset):
    """Return the accuracy of `object`'s classification, or None on empty input.

    object : Cond, Rule, or Ruleset
    """
    n_pos = len(pos_pruneset)
    n_neg = len(neg_pruneset)
    total = n_pos + n_neg
    if total == 0:
        return None
    true_pos = len(object.covers(pos_pruneset))
    # Negatives NOT covered count as true negatives.
    true_neg = n_neg - len(object.covers(neg_pruneset))
    return (true_pos + true_neg) / total
def _rule_accuracy_cn(cn, rule, pos_pruneset_idx, neg_pruneset_idx):
    """Return the accuracy of `rule`'s classification, or None on empty input.

    Optimized variant: coverage is computed over index sets via `cn`.
    """
    n_pos = len(pos_pruneset_idx)
    n_neg = len(neg_pruneset_idx)
    total = n_pos + n_neg
    if total == 0:
        return None
    true_pos = len(cn.rule_covers(rule, pos_pruneset_idx))
    # Negatives NOT covered count as true negatives.
    true_neg = n_neg - len(cn.rule_covers(rule, neg_pruneset_idx))
    return (true_pos + true_neg) / total
def best_successor(rule, possible_conds, pos_df, neg_df, verbosity=0):
    """Return the successor Rule with the highest positive FOIL gain, or None.

    Returns None when no successor improves on a gain of 0; ties keep the
    earliest candidate.
    """
    top_gain = 0
    top_rule = None
    for candidate in rule.successors(possible_conds, pos_df, neg_df):
        candidate_gain = gain(rule, candidate, pos_df, neg_df)
        if candidate_gain > top_gain:
            top_gain = candidate_gain
            top_rule = candidate
    if verbosity >= 5:
        print(f"gain {rnd(top_gain)} {top_rule}")
    return top_rule
def best_rule_successor_cn(cn, rule, pos_idx, neg_idx, verbosity=0):
    """Return the best successor Rule by FOIL gain, or None if no positive gain.

    Optimized variant: candidate Conds come from `cn.conds`, and coverage
    is computed once for the base rule before scoring candidates.
    """
    top_cond = None
    top_gain = float("-inf")
    covered_pos = cn.rule_covers(rule, pos_idx)
    covered_neg = cn.rule_covers(rule, neg_idx)
    for candidate_cond in cn.conds:
        candidate_gain = gain_cn(cn, candidate_cond, covered_pos, covered_neg)
        if candidate_gain > top_gain:
            top_gain = candidate_gain
            top_cond = candidate_cond
    if verbosity >= 5:
        print(f"gain {rnd(top_gain)} {top_cond}")
    # Only extend the rule when some condition yields strictly positive gain.
    return Rule(rule.conds + [top_cond]) if top_gain > 0 else None
###################
##### HELPERS #####
###################
def pos_neg_split(df, class_feat, pos_class):
    """Split `df` into (positive-class rows, negative-class rows)."""
    is_positive = df[class_feat] == pos_class
    return df[is_positive], df[~is_positive]
def df_shuffled_split(df, split_size=0.66, random_state=None):
    """Shuffle `df` and split it into two DataFrames by row.

    split_size : float
        Proportion of rows placed in the first returned frame.
    random_state : float, default=None
        Random seed.

    Returns
    -------
    tuple<DataFrame>
        The two row-disjoint splits.
    """
    first_idx, second_idx = random_split(
        df.index, split_size, res_type=set, random_state=random_state
    )
    return df.loc[first_idx, :], df.loc[second_idx, :]
def set_shuffled_split(set_to_split, split_size, random_state=None):
    """Shuffle `set_to_split` and cut it into two disjoint sets.

    split_size : float
        Proportion of elements placed in the first returned set.
    random_state : float, default=None
        Random seed.
    """
    shuffled = list(set_to_split)
    seed(random_state)
    shuffle(shuffled)
    cut = int(len(shuffled) * split_size)
    return set(shuffled[:cut]), set(shuffled[cut:])
def random_split(to_split, split_size, res_type=set, random_state=None):
    """Shuffle `to_split` and cut it into two collections of type `res_type`.

    to_split : iterable
        What to shuffle and split.
    split_size : float
        Proportion of elements placed in the first returned collection.
    res_type : type
        Constructor for the returned collections.
    random_state : float, default=None
        Random seed.
    """
    items = list(to_split)
    seed(random_state)
    shuffle(items)
    cut = int(len(items) * split_size)
    return res_type(items[:cut]), res_type(items[cut:])
def pos(df, class_feat, pos_class):
    """Return the subset of rows labeled with the positive class."""
    return df.loc[df[class_feat] == pos_class]
def neg(df, class_feat, pos_class):
    """Return the subset of rows NOT labeled with the positive class."""
    return df.loc[df[class_feat] != pos_class]
def num_pos(df, class_feat, pos_class):
    """Return the number of rows labeled with the positive class."""
    positives = df[df[class_feat] == pos_class]
    return len(positives)
def num_neg(df, class_feat, pos_class):
    """Return the number of rows NOT labeled with the positive class."""
    negatives = df[df[class_feat] != pos_class]
    return len(negatives)
def nCr(n, r):
    """Return the number of combinations C(n, r)."""
    # C(n, r) = n*(n-1)*...*(n-r+1) / r!
    numerator = 1
    for value in range(n, n - r, -1):
        numerator *= value
    denominator = 1
    for value in range(1, r + 1):
        denominator *= value
    return numerator // denominator
def argmin(iterable):
    """Return the index of the minimum value (first occurrence on ties)."""
    best_index, best_value = 0, iterable[0]
    for index, value in enumerate(iterable):
        # Strict < keeps the earliest occurrence when values tie.
        if value < best_value:
            best_index, best_value = index, value
    return best_index
def i_replaced(list_, i, value):
    """Return a new list with element `i` replaced by `value`.

    i : value
        Index to replace with value.
    value : value
        Replacement; passing None returns the list with element i removed.
    """
    if value is None:
        return list_[:i] + list_[i + 1 :]
    return list_[:i] + [value] + list_[i + 1 :]
def rm_covered(object, pos_df, neg_df):
    """Return the pos and neg DataFrames with rows covered by `object` removed.

    Parameters
    ----------
    object : Cond, Rule, or Ruleset
        Object whose coverage predictions to invoke.
    pos_df : DataFrame
        Positive examples.
    neg_df : DataFrame
        Negative examples.

    Return
    ------
    tuple<DataFrame>
        Positive and negative examples not covered by object.
    """
    uncovered_pos = pos_df.drop(object.covers(pos_df).index, axis=0, inplace=False)
    uncovered_neg = neg_df.drop(object.covers(neg_df).index, axis=0, inplace=False)
    return (uncovered_pos, uncovered_neg)
def rm_rule_covers_cn(cn, rule, pos_idx, neg_idx):
    """Return the (pos, neg) index sets minus the indices covered by `rule`."""
    covered_pos = cn.rule_covers(rule, pos_idx)
    covered_neg = cn.rule_covers(rule, neg_idx)
    return (pos_idx - covered_pos, neg_idx - covered_neg)
def truncstr(iterable, limit=5, direction="left"):
    """Return a string form of `iterable` truncated to `limit` items.

    limit: how many items to include before eliding with "...".
    direction: which end to keep (valid options: 'left', 'right').
    """
    if len(iterable) <= limit:
        # Short enough: no truncation needed.
        return str(iterable)
    if direction == "left":
        return iterable[:limit].__str__() + "..."
    if direction == "right":
        return "..." + iterable[-limit:].__str__()
    raise ValueError('direction param must be "left" or "right"')
def stop_early(ruleset, max_rules, max_total_conds):
    """Decide whether training should halt (rule cap or total-conds cap hit)."""
    if max_rules is not None and len(ruleset.rules) >= max_rules:
        return True
    # Preserve short-circuiting: count_conds() is only called when needed.
    return max_total_conds is not None and ruleset.count_conds() >= max_total_conds
| StarcoderdataPython |
3331070 | <gh_stars>1-10
#!/usr/bin/env python2
"""Program to investigate the transition distribution $p(z_{n+1} \mid z_n)$
for the available map trajectories.
Usage: <program name> <trajectory> [<trajectory> [...]]"""
from bz2 import BZ2File
from collections import defaultdict
from sys import argv
from matplotlib import pyplot as plt
import numpy as np
from observation import parse_map_trajectory
if __name__ == '__main__':
    assert len(argv) > 1, "Need at least one input file"
    # key -> list of (value_at_t, value_at_t+1) pairs, pooled across files.
    pairs = defaultdict(lambda: [])
    for filename in argv[1:]:
        if filename.endswith('.bz2'):
            trajectory_fp = BZ2File(filename)
        else:
            trajectory_fp = open(filename, 'rb')
        try:
            traj = list(parse_map_trajectory(trajectory_fp))
        finally:
            trajectory_fp.close()
        # Pair each observation with its successor to form transitions.
        for past, present in zip(traj, traj[1:]):
            # .iterkeys() is Python 2 only (matches the python2 shebang).
            for key in past.data.iterkeys():
                pairs[key].append((past[key], present[key]))
    # Plot 1: forward velocity vs. its one-step change, as a 2D histogram.
    vf_pairs = np.array(pairs['vf'])
    vf_now = vf_pairs[:, 0]
    vf_diff = vf_pairs[:, 1] - vf_now
    hist, x_edges, y_edges = np.histogram2d(vf_now, vf_diff, bins=50)
    pc_x, pc_y = np.meshgrid(x_edges, y_edges)
    # Normalise columns
    # sums = np.sum(hist, axis=0)
    # sums[sums == 0] = 1
    # hist /= sums
    plt.pcolormesh(pc_x, pc_y, hist)
    plt.xlabel('$v_f$ (m/s)')
    plt.ylabel("$v_f' - v_f$ (m/s)")
    plt.xlim(x_edges[0], x_edges[-1])
    plt.ylim(y_edges[0], y_edges[-1])
    plt.title("Transition frequencies for forward velocity ($v_f$)")
    plt.show()
    # Now do a plot of ve and vn
    ve_pairs = np.array(pairs['ve'])
    ve_deltas = ve_pairs[:, 1] - ve_pairs[:, 0]
    vn_pairs = np.array(pairs['vn'])
    vn_deltas = vn_pairs[:, 1] - vn_pairs[:, 0]
    # Column 0 = east-velocity deltas, column 1 = north-velocity deltas.
    full_data = np.zeros((vn_deltas.size, 2))
    full_data[:, 0] = ve_deltas
    full_data[:, 1] = vn_deltas
    # Divide by magnitude of original data (gets percentage increase in a
    # particular direction)
    # det = 1.0 / np.linalg.norm(
    #     np.vstack(
    #         (ve_pairs[:, 0], vn_pairs[:, 1])
    #     ), axis=0
    # ).reshape((-1, 1))
    # full_data = det * full_data
    print("Covariance of data:")
    print(np.cov(full_data.T))
    print("Mean of data:")
    print(np.mean(full_data, axis=0))
    hist, x_edges, y_edges = np.histogram2d(ve_deltas, vn_deltas, bins=100)
    pc_x, pc_y = np.meshgrid(x_edges, y_edges)
    plt.pcolormesh(pc_x, pc_y, hist)
    plt.xlabel("$v_e' - v_e$ (m/s)")
    plt.ylabel("$v_n' - v_n$ (m/s)")
    plt.xlim(x_edges[0], x_edges[-1])
    plt.ylim(y_edges[0], y_edges[-1])
    plt.title("Relative frequencies for changes in velocity")
    plt.show()
| StarcoderdataPython |
1618556 | <reponame>carlos357890/My-projects---Python
def jogar():
    """Run an interactive game of hangman (Jogo da Forca) on the console.

    Reads guesses from stdin; ends when the word is complete or after
    6 wrong guesses.
    """
    print("_"*30)
    print("*"*5+"JOGO DA FORCA"+"*"*5)
    print("_"*30)
    palavra_secreta = "banana"
    # Derive the blanks from the secret word instead of hard-coding six
    # slots (the original broke silently if the word length changed).
    letras_acertadas = ['_' for _ in palavra_secreta]
    enforcou = False
    acertou = False
    erros = 0
    print(letras_acertadas)
    while (not enforcou and not acertou):
        chute = str(input("Qual a letra? "))
        # Compare case-insensitively here as well: the original membership
        # test was case-sensitive while the per-letter match below was not,
        # so an upper-case guess always counted as a miss.
        if chute.upper() in palavra_secreta.upper():
            posicao = 0
            for letra in palavra_secreta:
                if (chute.upper() == letra.upper()):
                    letras_acertadas[posicao] = letra
                posicao = posicao + 1
        else:
            erros = erros + 1
        enforcou = erros == 6
        acertou = '_' not in letras_acertadas
        print(letras_acertadas)
| StarcoderdataPython |
1785512 | # coding:utf-8
from functools import wraps
def login_required(func):
    """Decorator that will enforce login before calling *func*.

    Uses functools.wraps so the wrapped function keeps its __name__ and
    __doc__ (which is what the demo in __main__ checks).
    """
    @wraps(func)
    def wrapper(*args, **kwargs):
        """Inner wrapper: would check the session here, then delegate."""
        # TODO: add the actual authentication check before delegating.
        # The original wrapper never called func, turning every decorated
        # function into a no-op.
        return func(*args, **kwargs)
    return wrapper
@login_required
def logout():
    """Log the current user out (placeholder implementation)."""
    pass


if __name__ == '__main__':
    # functools.wraps copies the wrapped function's metadata, so these print
    # logout's own name and docstring rather than the inner wrapper's.
    print(logout.__name__)  # -> logout (not "wrapper", thanks to @wraps)
    print(logout.__doc__)
3386374 | from .server import AGIServer
| StarcoderdataPython |
12471 | """Provides the MENU html string which is appended to all templates
Please note that the MENU only works in [Fast](https://www.fast.design/) based templates.
If you need some sort of custom MENU html string feel free to customize this code.
"""
from awesome_panel_extensions.frameworks.fast.fast_menu import to_menu
from src.shared import config
# Build the site-wide MENU html string once at import time, using the
# configured accent color and expanding the "Main" section; newlines are
# stripped so the string can be embedded in templates. Falls back to an
# empty string when no applications are configured.
if config.applications:
    MENU = to_menu(
        config.applications.values(), accent_color=config.color_primary, expand=["Main"]
    ).replace("\n", "")
else:
    MENU = ""
| StarcoderdataPython |
1635635 | <reponame>gaetanmargueritte/ccg2esn
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
from grammar_manipulation import role_for_words, cat, union, maybe, sentence_to_roles
from predicate_manipulation import WordPredicate, NO_ROLE, ACTION, OBJECT, COLOR
from collections import defaultdict
import numpy as np
import tqdm
def create_dataset(high_difficulty = True, nb_objects = 3, english=True): #if english = False -> japanese
    """Reset the vocabulary and register objects/colors/positions, then build the grammar.

    NOTE(review): `high_difficulty` is currently unused in this body — confirm
    whether it is consumed elsewhere before removing.
    """
    if(english) :
        # object_names = ['cup', 'orange', 'bowl', 'apple', 'spoon']
        #object_names = ['cup', 'bowl', 'apple', 'spoon'] # remove orange to avoid polysemous words for now
        # color_names = ['red', 'orange', 'green', 'blue']
        #color_names = ['red', 'orange', 'yellow', 'green', 'blue', 'magenta']
        # NOTE(review): this position_names assignment is dead — it is
        # overwritten by the second assignment below.
        position_names = ['left', 'middle', 'right']
        object_names = ['glass', 'cup', 'bowl', 'orange','spoon',
                        'apple', 'accordion', 'acoustic','bagpipes',
                        'banjo','bass','bongo','bugle',
                        'cello','clarinet','cymbals','drums',
                        'electric','guitar','flute','horn',
                        'harmonica','harp','keyboard','maracas',
                        'organ','pan','piano','recorder',
                        'saxophone','sitar','tambourine','triangle',
                        'trombone','trumpet','tuba','ukulele',
                        'violin','xylophone','bassoon','castanets',
                        'didgeridoo','double','gong','harpsichord',
                        'lute','mandolin','oboe','piccolo','viola']
        # Keep only the first nb_objects words.
        object_names = object_names[:nb_objects]
        color_names = ['red', 'orange', 'green', 'blue']
        position_names = ['left', 'right', 'middle']
    else : #japanese
        object_names = ['グラス', 'カップ', 'ボウル', 'オレンジ'] #['glass', 'cup', 'bowl', 'orange']
        object_names = object_names[:nb_objects]
        color_names = ['赤', 'オレンジ', '緑', '青'] #['red', 'orange', 'green', 'blue']
        position_names = ['左', '右', '真ん中'] #['left', 'right', 'middle']
    # Reset registries, register every word without rebuilding, then build once.
    clean_dataset()
    for obj in object_names:
        add_object(obj, build_after = False)
    for col in color_names:
        add_color(col, build_after = False)
    for pos in position_names:
        add_position(pos, build_after = False)
    # add_position('center', '<middle_pos>', build_after = False)
    build_all(english)
def clean_dataset():
    """Reset every module-level vocabulary/concept registry to empty."""
    global OBJECT_NAMES, COLOR_NAMES, POSITION_NAMES
    global CATEGORIES, POSITIONS, COLORS
    global OBJ_NAME_TO_CONCEPT, COLOR_NAME_TO_CONCEPT, POSITION_NAME_TO_CONCEPT
    OBJECT_NAMES, COLOR_NAMES, POSITION_NAMES = [], [], []
    CATEGORIES, POSITIONS, COLORS = [], [], []
    OBJ_NAME_TO_CONCEPT = {}
    COLOR_NAME_TO_CONCEPT = {}
    POSITION_NAME_TO_CONCEPT = {}
def build_all(english):
    """Rebuild the module-level grammar structures from the current vocabulary.

    english : bool
        True builds the English grammar, False the Japanese one.
    """
    global SENTENCE_TO_ROLES
    global SENTENCE_TO_PREDICATE
    global CONCEPT_LISTS
    global VISION_ENCODER
    # Grouped concept tokens, in (category, position, color) order.
    CONCEPT_LISTS = [
        CATEGORIES,
        POSITIONS,
        COLORS
    ]
    SENTENCE_TO_ROLES = create_grammar(english)
    #SENTENCE_TO_PREDICATE = {s : WordPredicate(s, r) for s, r in SENTENCE_TO_ROLES.items()}
    #print(SENTENCE_TO_ROLES)
def create_grammar(english):
    """Build the sentence->roles grammar for the current vocabulary.

    english : bool
        True for the English grammar, False for the Japanese one.
    The inline numeric comments estimate how many sentences each pattern
    generates for the original 5-object/6-color/3-position vocabulary.
    """
    if(english):
        is_action='is'
        is_norole='is'
        on_the='on the'
        this_is='this is'
        that_is='that is'
        there_is='there is'
        a = 'a'
        the='the'
        IS_ACTION = role_for_words(ACTION, [is_action])
        THIS_IS = union(
            sentence_to_roles(this_is, [ACTION, NO_ROLE]),
            sentence_to_roles(that_is, [ACTION, NO_ROLE])
        ) # 2
        DET = role_for_words(NO_ROLE, [a, the]) # 2
        TO_THE = union(
            sentence_to_roles(on_the, [NO_ROLE, NO_ROLE])
        )
    else : #japanese
        is_norole='は'
        on_the='側'
        this_is='これ は'
        there_is='が ある'
        THIS_IS = union(
            sentence_to_roles(this_is, [ACTION, NO_ROLE])
        ) # 2
        TO_THE = role_for_words(NO_ROLE, ['側'])
    # Shared building blocks (vocabulary-driven).
    OBJ = role_for_words(OBJECT, OBJECT_NAMES)
    COL = role_for_words(COLOR, COLOR_NAMES)
    POSITIONS = role_for_words(ACTION, POSITION_NAMES)
    IS_NOROLE = role_for_words(NO_ROLE, [is_norole])
    THERE_IS = sentence_to_roles(there_is, [NO_ROLE, NO_ROLE])
    # Noun group: optional color + object (with a determiner in English).
    GN = cat(maybe(COL), OBJ) if not english else cat(DET, maybe(COL), OBJ) # 70 -> 10 none col
    # Positional phrase; word order differs between the two languages.
    TO_THE_POSITION = cat(TO_THE, POSITIONS) if english else cat(POSITIONS, TO_THE) # 3
    if english :
        return union(
            cat(THIS_IS, GN), # 140 -> 20 none both + 120 none pos
            cat(DET, OBJ, IS_ACTION, COL), # 2x5x6 = 60 -> 60 none pos
            cat(DET, OBJ, TO_THE_POSITION, IS_NOROLE, COL), # 2x5x3x6 = 180
            cat(GN, IS_NOROLE, TO_THE_POSITION), # 70x3=210 -> 30 none col
            cat(THERE_IS, GN, TO_THE_POSITION), # 210 -> 30 none col
            cat(TO_THE_POSITION, union(IS_NOROLE, THERE_IS), GN) # 420 -> 60 none col
        )
        # Different none col predicates = 5obj x 3pos
        # Different none pos predicates = 5obj x 6col x 3(this_is, that_is, obj_is_col)
        # Different none both predicates = 5obj x 2(this_is, that_is)
    else :
        return union(
            cat(THIS_IS, GN), # 140 -> 20 none both + 120 none pos
            cat(POSITIONS, OBJ, IS_NOROLE, COL), # 2x5x3x6 = 180
            cat(TO_THE_POSITION, GN, THERE_IS) # 420 -> 60 none col
        )
def add_object(name, concept=None, build_after=True, english=True):
    """Register an object word and its concept token in the vocabulary.

    name : str
        Surface word for the object (e.g. 'cup').
    concept : str, default=None
        Concept token; '<name_obj>' is derived when None.
    build_after : bool, default=True
        Rebuild the grammar after registering.
    english : bool, default=True
        Language flag forwarded to build_all(). (The original called
        build_all() with no argument, which raised a TypeError since
        build_all requires the `english` flag.)
    """
    if concept is None:
        concept = '<' + name.lower() + '_obj>'
    if name not in OBJECT_NAMES:
        OBJECT_NAMES.append(name)
    if concept not in CATEGORIES:
        CATEGORIES.append(concept)
    OBJ_NAME_TO_CONCEPT[name] = concept
    if build_after:
        build_all(english)
def add_position(name, concept=None, build_after=True, english=True):
    """Register a position word and its concept token in the vocabulary.

    name : str
        Surface word for the position (e.g. 'left').
    concept : str, default=None
        Concept token; '<name_pos>' is derived when None.
    build_after : bool, default=True
        Rebuild the grammar after registering.
    english : bool, default=True
        Language flag forwarded to build_all(). (The original called
        build_all() with no argument, which raised a TypeError since
        build_all requires the `english` flag.)
    """
    if concept is None:
        concept = '<' + name.lower() + '_pos>'
    if name not in POSITION_NAMES:
        POSITION_NAMES.append(name)
    if concept not in POSITIONS:
        POSITIONS.append(concept)
    POSITION_NAME_TO_CONCEPT[name] = concept
    if build_after:
        build_all(english)
def add_color(name, concept=None, build_after=True, english=True):
    """Register a color word and its concept token in the vocabulary.

    name : str
        Surface word for the color (e.g. 'red').
    concept : str, default=None
        Concept token; '<name_col>' is derived when None.
    build_after : bool, default=True
        Rebuild the grammar after registering.
    english : bool, default=True
        Language flag forwarded to build_all(). (The original called
        build_all() with no argument, which raised a TypeError since
        build_all requires the `english` flag.)
    """
    if concept is None:
        concept = '<' + name.lower() + '_col>'
    if name not in COLOR_NAMES:
        COLOR_NAMES.append(name)
    if concept not in COLORS:
        COLORS.append(concept)
    COLOR_NAME_TO_CONCEPT[name] = concept
    if build_after:
        build_all(english)
def possible_complete_predicates():
    """Return every 'position(object,color)' predicate string for the vocabulary."""
    return {
        pos + "(" + obj + "," + col + ")"
        for pos in POSITION_NAMES
        for obj in OBJECT_NAMES
        for col in COLOR_NAMES
    }
def decompose_predicate_components(predicate):
    """Split a predicate string 'pos(obj,col)' into (category, position, color).

    Returns (None, None, None) for the 'None' predicate; an unrecognized
    color is mapped to None.
    """
    # Normalize: stringify, lowercase, strip all spaces.
    text = str(predicate).lower().replace(' ', '')
    if text == 'none':
        return None, None, None
    open_paren = text.find('(')
    position = text[:open_paren]
    comma = text.find(',')
    if comma != -1:
        color = text[comma + 1 : text.find(')')]
        if color not in COLOR_NAMES:
            color = None
        category = text[open_paren + 1 : comma]
    else:
        color = None
        category = text[open_paren + 1 : text.find(')')]
    return category, position, color
def decompose_predicate(predicate):
    """Parse a predicate string into (category, position, color, is_complete).

    The position is validated against POSITION_NAMES; is_complete is True
    only when a known position AND a color component are present.
    Returns (None, None, None, False) for the explicit 'none' predicate.
    """
    # Transform predicate to string and strip all spaces
    pred = str(predicate).lower().replace(' ','')
    if pred == 'none':
        return None, None, None, False
    # Parse the string to category, color and prediction
    # NOTE: this local deliberately shadows the module-level function of the
    # same name within this scope.
    is_complete_predicate = True
    position = pred[:pred.find('(')]
    if position not in POSITION_NAMES:
        position = None
        is_complete_predicate = False
    if pred.find(',') != -1:
        color = pred[pred.find(',')+1:pred.find(')')]
        if color not in COLOR_NAMES:
            # NOTE(review): an unknown color is dropped but the completeness
            # flag stays True — confirm whether that is intended.
            color = None
        category = pred[pred.find('(')+1:pred.find(',')]
    else:
        color = None
        category = pred[pred.find('(')+1:pred.find(')')]
        is_complete_predicate = False
    return category, position, color, is_complete_predicate
def is_complete_predicate(predicate):
    """Return True when *predicate* parses as a full pos(obj,col) triple."""
    return decompose_predicate(predicate)[3]
def exact_same_predicates(predicate1, predicate2):
    """True when both predicates decompose to the same (cat, pos, col)."""
    # Compare the first three components; the completeness flag is ignored.
    return decompose_predicate(predicate1)[:3] == decompose_predicate(predicate2)[:3]
def similar_color_or_position(att1, att2, ATT_NAMES):
    """Attributes are similar when unknown, or at most one index apart.

    Args:
        att1, att2: attribute words to compare.
        ATT_NAMES: ordered list of known attribute words.

    Returns:
        True when either attribute is not in ATT_NAMES, or when their
        positions in the ordered list differ by at most 1.
    """
    try:
        index1 = ATT_NAMES.index(att1)
        index2 = ATT_NAMES.index(att2)
    except ValueError:
        # Unknown attributes are conservatively treated as similar.
        return True
    return abs(index1 - index2) <= 1
def possible_same_meaning_predicates(predicate1, predicate2):
    """True when the two predicates could describe the same object.

    Categories must match exactly; a position or color mismatch only counts
    when both predicates actually specify that attribute.
    """
    cat1, pos1, col1, _ = decompose_predicate(predicate1)
    cat2, pos2, col2, _ = decompose_predicate(predicate2)
    if cat1 != cat2:
        return False
    if pos1 is not None and pos2 is not None and pos1 != pos2:
        return False
    if col1 is not None and col2 is not None and col1 != col2:
        return False
    return True
def possible_close_meaning_predicates(predicate1, predicate2):
    """True when the two predicates could refer to nearby configurations.

    Same category is required; positions/colors only need to be adjacent
    (index distance <= 1 in their ordered name lists) when both are present.
    """
    cat1, pos1, col1, pred1_is_complete = decompose_predicate(predicate1)
    cat2, pos2, col2, pred2_is_complete = decompose_predicate(predicate2)
    # Version most strict (need same color/position) and same cat
    is_similar = (cat1 == cat2)
    if (pos1 is not None and pos2 is not None):
        is_similar &= similar_color_or_position(pos1, pos2, POSITION_NAMES)
    if (col1 is not None and col2 is not None):
        is_similar &= similar_color_or_position(col1, col2, COLOR_NAMES)
    return is_similar
def all_possible_same_meaning_predicates(list_predicates):
    """Map each predicate to every known sentence it could describe.

    Args:
        list_predicates: iterable of predicate strings to match.

    Returns:
        (mapping, n): mapping is {predicate: [sentences]}, n is the number
        of distinct sentences matched by at least one predicate.
    """
    dict_predicate_sentences_mapping = defaultdict(list)
    one_object_sentences = set()
    for predicate in tqdm.tqdm(list_predicates):  # progress bar over predicates
        for sentence, known_predicate in SENTENCE_TO_PREDICATE.items():
            if possible_same_meaning_predicates(predicate, known_predicate):
                dict_predicate_sentences_mapping[predicate].append(sentence)
                one_object_sentences.add(sentence)
    return dict_predicate_sentences_mapping, len(one_object_sentences)
def random_sentence_from_complete_predicate(concept):
    """Pick a random known sentence whose predicate could mean *concept*.

    Returns:
        (sentence, predicate) drawn uniformly from the matching pairs.
        NOTE(review): fails (np.random.choice on an empty range) when no
        known sentence matches — callers presumably guarantee a match.
    """
    sentences_and_predicates = []
    for sentence, predicate in SENTENCE_TO_PREDICATE.items():
        if possible_same_meaning_predicates(concept, predicate):
            sentences_and_predicates.append([sentence, predicate])
    sentence, predicate = sentences_and_predicates[np.random.choice(len(sentences_and_predicates))]
    return sentence, predicate
# ---- Global grammar state (populated by add_object/add_position/add_color) ----

# Surface words.
OBJECT_NAMES = []
COLOR_NAMES = []
POSITION_NAMES = []
# Grammar concepts/tokens derived from the words above.
CATEGORIES = []
POSITIONS = []
COLORS = []
# Word -> concept mappings.
CONCEPT_LISTS = list()
OBJ_NAME_TO_CONCEPT = dict()
COLOR_NAME_TO_CONCEPT = dict()
POSITION_NAME_TO_CONCEPT = dict()
# Vision one-hot encoder (None until initialised elsewhere).
VISION_ENCODER = None
# Sentence -> predicate / roles mappings.
SENTENCE_TO_PREDICATE = dict()
SENTENCE_TO_ROLES = None
# Max nb of objects seen in an image
LIMIT_NB_OBJECTS_IN_ONE_IMAGE = 2
# Manual smoke-test calls, kept commented out for reference:
#create_dataset()
#print(CONCEPT_LISTS)
#print(f'obj_name_dict {OBJ_NAME_TO_CONCEPT}')
#print(f'col_name_dict {COLOR_NAME_TO_CONCEPT}')
3206826 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Sets up repositories for use by rules_webtesting at version 0.3.3."""
load("//web:web.bzl", "platform_archive")
def browser_repositories(firefox = False, chromium = False, sauce = False):
    """Sets up repositories for browsers defined in //browsers/....

    Args:
        firefox: Configure repositories for //browsers:firefox-native.
        chromium: Configure repositories for //browsers:chromium-native.
        sauce: Configure repositories for //browser/sauce:chrome-win10-connect.
    """
    requested = [
        (chromium, [org_chromium_chromedriver, org_chromium_chromium]),
        (firefox, [org_mozilla_firefox, org_mozilla_geckodriver]),
        (sauce, [com_saucelabs_sauce_connect]),
    ]
    for enabled, declare_fns in requested:
        if enabled:
            for declare in declare_fns:
                declare()
def com_saucelabs_sauce_connect():
    """Declares the per-platform Sauce Connect 4.5.4 proxy binaries."""
    licenses = ["by_exception_only"]  # SauceLabs EULA

    # (repo suffix, sha256, archive file name, binary path inside the archive)
    platforms = [
        (
            "linux_x64",
            "6eb18a5a3f77b190fa0bb48bcda4694d26731703ac3ee56499f72f820fe10ef1",
            "sc-4.5.4-linux.tar.gz",
            "sc-4.5.4-linux/bin/sc",
        ),
        (
            "macos_x64",
            "7dd691a46a57c7c39f527688abd4825531d25a8a1c5b074f684783e397529ba6",
            "sc-4.5.4-osx.zip",
            "sc-4.5.4-osx/bin/sc",
        ),
        (
            "windows_x64",
            "4b2baaeb32624aa4e60ea4a2ca51f7c5656d476ba29f650a5dabb0faaf6cb793",
            "sc-4.5.4-win32.zip",
            "sc-4.5.4-win32/bin/sc.exe",
        ),
    ]
    for suffix, sha256, archive, binary in platforms:
        platform_archive(
            name = "com_saucelabs_sauce_connect_" + suffix,
            licenses = licenses,
            sha256 = sha256,
            urls = ["https://saucelabs.com/downloads/" + archive],
            named_files = {"SAUCE_CONNECT": binary},
        )
# To update Chromium, do the following:
# Step 1: Go to https://omahaproxy.appspot.com/
# Step 2: Look for branch_base_position of current stable releases
# Step 3: Go to https://commondatastorage.googleapis.com/chromium-browser-snapshots/index.html?prefix=Linux_x64/ etc to verify presence of that branch release for that platform.
# If no results, delete the last digit to broaden your search til you find a result.
# Step 4: Verify both Chromium and ChromeDriver are released at that version.
# Step 5: Update the URL to the new release.
def org_chromium_chromedriver():
    """Declares per-platform repositories for ChromeDriver (snapshot 902390)."""

    # BSD 3-clause, ICU, MPL 1.1, libpng (BSD/MIT-like),
    # Academic Free License v. 2.0, BSD 2-clause, MIT
    licenses = ["reciprocal"]

    # (repo suffix, sha256, snapshot URL, mirror URL, driver path in archive)
    platforms = [
        (
            "linux_x64",
            "1d2e73a19632031f5de876916e12b497d5b0e3dc83d1ce2fbe8665061adfd114",
            "https://storage.googleapis.com/chromium-browser-snapshots/Linux_x64/902390/chromedriver_linux64.zip",
            "https://storage.googleapis.com/dev-infra-mirror/chromium/902390/chromedriver_linux64.zip",
            "chromedriver_linux64/chromedriver",
        ),
        (
            "macos_x64",
            "36cc50c5194767b043913534f6ec16a7d7a85636b319729a67ffff486b30a5f6",
            "https://storage.googleapis.com/chromium-browser-snapshots/Mac/902390/chromedriver_mac64.zip",
            "https://storage.googleapis.com/dev-infra-mirror/chromium/902390/chromedriver_mac_x64.zip",
            "chromedriver_mac64/chromedriver",
        ),
        (
            "macos_arm64",
            "1f100aacf4bab4b3ac4218ecf654b17d66f2e07dd455f887bb3d9aa8d21862e1",
            "https://storage.googleapis.com/chromium-browser-snapshots/Mac_Arm/902390/chromedriver_mac64.zip",
            "https://storage.googleapis.com/dev-infra-mirror/chromium/902390/chromedriver_mac_arm64.zip",
            "chromedriver_mac64/chromedriver",
        ),
        (
            "windows_x64",
            "48392698f2ba338a0b9192f7c2154058a0b0b926aef0a5ef22aa6706b2bbc7b6",
            "https://storage.googleapis.com/chromium-browser-snapshots/Win/902390/chromedriver_win32.zip",
            "https://storage.googleapis.com/dev-infra-mirror/chromium/902390/chromedriver_win32.zip",
            "chromedriver_win32/chromedriver.exe",
        ),
    ]
    for suffix, sha256, snapshot_url, mirror_url, driver_path in platforms:
        platform_archive(
            name = "org_chromium_chromedriver_" + suffix,
            licenses = licenses,
            sha256 = sha256,
            urls = [snapshot_url, mirror_url],
            named_files = {"CHROMEDRIVER": driver_path},
        )
def org_chromium_chromium():
    """Declares per-platform repositories for Chromium 94.0.4578.0 (snapshot 902390)."""
    licenses = ["notice"]  # BSD 3-clause (maybe more?)

    # (repo suffix, sha256, snapshot URL, mirror URL, browser path in archive)
    platforms = [
        (
            "linux_x64",
            "673ee08b4cfaff128ef0b4f7517acb6b6b25c9315fc6494ec328ab38aaf952d1",
            "https://storage.googleapis.com/chromium-browser-snapshots/Linux_x64/902390/chrome-linux.zip",
            "https://storage.googleapis.com/dev-infra-mirror/chromium/902390/chrome-linux.zip",
            "chrome-linux/chrome",
        ),
        (
            "macos_x64",
            "75f6bd26744368cd0fcbbec035766dea82e34def60e938fb48630be6799d46c7",
            "https://storage.googleapis.com/chromium-browser-snapshots/Mac/902390/chrome-mac.zip",
            "https://storage.googleapis.com/dev-infra-mirror/chromium/902390/chrome-mac_x64.zip",
            "chrome-mac/Chromium.app/Contents/MacOS/Chromium",
        ),
        (
            "macos_arm64",
            "4845ce895d030aeb8bfd877a599f1f07d8c7a77d1e08513e80e60bb0093fca24",
            "https://storage.googleapis.com/chromium-browser-snapshots/Mac_Arm/902390/chrome-mac.zip",
            "https://storage.googleapis.com/dev-infra-mirror/chromium/902390/chrome-mac_arm64.zip",
            "chrome-mac/Chromium.app/Contents/MacOS/Chromium",
        ),
        (
            "windows_x64",
            "8919cd2f8a4676af4acc50d022b6a946a5b21a5fec4e078b0ebb0c8e18f1ce90",
            "https://storage.googleapis.com/chromium-browser-snapshots/Win/902390/chrome-win.zip",
            "https://storage.googleapis.com/dev-infra-mirror/chromium/902390/chrome-win.zip",
            "chrome-win/chrome.exe",
        ),
    ]
    for suffix, sha256, snapshot_url, mirror_url, browser_path in platforms:
        platform_archive(
            name = "org_chromium_chromium_" + suffix,
            licenses = licenses,
            sha256 = sha256,
            urls = [snapshot_url, mirror_url],
            named_files = {"CHROMIUM": browser_path},
        )
def org_mozilla_firefox():
    """Declares per-platform repositories for Firefox 90.0.1.

    Firefox ships a macOS launcher that conditionally starts x64/arm64, so the
    macos_x64 and macos_arm64 repositories intentionally download the same
    archive; separate repositories are preserved to allow dedicated binaries
    in the future.
    """
    licenses = ["reciprocal"]  # MPL 2.0

    # (repo suffix, sha256, release URL, mirror URL, binary path in archive)
    platforms = [
        (
            "linux_x64",
            "998607f028043b3780f296eee03027279ef059acab5b50f9754df2bd69ca42b3",
            "https://ftp.mozilla.org/pub/firefox/releases/90.0.1/linux-x86_64/en-US/firefox-90.0.1.tar.bz2",
            "https://storage.googleapis.com/dev-infra-mirror/mozilla/firefox/firefox-90.0.1.tar.bz2",
            "firefox/firefox",
        ),
        (
            "macos_x64",
            "76c1b9c42b52c7e5be4c112a98b7d3762a18841367f778a179679ac0de751f05",
            "https://ftp.mozilla.org/pub/firefox/releases/90.0.1/mac/en-US/Firefox%2090.0.1.dmg",
            "https://storage.googleapis.com/dev-infra-mirror/mozilla/firefox/Firefox%2090.0.1.dmg",
            "Firefox.app/Contents/MacOS/firefox",
        ),
        (
            "macos_arm64",
            "76c1b9c42b52c7e5be4c112a98b7d3762a18841367f778a179679ac0de751f05",
            "https://ftp.mozilla.org/pub/firefox/releases/90.0.1/mac/en-US/Firefox%2090.0.1.dmg",
            "https://storage.googleapis.com/dev-infra-mirror/mozilla/firefox/Firefox%2090.0.1.dmg",
            "Firefox.app/Contents/MacOS/firefox",
        ),
    ]
    for suffix, sha256, release_url, mirror_url, binary_path in platforms:
        platform_archive(
            name = "org_mozilla_firefox_" + suffix,
            licenses = licenses,
            sha256 = sha256,
            urls = [release_url, mirror_url],
            named_files = {"FIREFOX": binary_path},
        )
def org_mozilla_geckodriver():
    """Declares per-platform repositories for Geckodriver v0.29.1."""
    licenses = ["reciprocal"]  # MPL 2.0

    # (repo suffix, sha256, archive file name)
    platforms = [
        (
            "linux_x64",
            "ec164910a3de7eec71e596bd2a1814ae27ba4c9d112b611680a6470dbe2ce27b",
            "geckodriver-v0.29.1-linux64.tar.gz",
        ),
        (
            "macos_x64",
            "9929c804ad0157ca13fdafca808866c88815b658e7059280a9f08f7e70364963",
            "geckodriver-v0.29.1-macos.tar.gz",
        ),
        (
            "macos_arm64",
            "a1ec058b930fbfb684e30071ea47eec61bc18acb489914a9e0d095ede6088eea",
            "geckodriver-v0.29.1-macos-aarch64.tar.gz",
        ),
    ]
    for suffix, sha256, archive in platforms:
        platform_archive(
            name = "org_mozilla_geckodriver_" + suffix,
            licenses = licenses,
            sha256 = sha256,
            urls = [
                "https://github.com/mozilla/geckodriver/releases/download/v0.29.1/" + archive,
                "https://storage.googleapis.com/dev-infra-mirror/mozilla/geckodriver/0.29.1/" + archive,
            ],
            named_files = {"GECKODRIVER": "geckodriver"},
        )
| StarcoderdataPython |
3372595 | <gh_stars>0
from collections import defaultdict
import os
import pickle
class DataContainer(object):
    """Dict-of-lists accumulator that can be pickled to / restored from disk.

    Values are grouped per key; missing keys transparently yield an empty
    list (defaultdict semantics), including after a load() round-trip.
    """

    def __init__(self, name):
        super().__init__()
        self.name = name  # label for this container (not used internally)
        # `list` as default_factory is equivalent to `lambda: []` and keeps
        # the factory picklable.
        self._data_dict = defaultdict(list)

    def __getitem__(self, key):
        """Return the list stored under *key* (created empty on first access)."""
        return self._data_dict[key]

    def __setitem__(self, key, value):
        self._data_dict[key] = value

    def keys(self):
        return self._data_dict.keys()

    def items(self, *keys):
        """Yield tuples zipping the value lists of the given *keys* in lockstep."""
        zip_items = [self[key] for key in keys]
        for item in zip(*zip_items):
            yield item

    def append(self, data_dict):
        """Append each value of *data_dict* to the list stored under its key."""
        assert isinstance(data_dict, dict)
        for key, value in data_dict.items():
            self._data_dict[key].append(value)

    def load(self, path):
        """Restore the container from *path*; returns the raw loaded mapping."""
        with open(path, 'rb') as f:
            data = pickle.load(f)
        # BUG FIX: re-wrap the plain dict in a defaultdict so __getitem__ and
        # append() keep working for unseen keys after a load (previously a
        # plain dict was assigned and raised KeyError).
        self._data_dict = defaultdict(list, data)
        return data

    def dump(self, path):
        """Pickle the contents to *path* (as a plain dict, which is picklable)."""
        with open(path, 'wb') as f:
            _data_dict = dict(self._data_dict)
            pickle.dump(_data_dict, f)

    def reset(self):
        """Drop all stored data."""
        self._data_dict = defaultdict(list)
| StarcoderdataPython |
9084 | <gh_stars>1-10
"""Module to initialize Maxmind databases and lookup IP metadata."""
import logging
import os
from typing import Optional, Tuple, NamedTuple
import geoip2.database
from pipeline.metadata.mmdb_reader import mmdb_reader
# File names of the two Maxmind databases expected inside the maxmind folder.
MAXMIND_CITY = 'GeoLite2-City.mmdb'
MAXMIND_ASN = 'GeoLite2-ASN.mmdb'

# Tuple(netblock, asn, as_name, country)
# ex: ("1.0.0.1/24", 13335, "CLOUDFLARENET", "AU")
MaxmindReturnValues = NamedTuple('MaxmindReturnValues',
                                 [('netblock', Optional[str]), ('asn', int),
                                  ('as_name', Optional[str]),
                                  ('country', Optional[str])])
class MaxmindIpMetadata():
    """Lookup database for Maxmind ASN and country metadata."""

    def __init__(self, maxmind_folder: str) -> None:
        """Create a Maxmind Database.

        Args:
            maxmind_folder: a folder containing maxmind files.
                Either a gcs filepath or a local system folder.
        """
        maxmind_city_path = os.path.join(maxmind_folder, MAXMIND_CITY)
        maxmind_asn_path = os.path.join(maxmind_folder, MAXMIND_ASN)

        self.maxmind_city = mmdb_reader(maxmind_city_path)
        self.maxmind_asn = mmdb_reader(maxmind_asn_path)

    def lookup(self, ip: str) -> MaxmindReturnValues:
        """Lookup metadata infomation about an IP.

        Args:
            ip: string of the format 1.1.1.1 (ipv4 only)

        Returns: MaxmindReturnValues

        Raises:
            KeyError: when the IP's ASN can't be found
        """
        (asn, as_name, netblock) = self._get_maxmind_asn(ip)
        country = self._get_country_code(ip)
        # No ASN means the IP is unknown to Maxmind; country alone is not
        # enough to build a useful return value.
        if not asn:
            raise KeyError(f"No Maxmind entry for {ip}")
        return MaxmindReturnValues(netblock, asn, as_name, country)

    def _get_country_code(self, vp_ip: str) -> Optional[str]:
        """Get country code for IP address.

        Args:
            vp_ip: IP address of vantage point (as string)

        Returns:
            2-letter ISO country code, or None when the lookup fails
            (failure is logged, not raised).
        """
        try:
            vp_info = self.maxmind_city.city(vp_ip)
            return vp_info.country.iso_code
        except (ValueError, geoip2.errors.AddressNotFoundError) as e:
            logging.warning('Maxmind: %s\n', e)
        return None

    def _get_maxmind_asn(
        self, vp_ip: str) -> Tuple[Optional[int], Optional[str], Optional[str]]:
        """Get ASN information for IP address.

        Args:
            vp_ip: IP address of vantage point (as string)

        Returns:
            Tuple containing AS num, AS org, and netblock; all None when the
            lookup fails (failure is logged, not raised).
        """
        try:
            vp_info = self.maxmind_asn.asn(vp_ip)
            asn = vp_info.autonomous_system_number
            as_name = vp_info.autonomous_system_organization
            if vp_info.network:
                netblock: Optional[str] = vp_info.network.with_prefixlen
            else:
                netblock = None
            return asn, as_name, netblock
        except (ValueError, geoip2.errors.AddressNotFoundError) as e:
            logging.warning('Maxmind: %s\n', e)
        return None, None, None
class FakeMaxmindIpMetadata(MaxmindIpMetadata):
    """A fake lookup table for testing MaxmindIpMetadata."""

    # pylint: disable=super-init-not-called
    def __init__(self) -> None:
        # Deliberately skips loading any mmdb files.
        pass

    # pylint: disable=no-self-use
    def lookup(self, _: str) -> MaxmindReturnValues:
        # Returns one fixed record regardless of the queried IP.
        return MaxmindReturnValues('192.168.127.12/16', 1221, 'ASN-TELSTRA', 'AU')
| StarcoderdataPython |
164202 | <filename>src/niweb/apps/noclook/forms/nordunet.py<gh_stars>1-10
# -*- coding: utf-8 -*-
__author__ = 'lundberg'
from django import forms
from django.db import IntegrityError
from apps.noclook.models import UniqueIdGenerator, NordunetUniqueId, NodeHandle
from apps.noclook.helpers import get_provider_id
from .. import unique_ids
from . import common
class NewCableForm(common.NewCableForm):
    """Cable creation form that auto-assigns NORDUnet cable IDs."""

    def __init__(self, *args, **kwargs):
        super(NewCableForm, self).__init__(*args, **kwargs)
        # Default the provider relationship to NORDUnet.
        self.fields['relationship_provider'].initial = get_provider_id('NORDUnet')

    name = forms.CharField(required=False,
                           help_text="If no name is specified the next NORDUnet cable ID will be used.")

    class Meta:
        # ID generator / collection used when no explicit name is given.
        id_generator_name = 'nordunet_cable_id'
        id_collection = NordunetUniqueId

    def clean(self):
        """
        Sets name to next generated ID or register the name in the ID collection.
        """
        cleaned_data = super(NewCableForm, self).clean()
        # Set name to a generated id if the cable is not a manually named cable.
        name = cleaned_data.get("name")
        if self.is_valid():
            if not name:
                if not self.Meta.id_generator_name or not self.Meta.id_collection:
                    raise Exception('You have to set id_generator_name and id_collection in form Meta class.')
                try:
                    id_generator = UniqueIdGenerator.objects.get(name=self.Meta.id_generator_name)
                    cleaned_data['name'] = unique_ids.get_collection_unique_id(id_generator, self.Meta.id_collection)
                except UniqueIdGenerator.DoesNotExist as e:
                    raise e
            else:
                try:
                    unique_ids.register_unique_id(self.Meta.id_collection, name)
                except IntegrityError as e:
                    # NOTE(review): an already-registered ID is silently
                    # accepted unless a NodeHandle with that name exists —
                    # confirm this is the intended reuse behavior.
                    if NodeHandle.objects.filter(node_name=name):
                        self.add_error('name', str(e))
        return cleaned_data
class EditCableForm(common.EditCableForm):
    """Cable edit form with Telenor / Global Crossing / Global Connect IDs."""

    name = forms.CharField(help_text='Name will be superseded by Telenor Trunk ID if set.')
    telenor_tn1_number = forms.CharField(required=False, help_text='Telenor TN1 number, nnnnn.', label='TN1 Number')
    telenor_trunk_id = forms.CharField(required=False, help_text='Telenor Trunk ID, nnn-nnnn.', label='Trunk ID')
    global_crossing_circuit_id = forms.CharField(required=False, help_text='Global Crossing circuit ID, nnnnnnnnnn', label='Circuit ID')
    global_connect_circuit_id = forms.CharField(required=False, help_text='Global Connect circuit ID', label='Circuit ID')

    def clean(self):
        cleaned_data = super(EditCableForm, self).clean()
        # A Telenor Trunk ID, when provided, overrides the cable name.
        if cleaned_data.get('telenor_trunk_id', None):
            cleaned_data['name'] = cleaned_data['telenor_trunk_id']
        return cleaned_data
class NewServiceForm(common.NewServiceForm):
    """Service creation form defaulting the provider to NORDUnet."""

    def __init__(self, *args, **kwargs):
        super(NewServiceForm, self).__init__(*args, **kwargs)
        self.fields['relationship_provider'].initial = get_provider_id('NORDUnet')

    project_end_date = common.DatePickerField(required=False)

    def clean(self):
        """
        Checks that project_end_date was not omitted if service is of type project.
        """
        cleaned_data = super(NewServiceForm, self).clean()
        if cleaned_data['service_type'] == 'Project' and not cleaned_data['project_end_date']:
            self.add_error('project_end_date', 'Missing project end date.')
        # Convert project_end_date to string if set
        if cleaned_data.get('project_end_date', None):
            cleaned_data['project_end_date'] = cleaned_data['project_end_date'].strftime('%Y-%m-%d')
        return cleaned_data
class NewL2vpnServiceForm(NewServiceForm):
    """L2VPN service form: adds VPN-specific optional attributes."""

    ncs_service_name = forms.CharField(required=False, help_text='')
    vpn_type = forms.CharField(required=False, help_text='')
    vlan = forms.CharField(required=False, help_text='')
    vrf_target = forms.CharField(required=False, help_text='')
    route_distinguisher = forms.CharField(required=False, help_text='')
class NewOpticalLinkForm(common.NewOpticalLinkForm):
    """Optical-link form with NORDUnet provider and generated link IDs."""

    def __init__(self, *args, **kwargs):
        super(NewOpticalLinkForm, self).__init__(*args, **kwargs)
        self.fields['relationship_provider'].initial = get_provider_id('NORDUnet')

    class Meta(common.NewOpticalLinkForm.Meta):
        id_generator_name = 'nordunet_optical_link_id'
        id_collection = NordunetUniqueId

    # Name is generated, so it is hidden from the user.
    name = forms.CharField(required=False, widget=forms.widgets.HiddenInput)

    def clean(self):
        # NOTE(review): adds nothing over the parent clean(); kept as an
        # extension point.
        cleaned_data = super(NewOpticalLinkForm, self).clean()
        return cleaned_data
class EditOpticalLinkForm(common.EditOpticalLinkForm):
    """Optical-link edit form; the generated name stays hidden."""

    name = forms.CharField(required=False, widget=forms.widgets.HiddenInput)
class NewOpticalMultiplexSectionForm(common.NewOpticalMultiplexSectionForm):
    """Optical multiplex section form defaulting the provider to NORDUnet."""

    def __init__(self, *args, **kwargs):
        super(NewOpticalMultiplexSectionForm, self).__init__(*args, **kwargs)
        self.fields['relationship_provider'].initial = get_provider_id('NORDUnet')
class NewOpticalPathForm(common.NewOpticalPathForm):
    """Optical-path form with generated path IDs."""

    class Meta(common.NewOpticalLinkForm.Meta):
        # NOTE(review): inherits the optical *link* form's Meta rather than a
        # path-specific one — confirm intentional.
        id_generator_name = 'nordunet_optical_path_id'
        id_collection = NordunetUniqueId

    # Name is generated, so it is hidden from the user.
    name = forms.CharField(required=False, widget=forms.widgets.HiddenInput)

    def clean(self):
        # NOTE(review): adds nothing over the parent clean(); kept as an
        # extension point.
        cleaned_data = super(NewOpticalPathForm, self).clean()
        return cleaned_data
class EditOpticalPathForm(common.EditOpticalPathForm):
    """Optical-path edit form; the generated name stays hidden."""

    name = forms.CharField(required=False, widget=forms.widgets.HiddenInput)
class NewSiteForm(common.NewSiteForm):
    """
    Concatenate country code with site name
    """

    def clean(self):
        cleaned_data = super(NewSiteForm, self).clean()
        # Site names are stored upper-cased with a country prefix,
        # e.g. "SE-STOCKHOLM".
        cleaned_data['name'] = '%s-%s' % (cleaned_data['country_code'], cleaned_data['name'].upper())
        return cleaned_data
| StarcoderdataPython |
3337226 | <gh_stars>0
import testinfra.utils.ansible_runner
# Run these testinfra checks against every host in the Molecule-generated
# Ansible inventory.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    '.molecule/ansible_inventory').get_hosts('all')
def test_vault_running_and_enabled(host):
    """Vault must be an active service and be enabled to start on boot."""
    vault_service = host.service("vault")
    assert vault_service.is_running
    assert vault_service.is_enabled
| StarcoderdataPython |
4834697 | from django.conf.urls.defaults import *
# Mount socialregistration's URLs under /social/account/.
# NOTE(review): django.conf.urls.defaults and patterns() only exist in legacy
# Django (removed in 1.6/1.8); this module targets an old Django release.
rootpatterns = patterns('',
    (r'^social/account/', include('socialregistration.urls')),
)
| StarcoderdataPython |
179582 | <reponame>hgt312/EE334
import tensorflow as tf
import numpy as np
# Load the 1-D training data and flatten both arrays to vectors.
x_data = np.loadtxt('datax.txt')
x_data = np.reshape(x_data, (-1,))
y_data = np.loadtxt('datay.txt')
y_data = np.reshape(y_data, (-1,))

# Model: y = W * x + b, built with the TF1 graph-mode API
# (tf.Session / global_variables_initializer do not exist in TF2 eager mode).
W = tf.Variable(tf.random_uniform((1,), -1., 1.))
b = tf.Variable(tf.zeros((1,)))
y = W * x_data + b

# Minimize the mean squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.001)
train = optimizer.minimize(loss)

# Before starting, initialize the variables. We will 'run' this first.
init = tf.global_variables_initializer()

# Launch the graph.
sess = tf.Session()
sess.run(init)

# Fit the line: 12001 gradient steps, reporting loss/W/b every 500 steps.
for step in range(12001):
    sess.run(train)
    if step % 500 == 0:
        print(step, sess.run(loss), sess.run(W), sess.run(b))
| StarcoderdataPython |
1656838 | from django.urls import path
from .views import HistoryTemplateView
# Route the app root to the history landing page.
urlpatterns = [
    path('', HistoryTemplateView.as_view(), name='history-index'),
]
| StarcoderdataPython |
1786080 |
#------------------------------------#
# Author: <NAME> #
# Update: 7/10/2019 #
# E-mail: <EMAIL> #
#------------------------------------#
"""--------------------------------
- Morphological Transformations
- Image erosion
- Image dilation
- Function morphologyEx
- six different method
---------------------------------"""
# Import OpenCV Library, numpy and command line interface
import cv2
import numpy as np
import argparse
from matplotlib import pyplot as plt
def Morphological_Transformations(img):
    """Show the original image next to its erosion and dilation.

    Uses a 6x6 all-ones structuring element with 2 iterations each.
    """
    # 6 by 6 kernel
    kernel = np.ones((6,6),np.uint8)
    erosion = cv2.erode(img, kernel, iterations = 2)
    dilation = cv2.dilate(img, kernel, iterations = 2)
    # show result images
    # NOTE(review): cv2 loads images as BGR while matplotlib expects RGB, so
    # the displayed colors are channel-swapped — confirm whether intended.
    plt.subplot(131),plt.imshow(img),plt.title('Original')
    plt.xticks([]), plt.yticks([])
    plt.subplot(132),plt.imshow(erosion),plt.title('erosion')
    plt.xticks([]), plt.yticks([])
    plt.subplot(133),plt.imshow(dilation),plt.title('dilation')
    plt.xticks([]), plt.yticks([])
    plt.show()
def morphologyEx(img):
    """Apply the five cv2.morphologyEx operations to *img* and plot a 2x3 grid.

    Shows the original plus opening, closing, gradient, tophat and blackhat,
    all computed with a 5x5 all-ones structuring element.
    """
    kernel = np.ones((5,5),np.uint8)
    operations = [
        ('opening', cv2.MORPH_OPEN),
        ('closing', cv2.MORPH_CLOSE),
        ('gradient', cv2.MORPH_GRADIENT),
        ('tophat', cv2.MORPH_TOPHAT),
        ('blackhat', cv2.MORPH_BLACKHAT),
    ]
    panels = [('Original', img)]
    panels += [(title, cv2.morphologyEx(img, op, kernel)) for title, op in operations]
    # show result images
    for index, (title, panel) in enumerate(panels):
        plt.subplot(2, 3, index + 1)
        plt.imshow(panel)
        plt.title(title)
        plt.xticks([])
        plt.yticks([])
    plt.show()
# -------------------------- main -------------------------- #
if __name__ == '__main__':
    # read one input from terminal
    # (1) command line >> python Morphological_Transformations.py -i morpho_img.png
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the input image")
    args = vars(ap.parse_args())
    # Read image
    # NOTE(review): cv2.imread returns None for missing/unreadable paths;
    # the calls below would then fail — consider validating.
    image = cv2.imread(args["image"])
    # Functions
    Morphological_Transformations(image)
    morphologyEx(image)
# Reference:
# Website: OpenCV-Python Document
# Link: https://docs.opencv.org/2.4/modules/imgproc/doc/filtering.html#void%20filter2D(InputArray%20src,%20OutputArray%20dst,%20int%20ddepth,%20InputArray%20kernel,%20Point%20anchor,%20double%20delta,%20int%20borderType)
| StarcoderdataPython |
127714 | <filename>setup.py
from pathlib import Path
from distutils.core import setup
def get_version():
    """Extract ``__version__`` from urlazy.py without importing the module.

    Returns:
        The evaluated version value (typically a string).

    Raises:
        RuntimeError: if no ``__version__`` assignment line is found.
    """
    basedir = Path(__file__).parent
    with open(basedir / 'urlazy.py') as f:
        version_line = next(
            (line for line in f if line.startswith('__version__')),
            None,
        )
    # BUG FIX: the original `raise RuntimeError` sat after the `return` and
    # was unreachable; a missing assignment surfaced as a confusing
    # StopIteration from next() instead.
    if version_line is None:
        raise RuntimeError('No version info found.')
    return eval(version_line.split('=')[1])
def get_long_description():
    """Read the package long description for setup() from README.txt."""
    readme_path = Path(__file__).parent / 'README.txt'
    with open(readme_path) as handle:
        return handle.read()
# Package metadata for URLazy, a single-module distribution.
# NOTE(review): distutils is deprecated (PEP 632); setuptools.setup is the
# drop-in replacement when a migration is scheduled.
setup(
    name='URLazy',
    version=get_version(),
    py_modules=['urlazy'],
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/i-trofimtschuk/urlazy/',
    license='Unlicense',
    description='URLazy lets you build URLs incrementally with ease',
    long_description=get_long_description(),
    platforms=['any'],
    classifiers=[
        'License :: OSI Approved :: The Unlicense (Unlicense)',
        'Natural Language :: English',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries',
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
| StarcoderdataPython |
3262341 | from setuptools import find_packages, setup
import os.path
# Resolve paths relative to this setup.py so builds work from any CWD.
HERE = os.path.abspath(os.path.dirname(__file__))

# The long description is taken verbatim from the README (rendered on PyPI).
with open(os.path.join(HERE, 'README.md'), encoding='utf-8') as handle:
    long_description = handle.read()

setup(
    name='actiontest',
    version='0.1',
    description='Testing Action using Conda and Python',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/matthewrmshin/actiontest',
    author='<NAME>',
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Testing',
        'Topic :: System :: Software Distribution',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    # keywords='post-processing',
    # Sources live under src/ (the "src layout").
    package_dir={'': 'src'},
    packages=find_packages(where='src'),
    python_requires='>=3.6, <4',
    install_requires=[
    ],
    # extra_requires={
    #     'test': ['flake8', 'pytest', 'pytest-cov'],
    # },
    # package_data={},
    entry_points={'console_scripts': ['hello=hello.__main__:main']},
    # project_urls={},
)
| StarcoderdataPython |
1798423 | import pytest
from sanic_routing import BaseRouter
from sanic_routing.exceptions import NotFound
@pytest.fixture
def handler():
    """Fixture: a route handler that returns the first captured path param."""
    def handler(**kwargs):
        return list(kwargs.values())[0]

    return handler
class Router(BaseRouter):
    """Minimal concrete BaseRouter whose get() simply resolves a path."""

    def get(self, path, method, extra=None):
        return self.resolve(path=path, method=method, extra=extra)
def test_alpha_does_cast(handler):
    """<alpha> params match purely alphabetic segments and stay strings."""
    router = Router()
    router.add("/<alphaonly:alpha>", handler)
    router.finalize()
    _, handler, params = router.get("/foobar", "BASE")
    retval = handler(**params)
    assert isinstance(retval, str)
    assert retval == "foobar"
def test_alpha_does_not_cast(handler):
    """<alpha> params reject segments containing digits (NotFound)."""
    router = Router()
    router.add("/<alphaonly:alpha>", handler)
    router.finalize()
    with pytest.raises(NotFound):
        router.get("/notfound123", "BASE")
def test_correct_alpha_v_string(handler):
    """Routing picks <alpha> vs <str> correctly for alphabetic vs mixed paths."""
    router = Router()
    router.add("/<alphaonly:alpha>", handler, methods=["alpha"])
    router.add("/<anystring:str>", handler, methods=["str"])
    router.finalize()
    _, handler, params = router.get("/foobar", "alpha")
    retval = handler(**params)
    assert isinstance(retval, str)
    assert retval == "foobar"
    _, handler, params = router.get("/foobar123", "str")
    retval = handler(**params)
    assert isinstance(retval, str)
    assert retval == "foobar123"
def test_use_string_raises_deprecation_warning(handler):
    """Adding a <foo:string> route warns that 'string' is deprecated."""
    router = Router()
    with pytest.warns(DeprecationWarning) as record:
        router.add("/<foo:string>", handler)

    assert len(record) == 1
    # The expected text (including the 'deprected' typo) must match the
    # library's message byte-for-byte.
    assert record[0].message.args[0] == (
        "Use of 'string' as a path parameter type is deprected, and will be "
        "removed in Sanic v21.12. Instead, use <foo:str>."
    )
def test_use_number_raises_deprecation_warning(handler):
    """Adding a <foo:number> route warns that 'number' is deprecated."""
    router = Router()
    with pytest.warns(DeprecationWarning) as record:
        router.add("/<foo:number>", handler)

    assert len(record) == 1
    # The expected text (including the 'deprected' typo) must match the
    # library's message byte-for-byte.
    assert record[0].message.args[0] == (
        "Use of 'number' as a path parameter type is deprected, and will be "
        "removed in Sanic v21.12. Instead, use <foo:float>."
    )
@pytest.mark.parametrize(
    "value", ("foo-bar", "foobar", "foo-bar-thing123", "foobar123", "123")
)
def test_slug_does_cast(handler, value):
    """<slug> accepts lowercase alphanumeric segments with hyphens."""
    router = Router()
    router.add("/<slug:slug>", handler)
    router.finalize()
    _, handler, params = router.get(f"/{value}", "BASE")
    retval = handler(**params)
    assert isinstance(retval, str)
    assert retval == value
@pytest.mark.parametrize("value", ("-aaa", "FooBar", "Foo-Bar"))
def test_slug_does_not_cast(handler, value):
    """<slug> rejects leading hyphens and uppercase letters (NotFound)."""
    router = Router()
    router.add("/<slug:slug>", handler)
    router.finalize()
    with pytest.raises(NotFound):
        router.get(f"/{value}", "BASE")
def test_correct_slug_v_string(handler):
    """Routing picks <slug> vs <str> correctly for slug-like vs mixed paths."""
    router = Router()
    router.add("/<slug:slug>", handler, methods=["slug"])
    router.add("/<anystring:str>", handler, methods=["str"])
    router.finalize()
    _, handler, params = router.get("/foo-bar", "slug")
    retval = handler(**params)
    assert isinstance(retval, str)
    assert retval == "foo-bar"
    _, handler, params = router.get("/FooBar", "str")
    retval = handler(**params)
    assert isinstance(retval, str)
    assert retval == "FooBar"
| StarcoderdataPython |
181001 | <reponame>mburq/gym-matching
import gym
import gym_matching
import numpy as np
import argparse
import time
from baselines.common.misc_util import boolean_flag
from collections import deque
def run(env_id, seed, evaluation, nb_epochs, nb_rollout_steps):
    """Roll out a fixed (constant-action) policy on a gym matching env.

    Runs `nb_epochs` episodes of at most `nb_rollout_steps` steps each,
    always applying the constant action -0.5, and prints the mean episode
    reward and total wall-clock time.
    NOTE(review): `seed` and `evaluation` are accepted but never used.
    """
    # assert env_id in ['Matching-v3', 'Matching-v4'] # only works for shadow prices env.
    env = gym.make(env_id)
    start_time = time.time()
    obs = env.reset()
    action_shape = env.action_space.shape
    epoch_episodes = 0
    episode_reward = 0.
    episode_step = 0
    episodes = 0
    t = 0
    epoch_episode_rewards = []
    episode_rewards_history = deque(maxlen=200)
    epoch_episode_steps = []
    for epoch in range(nb_epochs):
        for t_rollout in range(nb_rollout_steps):
            # Constant baseline action over the whole action space.
            action = -0.5 * np.ones(action_shape)
            # action = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
            new_obs, r, done, info = env.step(action)
            t += 1
            episode_reward += r
            episode_step += 1
            obs = new_obs
            if done:
                # Episode done.
                epoch_episode_rewards.append(episode_reward)
                episode_rewards_history.append(episode_reward)
                epoch_episode_steps.append(episode_step)
                episode_reward = 0.
                episode_step = 0
                epoch_episodes += 1
                episodes += 1
                obs = env.reset()
                break
    print("Mean reward: {}".format(np.mean(epoch_episode_rewards)))
    print(time.time() - start_time)
def parse_args():
    """Parse CLI arguments and return them as a dict suitable for run(**args)."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--env-id', type=str, default='Matching-v0')
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--nb_epochs', type=int, default=50)
    # boolean_flag adds paired --evaluation / --no-evaluation switches.
    boolean_flag(parser, 'evaluation', default=True)
    parser.add_argument('--nb-rollout-steps', type=int, default=200)
    args = parser.parse_args()
    dict_args = vars(args)
    return dict_args
# Script entry point: parse CLI arguments and run the rollout.
if __name__ == '__main__':
    args = parse_args()
    run(**args)
| StarcoderdataPython |
3240302 | <gh_stars>0
# Generated by Django 3.1.2 on 2021-02-17 22:19
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the ordered_from field from CoreVendorMlmOrders."""
    dependencies = [
        ('core_marketing', '0017_auto_20210217_2216'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='corevendormlmorders',
            name='ordered_from',
        ),
    ]
| StarcoderdataPython |
84783 | <reponame>andreatulimiero/netsec-hs18
from django.shortcuts import render, redirect
from django.db import connection
from django.http import HttpResponse
from django.views import View
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth import login, authenticate, logout
from .forms import *
from .models import *
PASS_COST = 10
DISALLOWED_SQL = ['select', 'insert', 'delete', 'create', 'update']
# Create your views here.
class Index(View):
    """Landing page for the passes app."""
    def get(self, req):
        # Static template; no context needed.
        return render(req, 'passes/index.html')
class Signup(View):
    """User registration via Django's UserCreationForm.

    On success the user is created, logged in, and given an Account with a
    starting balance of 10.
    """
    def get(self, req):
        # Blank registration form.
        form = UserCreationForm()
        return render(req, 'passes/signup.html', {'form': form})
    def post(self, req):
        form = UserCreationForm(req.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            # 'password1' is the first password field of UserCreationForm.
            raw_password = form.cleaned_data.get('password1')
            user = authenticate(username=username, password=raw_password)
            login(req, user)
            # NOTE(review): storing the cleartext password mirrors the rest of
            # this (deliberately vulnerable) exercise app — do not reuse.
            account = Account.objects.create(user=user, balance=10, clear_password=raw_password)
            return redirect('account')
        else:
            # Re-render with validation errors.
            return render(req, 'passes/signup.html', {'form': form})
class Login(View):
    """Username/password login form."""
    def get(self, req):
        form = UsernamePasswordForm()
        return render(req, 'passes/login.html', {'form': form})
    def post(self, req):
        form = UsernamePasswordForm(req.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(req, username=username, password=password)
            if user is not None:
                login(req, user)
                return redirect('account')
        # Invalid form or bad credentials: re-render the login page.
        return render(req, 'passes/login.html', {'form': form})
class Logout(View):
    """End the session and return to the landing page."""
    def get(self, req):
        logout(req)
        return redirect('index')
class AccountView(View):
    """Show the account balance (GET) and perform money transfers (POST).

    NOTE(review): this view is part of a deliberately vulnerable security
    exercise — the raw-string SQL below is injectable by design.  Do not
    reuse this pattern; use parameterized queries in real code.
    """
    def get(self, req):
        if not req.user.is_authenticated:
            return redirect('index')
        form = TransactionForm()
        balance = req.user.account.balance
        return render(req, 'passes/account.html', {'form': form, 'balance': balance})
    def post(self, req):
        if not req.user.is_authenticated:
            return redirect('index')
        form = TransactionForm(req.POST)
        if form.is_valid():
            # Sender credentials come straight from POST, not from the session.
            from_user = req.POST.get('from_user')
            from_user_pwd = req.POST.get('from_user_pwd')
            to_user_account = form.cleaned_data.get('to_user')
            amount = form.cleaned_data.get('amount')
            try:
                amount = int(amount)
                if amount < 0:
                    return HttpResponse('Te piacerebbe...', status=400)
            except Exception as e:
                return HttpResponse('Wrong amount',status=400)
            # SECURITY: string-formatted SQL — injectable (intentional here).
            cursor = connection.cursor()
            row = cursor.execute("select user_id from passes_account where user_id = (select id from auth_user where username = '{}') and clear_password = '{}'".format(from_user, from_user_pwd))
            account_id = row.fetchone()
            print(account_id)
            if account_id is None:
                return HttpResponse('Oh no ...', status=401)
            sender = User.objects.filter(username=from_user).first()
            account_to = Account.objects.filter(id=int(to_user_account)).first()
            if account_to is None:
                return HttpResponse('No such account', status=400)
            account_from = Account.objects.filter(user=sender).first()
            if account_from.id == account_to.id:
                return HttpResponse('Canno send money to your own account')
            # NOTE(review): read-modify-write without a transaction or
            # select_for_update — racy under concurrent transfers.
            account_from.balance -= amount
            account_to.balance += amount
            account_from.save()
            account_to.save()
            return redirect('account')
class BuyPassView(View):
    """Deduct PASS_COST from the user's balance and reveal the pass on success."""
    def get(self, req):
        if not req.user.is_authenticated:
            return redirect('index')
        account = req.user.account
        success = False
        if account.balance >= PASS_COST:
            # NOTE(review): non-atomic check-then-debit; racy under
            # concurrent requests for the same account.
            account.balance -= PASS_COST
            account.save()
            success = True
        return render(req, 'passes/pass_reveal.html', {'success': success})
class GetAccountId(View):
    """Look up an account id by username (part of the vulnerable exercise app)."""
    def get(self, req):
        if not req.user.is_authenticated:
            return redirect('index')
        form = SearchAccountForm()
        res = ''
        return render(req, 'passes/get_user_account.html', {'form':form, 'res': res})
    def post(self, req):
        if not req.user.is_authenticated:
            return redirect('index')
        form = SearchAccountForm()
        username = req.POST.get('username')
        # NOTE(review): this blacklist check only sets `res` and never
        # returns — the query below still runs and `res` is overwritten,
        # so the check is ineffective (likely intentional for the exercise).
        if username.lower() in DISALLOWED_SQL:
            res = 'You cannot modify the DB'
        # SECURITY: string-formatted SQL — injectable (intentional here).
        cursor = connection.cursor()
        accounts = cursor.execute("select id from passes_account where user_id = (select id from auth_user where username = '{}')".format(username))
        account_id = accounts.fetchone()
        if account_id is None:
            res = 'No account found'
        else:
            res = account_id
        return render(req, 'passes/get_user_account.html', {'form': form,'res': res})
| StarcoderdataPython |
155720 | from datetime import datetime, timedelta, timezone
def utc_now() -> datetime:
    """Return the current moment as a timezone-aware UTC datetime."""
    current = datetime.now(tz=timezone.utc)
    return current
def datetime_dump(dt: datetime) -> str:
    """Serialize *dt* as its POSIX timestamp rendered as a string."""
    epoch_seconds = dt.timestamp()
    return f"{epoch_seconds!s}"
def datetime_load(raw: str) -> datetime:
    """Parse a stringified POSIX timestamp back into an aware UTC datetime."""
    seconds = float(raw)
    return datetime.fromtimestamp(seconds, tz=timezone.utc)
def timedelta_dump(td: timedelta) -> str:
    """Serialize *td* as its total length in seconds, stringified."""
    seconds = td.total_seconds()
    return f"{seconds!s}"
def timedelta_load(raw: str) -> timedelta:
    """Parse a stringified seconds count back into a timedelta."""
    seconds = float(raw)
    return timedelta(seconds=seconds)
| StarcoderdataPython |
7027 | import discord
from jshbot import utilities, data, configurations, plugins, logger
from jshbot.exceptions import BotException, ConfiguredBotException
from jshbot.commands import (
Command, SubCommand, Shortcut, ArgTypes, Attachment, Arg, Opt, MessageTypes, Response)
__version__ = '0.1.0'
CBException = ConfiguredBotException('0.3 to 0.4 plugin')
@plugins.command_spawner
def get_commands(bot):
    """Register the hidden, owner-level 'convertdata' command."""
    return [Command('convertdata', hidden=True, elevated_level=3)]
async def get_response(bot, context):
    """Convert 0.3-format data to 0.4 for every guild the bot is in."""
    for guild in bot.guilds:
        convert_core(bot, guild)
        # Tag conversion only applies when the tags plugin is loaded.
        if 'tags.py' in bot.plugins:
            convert_tags(bot, guild)
    return Response("Converted.")
def convert_core(bot, guild):
    """Migrate a guild's legacy 'base' data into the 0.4 'core' namespace.

    No-op if 'core' data already exists.  String IDs in 'blocked' and
    'muted_channels' are cast to int; obsolete keys are dropped.
    """
    if data.get(bot, 'core', None, guild_id=guild.id):
        logger.warn("Guild %s (%s) already had core converted", guild.name, guild.id)
        return
    base_data = data.get(bot, 'base', None, guild_id=guild.id, default={})
    if 'disabled' in base_data:
        # TODO: Iterate through toggled commands
        pass
    if 'blocked' in base_data:
        # User IDs were stored as strings in 0.3; 0.4 expects ints.
        replacement = []
        for entry in base_data['blocked']:
            replacement.append(int(entry))
        base_data['blocked'] = replacement
    if 'muted_channels' in base_data:
        # Channel IDs likewise become ints.
        replacement = []
        for entry in base_data['muted_channels']:
            replacement.append(int(entry))
        base_data['muted_channels'] = replacement
    if 'moderators' in base_data:
        # Moderator lists are no longer kept in 0.4.
        del base_data['moderators']
    if base_data:
        # Copy everything that survived into 'core', then drop the old blob.
        for key, value in base_data.items():
            data.add(bot, 'core', key, value, guild_id=guild.id)
        data.remove(bot, 'base', None, guild_id=guild.id)
def convert_tags(bot, guild):
    """Migrate a guild's 0.3 tag dictionary into the 0.4 tags database.

    No-op if no legacy tag data is present.  Each tag dict is flattened to
    the positional row format expected by the tags plugin's _add_tag.
    """
    if not data.get(bot, 'tags.py', 'tags', guild_id=guild.id):
        logger.warn("Guild %s (%s) already had tags converted", guild.name, guild.id)
        return
    tags = data.get(bot, 'tags.py', 'tags', guild_id=guild.id, default={})
    add_tag = bot.plugins['tags.py']._add_tag
    #key,value,length,volume,name,flags,author,hits,created,last_used,last_used_by,complex,extra
    for key, tag in tags.items():
        to_insert = [
            key,  # key
            tag['value'],  # value
            tag['length'],  # length
            tag['volume'],  # volume
            tag['name'],  # name
            tag['flags'],  # flags
            int(tag['author']),  # author
            tag['hits'],  # hits
            int(tag['created']),  # created
            int(tag['last_used']),  # last_used
            None,  # last_used_by
            {},  # complex
            {}  # extra
        ]
        add_tag(bot, to_insert, guild.id)
    data.remove(bot, 'tags.py', 'tags', guild_id=guild.id, safe=True)
| StarcoderdataPython |
# Definition for singly-linked list.
# class ListNode(object):
#     def __init__(self, x):
#         self.val = x
#         self.next = None
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """Add the numbers represented by two linked lists of decimal digits.

        Each list stores one digit per node, most-significant digit first.
        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        n1 = self.ll2num(l1)
        n2 = self.ll2num(l2)
        n3 = n1 + n2
        return self.num2ll(n3)
    def ll2num(self, head):
        """Collapse a digit list into the integer it represents."""
        n = head.val
        while head.next:
            head = head.next
            n = n * 10 + head.val
        return n
    def num2ll(self, n):
        """Expand a non-negative integer into a digit list (most-significant first)."""
        # Fix: the original used a Python-2-only `print` statement here
        # (leftover debug output, a SyntaxError on Python 3); removed.
        # Build from the least-significant digit, prepending as we go.
        head = ListNode(n % 10)
        n //= 10
        while n:
            newhead = ListNode(n % 10)
            n //= 10
            newhead.next = head
            head = newhead
        return head
| StarcoderdataPython |
4822974 | from node_exec.base_nodes import defNode
CONVERSION_IDENTIFIER = 'Convert'
@defNode(name='To Int', returnNames=["int"], identifier=CONVERSION_IDENTIFIER)
def toInt(value):
    """Conversion node: coerce *value* to an integer via int()."""
    return int(value)
@defNode(name='To String', returnNames=["str"], identifier=CONVERSION_IDENTIFIER)
def toString(value):
    """Conversion node: coerce *value* to a string via str()."""
    return str(value)
3353139 | from typing import List, Tuple, cast
import pytest
from galaxyls.services.xml.nodes import XmlElement
from galaxyls.tests.unit.utils import TestUtils
class TestXmlElementClass:
    """Tests for XmlElement attribute offset and content extraction."""
    @pytest.mark.parametrize(
        "source, expected_offsets",
        [
            # No attributes: both offsets collapse to the end of the tag name.
            ("<test", (5, 5)),
            ("<test>", (5, 5)),
            ("<test ", (5, 5)),
            # With attributes: (start of first attribute, end of the tag).
            ('<test attr="val">', (6, 16)),
            ('<test attr="val" attr2="value" >', (6, 32)),
        ],
    )
    def test_get_attributes_offsets_returns_expected(self, source: str, expected_offsets: Tuple[int, int]) -> None:
        """The attribute span offsets match the documented positions."""
        xml_document = TestUtils.from_source_to_xml_document(source)
        # Offset 1 is inside the tag name, so the node is the element itself.
        node = xml_document.get_node_at(1)
        assert node.is_element
        element = cast(XmlElement, node)
        actual_offsets = element.get_attributes_offsets()
        assert actual_offsets == expected_offsets
    @pytest.mark.parametrize(
        "source, expected_contents",
        [
            ('<test attr="val">', ['"val"']),
            ('<test attr="val" attr2="value" >', ['"val"', '"value"']),
        ],
    )
    def test_get_attribute_content_returns_expected(self, source: str, expected_contents: List[str]) -> None:
        """Attribute values are returned with their surrounding quotes."""
        xml_document = TestUtils.from_source_to_xml_document(source)
        node = xml_document.get_node_at(1)
        assert node.is_element
        element = cast(XmlElement, node)
        actual_contents = [attr.value.get_content(source) for attr in element.attributes.values()]
        assert actual_contents == expected_contents
1692784 | <gh_stars>0
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import User
from django import forms
import selectable.forms as selectable
from .models import Fruit, Farm, ReferencesTest
from .lookups import FruitLookup, OwnerLookup
from .forms import ReferencesTestForm
class FarmAdminForm(forms.ModelForm):
    """Admin form for Farm with an autocomplete fruit widget.

    The 'owner' model field is excluded and handled manually so that a
    brand-new User can be created on the fly from the typed owner value.
    """
    #owner = selectable.AutoComboboxSelectField(lookup_class=OwnerLookup, allow_new=True)
    class Meta(object):
        model = Farm
        widgets = {
            'fruit': selectable.AutoCompleteSelectMultipleWidget(lookup_class=FruitLookup),
        }
        exclude = ('owner', )
    def __init__(self, *args, **kwargs):
        super(FarmAdminForm, self).__init__(*args, **kwargs)
        # Pre-fill the owner field when editing an existing farm.
        if self.instance and self.instance.pk and self.instance.owner:
            self.initial['owner'] = self.instance.owner
    def save(self, *args, **kwargs):
        owner = self.cleaned_data['owner']
        # An owner without a pk is a new (unsaved) user from the combobox:
        # persist it before attaching it to the farm.
        if owner and not owner.pk:
            owner = User.objects.create_user(username=owner.username, email='')
        self.instance.owner = owner
        return super(FarmAdminForm, self).save(*args, **kwargs)
class FarmAdmin(admin.ModelAdmin):
    """Farm admin backed by the custom FarmAdminForm."""
    form = FarmAdminForm
class FarmInline(admin.TabularInline):
    """Inline Farm editor (used on the User admin page)."""
    model = Farm
    form = FarmAdminForm
class NewUserAdmin(UserAdmin):
    """Stock UserAdmin extended with inline farm editing."""
    inlines = [
        FarmInline,
    ]
class ReferencesTestAdmin(admin.ModelAdmin):
    """Admin for ReferencesTest using its custom form."""
    form = ReferencesTestForm
    #raw_id_fields = ('city', 'farm')
admin.site.unregister(User)
admin.site.register(User, NewUserAdmin)
admin.site.register(Fruit)
admin.site.register(Farm, FarmAdmin)
admin.site.register(ReferencesTest, ReferencesTestAdmin)
| StarcoderdataPython |
1604643 | <reponame>jchidley/OctopusEnergyMonitor<gh_stars>0
# Modified from
# https://gist.github.com/codeinthehole/5f274f46b5798f435e6984397f1abb64
# Requires the requests library (install with 'pip install requests')
import requests
import pandas as pd
from enum import Enum, auto
class OctopusEnergy(object):
    """Thin client for the Octopus Energy REST API plus pandas helpers.

    Wraps authenticated GETs against the v1 API and converts consumption /
    tariff responses into DatetimeIndex'ed DataFrames.
    """
    BASE_URL = "https://api.octopus.energy/v1"
    class DataUnavailable(Exception):
        """
        Catch-all exception indicating we can't get data back from the API
        """
    def __init__(self, cfg):
        """
        Get the configuration data https://octopus.energy/dashboard/developer/. This includes:
        API key API_KEY = sk_live_ZZh...
        account number ACCOUNT_NUMBER = A-D...
        electricity meter-point MPAN MPAN = 101...
        electricity meter serial number E_SERIAL = 19L...
        gas meter-point MPRN MPRN = 305...
        gas meter serial number G_SERIAL = E6S...
        """
        self.cfg = cfg
        # One Session reused across calls for connection pooling.
        self.session = requests.Session()
    def _get(self, path, params=None):
        """
        Make a GET HTTP request
        """
        if params is None:
            params = {}
        url = self.BASE_URL + path
        try:
            # API key is sent as HTTP basic-auth username, empty password.
            response = self.session.request(
                method="GET",
                url=url,
                auth=(self.cfg["octopus"]["api_key"], ""),
                params=params,
            )
        except requests.RequestException as e:
            raise self.DataUnavailable("Network exception") from e
        if response.status_code != 200:
            raise self.DataUnavailable(
                "Unexpected response status (%s)" % response.status_code
            )
        return response.json()
    def electricity_meter_point(self):
        # See https://developer.octopus.energy/docs/api/#electricity-meter-points
        return self._get("/electricity-meter-points/%s/" % self.cfg["octopus"]["mpan"])
    def electricity_tariff_unit_rates(self, product_code, tariff_code, params=None):
        # See https://developer.octopus.energy/docs/api/#list-tariff-charges
        return self._get(
            "/products/%s/electricity-tariffs/%s/standard-unit-rates/"
            % (product_code, tariff_code),
            params=params,
        )
    def electricity_tariff_standing_charges(self, product_code, tariff_code, **params):
        # See https://developer.octopus.energy/docs/api/#list-tariff-charges
        return self._get(
            "/products/%s/electricity-tariffs/%s/standing-charges/"
            % (product_code, tariff_code),
            params=params,
        )
    def agile_tariff_unit_rates(self, **params):
        """
        Helper method to easily look-up the electricity unit rates for given GSP
        """
        gsp = self.electricity_meter_point()["gsp"]
        # Handle GSPs passed with leading underscore
        if len(gsp) == 2:
            gsp = gsp[1]
        assert gsp in (
            "A",
            "B",
            "C",
            "D",
            "E",
            "F",
            "G",
            "P",
            "N",
            "J",
            "H",
            "K",
            "L",
            "M",
        )
        return self.electricity_tariff_unit_rates(
            product_code="AGILE-18-02-21",
            tariff_code="E-1R-AGILE-18-02-21-%s" % gsp,
            params=params,
        )
    def electricity_meter_consumption(self, **params):
        # See https://developer.octopus.energy/docs/api/#list-consumption-for-a-meter
        return self._get(
            "/electricity-meter-points/%s/meters/%s/consumption/"
            % (self.cfg["octopus"]["mpan"], self.cfg["octopus"]["e_serial"]),
            params=params,
        )
    def gas_meter_consumption(self, **params):
        # See https://developer.octopus.energy/docs/api/#list-consumption-for-a-meter
        return self._get(
            "/gas-meter-points/%s/meters/%s/consumption/"
            % (self.cfg["octopus"]["mprn"], self.cfg["octopus"]["g_serial"]),
            params=params,
        )
    class FuelType(Enum):
        # Which meter a consumption request targets.
        ELECTRIC = auto()
        GAS = auto()
    def getAgileTarriffRates(
        self, current_agile_rates=pd.DataFrame([]), page_size=1500
    ):
        """Fetch agile unit rates, reindex by valid_from, and merge with existing rates.

        NOTE(review): the mutable default DataFrame is shared between calls;
        it is only read (concat), never mutated, so this is safe as written.
        """
        response = self.agile_tariff_unit_rates(page_size=page_size)
        results = pd.DataFrame(response["results"])
        dt = pd.to_datetime(results["valid_from"], utc=True)
        dti = pd.DatetimeIndex(dt)
        results_reindex = results.set_index(dti).drop("valid_from", axis=1)
        # agile_tariff["valid_from"] = pd.to_datetime(agile_tariff["valid_from"]) # to date only .apply(lambda a: pd.to_datetime(a).date()) # for excel
        results_reindex.loc[:, "valid_to"] = pd.to_datetime(results_reindex["valid_to"])
        agile_tariff = pd.concat([results_reindex, current_agile_rates])
        return agile_tariff.dropna().drop_duplicates()
    def missing(consumption):
        """Return the half-hour timestamps absent from a consumption index.

        NOTE(review): defined without `self` — calling this on an instance
        would bind the instance to `consumption`; presumably invoked as
        OctopusEnergy.missing(df).  Confirm call sites.
        """
        first = consumption.index.min()
        last = consumption.index.max()
        total = pd.date_range(first, last, freq="30 min")
        missing = total.difference(consumption.index)
        return missing
    def consumption(self, fuel=None, current_consumption=pd.DataFrame([]), **params):
        """Fetch meter consumption for `fuel` and merge it with existing data."""
        def consumption_from_response(response):
            # Index the readings by their UTC interval_start timestamps.
            results = pd.DataFrame(response["results"]).dropna().drop_duplicates()
            dt = pd.to_datetime(results["interval_start"], utc=True)
            dti = pd.DatetimeIndex(dt)
            new_consumption = results.set_index(dti).drop("interval_start", axis=1)
            # needs to be forced to UTC? otherwise treats it as an object
            new_consumption["interval_end"] = pd.to_datetime(
                new_consumption["interval_end"], utc=True
            )
            # https://stackoverflow.com/questions/55385497/how-can-i-convert-my-datetime-column-in-pandas-all-to-the-same-timezone
            # https://stackoverflow.com/questions/63495502/creating-pandas-datetimeindex-in-dataframe-from-dst-aware-datetime-objects
            # https://queirozf.com/entries/pandas-time-series-examples-datetimeindex-periodindex-and-timedeltaindex
            return new_consumption.dropna().drop_duplicates()
        # NOTE(review): if fuel is neither ELECTRIC nor GAS (e.g. the default
        # None), `response` is never assigned and the call below raises
        # UnboundLocalError.
        if fuel == self.FuelType.ELECTRIC:
            # https://treyhunner.com/2018/10/asterisks-in-python-what-they-are-and-how-to-use-them/
            # When calling a function, the * operator can be used to unpack an iterable into the arguments in the function call:
            # The ** operator does something similar, but with keyword arguments. The ** operator allows us to take a dictionary of key-value pairs and unpack it into keyword arguments in a function call.
            response = self.electricity_meter_consumption(**params)
        if fuel is self.FuelType.GAS:
            response = self.gas_meter_consumption(**params)
        new_consumption = consumption_from_response(response)
        consumption = pd.concat([new_consumption, current_consumption])
        return consumption.dropna().drop_duplicates().sort_index()
    def update_consumption(
        self, fuel=FuelType.ELECTRIC, original_consumption=pd.DataFrame([])
    ):
        """
        Assume that there is a single contiguous block of readings (with some missing). Build upwards from that before starting to build downwards. If there is not data, get some first.
        """
        max_page_size = int(self.cfg["octopus"]["CONSUMPTION_PAGE_SIZE"])
        octopus_join_datetime = pd.to_datetime(
            self.cfg["octopus"]["OCTOPUS_JOIN_DATETIME"], utc=True
        )
        now = pd.Timestamp.now(tz="utc")
        if original_consumption.empty:
            original_consumption = self.consumption(fuel, page_size=max_page_size)
        original_max = original_consumption.index.max()
        original_min = original_consumption.index.min()
        def additionalConsuption(min_time, max_time):
            # Repeatedly page through [min_time, max_time] until no earlier
            # readings appear (the fetched minimum stops decreasing).
            new_consumption = pd.DataFrame([])
            previous_min = max_time
            new_min = min_time
            while (new_min < previous_min) and (max_time > min_time):
                previous_min = new_min
                old_consumption = new_consumption
                new_consumption = self.consumption(
                    fuel,
                    period_from=min_time.isoformat(),
                    period_to=max_time.isoformat(),
                    page_size=max_page_size,
                )
                new_consumption = (
                    pd.concat([old_consumption, new_consumption])
                    .dropna()
                    .drop_duplicates()
                )
                new_min = new_consumption.index.min()
            return new_consumption
        # Newer readings first (original_max .. now), then older ones
        # (account join date .. original_min).
        newerConsumption = additionalConsuption(original_max, now)
        olderConsumption = additionalConsuption(octopus_join_datetime, original_min)
        new_consumption = pd.concat(
            [olderConsumption, original_consumption, newerConsumption]
        )
        return new_consumption.dropna().drop_duplicates().sort_index()
    def gasCost(g_consumption, start_date, end_date):
        """Print kWh and estimated cost for gas consumption between two dates.

        NOTE(review): defined without `self` (like `missing` above) — first
        positional argument receives the instance if called on one.  The
        volume→kWh conversion (×1.02265 ×39.3 / 3.6), standing charge and
        unit cost are hard-coded tariff values.
        """
        selection = g_consumption[
            (g_consumption.index > start_date) & (g_consumption.index < end_date)
        ]
        # print(f"{selection.head(2)}\n{selection.tail(2)}")
        c = selection["consumption"].sum()
        kWh = (c * 1.02265 * 39.3) / 3.6
        standingCharge = 18.7 / 100
        unitCost = 2.74 / 100
        # 30 days of standing charge plus usage; ×1.05 adds 5% VAT.
        cost = 30 * standingCharge + kWh * unitCost
        print(f"{c:.1f} kWh \t£{cost * 1.05:.2f}")
| StarcoderdataPython |
1692292 | <filename>edh_web_application/foto/routes.py<gh_stars>1-10
import csv
import io
import json
from flask import render_template, request, jsonify, Response, current_app
from flask_babel import _
from . import bp_foto
from .forms import FotoSearchDe, FotoSearchEn
from ..models.Foto import Foto
@bp_foto.route('/foto/suche', strict_slashes=False)
def search_foto():
    """
    route for queries in foto database

    With no query parameters, renders the empty search form.  Otherwise it
    runs the query and renders results as a list, a grid (?view=grid), or a
    CSV download (?export=csv).
    """
    # Pick the form matching the active locale; pre-fill it from the query.
    if _('lang') == "de":
        form = FotoSearchDe(data=request.args)
    else:
        form = FotoSearchEn(data=request.args)
    form.fo_antik.data = request.args.get('fo_antik')
    form.fo_modern.data = request.args.get('fo_modern')
    form.kommentar.data = request.args.get('kommentar')
    form.provinz.data = request.args.getlist('provinz')
    form.land.data = request.args.getlist('land')
    form.aufbewahrung.data = request.args.get('aufbewahrung')
    form.cil.data = request.args.get('cil')
    form.ae.data = request.args.get('ae')
    form.andere.data = request.args.get('andere')
    if len(request.args) > 0:
        # create query string
        query_string = Foto.create_query_string(request.args)
        results = Foto.query(query_string)
        if results['metadata']['number_of_hits'] > 0:
            # CSV export of results
            if request.args.get('export') and request.args.get('export') == 'csv':
                # Re-run unpaginated so the export covers all hits.
                results = Foto.query(query_string, number_of_results=100000, start=0)
                output = io.StringIO()
                writer = csv.writer(output, quoting=csv.QUOTE_ALL)
                first_row = ['f-no.', 'province', 'country', 'ancient findspot',
                             'modern findspot', 'present location', 'date of photograph', 'cil', 'ae', 'other literature' ]
                writer.writerow(first_row)
                for i in results['items']:
                    writer.writerow((
                        i.f_nr,
                        i.provinz,
                        i.land,
                        i.fo_antik,
                        i.fo_modern,
                        i.aufbewahrung,
                        i.aufnahme_jahr,
                        i.cil,
                        i.ae,
                        i.andere
                    ))
                output.seek(0)
                return Response(output, mimetype="text/csv", headers={"Content-Disposition":"attachment;filename=edh_foto.csv"})
            else:
                if request.args.get('view') == 'grid':
                    return render_template('foto/search_results_grid.html',
                                           title=_("Foto Database"),
                                           subtitle=_("Search results"),
                                           data=results,
                                           number_of_hits=results['metadata']['number_of_hits'], form=form)
                else:
                    return render_template('foto/search_results.html',
                                           title=_("Foto Database"),
                                           subtitle=_("Search results"),
                                           data=results,
                                           number_of_hits=results['metadata']['number_of_hits'], form=form)
        else:
            return render_template('foto/no_hits.html', title=_("Foto Database"),
                                   subtitle=_("Search results"), data=results,
                                   number_of_hits=results['metadata']['number_of_hits'], form=form)
    else:
        return render_template('foto/search.html', title=_("Foto Database"), subtitle=_("Search"), form=form)
@bp_foto.route('/edh/foto/<f_nr>', strict_slashes=False)
@bp_foto.route('/edh/foto/<f_nr>/<conv_format>', strict_slashes=False)
def detail_view(f_nr, conv_format=''):
    """
    route for displaying single image record, either html or json

    conv_format: '' renders HTML, 'json' returns a JSON payload with CORS
    enabled; anything else yields an error dict.
    """
    results = Foto.query("f_nr:" + f_nr)
    if results['metadata']['number_of_hits'] == 0:
        results['metadata']['query_params'] = {'f_nr': f_nr}
        return render_template('foto/no_hits.html',
                               title=_("Foto Database"), subtitle=_("Detail View"), data=results)
    else:
        if conv_format == '':
            return render_template('foto/detail_view.html',
                                   title=_("Foto Database"), subtitle=_("Detail View"),
                                   data={"results": results['items'][0]},
                                   )
        elif conv_format == 'json':
            return_dict = Foto.get_items_as_list_of_dicts(results)
            pub_dict = {'items': return_dict, 'limit': 1, 'total': 1}
            return_dict_json = jsonify(pub_dict)
            # Allow cross-origin consumers of the JSON API.
            return_dict_json.headers.add('Access-Control-Allow-Origin', '*')
            return return_dict_json
        else:
            return {'error': 'wrong data format'}
@bp_foto.route('/foto/ac/fo_modern', methods=['GET', 'POST'], strict_slashes=False)
@bp_foto.route('/foto/ac/fo_modern/<short>', methods=['GET', 'POST'], strict_slashes=False)
def autocomplete_fo_modern(short=None):
    """
    route for retrieving autocomplete entries for field fo_modern
    :return: list of entries for autocomplete

    The optional /<short> path segment caps the list at 10 entries
    instead of 20 (same pattern as the other autocomplete routes below).
    """
    if short:
        return json.dumps(Foto.get_autocomplete_entries("fo_modern", request.args['term'], 10))
    else:
        return json.dumps(Foto.get_autocomplete_entries("fo_modern", request.args['term'], 20))
@bp_foto.route('/foto/ac/fo_antik', methods=['GET', 'POST'], strict_slashes=False)
@bp_foto.route('/foto/ac/fo_antik/<short>', methods=['GET', 'POST'], strict_slashes=False)
def autocomplete_fo_antik(short=None):
    """
    route for retrieving autocomplete entries for field fo_antik
    :return: list of entries for autocomplete

    /<short> caps the list at 10 entries instead of 20.
    """
    if short:
        return json.dumps(Foto.get_autocomplete_entries("fo_antik", request.args['term'], 10))
    else:
        return json.dumps(Foto.get_autocomplete_entries("fo_antik", request.args['term'], 20))
@bp_foto.route('/foto/ac/aufbewahrung', methods=['GET', 'POST'], strict_slashes=False)
@bp_foto.route('/foto/ac/aufbewahrung/<short>', methods=['GET', 'POST'], strict_slashes=False)
def autocomplete_aufbewahrung(short=None):
    """
    route for retrieving autocomplete entries for field present location
    :return: list of entries for autocomplete

    /<short> caps the list at 10 entries instead of 20.
    """
    if short:
        return json.dumps(Foto.get_autocomplete_entries("aufbewahrung", request.args['term'], 10))
    else:
        return json.dumps(Foto.get_autocomplete_entries("aufbewahrung", request.args['term'], 20))
@bp_foto.route('/foto/lastUpdates', methods=['GET', 'POST'])
def last_updates():
    """
    route for displaying last updates in bibliographic database
    :return: orderedDictionary of bibliographic entries grouped by date
    """
    results = Foto.last_updates()
    # Group flat results by update date for the template.
    results_grouped_by_date = Foto.group_results_by_date(results)
    return render_template('foto/last_updates.html',
                           title=_("Foto Database"),
                           subtitle=_("Last Updates"),
                           data=results_grouped_by_date)
4812308 | from typing import Optional
from fastapi import FastAPI
from demo.hello_world import greet
from demo.my_logger import getLogger
logger = getLogger("my-FastAPI-logger")
app = FastAPI()
@app.get("/")
def root():
    """Root endpoint: log the call and return the greeting payload."""
    logger.info("The FastAPI root endpoint was called.")
    return {"message": greet()}
| StarcoderdataPython |
4842924 | <reponame>fluidattacks/bugsnag-python
import tornado
from tornado.web import RequestHandler, HTTPError
from tornado.wsgi import WSGIContainer
from typing import Dict, Any
from urllib.parse import parse_qs
from bugsnag.breadcrumbs import BreadcrumbType
from bugsnag.utils import is_json_content_type, sanitize_url
from bugsnag.legacy import _auto_leave_breadcrumb
import bugsnag
import json
class BugsnagRequestHandler(RequestHandler):
    """Tornado RequestHandler base class that reports unhandled errors to Bugsnag.

    Subclass this instead of tornado.web.RequestHandler to get request
    metadata, breadcrumbs and session tracking attached to error reports.
    """
    def add_tornado_request_to_notification(self, event: bugsnag.Event):
        """Bugsnag middleware hook: attach the current request's details to an event."""
        if not hasattr(self, "request"):
            return
        event.request = self.request
        request_tab = {
            'method': self.request.method,
            'path': self.request.path,
            'GET': parse_qs(self.request.query),
            'POST': {},
            'url': self.request.full_url(),
        } # type: Dict[str, Any]
        try:
            if (len(self.request.body) > 0):
                headers = self.request.headers
                body = self.request.body.decode('utf-8', 'replace')
                is_json = is_json_content_type(headers.get('Content-Type', ''))
                # JSON POST bodies are decoded; everything else falls back to
                # Tornado's already-parsed body arguments.
                if is_json and request_tab["method"] == "POST":
                    request_tab["POST"] = json.loads(body)
                else:
                    request_tab["POST"] = self.request.body_arguments
        except Exception:
            # Body parsing is best-effort; never let it break error reporting.
            pass
        event.add_tab("request", request_tab)
        if bugsnag.configure().send_environment:
            env = WSGIContainer.environ(self.request)
            event.add_tab("environment", env)
    def _handle_request_exception(self, exc: BaseException):
        """Notify Bugsnag about an unhandled exception, then defer to Tornado."""
        options = {
            "user": {"id": self.request.remote_ip},
            "context": self._get_context(),
            "request": {
                "url": self.request.full_url(),
                "method": self.request.method,
                "arguments": self.request.arguments,
            },
            "severity_reason": {
                "type": "unhandledExceptionMiddleware",
                "attributes": {
                    "framework": "Tornado"
                }
            }
        }
        # Notify bugsnag, unless it's an HTTPError that we specifically want
        # to ignore
        should_notify_bugsnag = True
        if isinstance(exc, HTTPError):
            ignore_status_codes = self.bugsnag_ignore_status_codes()
            if ignore_status_codes and exc.status_code in ignore_status_codes:
                should_notify_bugsnag = False
        if should_notify_bugsnag:
            bugsnag.auto_notify(exc, **options)
        # Call the parent handler
        RequestHandler._handle_request_exception(self, exc)  # type: ignore
    def prepare(self):
        """Per-request setup: register middleware, breadcrumbs and session."""
        middleware = bugsnag.configure().internal_middleware
        bugsnag.configure().runtime_versions['tornado'] = tornado.version
        middleware.before_notify(self.add_tornado_request_to_notification)
        bugsnag.configure()._breadcrumbs.create_copy_for_context()
        _auto_leave_breadcrumb(
            'http request',
            self._get_breadcrumb_metadata(),
            BreadcrumbType.NAVIGATION
        )
        if bugsnag.configuration.auto_capture_sessions:
            bugsnag.start_session()
    def _get_breadcrumb_metadata(self) -> Dict[str, str]:
        """Build breadcrumb metadata: destination path plus sanitized referer."""
        if not hasattr(self, 'request'):
            return {}
        metadata = {'to': self.request.path}
        if 'Referer' in self.request.headers:
            metadata['from'] = sanitize_url(self.request.headers['Referer'])
        return metadata
    def _get_context(self):
        """Return the report context, e.g. 'GET /users' (query string stripped)."""
        return "%s %s" % (self.request.method, self.request.uri.split("?")[0])
    def bugsnag_ignore_status_codes(self):
        # Subclasses can override to add or remove codes
        return range(400, 500)
| StarcoderdataPython |
1766595 | import subprocess
import os
import uuid
import hashlib
import logging
import basedefs
import common_utils as utils
import output_messages
SELINUX_RW_LABEL = "public_content_rw_t"
SHA_CKSUM_TAG = "_SHA_CKSUM"
_preprocessLine = lambda line : unicode.encode(unicode(line), 'ascii', 'xmlcharrefreplace')
def addNfsExport(path, authInfo, comment=None, exportFilePath=basedefs.FILE_ETC_EXPORTS):
    """Append an NFS export entry for *path* to the exports file.

    authInfo is an iterable of (ip, mask, options) triples; options is a
    list of NFS export option strings.  An optional trailing '#comment'
    is written at the end of the line.
    """
    logging.debug("adding path %s to %s" % (path, exportFilePath))
    fields = [path]
    for ip, mask, options in authInfo:
        client = ip + "/" + mask if mask else ip
        fields.append("%s(%s)" % (client, ",".join(options)))
    entry = "\t".join(fields) + "\t"
    if comment:
        entry += "#" + comment
    with open(exportFilePath, "a") as exportFile:
        exportFile.write(entry + "\n")
def setSELinuxContextForDir(path, contextName):
    """Label *path* (recursively) with the public_content_rw_t SELinux type.

    NOTE(review): the contextName parameter is accepted but never used —
    the hard-coded SELINUX_RW_LABEL is applied instead.  Confirm whether
    callers rely on passing a custom label.
    """
    logging.debug("setting selinux context for %s" % (path))
    # Strip a trailing slash so the semanage regex matches cleanly.
    if path.endswith("/"):
        path = path[:-1]
    # Match the directory itself and everything beneath it.
    pattern = "%s(/.*)?" % (path,)
    # Run semanage
    cmd = [basedefs.EXEC_SEMANAGE, "fcontext", "-a", "-t", SELINUX_RW_LABEL, pattern]
    utils.execCmd(cmd, None, True, output_messages.ERR_SET_SELINUX_NFS_SHARE)
    # Apply the new labeling to the existing files.
    cmd = [basedefs.EXEC_RESTORECON, "-r", path]
    utils.execCmd(cmd, None, True, output_messages.ERR_REFRESH_SELINUX_CONTEXT)
DEFAULT_MD = {
"CLASS" : "Iso",
"DESCRIPTION" : "isofun",
"IOOPTIMEOUTSEC" : "1",
"LEASERETRIES" : "3",
"LEASETIMESEC" : "5",
"LOCKPOLICY" : "",
"LOCKRENEWALINTERVALSEC" : "5",
"POOL_UUID" : "",
"REMOTE_PATH" : "no.one.reads.this:/rhev",
"ROLE" : "Regular",
"SDUUID" : "",
"TYPE" : "NFS",
"VERSION" : "0"}
# This is modified from persitantDict
# This is modified from persitantDict
def writeMD(filePath, md):
    """Write storage-domain metadata key=value lines plus a SHA1 checksum line.

    Keys are written in sorted order; the checksum covers every key=value
    line (after _preprocessLine's ascii/xmlcharref encoding) and is appended
    as the SHA_CKSUM_TAG entry.  The file is fsync'ed before closing.
    NOTE(review): Python 2 code — md.keys() must return a list for .sort().
    """
    logging.debug("generating metadata")
    checksumCalculator = hashlib.sha1()
    lines = []
    keys = md.keys()
    keys.sort()
    for key in keys:
        value = md[key]
        line = "=".join([key, str(value).strip()])
        checksumCalculator.update(_preprocessLine(line))
        lines.append(line)
    computedChecksum = checksumCalculator.hexdigest()
    logging.debug("checksum of metadata is %s" % (computedChecksum))
    lines.append("=".join([SHA_CKSUM_TAG, computedChecksum]))
    logging.debug("writing metadata file (%s)" % (filePath))
    f = open(filePath, "w")
    f.writelines([l + "\n" for l in lines])
    f.flush()
    # Force the data to disk before returning.
    os.fsync(f.fileno())
    f.close()
#. Iso domain structure
#`-- 2325a2fa-4bf4-47c4-81e1-f60d80dbe968
# |-- dom_md
# | |-- ids
# | |-- inbox
# | |-- leases
# | |-- metadata
# | `-- outbox
# `-- images
# `-- 11111111-1111-1111-1111-111111111111
#since the uuid package is not supported in python v2.4
#we had to use this implementation
def generateUUID():
    """Return a fresh random (version 4) UUID as a string."""
    logging.debug("Generating unique uuid")
    return str(uuid.uuid4())
def createISODomain(path, description, sdUUID):
    """Build the on-disk directory tree of an NFS ISO storage domain.

    Creates ``<path>/<sdUUID>/{images/11111111-...,dom_md}``, writes the
    empty lockspace files and the metadata file, then hands ownership of
    the whole tree to vdsm:kvm so the hypervisor can use it.
    """
    logging.debug("creating iso domain for %s. uuid: %s" % (path, sdUUID))
    basePath = os.path.join(path, sdUUID)
    os.mkdir(basePath)
    imagesDir = os.path.join(basePath, "images")
    os.mkdir(imagesDir)
    os.mkdir(os.path.join(imagesDir, "11111111-1111-1111-1111-111111111111"))
    domMdDir = os.path.join(basePath, "dom_md")
    os.mkdir(domMdDir)
    logging.debug("creating empty files")
    for fname in ("ids", "inbox", "leases", "outbox"):
        # try/finally ensures the handle is closed even if a write fails
        f = open(os.path.join(domMdDir, fname), "w")
        try:
            f.write("\0")
            f.flush()
        finally:
            f.close()
    logging.debug("writing metadata")
    mdFilePath = os.path.join(domMdDir, "metadata")
    md = DEFAULT_MD.copy()
    md.update({"SDUUID" : sdUUID, "DESCRIPTION" : description})
    writeMD(mdFilePath, md)
    logging.debug("setting directories & files permissions to %s:%s" % (basedefs.CONST_VDSM_UID, basedefs.CONST_KVM_GID))
    # 0o755 spelling works on Python 2.6+ and is required on Python 3,
    # where the bare 0755 literal is a SyntaxError (PEP 3127).
    os.chmod(path, 0o755)
    for base, dirs, files in os.walk(path):
        # Every directory appears as "base" in a later iteration, so
        # chowning base + its files here covers the entire tree.
        allFsObjects = [base]
        allFsObjects.extend([os.path.join(base, fname) for fname in files])
        for fname in allFsObjects:
            os.chown(fname, basedefs.CONST_VDSM_UID, basedefs.CONST_KVM_GID)
def refreshNfsExports():
    """Re-export all NFS shares by running ``exportfs -a``."""
    logging.debug("refreshing NFS exports")
    process = subprocess.Popen(
        [basedefs.EXEC_EXPORTFS, "-a"], stderr=subprocess.PIPE)
    stderr_output = process.communicate()[1]
    if process.returncode != 0:
        raise RuntimeError(
            "Could not refresh NFS exports (%d: %s)"
            % (process.returncode, stderr_output))
| StarcoderdataPython |
38443 | # Copyright 2003, 2007 by <NAME>. <EMAIL>
# All rights reserved. This code is part of the Biopython
# distribution and governed by its license.
# Please see the LICENSE file that should have been included as part
# of this package.
import math
def lcc_mult(seq, wsize):
    """Local Composition Complexity (LCC) values over a sliding window.

    seq - an unambiguous DNA sequence (a string or Seq object);
          bases other than A/C/G/T are not supported
    wsize - window size, integer

    Returns a list of floats: the LCC value of every window of length
    *wsize* (the same numbers as applying lcc_simp to each window), but
    computed incrementally -- each window's base counts are derived from
    the previous window's -- so the whole scan is O(len(seq)).

    Fixes over the previous revision: the first window and its tail are
    now taken from the upper-cased copy (lower-case input used to score
    the first window as empty), the spurious leading 0 in the result has
    been removed (as in upstream Biopython), and an empty sequence
    returns [] instead of raising IndexError.
    """
    l2 = math.log(2)
    try:
        upper = seq.upper()          # plain string
    except AttributeError:
        upper = str(seq).upper()     # Seq object (Seq.tostring() is gone)
    if not upper:
        return []
    tamseq = len(upper)
    # compone[c] = (c/wsize) * log2(c/wsize), precomputed for every
    # possible per-base count c in a window (compone[0] == 0).
    compone = [0]
    for i in range(wsize):
        compone.append(((i + 1) / float(wsize))
                       * ((math.log((i + 1) / float(wsize))) / l2))
    window = upper[0:wsize]
    counts = {
        'A': window.count('A'),
        'C': window.count('C'),
        'T': window.count('T'),
        'G': window.count('G'),
    }

    def _entropy():
        # LCC of the current window from the cached per-base counts.
        return -(compone[counts['A']] + compone[counts['C']]
                 + compone[counts['T']] + compone[counts['G']])

    lccsal = [_entropy()]
    tail = upper[0]
    for x in range(tamseq - wsize):
        window = upper[x + 1:wsize + x + 1]
        head = window[-1]
        if tail != head:
            # One base left the window and a different one entered.
            counts[tail] -= 1
            counts[head] += 1
        lccsal.append(_entropy())
        tail = window[0]
    return lccsal
def lcc_simp(seq):
    """Local Composition Complexity (LCC) for a sequence.

    seq - an unambiguous DNA sequence (a string or Seq object)

    Returns the LCC value for the entire sequence (a float): the base-2
    Shannon entropy of its A/C/T/G composition.

    Fix over the previous revision: base membership is now tested on the
    upper-cased copy, so lower-case input scores the same as upper-case
    (previously e.g. "acgt" returned 0.0 because 'A' was "not in" the
    raw sequence even though counting was done on the upper-cased copy).

    Reference:
    <NAME> (2005) Sequence Complexity and Composition
    DOI: 10.1038/npg.els.0005260
    """
    wsize = len(seq)
    try:
        upper = seq.upper()          # plain string
    except AttributeError:
        upper = str(seq).upper()     # Seq object (Seq.tostring() is gone)
    l2 = math.log(2)
    total = 0.0
    # Same A, C, T, G summation order as before, so results are
    # bit-identical for upper-case input.
    for base in "ACTG":
        count = upper.count(base)
        if count:  # skip absent bases to avoid log(0)
            freq = count / float(wsize)
            total += freq * (math.log(freq) / l2)
    return -total
| StarcoderdataPython |
3238552 | <reponame>zmoon92/proplot
#!/usr/bin/env python3
"""
The standard x-y axes used for most ProPlot figures.
"""
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
import numpy as np
from .. import constructor
from .. import scale as pscale
from .. import ticker as pticker
from ..config import rc
from ..internals import ic # noqa: F401
from ..internals import _not_none, docstring, rcsetup, warnings
from ..utils import units
from . import base
__all__ = ['CartesianAxes']
_alt_doc = """
Return an axes in the same location as this one but whose {x} axis is on
the {x2}. This is an alias and more intuitive name for
`~CartesianAxes.twin{y}`, which generates two *{x}* axes with
a shared ("twin") *{y}* axes.
Also enforces the following settings:
* Places the old *{x}* axis on the {x1} and the new *{x}* axis
on the {x2}.
* Makes the old {x2} spine invisible and the new {x1}, {y1},
and {y2} spines invisible.
* Adjusts the *{x}* axis tick, tick label, and axis label positions
according to the visible spine positions.
* Locks the old and new *{y}* axis limits and scales, and makes the new
{y} axis labels invisible.
Parameters
----------
{xargs} : optional
Passed to `Axes.format`.
{args} : optional
Prepended with ``'{x}'`` and passed to `Axes.format`.
"""
# Axis-format keywords accepted by the altx/alty/dualx/dualy/twinx/twiny
# methods, either bare (e.g. 'lim') or prefixed with the axis letter
# (e.g. 'xlim'); _parse_alt() normalizes them before forwarding to
# Axes.format(). Also interpolated into the docstring templates above.
_alt_kwargs = (  # TODO: More systematic approach?
    'lim', 'reverse', 'scale', 'label',
    'tickdir', 'grid', 'gridminor',
    'tickminor', 'ticklabeldir', 'tickrange', 'wraprange',
    'rotation', 'formatter', 'ticklabels',
    'ticks', 'locator', 'minorticks', 'minorlocator',
    'bounds', 'margin', 'color',
    'ticklen', 'linewidth', 'gridcolor',
    'label_kw', 'scale_kw', 'locator_kw', 'formatter_kw', 'minorlocator_kw',
)
docstring.snippets['axes.altx'] = _alt_doc.format(
x='x',
x1='bottom',
x2='top',
y='y',
y1='left',
y2='right',
args=', '.join(_alt_kwargs),
xargs=', '.join('x' + key for key in _alt_kwargs),
)
docstring.snippets['axes.alty'] = _alt_doc.format(
x='y',
x1='left',
x2='right',
y='x',
y1='bottom',
y2='top',
args=', '.join(_alt_kwargs),
xargs=', '.join('y' + key for key in _alt_kwargs),
)
_dual_doc = """
Return a secondary *{x}* axis for denoting equivalent *{x}*
coordinates in *alternate units*.
Parameters
----------
funcscale : function, (function, function), or scale-spec
Used to transform units from the parent axis to the secondary axis.
This can be a `~proplot.scale.FuncScale` itself or a function,
(function, function) tuple, or an axis scale specification interpreted
by the `~proplot.constructor.Scale` constructor function, any of which will
be used to build a `~proplot.scale.FuncScale` and applied to the dual axis
(see `~proplot.scale.FuncScale` for details).
{args} : optional
Prepended with ``'{x}'`` and passed to `Axes.format`.
"""
docstring.snippets['axes.dualx'] = _dual_doc.format(
x='x',
args=', '.join(_alt_kwargs),
xargs=', '.join('x' + key for key in _alt_kwargs),
)
docstring.snippets['axes.dualy'] = _dual_doc.format(
x='y',
args=', '.join(_alt_kwargs),
xargs=', '.join('y' + key for key in _alt_kwargs),
)
_twin_doc = """
Mimics the builtin `~matplotlib.axes.Axes.twin{y}` method.
Also enforces the following settings:
* Places the old *{x}* axis on the {x1} and the new *{x}* axis
on the {x2}.
* Makes the old {x2} spine invisible and the new {x1}, {y1},
and {y2} spines invisible.
* Adjusts the *{x}* axis tick, tick label, and axis label positions
according to the visible spine positions.
* Locks the old and new *{y}* axis limits and scales, and makes the new
{y} axis labels invisible.
Parameters
----------
{xargs} : optional
Passed to `Axes.format`.
{args} : optional
Prepended with ``'{x}'`` and passed to `Axes.format`.
"""
docstring.snippets['axes.twinx'] = _twin_doc.format(
x='y', x1='left', x2='right',
y='x', y1='bottom', y2='top',
args=', '.join(_alt_kwargs),
xargs=', '.join('y' + key for key in _alt_kwargs),
)
docstring.snippets['axes.twiny'] = _twin_doc.format(
x='x', x1='bottom', x2='top',
y='y', y1='left', y2='right',
args=', '.join(_alt_kwargs),
xargs=', '.join('x' + key for key in _alt_kwargs),
)
def _parse_alt(x, kwargs):
    """
    Interpret keyword args passed to all "twin axis" methods so they
    can be passed to Axes.format.
    """
    translated = {}
    unknown = {}
    for key, value in kwargs.items():
        if key in _alt_kwargs:
            translated[x + key] = value
        elif key.startswith(x) and key[1:] in _alt_kwargs:
            # NOTE: We permit both e.g. 'locator' and 'xlocator' because
            # while is more elegant and consistent with e.g. colorbar() syntax
            # but latter is more consistent and easier to use when refactoring.
            translated[key] = value
        elif key in rcsetup._rc_nodots:
            translated[key] = value
        else:
            unknown[key] = value
    if unknown:
        raise TypeError(f'Unexpected keyword argument(s): {unknown!r}')
    return translated
def _parse_rcloc(x, string):  # figures out string location
    """
    Convert the *boolean* "left", "right", "top", and "bottom" rc settings
    to a location string. Returns ``None`` if settings are unchanged.
    """
    # The two candidate sides depend on which axis is being queried.
    if x == 'x':
        side1, side2 = 'top', 'bottom'
    else:
        side1, side2 = 'left', 'right'
    opt1 = rc.get(f'{string}.{side1}', context=True)
    opt2 = rc.get(f'{string}.{side2}', context=True)
    if opt1 is None and opt2 is None:
        return None
    if opt1 and opt2:
        return 'both'
    if opt1:
        return side1
    if opt2:
        return side2
    return 'neither'
class CartesianAxes(base.Axes):
"""
Axes subclass for plotting in ordinary Cartesian coordinates.
Adds the `~CartesianAxes.format` method and overrides several existing
methods.
"""
#: The registered projection name.
name = 'cartesian'
    def __init__(self, *args, **kwargs):
        """
        See also
        --------
        proplot.ui.subplots
        """
        # Impose default formatter
        super().__init__(*args, **kwargs)
        formatter = pticker.AutoFormatter()
        self.xaxis.set_major_formatter(formatter)
        self.yaxis.set_major_formatter(formatter)
        # Mark the formatters as "default" so later axis-sharing and
        # dual-axis code may replace them without clobbering user settings.
        self.xaxis.isDefault_majfmt = True
        self.yaxis.isDefault_majfmt = True
        self._datex_rotated = False  # whether manual rotation has been applied
        self._dualy_funcscale = None  # for scaling units on opposite side of ax
        self._dualx_funcscale = None
        self._dualy_parent_prev_state = None  # prevent excess _dualy_overrides calls
        self._dualx_parent_prev_state = None
def _altx_overrides(self):
"""
Apply alternate *x* axis overrides.
"""
# Unlike matplotlib API, we strong arm user into certain twin axes
# settings... doesn't really make sense to have twin axes without this
# NOTE: Could also use _panel_sharey_group = True to hide xaxis content
# but instead we set entire axis to visible = False. Safer that way.
if self._altx_child is not None: # altx was called on this axes
self.spines['top'].set_visible(False)
self.spines['bottom'].set_visible(True)
self.xaxis.tick_bottom()
self.xaxis.set_label_position('bottom')
if self._altx_parent is not None: # this axes is the result of altx
self.spines['bottom'].set_visible(False)
self.spines['top'].set_visible(True)
self.spines['left'].set_visible(False)
self.spines['right'].set_visible(False)
self.xaxis.tick_top()
self.xaxis.set_label_position('top')
self.yaxis.set_visible(False)
self.patch.set_visible(False)
def _alty_overrides(self):
"""
Apply alternate *y* axis overrides.
"""
if self._alty_child is not None:
self.spines['right'].set_visible(False)
self.spines['left'].set_visible(True)
self.yaxis.tick_left()
self.yaxis.set_label_position('left')
if self._alty_parent is not None:
self.spines['left'].set_visible(False)
self.spines['right'].set_visible(True)
self.spines['top'].set_visible(False)
self.spines['bottom'].set_visible(False)
self.yaxis.tick_right()
self.yaxis.set_label_position('right')
self.xaxis.set_visible(False)
self.patch.set_visible(False)
    def _apply_axis_sharing(self):
        """
        Enforce the "shared" axis labels and axis tick labels. If this is not called
        at drawtime, "shared" labels can be inadvertantly turned off.
        """
        # X axis
        # NOTE: The "panel sharing group" refers to axes and panels *above* the
        # bottommost or to the *right* of the leftmost panel. But the edge panel
        # sharing level is the *figure* sharing level.
        axis = self.xaxis
        if self._sharex is not None:
            # Panel-group members always use level 3; otherwise the
            # figure-wide sharing level applies.
            level = 3 if self._panel_sharex_group else self.figure._sharex
            if level > 0:
                axis.label.set_visible(False)
            if level > 2:
                # WARNING: Cannot set NullFormatter because shared axes share the
                # same axis.Ticker classes. Instead use the approach copied from
                # matplotlib subplots().
                axis.set_tick_params(which='both', labelbottom=False, labeltop=False)
        # Y axis
        axis = self.yaxis
        if self._sharey is not None:
            level = 3 if self._panel_sharey_group else self.figure._sharey
            if level > 0:
                axis.label.set_visible(False)
            if level > 2:
                axis.set_tick_params(which='both', labelleft=False, labelright=False)
                # NOTE(review): unlike the x branch, minor labels are also hidden
                # here via a NullFormatter -- confirm this asymmetry is intended.
                axis.set_minor_formatter(mticker.NullFormatter())
def _datex_rotate(self):
"""
Apply default rotation to datetime axis coordinates.
"""
# NOTE: Rotation is done *before* horizontal/vertical alignment,
# cannot change alignment with set_tick_params. Must apply to text
# objects. fig.autofmt_date calls subplots_adjust, so cannot use it.
if (
not isinstance(self.xaxis.converter, mdates.DateConverter)
or self._datex_rotated
):
return
rotation = rc['formatter.timerotation']
kw = {'rotation': rotation}
if rotation not in (0, 90, -90):
kw['ha'] = ('right' if rotation > 0 else 'left')
for label in self.xaxis.get_ticklabels():
label.update(kw)
self._datex_rotated = True # do not need to apply more than once
    def _dualx_overrides(self):
        """
        Lock the child "dual" *x* axis limits to the parent.
        """
        # NOTE: We set the scale using private API to bypass application of
        # set_default_locators_and_formatters: only_if_default=True is critical
        # to prevent overriding user settings!
        # NOTE: We bypass autoscale_view because we set limits manually, and bypass
        # child.stale = True because that is done in call to set_xlim() below.
        # NOTE: Dual axis only needs to be constrained if the parent axis scale
        # and limits have changed.
        funcscale = self._dualx_funcscale
        if funcscale is None:
            return
        scale = self.xaxis._scale
        olim = self.get_xlim()
        # Cached (scale, *lim) state lets us skip redundant re-application.
        if (scale, *olim) == self._dualx_parent_prev_state:
            return
        child = self._altx_child
        funcscale = pscale.FuncScale(funcscale, invert=True, parent_scale=scale)
        child.xaxis._scale = funcscale
        child._update_transScale()
        funcscale.set_default_locators_and_formatters(child.xaxis, only_if_default=True)
        # Map the parent limits through the inverse function to get child limits.
        nlim = list(map(funcscale.functions[1], np.array(olim)))
        if np.sign(np.diff(olim)) != np.sign(np.diff(nlim)):
            nlim = nlim[::-1]  # if function flips limits, so will set_xlim!
        child.set_xlim(nlim, emit=False)
        self._dualx_parent_prev_state = (scale, *olim)
    def _dualy_overrides(self):
        """
        Lock the child "dual" *y* axis limits to the parent.
        """
        # See _dualx_overrides for the detailed notes; this is its *y* twin.
        funcscale = self._dualy_funcscale
        if funcscale is None:
            return
        scale = self.yaxis._scale
        olim = self.get_ylim()
        # Cached (scale, *lim) state lets us skip redundant re-application.
        if (scale, *olim) == self._dualy_parent_prev_state:
            return
        child = self._alty_child
        funcscale = pscale.FuncScale(funcscale, invert=True, parent_scale=scale)
        child.yaxis._scale = funcscale
        child._update_transScale()
        funcscale.set_default_locators_and_formatters(child.yaxis, only_if_default=True)
        # Map the parent limits through the inverse function to get child limits.
        nlim = list(map(funcscale.functions[1], np.array(olim)))
        if np.sign(np.diff(olim)) != np.sign(np.diff(nlim)):
            nlim = nlim[::-1]  # if function flips limits, so will set_ylim!
        child.set_ylim(nlim, emit=False)
        self._dualy_parent_prev_state = (scale, *olim)
def _sharex_setup(self, sharex):
"""
Configure shared axes accounting for panels. The input is the
'parent' axes, from which this one will draw its properties.
"""
# Share *panels* across different subplots
super()._sharex_setup(sharex)
# Get sharing level
level = (
3 if self._panel_sharex_group and self._is_panel_group_member(sharex)
else self.figure._sharex
)
if level not in range(4):
raise ValueError(
'Invalid sharing level sharex={value!r}. '
'Axis sharing level can be 0 (share nothing), '
'1 (hide axis labels), '
'2 (share limits and hide axis labels), or '
'3 (share limits and hide axis and tick labels).'
)
if sharex in (None, self) or not isinstance(sharex, CartesianAxes):
return
# Share future changes to axis labels
# Proplot internally uses _sharex and _sharey for label sharing. Matplotlib
# only uses these in __init__() and cla() to share tickers -- all other builtin
# matplotlib axis sharing features derive from _shared_x_axes() group.
if level > 0:
self._sharex = sharex
if not sharex.xaxis.label.get_text():
sharex.xaxis.label.set_text(self.xaxis.label.get_text())
# Share future axis tickers, limits, and scales
# NOTE: Only difference between levels 2 and 3 is level 3 hides
# tick labels. But this is done after the fact -- tickers are still shared.
if level > 1:
# Initial limits and scales should be shared both ways
for (ax1, ax2) in ((self, sharex), (sharex, self)):
if ax1.get_xscale() == 'linear' and ax2.get_xscale() != 'linear':
ax1.set_xscale(ax2.get_xscale())
if ax1.get_autoscalex_on() and not ax2.get_autoscalex_on():
ax1.set_xlim(ax2.get_xlim())
# Locators and formatters only need to be shared from children
# to parent, because this is done automatically when we assign
# parent sharex tickers to child.
self._shared_x_axes.join(self, sharex) # share limit/scale changes
if sharex.xaxis.isDefault_majloc and not self.xaxis.isDefault_majloc:
sharex.xaxis.set_major_locator(self.xaxis.get_major_locator())
if sharex.xaxis.isDefault_minloc and not self.xaxis.isDefault_minloc:
sharex.xaxis.set_minor_locator(self.xaxis.get_minor_locator())
if sharex.xaxis.isDefault_majfmt and not self.xaxis.isDefault_majfmt:
sharex.xaxis.set_major_formatter(self.xaxis.get_major_formatter())
if sharex.xaxis.isDefault_minfmt and not self.xaxis.isDefault_minfmt:
sharex.xaxis.set_minor_formatter(self.xaxis.get_minor_formatter())
self.xaxis.major = sharex.xaxis.major
self.xaxis.minor = sharex.xaxis.minor
def _sharey_setup(self, sharey):
"""
Configure shared axes accounting for panels. The input is the
'parent' axes, from which this one will draw its properties.
"""
# Share *panels* across different subplots
super()._sharey_setup(sharey)
# Get sharing level
level = (
3 if self._panel_sharey_group and self._is_panel_group_member(sharey)
else self.figure._sharey
)
if level not in range(4):
raise ValueError(
'Invalid sharing level sharey={value!r}. '
'Axis sharing level can be 0 (share nothing), '
'1 (hide axis labels), '
'2 (share limits and hide axis labels), or '
'3 (share limits and hide axis and tick labels).'
)
if sharey in (None, self) or not isinstance(sharey, CartesianAxes):
return
# Share future changes to axis labels
if level > 0:
self._sharey = sharey
if not sharey.yaxis.label.get_text():
sharey.yaxis.label.set_text(self.yaxis.label.get_text())
# Share future axis tickers, limits, and scales
if level > 1:
# Initial limits and scales should be shared both ways
for (ax1, ax2) in ((self, sharey), (sharey, self)):
if ax1.get_yscale() == 'linear' and ax2.get_yscale() != 'linear':
ax1.set_yscale(ax2.get_yscale())
if ax1.get_autoscaley_on() and not ax2.get_autoscaley_on():
ax1.set_ylim(ax2.get_ylim())
# Locators and formatters only need to be shared from children
# to parent, because this is done automatically when we assign
# parent sharey tickers to child.
self._shared_y_axes.join(self, sharey) # share limit/scale changes
if sharey.yaxis.isDefault_majloc and not self.yaxis.isDefault_majloc:
sharey.yaxis.set_major_locator(self.yaxis.get_major_locator())
if sharey.yaxis.isDefault_minloc and not self.yaxis.isDefault_minloc:
sharey.yaxis.set_minor_locator(self.yaxis.get_minor_locator())
if sharey.yaxis.isDefault_majfmt and not self.yaxis.isDefault_majfmt:
sharey.yaxis.set_major_formatter(self.yaxis.get_major_formatter())
if sharey.yaxis.isDefault_minfmt and not self.yaxis.isDefault_minfmt:
sharey.yaxis.set_minor_formatter(self.yaxis.get_minor_formatter())
self.yaxis.major = sharey.yaxis.major
self.yaxis.minor = sharey.yaxis.minor
    def _update_axis_labels(self, x='x', **kwargs):
        """
        Apply axis labels to the relevant shared axis. If spanning labels are toggled
        this keeps the labels synced for all subplots in the same row or column. Label
        positions will be adjusted at draw-time with figure._align_axislabels.
        """
        if x not in 'xy':
            return
        # Get axes in 3 step process
        # 1. Walk to parent if it is a main axes
        # 2. Get spanning main axes in this row or column (ignore short panel edges)
        # 3. Walk to parent if it exists (may be a panel long edge)
        # NOTE: Axis sharing between "main" axes is only ever one level deep.
        # NOTE: Critical to apply labels to *shared* axes attributes rather
        # than testing extents or we end up sharing labels with twin axes.
        ax = self
        if getattr(self.figure, '_share' + x) > 0:
            share = getattr(ax, '_share' + x) or ax
            if not share._panel_parent:
                ax = share  # climb to the sharing parent, but never into a panel
        # Get spanning axes
        axs = [ax]
        if getattr(ax.figure, '_span' + x):
            side = getattr(self, x + 'axis').get_label_position()
            if side in ('left', 'bottom'):
                axs = ax._get_side_axes(side, panels=False)
        # Update axes with label
        for ax in axs:
            ax = getattr(ax, '_share' + x) or ax  # defer to panel
            axis = getattr(ax, x + 'axis')
            axis.label.update(kwargs)
@docstring.add_snippets
def format(
self, *,
aspect=None,
xloc=None, yloc=None,
xspineloc=None, yspineloc=None,
xtickloc=None, ytickloc=None, fixticks=False,
xlabelloc=None, ylabelloc=None,
xticklabelloc=None, yticklabelloc=None,
xtickdir=None, ytickdir=None,
xgrid=None, ygrid=None,
xgridminor=None, ygridminor=None,
xtickminor=None, ytickminor=None,
xticklabeldir=None, yticklabeldir=None,
xtickrange=None, ytickrange=None,
xwraprange=None, ywraprange=None,
xreverse=None, yreverse=None,
xlabel=None, ylabel=None,
xlim=None, ylim=None,
xmin=None, ymin=None,
xmax=None, ymax=None,
xscale=None, yscale=None,
xrotation=None, yrotation=None,
xformatter=None, yformatter=None,
xticklabels=None, yticklabels=None,
xticks=None, yticks=None,
xlocator=None, ylocator=None,
xminorticks=None, yminorticks=None,
xminorlocator=None, yminorlocator=None,
xbounds=None, ybounds=None,
xmargin=None, ymargin=None,
xcolor=None, ycolor=None,
xlinewidth=None, ylinewidth=None,
xgridcolor=None, ygridcolor=None,
xticklen=None, yticklen=None,
xlabel_kw=None, ylabel_kw=None,
xscale_kw=None, yscale_kw=None,
xlocator_kw=None, ylocator_kw=None,
xformatter_kw=None, yformatter_kw=None,
xminorlocator_kw=None, yminorlocator_kw=None,
patch_kw=None,
**kwargs
):
"""
Modify the *x* and *y* axis labels, tick locations, tick labels,
axis scales, spine settings, and more. Unknown keyword arguments
are passed to `Axes.format` and
`~proplot.config.RcConfigurator.context`.
Parameters
----------
aspect : {'auto', 'equal'}, optional
The aspect ratio mode. See `~matplotlib.axes.Axes.set_aspect`
for details.
xlabel, ylabel : str, optional
The *x* and *y* axis labels. Applied with
`~matplotlib.axes.Axes.set_xlabel`
and `~matplotlib.axes.Axes.set_ylabel`.
xlabel_kw, ylabel_kw : dict-like, optional
The *x* and *y* axis label settings. Applied with the
`~matplotlib.artist.Artist.update` method on the
`~matplotlib.text.Text` instance. Options include ``'color'``,
``'size'``, and ``'weight'``.
xlim, ylim : 2-tuple of floats or None, optional
The *x* and *y* axis data limits. Applied with
`~matplotlib.axes.Axes.set_xlim` and
`~matplotlib.axes.Axes.set_ylim`.
xmin, ymin : float, optional
The *x* and *y* minimum data limits. Useful if you do not want
to set the maximum limits.
xmax, ymax : float, optional
The *x* and *y* maximum data limits. Useful if you do not want
to set the minimum limits.
xreverse, yreverse : bool, optional
Sets whether the *x* and *y* axis are oriented in the "reverse"
direction. The "normal" direction is increasing to the right for
the *x* axis and to the top for the *y* axis. The "reverse"
direction is increasing to the left for the *x* axis and to the
bottom for the *y* axis.
xscale, yscale : axis scale spec, optional
The *x* and *y* axis scales. Passed to the
`~proplot.scale.Scale` constructor. For example,
``xscale='log'`` applies logarithmic scaling, and
``xscale=('cutoff', 0.5, 2)`` applies a custom
`~proplot.scale.CutoffScale`.
xscale_kw, yscale_kw : dict-like, optional
The *x* and *y* axis scale settings. Passed to
`~proplot.scale.Scale`.
xspineloc, yspineloc : {'both', 'bottom', 'top', 'left', 'right', \
'neither', 'center', 'zero'}, optional
The *x* and *y* axis spine locations.
xloc, yloc : optional
Aliases for `xspineloc`, `yspineloc`.
xtickloc, ytickloc : {'both', 'bottom', 'top', 'left', 'right', \
'neither'}, optional
Which *x* and *y* axis spines should have major and minor tick
marks.
xtickminor, ytickminor : bool, optional
Whether to draw minor ticks on the *x* and *y* axes.
xtickdir, ytickdir : {'out', 'in', 'inout'}
Direction that major and minor tick marks point for the *x* and
*y* axis.
xgrid, ygrid : bool, optional
Whether to draw major gridlines on the *x* and *y* axis.
Use `grid` to toggle both.
xgridminor, ygridminor : bool, optional
Whether to draw minor gridlines for the *x* and *y* axis.
Use `gridminor` to toggle both.
xticklabeldir, yticklabeldir : {'in', 'out'}
Whether to place *x* and *y* axis tick label text inside
or outside the axes.
xlocator, ylocator : locator spec, optional
Used to determine the *x* and *y* axis tick mark positions. Passed
to the `~proplot.constructor.Locator` constructor. Can be float,
list of float, string, or `matplotlib.ticker.Locator` instance.
xticks, yticks : optional
Aliases for `xlocator`, `ylocator`.
xlocator_kw, ylocator_kw : dict-like, optional
Keyword arguments passed to the `matplotlib.ticker.Locator` class.
xminorlocator, yminorlocator : optional
As for `xlocator`, `ylocator`, but for the minor ticks.
xminorticks, yminorticks : optional
Aliases for `xminorlocator`, `yminorlocator`.
xminorlocator_kw, yminorlocator_kw
As for `xlocator_kw`, `ylocator_kw`, but for the minor locator.
xformatter, yformatter : formatter spec, optional
Used to determine the *x* and *y* axis tick label string format.
Passed to the `~proplot.constructor.Formatter` constructor.
Can be string, list of strings, or `matplotlib.ticker.Formatter`
instance. Use ``[]`` or ``'null'`` for no ticks.
xticklabels, yticklabels : optional
Aliases for `xformatter`, `yformatter`.
xformatter_kw, yformatter_kw : dict-like, optional
Keyword arguments passed to the `matplotlib.ticker.Formatter` class.
xrotation, yrotation : float, optional
The rotation for *x* and *y* axis tick labels. Default is ``0``
for normal axes, :rc:`formatter.timerotation` for time
*x* axes.
xtickrange, ytickrange : (float, float), optional
The *x* and *y* axis data ranges within which major tick marks
are labelled. For example, the tick range ``(-1, 1)`` with
axis range ``(-5, 5)`` and a tick interval of 1 will only
label the ticks marks at -1, 0, and 1. See
`~proplot.ticker.AutoFormatter` for details.
xwraprange, ywraprange : (float, float), optional
The *x* and *y* axis data ranges with which major tick mark
values are *wrapped*. For example, the wrap range ``(0, 3)``
causes the values 0 through 9 to be formatted as 0, 1, 2,
0, 1, 2, 0, 1, 2, 0. See `~proplot.ticker.AutoFormatter` for details.
xmargin, ymargin : float, optional
The default margin between plotted content and the *x* and *y* axis
spines. Value is proportional to the width, height of the axes.
Use this if you want whitespace between plotted content
and the spines, but don't want to explicitly set `xlim` or `ylim`.
xbounds, ybounds : (float, float), optional
The *x* and *y* axis data bounds within which to draw the spines.
For example, the axis range ``(0, 4)`` with bounds ``(1, 4)``
will prevent the spines from meeting at the origin.
xcolor, ycolor : color-spec, optional
Color for the *x* and *y* axis spines, ticks, tick labels, and axis
labels. Default is :rc:`color`. Use e.g. ``ax.format(color='red')``
to set for both axes.
xlinewidth, ylinewidth : color-spec, optional
Line width for the *x* and *y* axis spines and major ticks.
Default is :rc:`linewidth`. Use e.g. ``ax.format(linewidth=2)``
to set for both axes.
xgridcolor, ygridcolor : color-spec, optional
Color for the *x* and *y* axis major and minor gridlines.
Default is :rc:`grid.color`. Use e.g. ``ax.format(gridcolor='r')``
to set for both axes.
xticklen, yticklen : float or str, optional
Tick lengths for the *x* and *y* axis. Units are interpreted by
`~proplot.utils.units`, with "points" as the numeric unit. Default
is :rc:`ticklen`.
Minor tick lengths are scaled according
to :rc:`ticklenratio`. Use e.g. ``ax.format(ticklen=1)`` to
set for both axes.
fixticks : bool, optional
Whether to always transform the tick locators to a
`~matplotlib.ticker.FixedLocator` instance. Default is ``False``.
If your axis ticks are doing weird things (for example, ticks
drawn outside of the axis spine), try setting this to ``True``.
%(axes.patch_kw)s
Other parameters
----------------
%(axes.other)s
See also
--------
proplot.config.RcConfigurator.context
proplot.axes.Axes.format
Note
----
If you plot something with a `datetime64 \
<https://docs.scipy.org/doc/numpy/reference/arrays.datetime.html>`__,
`pandas.Timestamp`, `pandas.DatetimeIndex`, `datetime.date`,
`datetime.time`, or `datetime.datetime` array as the *x* or *y* axis
coordinate, the axis ticks and tick labels will be automatically
formatted as dates.
"""
rc_kw, rc_mode, kwargs = self._parse_format(**kwargs)
with rc.context(rc_kw, mode=rc_mode):
# Background patch
kw_face = rc.fill(
{
'facecolor': 'axes.facecolor',
'alpha': 'axes.alpha'
},
context=True,
)
patch_kw = patch_kw or {}
kw_face.update(patch_kw)
self.patch.update(kw_face)
# No mutable default args
xlabel_kw = xlabel_kw or {}
ylabel_kw = ylabel_kw or {}
xscale_kw = xscale_kw or {}
yscale_kw = yscale_kw or {}
xlocator_kw = xlocator_kw or {}
ylocator_kw = ylocator_kw or {}
xformatter_kw = xformatter_kw or {}
yformatter_kw = yformatter_kw or {}
xminorlocator_kw = xminorlocator_kw or {}
yminorlocator_kw = yminorlocator_kw or {}
# Flexible keyword args, declare defaults
xmargin = _not_none(xmargin, rc.get('axes.xmargin', context=True))
ymargin = _not_none(ymargin, rc.get('axes.ymargin', context=True))
xtickdir = _not_none(xtickdir, rc.get('xtick.direction', context=True))
ytickdir = _not_none(ytickdir, rc.get('ytick.direction', context=True))
xformatter = _not_none(xformatter=xformatter, xticklabels=xticklabels)
yformatter = _not_none(yformatter=yformatter, yticklabels=yticklabels)
xlocator = _not_none(xlocator=xlocator, xticks=xticks)
ylocator = _not_none(ylocator=ylocator, yticks=yticks)
xtickminor = _not_none(
xtickminor, rc.get('xtick.minor.visible', context=True)
)
ytickminor = _not_none(
ytickminor, rc.get('ytick.minor.visible', context=True)
)
xminorlocator = _not_none(
xminorlocator=xminorlocator, xminorticks=xminorticks,
)
yminorlocator = _not_none(
yminorlocator=yminorlocator, yminorticks=yminorticks,
)
# Grid defaults are more complicated
grid = rc.get('axes.grid', context=True)
which = rc.get('axes.grid.which', context=True)
if which is not None or grid is not None: # if *one* was changed
axis = rc['axes.grid.axis'] # always need this property
if grid is None:
grid = rc['axes.grid']
elif which is None:
which = rc['axes.grid.which']
xgrid = _not_none(
xgrid, grid and axis in ('x', 'both')
and which in ('major', 'both')
)
ygrid = _not_none(
ygrid, grid and axis in ('y', 'both')
and which in ('major', 'both')
)
xgridminor = _not_none(
xgridminor, grid and axis in ('x', 'both')
and which in ('minor', 'both')
)
ygridminor = _not_none(
ygridminor, grid and axis in ('y', 'both')
and which in ('minor', 'both')
)
# Sensible defaults for spine, tick, tick label, and label locs
# NOTE: Allow tick labels to be present without ticks! User may
# want this sometimes! Same goes for spines!
xspineloc = _not_none(xloc=xloc, xspineloc=xspineloc,)
yspineloc = _not_none(yloc=yloc, yspineloc=yspineloc,)
xtickloc = _not_none(xtickloc, xspineloc, _parse_rcloc('x', 'xtick'))
ytickloc = _not_none(ytickloc, yspineloc, _parse_rcloc('y', 'ytick'))
xspineloc = _not_none(xspineloc, _parse_rcloc('x', 'axes.spines'))
yspineloc = _not_none(yspineloc, _parse_rcloc('y', 'axes.spines'))
if xtickloc != 'both':
xticklabelloc = _not_none(xticklabelloc, xtickloc)
xlabelloc = _not_none(xlabelloc, xticklabelloc)
if xlabelloc not in (None, 'bottom', 'top'): # e.g. "both"
xlabelloc = 'bottom'
if ytickloc != 'both':
yticklabelloc = _not_none(yticklabelloc, ytickloc)
ylabelloc = _not_none(ylabelloc, yticklabelloc)
if ylabelloc not in (None, 'left', 'right'):
ylabelloc = 'left'
# Begin loop
for (
x, axis,
label, color,
linewidth, gridcolor,
ticklen,
margin, bounds,
tickloc, spineloc,
ticklabelloc, labelloc,
grid, gridminor,
tickminor, minorlocator,
min_, max_, lim,
reverse, scale,
locator, tickrange,
wraprange,
formatter, tickdir,
ticklabeldir, rotation,
label_kw, scale_kw,
locator_kw, minorlocator_kw,
formatter_kw
) in zip(
('x', 'y'), (self.xaxis, self.yaxis),
(xlabel, ylabel), (xcolor, ycolor),
(xlinewidth, ylinewidth), (xgridcolor, ygridcolor),
(xticklen, yticklen),
(xmargin, ymargin), (xbounds, ybounds),
(xtickloc, ytickloc), (xspineloc, yspineloc),
(xticklabelloc, yticklabelloc), (xlabelloc, ylabelloc),
(xgrid, ygrid), (xgridminor, ygridminor),
(xtickminor, ytickminor), (xminorlocator, yminorlocator),
(xmin, ymin), (xmax, ymax), (xlim, ylim),
(xreverse, yreverse), (xscale, yscale),
(xlocator, ylocator), (xtickrange, ytickrange),
(xwraprange, ywraprange),
(xformatter, yformatter), (xtickdir, ytickdir),
(xticklabeldir, yticklabeldir), (xrotation, yrotation),
(xlabel_kw, ylabel_kw), (xscale_kw, yscale_kw),
(xlocator_kw, ylocator_kw),
(xminorlocator_kw, yminorlocator_kw),
(xformatter_kw, yformatter_kw),
):
# Axis scale
# WARNING: This relies on monkey patch of mscale.scale_factory
# that allows it to accept a custom scale class!
# WARNING: Changing axis scale also changes default locators
# and formatters, and restricts possible range of axis limits,
# so critical to do it first.
if scale is not None:
scale = constructor.Scale(scale, **scale_kw)
getattr(self, 'set_' + x + 'scale')(scale)
# Axis limits
# NOTE: 3.1+ has axis.set_inverted(), below is from source code
# NOTE: Critical to apply axis limits first in case axis scale
# is incompatible with current limits.
if min_ is not None or max_ is not None:
if lim is not None:
warnings._warn_proplot(
f'Overriding {x}lim={lim!r} '
f'with {x}min={min_!r} and {x}max={max_!r}.'
)
lim = (min_, max_)
if lim is not None:
getattr(self, 'set_' + x + 'lim')(lim)
if reverse is not None:
lo, hi = axis.get_view_interval()
if reverse:
lim = (max(lo, hi), min(lo, hi))
else:
lim = (min(lo, hi), max(lo, hi))
axis.set_view_interval(*lim, ignore=True)
# Is this a date axis?
# NOTE: Make sure to get this *after* lims set!
# See: https://matplotlib.org/api/units_api.html
# And: https://matplotlib.org/api/dates_api.html
# Also see: https://github.com/matplotlib/matplotlib/blob/master/lib/matplotlib/axis.py # noqa
# The axis_date() method just applies DateConverter
date = isinstance(axis.converter, mdates.DateConverter)
# Fix spines
kw = rc.fill(
{
'color': 'axes.edgecolor',
'linewidth': 'axes.linewidth',
},
context=True,
)
if color is not None:
kw['color'] = color
if linewidth is not None:
kw['linewidth'] = linewidth
sides = ('bottom', 'top') if x == 'x' else ('left', 'right')
spines = [self.spines[side] for side in sides]
for spine, side in zip(spines, sides):
# Line properties. Override if we're settings spine bounds
# In this case just have spines on edges by default
if bounds is not None and spineloc not in sides:
spineloc = sides[0]
# Eliminate sides
if spineloc == 'neither':
spine.set_visible(False)
elif spineloc == 'both':
spine.set_visible(True)
elif spineloc in sides: # make relevant spine visible
b = True if side == spineloc else False
spine.set_visible(b)
elif spineloc is not None:
# Special spine location, usually 'zero', 'center',
# or tuple with (units, location) where 'units' can
# be 'axes', 'data', or 'outward'.
if side == sides[1]:
spine.set_visible(False)
else:
spine.set_visible(True)
try:
spine.set_position(spineloc)
except ValueError:
raise ValueError(
f'Invalid {x} spine location {spineloc!r}. '
'Options are: '
+ ', '.join(map(
repr, (*sides, 'both', 'neither')
)) + '.'
)
# Apply spine bounds
if bounds is not None and spine.get_visible():
spine.set_bounds(*bounds)
spine.update(kw)
# Get available spines, needed for setting tick locations
spines = [
side for side, spine in zip(sides, spines)
if spine.get_visible()
]
# Tick and grid settings for major and minor ticks separately
# Override is just a "new default", but user can override this
for which, igrid in zip(('major', 'minor'), (grid, gridminor)):
# Tick properties
# NOTE: This loads xtick.major.size, xtick.major.width,
# xtick.major.pad, xtick.major.bottom, and xtick.major.top
# For all the x/y major/minor tick types
kwticks = rc.category(x + 'tick.' + which, context=True)
if kwticks is None:
kwticks = {}
else:
kwticks.pop('visible', None) # invalid setting
if ticklen is not None:
kwticks['size'] = units(ticklen, 'pt')
if which == 'minor':
kwticks['size'] *= rc['ticklenratio']
# Grid style and toggling
name = 'grid' if which == 'major' else 'gridminor'
if igrid is not None:
axis.grid(igrid, which=which)
kwgrid = rc.fill(
{
'grid_color': name + '.color',
'grid_alpha': name + '.alpha',
'grid_linewidth': name + '.linewidth',
'grid_linestyle': name + '.linestyle',
},
context=True,
)
if gridcolor is not None: # override for specific x/y axes
kw['grid_color'] = gridcolor
axis.set_tick_params(which=which, **kwgrid, **kwticks)
# Tick and ticklabel properties that apply equally for major/minor lines
# Weird issue causes set_tick_params to reset/forget grid is turned
# on if you access tick.gridOn directly, instead of passing through
# tick_params. Since gridOn is undocumented feature, don't use it.
# So calling _format_axes() a second time will remove the lines.
# First determine tick sides, avoiding situation where we draw ticks
# on top of invisible spine.
kw = {}
loc2sides = {
None: None,
'both': sides,
'none': (),
'neither': (),
}
if bounds is not None and tickloc not in sides:
tickloc = sides[0] # override to just one side
ticklocs = loc2sides.get(tickloc, (tickloc,))
if ticklocs is not None:
kw.update({side: side in ticklocs for side in sides})
kw.update({side: False for side in sides if side not in spines})
# Tick label sides
# Will override to make sure only appear where ticks are
ticklabellocs = loc2sides.get(ticklabelloc, (ticklabelloc,))
if ticklabellocs is not None:
kw.update(
{'label' + side: (side in ticklabellocs) for side in sides}
)
kw.update( # override
{
'label' + side: False for side in sides
if side not in spines
or (ticklocs is not None and side not in ticklocs)
}
) # override
# The axis label side
if labelloc is None:
if ticklocs is not None:
options = [
side for side in sides
if side in ticklocs and side in spines
]
if len(options) == 1:
labelloc = options[0]
elif labelloc not in sides:
raise ValueError(
f'Got labelloc {labelloc!r}, valid options are '
+ ', '.join(map(repr, sides)) + '.'
)
# Apply
axis.set_tick_params(which='both', **kw)
if labelloc is not None:
axis.set_label_position(labelloc)
# Tick label settings
kw = rc.fill(
{
'labelcolor': 'tick.labelcolor', # new props
'labelsize': 'tick.labelsize',
'color': x + 'tick.color',
},
context=True,
)
if color:
kw['color'] = color
kw['labelcolor'] = color
# Tick label direction and rotation
if tickdir == 'in': # ticklabels should be much closer
kw['pad'] = 1.0
if ticklabeldir == 'in': # put tick labels inside the plot
tickdir = 'in'
kw['pad'] = -rc[f'{x}tick.major.size'] - rc[f'{x}tick.major.pad']
kw['pad'] -= rc._scale_font(rc[f'{x}tick.labelsize'])
if tickdir is not None:
kw['direction'] = tickdir
axis.set_tick_params(which='both', **kw)
# Settings that can't be controlled by set_tick_params
# Also set rotation and alignment here
kw = rc.fill(
{
'fontfamily': 'font.family',
'weight': 'tick.labelweight'
},
context=True,
)
if rotation is not None:
kw = {'rotation': rotation}
if x == 'x':
self._datex_rotated = True
if rotation not in (0, 90, -90):
kw['ha'] = ('right' if rotation > 0 else 'left')
for t in axis.get_ticklabels():
t.update(kw)
# Margins
if margin is not None:
self.margins(**{x: margin})
# Axis label updates
# NOTE: This has to come after set_label_position, or ha or va
# overrides in label_kw are overwritten
kw = rc.fill(
{
'color': 'axes.labelcolor',
'weight': 'axes.labelweight',
'fontsize': 'axes.labelsize',
'fontfamily': 'font.family',
},
context=True,
)
if label is not None:
kw['text'] = label
if color:
kw['color'] = color
kw.update(label_kw)
if kw: # NOTE: initially keep spanning labels off
self._update_axis_labels(x, **kw)
# Major and minor locator
# NOTE: Parts of API (dualxy) rely on minor tick toggling
# preserving the isDefault_minloc setting. In future should
# override the default matplotlib API minorticks_on!
# NOTE: Unlike matplotlib API when "turning on" minor ticks
# we *always* use the scale default, thanks to scale classes
# refactoring with _ScaleBase. See Axes.minorticks_on.
if locator is not None:
locator = constructor.Locator(locator, **locator_kw)
axis.set_major_locator(locator)
if isinstance(locator, mticker.IndexLocator):
tickminor = False # 'index' minor ticks make no sense
if minorlocator in (True, False):
warnings._warn_proplot(
f'You passed {x}minorticks={minorlocator}, but this '
'argument is used to specify tick *locations*. If '
'you just want to *toggle* minor ticks on and off, '
f'please use {x}tickminor=True or {x}tickminor=False.'
)
minorlocator = None
if tickminor or minorlocator:
isdefault = minorlocator is None
if isdefault:
minorlocator = getattr(
axis._scale, '_default_minor_locator', None
)
if not minorlocator:
minorlocator = constructor.Locator('minor')
else:
minorlocator = constructor.Locator(
minorlocator, **minorlocator_kw
)
axis.set_minor_locator(minorlocator)
axis.isDefault_minloc = isdefault
elif tickminor is not None and not tickminor:
# NOTE: Generally if you *enable* minor ticks on a dual
# axis, want to allow FuncScale updates to change the
# minor tick locators. If you *disable* minor ticks, do
# not want FuncScale applications to turn them on. So we
# allow below to set isDefault_minloc to False.
axis.set_minor_locator(constructor.Locator('null'))
# Major formatter
# NOTE: The only reliable way to disable ticks labels and then
# restore them is by messing with the *formatter*, rather than
# setting labelleft=False, labelright=False, etc.
if (
formatter is not None
or tickrange is not None
or wraprange is not None
):
# Tick range
if tickrange is not None or wraprange is not None:
if formatter not in (None, 'auto'):
warnings._warn_proplot(
'The tickrange and autorange features require '
'proplot.AutoFormatter formatter. Overriding '
'input formatter.'
)
formatter = 'auto'
if tickrange is not None:
formatter_kw.setdefault('tickrange', tickrange)
if wraprange is not None:
formatter_kw.setdefault('wraprange', wraprange)
# Set the formatter
# Note some formatters require 'locator' as keyword arg
if formatter in ('date', 'concise'):
locator = axis.get_major_locator()
formatter_kw.setdefault('locator', locator)
formatter = constructor.Formatter(
formatter, date=date, **formatter_kw
)
axis.set_major_formatter(formatter)
# Ensure no out-of-bounds ticks; set_smart_bounds() can fail
# * Using set_bounds did not work, so instead just turn
# locators into fixed version.
# * Most locators take no arguments in __call__, and some do
# not have tick_values method, so we just call them.
if (
bounds is not None
or fixticks
or isinstance(formatter, mticker.FixedFormatter)
or axis.get_scale() == 'cutoff'
):
if bounds is None:
bounds = getattr(self, 'get_' + x + 'lim')()
locator = constructor.Locator([
x for x in axis.get_major_locator()()
if bounds[0] <= x <= bounds[1]
])
axis.set_major_locator(locator)
locator = constructor.Locator([
x for x in axis.get_minor_locator()()
if bounds[0] <= x <= bounds[1]
])
axis.set_minor_locator(locator)
# Call parent
if aspect is not None:
self.set_aspect(aspect)
super().format(**kwargs)
    @docstring.add_snippets
    def altx(self, **kwargs):
        """
        %(axes.altx)s
        """
        # NOTE: Cannot *wrap* twiny() because we want to use CartesianAxes, not
        # matplotlib Axes. Instead use hidden method SubplotBase._make_twin_axes.
        # WARNING: This repairs a matplotlib bug where twins fail to inherit the minor
        # locator due to application of `AutoMinorLocator` when `ytick.minor.visible`
        # is ``True`` in `Axes.cla` and due to the fact that passing ``sharey=self``
        # to the alternate axes means that they share the same major and minor Tickers.
        # >>> import matplotlib.pyplot as plt
        # ... fig, ax = plt.subplots()
        # ... ax.set_yscale('log')
        # ... ax.twiny()
        # Only one twin per direction is supported.
        if self._altx_child or self._altx_parent:
            raise RuntimeError('No more than *two* twin axes are allowed.')
        with self.figure._context_authorize_add_subplot():
            # Capture the shared y axis minor locator before building the twin
            # and restore it on the child (works around the bug noted above).
            ylocator = self.yaxis.get_minor_locator()
            ax = self._make_twin_axes(sharey=self, projection='cartesian')
            ax.yaxis.set_minor_locator(ylocator)
            ax.yaxis.isDefault_minloc = True
        ax.set_autoscaley_on(self.get_autoscaley_on())
        ax.grid(False)
        # Cross-link parent and child so spine/tick overrides run on both.
        self._altx_child = ax
        ax._altx_parent = self
        self._altx_overrides()
        ax._altx_overrides()
        self.add_child_axes(ax)  # to facilitate tight layout
        self.figure._axstack.remove(ax)  # or gets drawn twice!
        ax.format(**_parse_alt('x', kwargs))
        return ax
    @docstring.add_snippets
    def alty(self, **kwargs):
        """
        %(axes.alty)s
        """
        # See altx() comments -- identical logic mirrored for the y direction.
        if self._alty_child or self._alty_parent:
            raise RuntimeError('No more than *two* twin axes are allowed.')
        with self.figure._context_authorize_add_subplot():
            # Preserve the shared x axis minor locator on the twin (see altx).
            xlocator = self.xaxis.get_minor_locator()
            ax = self._make_twin_axes(sharex=self, projection='cartesian')
            ax.xaxis.set_minor_locator(xlocator)
            ax.xaxis.isDefault_minloc = True
        ax.set_autoscalex_on(self.get_autoscalex_on())
        ax.grid(False)
        # Cross-link parent and child so overrides run on both at draw time.
        self._alty_child = ax
        ax._alty_parent = self
        self._alty_overrides()
        ax._alty_overrides()
        self.add_child_axes(ax)  # to facilitate tight layout
        self.figure._axstack.remove(ax)  # or gets drawn twice!
        ax.format(**_parse_alt('y', kwargs))
        return ax
    @docstring.add_snippets
    def dualx(self, funcscale, **kwargs):
        """
        %(axes.dualx)s
        """
        # NOTE: Matplotlib 3.1 has a 'secondary axis' feature. For the time
        # being, our version is more robust (see FuncScale) and simpler, since
        # we do not create an entirely separate _SecondaryAxis class.
        # Build the twin, remember the forward transform, then sync its scale.
        ax = self.altx(**kwargs)
        self._dualx_funcscale = funcscale
        self._dualx_overrides()
        return ax
    @docstring.add_snippets
    def dualy(self, funcscale, **kwargs):
        """
        %(axes.dualy)s
        """
        # See dualx() -- identical logic for the y axis.
        ax = self.alty(**kwargs)
        self._dualy_funcscale = funcscale
        self._dualy_overrides()
        return ax
    def draw(self, renderer=None, *args, **kwargs):
        # Perform extra post-processing steps
        # NOTE: This mimics matplotlib API, which calls identical
        # post-processing steps in both draw() and get_tightbbox()
        # Sync twin/dual axis state, rotate date tick labels, and apply
        # axis sharing before handing off to matplotlib's renderer.
        self._altx_overrides()
        self._alty_overrides()
        self._dualx_overrides()
        self._dualy_overrides()
        self._datex_rotate()
        self._apply_axis_sharing()
        # Refresh the zoom indicator box if this is a zoomed inset axes.
        if self._inset_parent is not None and self._inset_zoom:
            self.indicate_inset_zoom()
        super().draw(renderer, *args, **kwargs)
    def get_tightbbox(self, renderer, *args, **kwargs):
        # Perform extra post-processing steps
        # Same sequence as draw() so the reported bounding box matches what
        # will actually be rendered.
        self._altx_overrides()
        self._alty_overrides()
        self._dualx_overrides()
        self._dualy_overrides()
        self._datex_rotate()
        self._apply_axis_sharing()
        if self._inset_parent is not None and self._inset_zoom:
            self.indicate_inset_zoom()
        return super().get_tightbbox(renderer, *args, **kwargs)
    @docstring.add_snippets
    def twinx(self):
        """
        %(axes.twinx)s
        """
        # Matplotlib-compatible alias: a twin sharing the *x* axis is an
        # alternate *y* axis in proplot terminology.
        return self.alty()
    @docstring.add_snippets
    def twiny(self):
        """
        %(axes.twiny)s
        """
        # Matplotlib-compatible alias: a twin sharing the *y* axis is an
        # alternate *x* axis in proplot terminology.
        return self.altx()
| StarcoderdataPython |
153977 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import re
import os
import codecs
from pelican.readers import parse_path_metadata
from .exceptions import FileNotFound, FileAlreadyExists, UnknownFileFormat
__all__ = ('PelicanContentFile', 'PelicanArticle', 'RstArticle', 'MarkdownArticle')
class PelicanContentFile(object):
    """
    Base container for any pelican content file.
    This is basically a filename with some advanced attributes.
    The content attribute should always be stored as byte str.
    """
    # Default character encoding; ``None`` means raw bytes (subclasses override).
    encoding = None

    def __init__(self, content_path, filename, content=None, encoding=None):
        self.content_path = content_path
        self.filename = filename
        self.extension = os.path.splitext(filename)[1]
        self._content = content
        # Per-instance encoding falls back to the class-level default.
        self.encoding = encoding or self.encoding

    def __repr__(self):
        return '%s(%s)' % (self.__class__.__name__, self.filename.encode('ascii', errors='backslashreplace'))

    def __str__(self):
        return '%s' % self.filename

    def __unicode__(self):
        # Python 2 text-conversion hook; inert under Python 3.
        return '%s' % self.filename

    def __eq__(self, other):
        # Comparing against a string compares filenames directly; comparing
        # two PelicanContentFile instances works via the reflected operator
        # (str.__eq__ returns NotImplemented, so other.__eq__ is tried).
        return self.filename == other

    def __ne__(self, other):
        return self.filename != other

    def __hash__(self):
        # Hash by filename so instances interoperate with plain filename
        # strings in sets and dict keys (consistent with __eq__ above).
        return self.filename.__hash__()

    def __nonzero__(self):
        # Truthy iff the filename is non-empty (Python 2 name; aliased below).
        return bool(self.filename)
    __bool__ = __nonzero__

    @property
    def content(self):
        # Raises if neither the constructor nor load() supplied content.
        if self._content is None:
            raise ValueError('file content is not loaded')
        return self._content

    @content.setter
    def content(self, value):
        self._content = value

    @property
    def full_path(self):
        """Get file full path"""
        return os.path.abspath(os.path.join(self.content_path, self.filename))

    def exists(self):
        """Return True if file exists on FS"""
        return os.path.isfile(self.full_path)

    # noinspection PyMethodMayBeStatic
    def _delete(self, file_path):
        """Actual file delete operation"""
        os.remove(file_path)

    def delete(self):
        """Remove file from disk"""
        if not self.exists():
            raise FileNotFound(self)
        self._delete(self.full_path)

    def _load(self, file_path):
        """Actual file read operation"""
        # NOTE(review): with ``encoding`` set, ``codecs.open`` returns decoded
        # text even in 'rb' mode; only ``encoding=None`` yields raw bytes --
        # confirm against the "byte str" claim in the class docstring.
        with codecs.open(file_path, mode='rb', encoding=self.encoding) as fp:
            return fp.read()

    def load(self):
        """Load and return file content from disk"""
        if not self.exists():
            raise FileNotFound(self)
        self.content = self._load(self.full_path)
        return self.content

    def _save(self, file_path, content):
        """Actual file write operation"""
        with codecs.open(file_path, mode='wb', encoding=self.encoding) as fp:
            fp.write(content)

    def save(self):
        """Write file content to disk"""
        # Deliberately refuses to overwrite an existing file.
        if self.exists():
            raise FileAlreadyExists(self)
        self._save(self.full_path, self.content)
class PelicanArticle(PelicanContentFile):
    """
    Base class for article formats.
    The content should always be a unicode str.
    """
    encoding = 'utf-8'
    extension = NotImplemented
    re_metadata = NotImplemented

    def _load(self, file_path):
        """Read text file content from disk (pelican style)"""
        content = super(PelicanArticle, self)._load(file_path)
        # Strip a leading UTF-8 BOM if present.
        # NOTE(review): an empty file raises IndexError here, matching the
        # historical behavior of this method.
        bom = codecs.BOM_UTF8.decode(self.encoding)
        if content[0] == bom:
            content = content[1:]
        return content

    def get_path_metadata(self, settings):
        """Parse file metadata from file's path"""
        return parse_path_metadata(self.filename, settings=settings)

    def _parse_metadata(self, metadata, line):
        """Parse metadata from one line of text and update the metadata dict.

        Returns True when the line matched (and was consumed), False otherwise.
        """
        match = self.re_metadata.match(line)
        if match is None:
            return False
        key, value = match.group(1), match.group(2)
        metadata[key.lower()] = value
        return True

    def get_text_metadata(self, text):
        """Separate metadata from text and return (new text, metadata) tuple"""
        metadata = {}
        body_lines = []
        for line in text.splitlines():
            # Metadata lines are consumed; everything else stays in the body.
            if not self._parse_metadata(metadata, line):
                body_lines.append(line)
        return '\n'.join(body_lines), metadata

    def _compose(self, title, text, metadata):
        """Return new content from supplied parameters"""
        raise NotImplementedError

    def compose(self, title, text, metadata):
        """Create and set new content"""
        self.content = self._compose(title, text, metadata)
        return self.content

    def internal_link(self, text, uri):
        raise NotImplementedError

    def image(self, alt, uri):
        raise NotImplementedError
class RstArticle(PelicanArticle):
    """
    Article stored in reStructuredText format.
    """
    extension = '.rst'
    file_extensions = ('rst',)
    re_metadata = re.compile(r'^:(\w+):\s+(.+)$')

    def _compose(self, title, text, metadata):
        """Return reST source: title, '#' underline, field-list metadata, body."""
        underline = '#' * len(title)
        fields = '\n'.join(':%s: %s' % item for item in metadata.items())
        return '%s\n%s\n\n%s\n\n%s\n' % (title, underline, fields, text)

    def internal_link(self, text, uri):
        """Return an inline reST hyperlink."""
        return '`%s <%s>`_' % (text, uri)

    def image(self, alt, uri):
        """Return a reST image directive with alt text."""
        return '.. image:: %s\n :alt: %s' % (uri, alt)
class MarkdownArticle(PelicanArticle):
    """
    Article stored in markdown format.
    """
    extension = '.md'
    file_extensions = ('md', 'markdown', 'mkd', 'mdown')
    re_metadata = re.compile(r'^ ([A-Z]\w*):\s+(.+)$')

    def _compose(self, title, text, metadata):
        """Return Markdown source: indented metadata header, H1 title, body."""
        metadata['title'] = title
        return '%(metadata)s\n\n# %(title)s\n\n%(text)s\n' % {
            'metadata': '\n'.join(' %s: %s' % (k.title(), v) for k, v in metadata.items()),
            'title': title,
            'text': text,
        }

    def internal_link(self, text, uri):
        """Return an inline Markdown link: ``[text](uri)``."""
        return '[%s](%s)' % (text, uri)

    def image(self, alt, uri):
        """Return an inline Markdown image: ``![alt](uri)``.

        BUG FIX: this previously returned ``'' % (alt, uri)``, which raises
        ``TypeError: not all arguments converted during string formatting``
        (the Markdown image markup was missing from the template).
        """
        return '![%s](%s)' % (alt, uri)
ARTICLE_CLASSES = (RstArticle, MarkdownArticle)
def pelican_article(content_path, filename, **kwargs):
"""Return PelicanArticle object according to file extensions"""
supported_classes = kwargs.pop('supported_classes', ARTICLE_CLASSES)
ext = os.path.splitext(filename)[1].lstrip('.')
for cls in supported_classes:
if ext in cls.file_extensions:
return cls(content_path, filename, **kwargs)
raise UnknownFileFormat('Unsupported article format: %s' % ext)
| StarcoderdataPython |
def case_insensitive_sort_1(string_list):
    """Sort *string_list* in place, ignoring case.

    BUG FIX: the original used a Python 2 ``cmp``-style comparator passed
    positionally to ``list.sort`` -- both ``cmp`` and that signature were
    removed in Python 3. The equivalent ``key`` function produces the same
    ordering (compare lowercased values; stable sort in both cases).
    """
    string_list.sort(key=str.lower)
| StarcoderdataPython |
3316142 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Examples for the NURBS-Python Package
Released under MIT License
Developed by <NAME> (c) 2018
Surface fitting by global interpolation
"""

from geomdl import fitting
from geomdl.visualization import VisMPL as vis

# Data set: a 5 x 7 grid of (x, y, z) sample points, listed row by row.
# Each row of 7 points shares a y value; x runs from -5 to 10 in 2.5 steps.
points = ((-5, -5, 0), (-2.5, -5, 0), (0, -5, 0), (2.5, -5, 0), (5, -5, 0), (7.5, -5, 0), (10, -5, 0),
          (-5, 0, 3), (-2.5, 0, 3), (0, 0, 3), (2.5, 0, 3), (5, 0, 3), (7.5, 0, 3), (10, 0, 3),
          (-5, 5, 0), (-2.5, 5, 0), (0, 5, 0), (2.5, 5, 0), (5, 5, 0), (7.5, 5, 0), (10, 5, 0),
          (-5, 7.5, -3), (-2.5, 7.5, -3), (0, 7.5, -3), (2.5, 7.5, -3), (5, 7.5, -3), (7.5, 7.5, -3), (10, 7.5, -3),
          (-5, 10, 0), (-2.5, 10, 0), (0, 10, 0), (2.5, 10, 0), (5, 10, 0), (7.5, 10, 0), (10, 10, 0))
# Grid dimensions (points per parametric direction) and surface degrees.
# Degrees must be strictly less than the corresponding grid size.
size_u = 5
size_v = 7
degree_u = 2
degree_v = 3

# Do global surface interpolation -- the resulting B-spline surface passes
# exactly through every input point.
surf = fitting.interpolate_surface(points, size_u, size_v, degree_u, degree_v)

# Plot the interpolated surface with a fairly fine evaluation delta.
surf.delta = 0.05
surf.vis = vis.VisSurface()
surf.render()

# # Visualize data and evaluated points together
# import numpy as np
# import matplotlib.pyplot as plt
# evalpts = np.array(surf.evalpts)
# pts = np.array(points)
# fig = plt.figure()
# ax = plt.axes(projection='3d')
# ax.scatter(evalpts[:, 0], evalpts[:, 1], evalpts[:, 2])
# ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2], color="red")
# plt.show()
| StarcoderdataPython |
170381 | from distutils.core import setup
# Packaging metadata for the ``python-tee`` distribution.
setup(
    name='python-tee',
    packages=['tee'],
    version='0.0.5',
    license='MIT',
    description='',
    author='<NAME>',
    url='https://github.com/dante-biase/python-tee',
    download_url='https://github.com/dante-biase/python-tee/archive/v0.0.5.tar.gz',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Build Tools',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
    ],
)
1635272 | from io import BytesIO, SEEK_END
import attr
from PIL import Image

# Longest allowed edge (pixels) after downscaling with Image.thumbnail.
MAX_EDGE_PIXELS = 1024
# Re-encoding quality passed to Pillow's ``Image.save``.
QUALITY = 80
# Pillow format names accepted by ``process_image_data``.
SUPPORTED_FORMATS = ('JPEG', 'PNG', 'GIF')
# Hard ceiling on the processed payload size (1 MiB).
MAX_SIZE_IN_BYTES_AFTER_PROCESSING = 1024 * 1024
# Images with area (width * height) below this are rejected as tracking pixels.
MIN_AREA_TRACKING_PIXEL = 10
@attr.s
class ImageProcessingResult:
    """Outcome of ``process_image_data``: final dimensions, format and payload."""

    size_in_bytes: int = attr.ib()   # size of ``data`` after processing
    width: int = attr.ib()           # final pixel width
    height: int = attr.ib()          # final pixel height
    image_format: str = attr.ib()    # Pillow format name, e.g. 'JPEG'
    data: BytesIO = attr.ib()        # processed image bytes, seeked back to 0
class ImageProcessingError(Exception):
    """Raised when an image cannot be decoded, resized, or re-encoded."""
    pass
def process_image_data(data: bytes) -> ImageProcessingResult:
    """Validate, downscale and re-encode a raw image payload.

    Raises ImageProcessingError when the bytes cannot be decoded, the format
    is unsupported, the image looks like a tracking pixel, resizing fails, or
    the processed result exceeds MAX_SIZE_IN_BYTES_AFTER_PROCESSING.
    """
    try:
        image = Image.open(BytesIO(data))
    except OSError as e:
        raise ImageProcessingError('Cannot open image: {}'.format(e))
    with image:
        if image.format not in SUPPORTED_FORMATS:
            raise ImageProcessingError(
                'Unsupported format {}'.format(image.format)
            )
        width, height = image.size
        # Reject tiny images that are almost certainly tracking pixels.
        if (width * height) < MIN_AREA_TRACKING_PIXEL:
            raise ImageProcessingError('Tracking pixel')
        if image.format == 'GIF':
            # Gif are weird, saving them often fails and the result after
            # compression is sometimes bigger than the original file.
            # Let's just keep the original file.
            data = BytesIO(data)
        else:
            data = BytesIO()
            try:
                image.thumbnail((MAX_EDGE_PIXELS, MAX_EDGE_PIXELS))
                # Dimensions may have shrunk -- re-read them after resizing.
                width, height = image.size
                image.save(data, image.format, quality=QUALITY,
                           optimize=True, progressive=True)
            except (OSError, EOFError) as e:
                raise ImageProcessingError('Cannot resize image: {}'.format(e))
    # seek(0, SEEK_END) returns the new absolute position, i.e. the total size.
    size_in_bytes = data.seek(0, SEEK_END)
    data.seek(0)
    if size_in_bytes > MAX_SIZE_IN_BYTES_AFTER_PROCESSING:
        raise ImageProcessingError(
            'Resulting file too big: {} bytes'.format(size_in_bytes)
        )
    # NOTE(review): the image is closed at this point but ``.format`` remains
    # readable as a plain attribute -- confirm this holds for the Pillow
    # version in use.
    return ImageProcessingResult(
        size_in_bytes, width, height, image.format, data
    )
| StarcoderdataPython |
58966 | """ Intermediate Factors
@author: <NAME>
This module computes the interpolated features between the principal vectors -- the one
linking source to target following the geodesics on the Grassmannian. We use the
equivalent formulation derived in [1] and represent this geodesics for each pair
of principal components.
Example
-------
Examples are given in the vignettes.
Notes
-------
Examples are given in the vignette
References
-------
[1] <NAME>., <NAME>., <NAME>., "TO CHANGE"
"""
import numpy as np
import pandas as pd
from pathlib import Path
from joblib import Parallel, delayed
from precise.principal_vectors import PVComputation
class IntermediateFactors:
    """
    Compute intermediate representations (sets of factors) between a source
    and a target subspace along the Grassmannian geodesic.

    Attributes
    -------
    source_components_ : numpy.ndarray, shape (n_components, n_features)
        Loadings of the source factors, be them already aligned to target or not.
    target_components_ : numpy.ndarray, shape (n_components, n_features)
        Loadings of the target factors, be them already aligned to source or not.
    intermediate_factors_ : numpy.ndarray, shape (n_representations, n_components, n_features)
        Loadings of intermediate factors along the geodesic path. Components are
        ordered by similarity, i.e. first components correspond to the path
        between the first principal vectors, etc.
    n_representations : int
        Number of representations along the geodesic path. If -1, means that
        the Geodesic Flow Kernel has been used instead.
    """

    def __init__(self, n_representations, n_jobs=1):
        """
        Parameters
        -------
        n_representations : int
            Number of representations to pick between source and target.
        n_jobs : int (optional, default to 1)
            Number of jobs for computation.
        """
        self.n_representations = n_representations
        self.intermediate_factors_ = None
        self.source_components_ = None
        self.target_components_ = None
        # BUG FIX: this was hard-coded to ``1``, silently discarding the
        # ``n_jobs`` argument and disabling parallel sampling in sample_flow.
        self.n_jobs = n_jobs

    def _compute_principal_vectors(self):
        """Compute principal vectors between the stored source/target factors."""
        n_pv = np.min([self.source_components_.shape[0],
                       self.target_components_.shape[0]])
        n_factors = {
            'source': self.source_components_.shape[0],
            'target': self.target_components_.shape[0]
        }
        self.principal_vectors_ = PVComputation(n_factors, n_pv)
        self.principal_vectors_.compute_principal_vectors(self.source_components_,
                                                          self.target_components_)

    @staticmethod
    def _compute_flow_time(t, principal_vectors):
        """Return the interpolated factors at geodesic position ``t`` in [0, 1]."""
        Pi = np.sin((1 - t) * principal_vectors.angles_) \
            / np.sin(principal_vectors.angles_)
        Pi[np.isnan(Pi)] = 1 - t  # Asymptotic value of sin/sin in 0
        Xi = np.sin(t * principal_vectors.angles_) \
            / np.sin(principal_vectors.angles_)
        Xi[np.isnan(Xi)] = t  # Asymptotic value of sin/sin in 0
        return (principal_vectors.source_components_.T * Pi
                + principal_vectors.target_components_.T * Xi).T

    def sample_flow(self, source_components, target_components, already_aligned=False):
        """
        Sample intermediate subspaces (i.e. sets of factors) uniformly along
        the geodesic flow.
        IMPORTANT: Same genes have to be given for source and target, and in same order

        Parameters
        -------
        source_components : np.ndarray, shape (n_components, n_features)
            Source factors
        target_components : np.ndarray, shape (n_components, n_features)
            Target factors
        already_aligned : boolean (optional, default to False)
            Whether the components are already aligned (i.e. are they PV or not).

        Return values
        -------
        Intermediate subspaces, numpy.ndarray of shape
        (n_representations + 1, n_components, n_features).
        """
        self.source_components_ = source_components
        self.target_components_ = target_components

        # Compute the principal vectors
        if not already_aligned:
            self._compute_principal_vectors()
        else:
            self.principal_vectors_.source_components_ = self.source_components_
            # BUG FIX: this previously assigned ``target_components`` (without
            # the trailing underscore), leaving ``target_components_`` -- the
            # attribute actually read by _compute_flow_time -- stale.
            self.principal_vectors_.target_components_ = self.target_components_

        # Sample at uniformly distributed time points; -1 means GFK mode,
        # where only the end point is needed.
        if self.n_representations == -1:
            t_sample = np.array([1])
        else:
            t_sample = np.linspace(0, 1, self.n_representations + 1)

        if self.n_jobs >= 2:
            return np.array(
                Parallel(n_jobs=self.n_jobs)(
                    delayed(IntermediateFactors._compute_flow_time)(t, self.principal_vectors_)
                    for t in t_sample
                )
            )
        return np.array([
            IntermediateFactors._compute_flow_time(t, self.principal_vectors_)
            for t in t_sample
        ])

    def compute_geodesic_matrix(self, source_components, target_components):
        """
        Compute the domain-invariant metric matrix of the Geodesic Flow Kernel.

        Parameters
        -------
        source_components : np.ndarray, shape (n_components, n_features)
            Source factors
        target_components : np.ndarray, shape (n_components, n_features)
            Target factors

        Return values
        -------
        numpy.ndarray of shape (2 * n_pv, 2 * n_pv): metric matrix ``G`` such
        that the domain-invariant scalar product of vectors x and y is
        ``(x P) G (y P)^T`` with ``P = self.projection``.
        """
        self.source_components_ = source_components
        self.target_components_ = target_components
        self._compute_principal_vectors()

        diag_term = (self.principal_vectors_.angles_ - np.cos(self.principal_vectors_.angles_) * np.sin(self.principal_vectors_.angles_)) \
            / 2 / self.principal_vectors_.angles_ / np.power(np.sin(self.principal_vectors_.angles_), 2)
        off_diag_term = (np.sin(self.principal_vectors_.angles_) - np.cos(self.principal_vectors_.angles_) * self.principal_vectors_.angles_) \
            / 2 / np.power(np.sin(self.principal_vectors_.angles_), 2) / self.principal_vectors_.angles_

        # Correct for extreme case when theta = 0 (limits of the expressions).
        diag_term[np.isnan(diag_term)] = 1. / 3.
        diag_term[np.isinf(diag_term)] = 1. / 3.
        off_diag_term[np.isnan(off_diag_term)] = 1. / 6.
        off_diag_term[np.isinf(off_diag_term)] = 1. / 6.

        diag_term = np.diag(diag_term)
        off_diag_term = np.diag(off_diag_term)
        self.G_matrix = np.block([
            [diag_term, off_diag_term],
            [off_diag_term, diag_term]
        ])
        self.projection = np.block([self.principal_vectors_.source_components_.transpose(), self.principal_vectors_.target_components_.transpose()])
        return self.G_matrix

    @staticmethod
    def _compute_domain_invariant_scalar_product(x, y, projection, G_matrix):
        """Scalar product of ``x`` and ``y`` under the GFK metric."""
        x_p = x.dot(projection)
        y_p = y.dot(projection)
        return x_p.dot(G_matrix).dot(y_p.transpose())
| StarcoderdataPython |
1721171 | from ._ClassRegistry import ClassRegistry
from ._functions import is_hashable
from ._Interval import Interval
from ._InvalidStateError import InvalidStateError
from ._memoise import Memoiser, MemoiserFactory, MEMO_EXTENSION, MemoFile, InvalidMemoFileError, PicklableDict
from ._pool import run_on_all, num_processes
from ._switch import switch, case, default, break_
from ._TwoWayDict import TwoWayDict
| StarcoderdataPython |
4827647 | <filename>paper_uploads/variations.py
import posixpath
from variations.variation import Variation
class PaperVariation(Variation):
    """
    Variation subclass that additionally carries a variation name, used to
    derive per-variation output file names.
    """

    def __init__(self, *args, name: str = "", **kwargs):
        self.name = name
        super().__init__(*args, **kwargs)

    @property
    def name(self) -> str:
        return self._name

    @name.setter
    def name(self, value: str):
        if not isinstance(value, str):
            raise TypeError(value)
        self._name = value

    def get_output_filename(self, input_filename: str) -> str:
        """
        Build the variation's output file name from the source file name.

        The variation name is inserted before the extension; any directory
        part of the path is preserved unchanged.
        """
        if not self.name:
            raise RuntimeError("`name` is empty")
        directory, basename = posixpath.split(input_filename)
        stem, extension = posixpath.splitext(basename)
        stamped_stem = posixpath.extsep.join((stem, self.name))
        candidate = posixpath.join(directory, "".join((stamped_stem, extension)))
        return self.replace_extension(candidate)
| StarcoderdataPython |
96859 | #!/usr/bin/env python3
#
# Copyright (c) 2018 Institute for Basic Science
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import os
import sys
from io import StringIO

__all__ = ['tensorflow', 'keras', 'WeightedCategoricalCrossentropy',
           'WeightedCategoricalAccuracy']

try: # Suppress warnings and informative messages from keras and tf.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # '3' = log errors only
    # Temporarily swallow stderr while importing TensorFlow/Keras.
    sys.stderr, saved_stderr = StringIO(), sys.stderr
    from tensorflow import keras
    import tensorflow
    tensorflow.get_logger().setLevel('ERROR')
finally:
    # Always restore the real stderr, even if the import itself fails.
    sys.stderr = saved_stderr
# Weighted versions of Crossentropy and Accuracy metrics from eliadi:
# https://github.com/keras-team/keras/issues/2115#issuecomment-530762739
import tensorflow.keras.backend as K
from tensorflow.keras.losses import CategoricalCrossentropy
from tensorflow.keras.metrics import CategoricalAccuracy
class WeightedCategoricalCrossentropy(CategoricalCrossentropy):
    """Categorical crossentropy weighted per (true, predicted) class pair.

    `cost_mat` is a square matrix where entry [i, j] is the weight applied
    to a sample whose true class is i and predicted class is j.
    """
    def __init__(self, cost_mat, name='weighted_categorical_crossentropy', **kwargs):
        assert(cost_mat.ndim == 2)
        assert(cost_mat.shape[0] == cost_mat.shape[1])  # must be square
        super().__init__(name=name, **kwargs)
        self.cost_mat = K.cast_to_floatx(cost_mat)

    def __call__(self, y_true, y_pred):
        # NOTE(review): this override omits the usual `sample_weight`
        # argument of Keras losses — confirm callers never pass one.
        return super().__call__(
            y_true=y_true,
            y_pred=y_pred,
            sample_weight=get_sample_weights(y_true, y_pred, self.cost_mat),
        )
def get_sample_weights(y_true, y_pred, cost_m):
    """Per-sample weights: cost_m[true_class, predicted_class] for each row.

    `y_true` is assumed to be one-hot; `y_pred` is hardened to one-hot via
    argmax so exactly one entry of the cost matrix is selected per sample.
    """
    num_classes = len(cost_m)
    #y_pred.shape.assert_has_rank(2)
    #y_pred.shape[1].assert_is_compatible_with(num_classes)
    #y_pred.shape.assert_is_compatible_with(y_true.shape)
    y_pred = K.one_hot(K.argmax(y_pred), num_classes)
    y_true_nk1 = K.expand_dims(y_true, 2)
    y_pred_n1k = K.expand_dims(y_pred, 1)
    cost_m_1kk = K.expand_dims(cost_m, 0)
    # Outer product zeroes every cost entry except (true, predicted).
    sample_weights_nkk = cost_m_1kk * y_true_nk1 * y_pred_n1k
    sample_weights_n = K.sum(sample_weights_nkk, axis=[1, 2])
    return sample_weights_n
class WeightedCategoricalAccuracy(CategoricalAccuracy):
    """Categorical accuracy weighted by cost_mat[true, predicted] per sample."""
    def __init__(self, cost_mat, name='weighted_categorical_accuracy', **kwargs):
        assert(cost_mat.ndim == 2)
        assert(cost_mat.shape[0] == cost_mat.shape[1])  # must be square
        super().__init__(name=name, **kwargs)
        self.cost_mat = K.cast_to_floatx(cost_mat)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # NOTE(review): an explicitly passed `sample_weight` is ignored and
        # replaced by the cost-matrix weights — confirm that is intended.
        return super().update_state(
            y_true=y_true,
            y_pred=y_pred,
            sample_weight=get_sample_weights(y_true, y_pred, self.cost_mat),
        )
| StarcoderdataPython |
4801360 | <reponame>sapcc/nova<filename>nova/console/shellinaboxproxy.py
# Copyright (c) 2018 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.compute import rpcapi as compute_rpcapi
from nova import config
from nova.console.websocketproxy import NovaProxyRequestHandlerBase
from nova import context
from nova import exception
# File extensions treated as static assets; requests whose path contains one
# of these skip token validation (see NovaShellInaBoxProxy.response).
STATIC_FILES_EXT = ('.js', '.css', '.html', '.ico', '.png', '.gif')
class NovaShellInaBoxProxy(NovaProxyRequestHandlerBase):
    """Class that injects token validation routine into proxy logic.

    Acts as a proxy-framework addon: the `request`/`response` hooks below
    receive a `flow` object for each proxied HTTP exchange.
    """
    def __init__(self):
        # Compute RPC client is created lazily (see `compute_rpcapi`).
        self._compute_rpcapi = None

    @property
    def compute_rpcapi(self):
        # This is copied from NovaProxyRequestHandler, just to avoid
        # extending that class (because it inherits
        # websockify.ProxyRequestHandler in addition and we don't need that).
        # For upgrades we should have a look again if anything changed there,
        # that we might need to also include here.
        if not self._compute_rpcapi:
            self._compute_rpcapi = compute_rpcapi.ComputeAPI()
        return self._compute_rpcapi

    def path_includes_static_files(self):
        """Returns True if requested path includes static files."""
        # NOTE(review): substring match, so e.g. '.pngx' also matches; the
        # function implicitly returns None (falsy) when nothing matches.
        for extension in STATIC_FILES_EXT:
            if extension in self.path:
                return True

    def response(self, flow):
        """Validate the token and give 403 if not found or not valid."""
        if self.method == "GET" and not self.path_includes_static_files():
            if not self.token:
                # No token found
                flow.response.status_code = 403
                flow.response.content = b"No token provided."
            else:
                # Validate the token
                ctxt = context.get_admin_context()
                try:
                    super(NovaShellInaBoxProxy, self)._get_connect_info(
                        ctxt, self.token)
                except exception.InvalidToken:
                    # Token not valid
                    flow.response.status_code = 403
                    # NOTE(review): `content` is bytes in the branch above
                    # but str here — confirm the proxy accepts both.
                    flow.response.content = ("The token has expired "
                                             "or invalid.")

    def request(self, flow):
        """Save the token, method and path that came with request."""
        self.token = flow.request.query.get("token", "")
        self.method = flow.request.method
        self.path = flow.request.path
def start():
    """Entrypoint. Configures rpc first, otherwise cannot validate token.

    Returns the proxy instance whose request/response hooks will be called.
    """
    config.parse_args([])  # we need this to configure rpc
    return NovaShellInaBoxProxy()
| StarcoderdataPython |
1661124 | from typing import Dict
# The rest of the codebase uses rays everywhere.
# Only use these units for user facing interfaces.
# Each value is the number of rays in one unit of the denomination.
units: Dict[str, int] = {
    "venidium": 10 ** 12,  # 1 venidium (XVM) is 1,000,000,000,000 ray (1 trillion)
    "ray": 1,
    "colouredcoin": 10 ** 3,  # 1 coloured coin is 1000 colouredcoin rays
}
| StarcoderdataPython |
114739 | <filename>setup.py
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
# Directory containing this setup.py.
# NOTE(review): `here` is never used below — confirm it is leftover.
here = path.abspath(path.dirname(__file__))

setup(
    name='anime',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='1.2.0',
    description='Declarative animation library for pygame.',
    # The project's main homepage.
    url='https://github.com/SodaCookie/anime',
    # Author details
    author='<NAME>',
    author_email='<EMAIL>',
    # Choose your license
    license='MIT',
    include_package_data = True,
    # NOTE(review): package_data paths are relative to the named package;
    # confirm 'anime/images/image1.png' resolves inside 'anime.demo'.
    package_data={
        'anime.demo': ['anime/images/image1.png'],
    },
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: pygame',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3',
    ],
    # What does your project relate to?
    keywords='pygame animation anime',
    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['pygame']
)
3353061 | <reponame>mbonix/blpd<filename>blpd/blp.py
import blpapi as blp
import pandas as pd
from typing import Union
basestring = (str, bytes)  # py2-style alias for isinstance() string checks

# blpapi element/option names used when building requests and decoding
# responses.
SECURITY_DATA = blp.Name('securityData')
SECURITY = blp.Name('security')
FIELD_DATA = blp.Name('fieldData')
FIELD_EXCEPTIONS = blp.Name('fieldExceptions')
FIELD_ID = blp.Name('fieldId')
SECURITY_ERROR = blp.Name('securityError')
ERROR_INFO = blp.Name('errorInfo')
OVERRIDES = blp.Name('overrides')
CATEGORY = blp.Name('category')
MESSAGE = blp.Name('message')
SUBCATEGORY = blp.Name('subcategory')
def _formatSecurity(security: str, prefix: str) -> str:
""" Format a security in a valid Bloomberg syntax. """
prefixes = ['ticker', 'cusip', 'wpk', 'isin', 'buid', 'sedol1', 'sedol2',
'sicovam', 'common', 'bsid', 'svm', 'cins', 'cats', 'bbgid']
if prefix.lower() == 'ticker':
return(security)
else:
if prefix.lower() in prefixes:
return(f'/{prefix.lower()}/{security}')
else:
print('Topic prefix is not correct') # Raise error
return()
def _formatSecsList(securities: list, prefix: Union[str, list]) -> list:
    """Format a list of securities in valid Bloomberg topic syntax.

    `prefix` is either a single topic prefix applied to every security, or
    a list of prefixes matched element-wise with `securities`.

    Raises:
        ValueError: if `prefix` is a list whose length differs from
            `securities`.
    """
    if isinstance(prefix, basestring):
        return [_formatSecurity(sec, prefix) for sec in securities]
    if len(prefix) != len(securities):
        # Previously printed a message and returned a partial (empty) list;
        # raise instead (original "# Raise error" TODO).
        raise ValueError('Securities and prefixes length do not match')
    return [_formatSecurity(sec, pre) for sec, pre in zip(securities, prefix)]
class BLP():
    """ Implementation of the Request/Response Paradigm to mimick Excel API. """
    def __init__(self, host: str='localhost', port: int=8194,
                 verbose: bool=False, start: bool=True) -> None:
        """ Initialize a BLP session.

        host/port: Bloomberg API endpoint (defaults to the local bbcomm).
        verbose: print connection and request progress information.
        start: open the session immediately on construction.
        """
        self.active = False
        self.host = host
        self.port = port
        self.verbose = verbose
        if start is True:
            self.open()
def open(self) -> None:
    """ Start a BLP session.

    Connects to the configured endpoint, starts the session and opens the
    //blp/refdata service. No-op when the session is already active.
    """
    if self.active is False:
        sessionOptions = blp.SessionOptions()
        sessionOptions.setServerHost(self.host)
        sessionOptions.setServerPort(self.port)
        if self.verbose is True:
            print(f'Connecting to {self.host}:{self.port}.')
        self.session = blp.Session(sessionOptions)
        if self.session.start() is False:
            print('Failed to start session.') # Raise error
            return()
        if self.verbose is True:
            print('Starting session...')
        if self.session.openService('//blp/refdata') is False:
            print('Failed to open refdata service.') # Raise error
            return()
        if self.verbose is True:
            print('Opening refdata service...')
        self.refDataService = self.session.getService('//blp/refdata')
        self.active = True
def close(self) -> None:
    """ End a BLP session. No-op when the session is not active. """
    if self.active is True:
        self.session.stop()
        if self.verbose is True:
            print('Closing the session...')
        self.active = False
def _addSecurities(self) -> None:
    """ Add a list of securities to a request. """
    # Accept a single security string by wrapping it in a list.
    if isinstance(self.securities, basestring):
        self.securities = [self.securities]
    else:
        pass
    for sec in _formatSecsList(self.securities, self.prefix):
        self.request.append('securities', sec)
def _addFields(self) -> None:
    """ Add a list of fields to a request. """
    # Accept a single field string by wrapping it in a list.
    if isinstance(self.fields, basestring):
        self.fields = [self.fields]
    else:
        pass
    for fld in self.fields:
        self.request.append('fields', fld)
def _addDays(self) -> None:
    """ Add fill days options to a historical request.

    A = all calendar days, T = active (trading) days only,
    W = non-trading weekdays.
    """
    options = {'A': 'ALL_CALENDAR_DAYS',
               'T': 'ACTIVE_DAYS_ONLY',
               'W': 'NON_TRADING_WEEKDAYS'}
    try:
        self.request.set('nonTradingDayFillOption', options[self.days])
    except KeyError:
        print('Options are A / T / W')
def _addFill(self) -> None:
    """ Add fill method options to a historical request.

    N = nil value, P = carry previous value forward.
    """
    options = {'N': 'NIL_VALUE',
               'P': 'PREVIOUS_VALUE'}
    try:
        self.request.set('nonTradingDayFillMethod', options[self.fill])
    except KeyError:
        print('Options are N / P')
def _addPeriod(self) -> None:
    """ Add periodicity options to a historical request.

    `self.per` is a two-character code: first char selects the adjustment
    (A/C/F), second char the selection (D/M/Q/S/W/Y).
    """
    optionsAdj = {'A': 'ACTUAL',
                  'C': 'CALENDAR',
                  'F': 'FISCAL'}
    optionsSel = {'D': 'DAILY',
                  'M': 'MONTHLY',
                  'Q': 'QUARTERLY',
                  'S': 'SEMI_ANNUALLY',
                  'W': 'WEEKLY',
                  'Y': 'YEARLY'}
    try:
        self.request.set('periodicityAdjustment', optionsAdj[self.per[0]])
        self.request.set('periodicitySelection', optionsSel[self.per[1]])
    except KeyError:
        print('Options are A / C / F and D / M / Q / S / W / Y')
def _addQuoteType(self) -> None:
    """ Add quote type options to a historical request.

    P = price, Y = yield.
    """
    options = {'P': 'PRICING_OPTION_PRICE',
               'Y': 'PRICING_OPTION_YIELD'}
    try:
        self.request.set('pricingOption', options[self.qtTyp])
    except KeyError:
        print('Options are P / Y')
def _addQuote(self) -> None:
    """ Add quote options to a historical request.

    C = close, G = GPA (average price).
    """
    options = {'C': 'OVERRIDE_OPTION_CLOSE',
               'G': 'OVERRIDE_OPTION_GPA'}
    try:
        self.request.set('overrideOption', options[self.quote])
    except KeyError:
        print('Options are C / G')
def _addMandatoryOptions(self) -> None:
    """ Add mandatory options to a historical request.

    Applies the date format flag, delegates the remaining required settings
    to the dedicated helpers, and sets the DPDF adjustment flag.
    """
    self.request.set('returnRelativeDate', self.dtFmt)
    self._addDays()
    self._addFill()
    self._addPeriod()
    self._addQuoteType()
    self._addQuote()
    self.request.set('adjustmentFollowDPDF', self.useDPDF)
def _addFacultativeOptions(self) -> None:
    """ Add facultative options to a historical request.

    Each optional attribute is applied only when it was provided (not None).
    """
    optional_settings = (
        ('calendarCodeOverride', self.cdr),
        ('currency', self.fx),
        ('maxDataPoints', self.points),
        ('adjustmentAbnormal', self.cshAdjAbnormal),
        ('adjustmentSplit', self.capChg),
        ('adjustmentNormal', self.cshAdjNormal),
    )
    for option_name, option_value in optional_settings:
        if option_value is not None:
            self.request.set(option_name, option_value)
def _addOverrides(self) -> None:
    """ Manage request arguments.

    Copies the user-supplied overrides dict into the request's 'overrides'
    element as (fieldId, value) sub-elements. None means no overrides.
    """
    if self.overrides is None:
        pass
    elif isinstance(self.overrides, dict):
        overrides = self.request.getElement(OVERRIDES)
        oslist = []
        for key, value in self.overrides.items():
            oslist.append(overrides.appendElement())
            oslist[-1].setElement(FIELD_ID, key)
            oslist[-1].setElement('value', value)
    else:
        print('Overrides must be a dict') # Raise error
def bdp(self, securities: Union[str, list],
        fields: Union[str, list], prefix: Union[str, list]='ticker',
        overrides: dict=None, swap: bool=False, errors: bool=False) -> pd.DataFrame:
    """ Send a reference request to Bloomberg (mimicking Excel function
    BDP).

    securities/fields: single item or list of items to request.
    prefix: topic prefix(es) for the securities (default 'ticker').
    overrides: optional dict of Bloomberg field overrides.
    swap: when True, transpose the result (fields as rows).
    errors: when True, also return a DataFrame of field/security errors.
    """
    self.request = self.refDataService.createRequest('ReferenceDataRequest')
    self.securities = securities
    self.fields = fields
    self.prefix = prefix
    self.overrides = overrides
    self._addSecurities()
    self._addFields()
    self._addOverrides()
    if self.verbose is True:
        print(f'Sending request: {self.request}')
    cid = self.session.sendRequest(self.request)
    if self.verbose is True:
        print(f'Correlation ID is: {cid}')
    data = pd.DataFrame()
    exceptions = pd.DataFrame()
    # Consume events until the final RESPONSE event for this request.
    while(True):
        ev = self.session.nextEvent(500)
        for msg in ev:
            if cid in msg.correlationIds():
                securitiesData = msg.getElement(SECURITY_DATA)
                if self.verbose is True:
                    print(f'Securities data: {securitiesData}')
                for secData in securitiesData.values():
                    name = secData.getElementAsString(SECURITY)
                    fieldsData = secData.getElement(FIELD_DATA)
                    for field in fieldsData.elements():
                        # NOTE(review): errors='ignore' is deprecated in
                        # recent pandas; non-numeric values stay strings.
                        data.loc[name, str(field.name())] = \
                            pd.to_numeric(field.getValueAsString(),
                                          errors='ignore')
                    # Security-level error (e.g. unknown identifier).
                    if secData.hasElement(SECURITY_ERROR):
                        secError = secData.getElement(SECURITY_ERROR)
                        exceptions.loc[name, 'Field'] = None
                        exceptions.loc[name, 'Category'] = \
                            secError.getElementAsString(CATEGORY)
                        exceptions.loc[name, 'Subcategory'] = \
                            secError.getElementAsString(SUBCATEGORY)
                        exceptions.loc[name, 'Message'] = \
                            secError.getElementAsString(MESSAGE)
                    # Field-level errors (e.g. invalid field mnemonic).
                    fieldsException = secData.getElement(FIELD_EXCEPTIONS)
                    for fieldEx in fieldsException.values():
                        if fieldEx.hasElement(FIELD_ID):
                            fieldId = fieldEx.getElementAsString(FIELD_ID)
                            errorInfo = fieldEx.getElement(ERROR_INFO)
                            exceptions.loc[name, 'Field'] = fieldId
                            exceptions.loc[name, 'Category'] = \
                                errorInfo.getElementAsString(CATEGORY)
                            exceptions.loc[name, 'Subcategory'] = \
                                errorInfo.getElementAsString(SUBCATEGORY)
                            exceptions.loc[name, 'Message'] = \
                                errorInfo.getElementAsString(MESSAGE)
        if ev.eventType() == blp.Event.RESPONSE:
            break
    if swap is False:
        if errors is False:
            return(data)
        else:
            return(data, exceptions)
    else:
        if errors is False:
            return(data.T)
        else:
            return(data.T, exceptions)
def bdh(self, securities: Union[str, list],
        fields: Union[str, list], startDate: str, endDate: str='',
        prefix: Union[str, list]='ticker', cdr: str=None, fx: str=None,
        dtFmt: bool=False, days: str='W', fill: str='P', per: str='CD',
        points: int=None, qtTyp: str='Y', quote: str='C', useDPDF: bool=True,
        cshAdjAbnormal: bool=None, capChg: bool=None, cshAdjNormal: bool=None,
        overrides: dict=None, swap: bool=False, errors: bool=False) -> pd.DataFrame:
    """ Send a historical request to Bloomberg (mimicking Excel function
    BDH).

    startDate/endDate: request window (Bloomberg date strings).
    cdr/fx/points/cshAdjAbnormal/capChg/cshAdjNormal: facultative options,
        applied only when not None (see _addFacultativeOptions).
    days/fill/per/qtTyp/quote/dtFmt/useDPDF: mandatory options (see the
        _add* helpers for the accepted codes).
    swap: when True, swap the two column levels (field above security).
    errors: when True, also return a DataFrame of field/security errors.

    Returns a DataFrame indexed by date with a (security, field)
    MultiIndex on the columns.
    """
    self.request = self.refDataService.createRequest('HistoricalDataRequest')
    self.securities = securities
    self.fields = fields
    self.startDate = startDate
    self.endDate = endDate
    self.prefix = prefix
    self.cdr = cdr
    self.fx = fx
    self.dtFmt = dtFmt
    self.days = days
    self.fill = fill
    self.per = per
    self.points = points
    self.qtTyp = qtTyp
    self.quote = quote
    self.useDPDF = useDPDF
    self.cshAdjAbnormal = cshAdjAbnormal
    self.capChg = capChg
    self.cshAdjNormal = cshAdjNormal
    self.overrides = overrides
    self._addSecurities()
    self._addFields()
    self.request.set('startDate', self.startDate)
    self.request.set('endDate', self.endDate)
    self._addMandatoryOptions()
    self._addFacultativeOptions()
    self._addOverrides()
    if self.verbose is True:
        print(f'Sending request: {self.request}')
    cid = self.session.sendRequest(self.request)
    if self.verbose is True:
        print(f'Correlation ID is: {cid}')
    datadict = {}
    exceptions = pd.DataFrame()
    # Consume events until the final RESPONSE event; each message carries
    # the history of one security.
    while(True):
        ev = self.session.nextEvent(500)
        for msg in ev:
            if cid in msg.correlationIds():
                secData = msg.getElement(SECURITY_DATA)
                if self.verbose is True:
                    print(f'Securities data: {secData}')
                name = secData.getElementAsString(SECURITY)
                # Security-level error (e.g. unknown identifier).
                if secData.hasElement(SECURITY_ERROR):
                    secError = secData.getElement(SECURITY_ERROR)
                    exceptions.loc[name, 'Field'] = None
                    exceptions.loc[name, 'Category'] = \
                        secError.getElementAsString(CATEGORY)
                    exceptions.loc[name, 'Subcategory'] = \
                        secError.getElementAsString(SUBCATEGORY)
                    exceptions.loc[name, 'Message'] = \
                        secError.getElementAsString(MESSAGE)
                df = pd.DataFrame()
                fieldsData = secData.getElement(FIELD_DATA)
                # Field-level errors (e.g. invalid field mnemonic).
                fieldsException = secData.getElement(FIELD_EXCEPTIONS)
                for fieldEx in fieldsException.values():
                    if fieldEx.hasElement(FIELD_ID):
                        fieldId = fieldEx.getElementAsString(FIELD_ID)
                        errorInfo = fieldEx.getElement(ERROR_INFO)
                        exceptions.loc[name, 'Field'] = fieldId
                        exceptions.loc[name, 'Category'] = \
                            errorInfo.getElementAsString(CATEGORY)
                        exceptions.loc[name, 'Subcategory'] = \
                            errorInfo.getElementAsString(SUBCATEGORY)
                        exceptions.loc[name, 'Message'] = \
                            errorInfo.getElementAsString(MESSAGE)
                # Each fData holds one date's values; the 'date' element
                # arrives before the field values it indexes.
                for fData in fieldsData.values():
                    for field in fData.elements():
                        if str(field.name()) == 'date':
                            date = pd.to_datetime(field.getValueAsString(),
                                                  format='%Y-%m-%d')
                        else:
                            df.loc[date, str(field.name())] = \
                                pd.to_numeric(field.getValueAsString(),
                                              errors='ignore')
                datadict[name] = df
        if ev.eventType() == blp.Event.RESPONSE:
            break
    # Concatenate per-security frames into (security, field) columns.
    data = pd.concat(datadict.values(), keys=datadict.keys(), axis=1)
    if swap is False:
        if errors is False:
            return(data)
        else:
            return(data, exceptions)
    else:
        if errors is False:
            return(data.swaplevel(axis=1))
        else:
            return(data.swaplevel(axis=1), exceptions)
| StarcoderdataPython |
1684443 | from collections import namedtuple
import pytest
from ludwig.models.ecd import build_inputs
from tests.integration_tests.utils import category_feature
from tests.integration_tests.utils import generate_data
from tests.integration_tests.utils import numerical_feature
from tests.integration_tests.utils import run_experiment
from tests.integration_tests.utils import sequence_feature
from tests.integration_tests.utils import text_feature
# InputFeatureOptions namedtuple structure:
# feature_type: input feature type, e.g., numerical, category, etc.
# feature_options: None or dictionary of required input feature specification
# tie_features: boolean, True to tie features, False not to tie features
InputFeatureOptions = namedtuple('InputFeatureOptions',
                                 'feature_type feature_options tie_features')
# micro level test confirms the encoders for tied input features are sharing
# the same encoder. Include negative tests to confirm untied input features
# do not share the same encoder.
# note: vocab parameter, below, is made up to facilitate creating input encoders
@pytest.mark.parametrize(
    'input_feature_options',
    [
        # tie input features, encoders should be the same
        InputFeatureOptions('numerical', None, True),
        InputFeatureOptions(
            'numerical',
            {'preprocessing': {'normalization': 'zscore'}},
            True
        ),
        InputFeatureOptions('binary', None, True),
        InputFeatureOptions('category', {'vocab': ['a', 'b', 'c']}, True),
        InputFeatureOptions('set', {'vocab': ['a', 'b', 'c']}, True),
        InputFeatureOptions(
            'sequence', {'max_sequence_length': 10, 'vocab': ['x', 'y', 'z']}, True),
        InputFeatureOptions(
            'text', {'max_sequence_length': 10, 'vocab': ['a', 'b', 'c']}, True),
        InputFeatureOptions(
            'timeseries', {'max_sequence_length': 10, 'should_embed': False}, True),
        InputFeatureOptions(
            'audio',
            {'embedding_size': 64, 'max_sequence_length': 16,
             'should_embed': False},
            True
        ),
        # do not tie input features, encoders should be different
        InputFeatureOptions('numerical', None, False),
        InputFeatureOptions(
            'numerical',
            {'preprocessing': {'normalization': 'zscore'}},
            False
        ),
        InputFeatureOptions('binary', None, False),
        InputFeatureOptions('category', {'vocab': ['a', 'b', 'c']}, False),
        InputFeatureOptions('set', {'vocab': ['a', 'b', 'c']}, False),
        InputFeatureOptions(
            'sequence', {'max_sequence_length': 10, 'vocab': ['x', 'y', 'z']}, False),
        InputFeatureOptions(
            'text', {'max_sequence_length': 10, 'vocab': ['a', 'b', 'c']}, False),
        InputFeatureOptions(
            'timeseries', {'max_sequence_length': 10, 'should_embed': False}, False),
        InputFeatureOptions(
            'audio',
            {'embedding_size': 64, 'max_sequence_length': 16,
             'should_embed': False},
            False
        ),
    ]
)
def test_tied_micro_level(input_feature_options):
    """Two identical input features share one encoder iff `tied` is set."""
    # build input feature config
    input_feature_configs = []
    input_feature_configs.append({
        'name': 'input_feature_1',
        'type': input_feature_options.feature_type
    })
    if input_feature_options.feature_options is not None:
        input_feature_configs[0].update(
            input_feature_options.feature_options)
    input_feature_configs.append({
        'name': 'input_feature_2',
        'type': input_feature_options.feature_type
    })
    if input_feature_options.feature_options is not None:
        input_feature_configs[1].update(
            input_feature_options.feature_options)
    # add tied option to the second feature
    if input_feature_options.tie_features:
        input_feature_configs[1]['tied'] = 'input_feature_1'
    input_features = build_inputs(input_feature_configs)
    if input_feature_options.tie_features:
        # should be same encoder (identity, not mere equality)
        assert input_features['input_feature_1'].encoder_obj is \
            input_features['input_feature_2'].encoder_obj
    else:
        # no tied parameter, encoders should be different
        assert input_features['input_feature_1'].encoder_obj is not \
            input_features['input_feature_2'].encoder_obj
# TiedUseCase namedtuple structure:
# input_feature: Ludwig synthetic data creation function.
# output_feature: Ludwig synthetic data creation function
TiedUseCase = namedtuple('TiedUseCase', 'input_feature output_feature')
# Macro level test ensures no exceptions are raised during a full_experiment()
@pytest.mark.parametrize(
    'tied_use_case',
    [
        TiedUseCase(numerical_feature, numerical_feature),
        TiedUseCase(text_feature, category_feature),
        # TODO(#1333): Re-enable.
        # TiedUseCase(sequence_feature, sequence_feature)
    ]
)
def test_tied_macro_level(tied_use_case: TiedUseCase, csv_filename: str):
    """End-to-end check: tying two features must not break a full experiment."""
    input_features = [
        numerical_feature(),  # Other feature
        tied_use_case.input_feature(),  # first feature to be tied
        tied_use_case.input_feature(),  # second feature to be tied
        category_feature()  # other feature
    ]
    # tie second feature to first feature
    input_features[2]['tied'] = input_features[1]['name']
    # setup output feature
    output_features = [
        tied_use_case.output_feature()
    ]
    # Generate test data and run full_experiment
    rel_path = generate_data(input_features, output_features, csv_filename)
    run_experiment(input_features, output_features, dataset=rel_path)
| StarcoderdataPython |
192626 | from distutils.core import setup
setup(
    name='CombinedOneClass',
    version='0.1dev',
    packages=['oneclass','oneclass.generators'],
    license='MIT License',
    # NOTE(review): file handle from open() is never closed; harmless in a
    # short-lived setup.py but worth tidying.
    long_description=open('README.md').read(),
)
17270 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-03 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add Book.updated_at and make Book.name unique (auto-generated)."""

    dependencies = [
        ('book', '0009_book_folder'),
    ]

    operations = [
        # New auto-updating timestamp column.
        migrations.AddField(
            model_name='book',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        # Enforce uniqueness on book names.
        migrations.AlterField(
            model_name='book',
            name='name',
            field=models.CharField(max_length=400, unique=True),
        ),
    ]
| StarcoderdataPython |
3224759 | <reponame>ArrowElectronics/Vital-Signs-Monitoring
from ctypes import *
from common_application_interface_def import *
from m2m2_core_def import *
class M2M2_DISPLAY_APP_CMD_ENUM_t(c_ubyte):
    """Display-application command IDs (request/response pairs)."""
    _M2M2_DISPLAY_APP_CMD_LOWEST = 0x40  # lower bound of this command range
    M2M2_DISPLAY_APP_CMD_SET_DISPLAY_REQ = 0x42
    M2M2_DISPLAY_APP_CMD_SET_DISPLAY_RESP = 0x43
    M2M2_DISPLAY_APP_CMD_BACKLIGHT_CNTRL_REQ = 0x44
    M2M2_DISPLAY_APP_CMD_BACKLIGHT_CNTRL_RESP = 0x45
    M2M2_DISPLAY_APP_CMD_KEY_TEST_REQ = 0x46
    M2M2_DISPLAY_APP_CMD_KEY_TEST_RESP = 0x47
    M2M2_DISPLAY_APP_CMD_KEY_STREAM_DATA = 0x48
class M2M2_DISPLAY_SET_COMMAND_ENUM_t(c_ubyte):
    """Colour values accepted by the set-display command."""
    M2M2_DISPLAY_SET_WHITE = 0x0
    M2M2_DISPLAY_SET_BLACK = 0x1
    M2M2_DISPLAY_SET_RED = 0x2
    M2M2_DISPLAY_SET_GREEN = 0x3
    M2M2_DISPLAY_SET_BLUE = 0x4
class M2M2_BACKLIGHT_CNTRL_COMMAND_ENUM_t(c_ubyte):
    """Backlight on/off control values."""
    M2M2_BACKLIGHT_CNTRL_OFF = 0x0
    M2M2_BACKLIGHT_CNTRL_ON = 0x1
class M2M2_KEY_TEST_COMMAND_ENUM_t(c_ubyte):
    """Button identifiers used by the key-test command."""
    M2M2_KEY_TEST_SELECT_BUTTON = 0x0
    M2M2_KEY_TEST_NAVIGATE_BUTTON = 0x1
class m2m2_display_set_command_t(Structure):
    """Packed wire payload for the set-display command."""
    _pack_ = 1  # byte-aligned, no padding between fields
    _fields_ = [
        ("command", c_ubyte),
        ("status", c_ubyte),
        ("colour", c_ubyte),  # one of M2M2_DISPLAY_SET_COMMAND_ENUM_t
    ]
class m2m2_backlight_cntrl_command_t(Structure):
    """Packed wire payload for the backlight-control command."""
    _pack_ = 1  # byte-aligned, no padding between fields
    _fields_ = [
        ("command", c_ubyte),
        ("status", c_ubyte),
        ("control", c_ubyte),  # one of M2M2_BACKLIGHT_CNTRL_COMMAND_ENUM_t
    ]
class m2m2_key_test_command_t(Structure):
    """Packed wire payload for the key-test enable/disable command."""
    _pack_ = 1  # byte-aligned, no padding between fields
    _fields_ = [
        ("command", c_ubyte),
        ("status", c_ubyte),
        ("enable", c_ubyte),
    ]
class m2m2_pm_sys_key_test_data_t(Structure):
    """Packed wire payload carrying a streamed key-press value."""
    _pack_ = 1  # byte-aligned, no padding between fields
    _fields_ = [
        ("command", c_ubyte),
        ("status", c_ubyte),
        ("key_value", c_ubyte),
    ]
| StarcoderdataPython |
4808783 | <filename>examples/images.py
#!/usr/bin/env python
import visvis as vv
# NOTE(review): this example uses the Python 2 print statement below, so it
# will not run under Python 3 without changes.
app = vv.use()  # pick a GUI backend and return its app wrapper
im = vv.imread('lena.png')
im = im[:-1,:-1] # make not-power-of-two (to test if video driver is capable)
print im.shape
t = vv.imshow(im)
t.aa = 2 # more anti-aliasing (default=1)
t.interpolate = True # interpolate pixels
app.Run()  # enter the GUI event loop
| StarcoderdataPython |
3345831 | <reponame>peterbe/govspy<filename>snippets/range/range.py
# Print a numbered list of names, starting at 1.
names = ["Peter", "Anders", "Bengt"]
for ordinal, name in enumerate(names, start=1):
    print("{}. {}".format(ordinal, name))
| StarcoderdataPython |
1603879 | from multiprocessing import Process
import os
import time
# git remote set-url origin https://mgrecu35@github.com/mgrecu35/cmbv7.git
def info(title):
    """Print the given title plus module name and parent/child PIDs.

    Debug helper for inspecting multiprocessing workers.
    """
    print(title)
    print('module name:', __name__)
    print('parent process:', os.getppid())
    print('process id:', os.getpid())
def fsh(fname):
    """Run one combAlg.exe job (via bpsh) for the given input granule file."""
    # Last four dot-separated pieces of the input file name; reused to build
    # the output file name and the log suffix.
    cmb1=fname.split('.')[-4:]
    cmb1out="out/cmb."+cmb1[0]+"."+cmb1[1]+"."+cmb1[3]
    # NOTE(review): shell command built by string interpolation — acceptable
    # for trusted glob results, not safe for arbitrary file names.
    cmd='bpsh 245 ./combAlg.exe %s %s>&out/out.%s'%(fname,cmb1out,cmb1[1])
    print(cmd)
    os.system(cmd)
    time.sleep(1)
import glob
if __name__ == '__main__':
    #
    # Launch one combAlg job per granule file found for the day, in parallel,
    # then wait for them all and report elapsed times.
    for iday in range(1,2):
        # NOTE(review): overrides the loop variable; redundant with
        # range(1, 2) — looks like leftover debugging, confirm.
        iday=1
        fs=glob.glob("/gpmdata/2018/08/%2.2i/radar/2A.GPM.DPR.V8*"%iday)
        fs=sorted(fs)
        jobs=[]
        t1=time.time()
        if iday==1:
            t11=t1  # remember the very first start time
        for f in fs:
            p = Process(target=fsh, args=(f,))
            jobs.append(p)
            p.start()
        for j in jobs:
            j.join()  # wait for every job of this day to finish
        print('all done')
        print(time.time()-t1)   # elapsed for this day
        print(time.time()-t11)  # elapsed since the first day started
| StarcoderdataPython |
3399871 | """
Tighten the axis range to match data
"""
from typing import Dict
def tighten_panel_axis_range(params):
    # type: (Dict) -> Dict
    """Clamp each panel's x/y axis range to the extent of its data.

    For every local plot whose ``tight`` flag is set for an axis dimension,
    the corresponding axis object's ``set_xlim`` / ``set_ylim`` is called
    with the panel's recorded min/max values.

    Args:
        params (dict): plotting parameter dictionary

    Returns:
        same as input
    """
    axes_by_panel = params['internal']['canvas']['axes']
    for local_params in params['local'].values():
        target_panel = local_params['which_panel']
        axis_obj = axes_by_panel[target_panel]
        tight_flags = local_params['axis']['range']['tight']
        for dimension in ('x', 'y'):
            if tight_flags[dimension] is not True:
                continue
            panel_minmax = params['internal']['panel']['minmax']
            if target_panel in panel_minmax:
                setter = getattr(axis_obj, "set_{}lim".format(dimension))
                setter(*panel_minmax[target_panel][dimension])
    return params
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.